]> git.ipfire.org Git - thirdparty/grsecurity-scrape.git/blame - test/grsecurity-2.9.1-3.10.1-201307141150.patch
Auto commit, 1 new patch{es}.
[thirdparty/grsecurity-scrape.git] / test / grsecurity-2.9.1-3.10.1-201307141150.patch
CommitLineData
88fe2261
PK
1diff --git a/Documentation/dontdiff b/Documentation/dontdiff
2index b89a739..79768fb 100644
3--- a/Documentation/dontdiff
4+++ b/Documentation/dontdiff
5@@ -2,9 +2,11 @@
6 *.aux
7 *.bin
8 *.bz2
9+*.c.[012]*.*
10 *.cis
11 *.cpio
12 *.csp
13+*.dbg
14 *.dsp
15 *.dvi
16 *.elf
17@@ -14,6 +16,7 @@
18 *.gcov
19 *.gen.S
20 *.gif
21+*.gmo
22 *.grep
23 *.grp
24 *.gz
25@@ -48,14 +51,17 @@
26 *.tab.h
27 *.tex
28 *.ver
29+*.vim
30 *.xml
31 *.xz
32 *_MODULES
33+*_reg_safe.h
34 *_vga16.c
35 *~
36 \#*#
37 *.9
38-.*
39+.[^g]*
40+.gen*
41 .*.d
42 .mm
43 53c700_d.h
44@@ -69,9 +75,11 @@ Image
45 Module.markers
46 Module.symvers
47 PENDING
48+PERF*
49 SCCS
50 System.map*
51 TAGS
52+TRACEEVENT-CFLAGS
53 aconf
54 af_names.h
55 aic7*reg.h*
56@@ -80,6 +88,7 @@ aic7*seq.h*
57 aicasm
58 aicdb.h*
59 altivec*.c
60+ashldi3.S
61 asm-offsets.h
62 asm_offsets.h
63 autoconf.h*
64@@ -92,19 +101,24 @@ bounds.h
65 bsetup
66 btfixupprep
67 build
68+builtin-policy.h
69 bvmlinux
70 bzImage*
71 capability_names.h
72 capflags.c
73 classlist.h*
74+clut_vga16.c
75+common-cmds.h
76 comp*.log
77 compile.h*
78 conf
79 config
80 config-*
81 config_data.h*
82+config.c
83 config.mak
84 config.mak.autogen
85+config.tmp
86 conmakehash
87 consolemap_deftbl.c*
88 cpustr.h
89@@ -115,9 +129,11 @@ devlist.h*
90 dnotify_test
91 docproc
92 dslm
93+dtc-lexer.lex.c
94 elf2ecoff
95 elfconfig.h*
96 evergreen_reg_safe.h
97+exception_policy.conf
98 fixdep
99 flask.h
100 fore200e_mkfirm
101@@ -125,12 +141,15 @@ fore200e_pca_fw.c*
102 gconf
103 gconf.glade.h
104 gen-devlist
105+gen-kdb_cmds.c
106 gen_crc32table
107 gen_init_cpio
108 generated
109 genheaders
110 genksyms
111 *_gray256.c
112+hash
113+hid-example
114 hpet_example
115 hugepage-mmap
116 hugepage-shm
117@@ -145,14 +164,14 @@ int32.c
118 int4.c
119 int8.c
120 kallsyms
121-kconfig
122+kern_constants.h
123 keywords.c
124 ksym.c*
125 ksym.h*
126 kxgettext
127 lex.c
128 lex.*.c
129-linux
130+lib1funcs.S
131 logo_*.c
132 logo_*_clut224.c
133 logo_*_mono.c
134@@ -162,14 +181,15 @@ mach-types.h
135 machtypes.h
136 map
137 map_hugetlb
138-media
139 mconf
140+mdp
141 miboot*
142 mk_elfconfig
143 mkboot
144 mkbugboot
145 mkcpustr
146 mkdep
147+mkpiggy
148 mkprep
149 mkregtable
150 mktables
151@@ -185,6 +205,8 @@ oui.c*
152 page-types
153 parse.c
154 parse.h
155+parse-events*
156+pasyms.h
157 patches*
158 pca200e.bin
159 pca200e_ecd.bin2
160@@ -194,6 +216,7 @@ perf-archive
161 piggyback
162 piggy.gzip
163 piggy.S
164+pmu-*
165 pnmtologo
166 ppc_defs.h*
167 pss_boot.h
168@@ -203,7 +226,10 @@ r200_reg_safe.h
169 r300_reg_safe.h
170 r420_reg_safe.h
171 r600_reg_safe.h
172+realmode.lds
173+realmode.relocs
174 recordmcount
175+regdb.c
176 relocs
177 rlim_names.h
178 rn50_reg_safe.h
179@@ -213,8 +239,12 @@ series
180 setup
181 setup.bin
182 setup.elf
183+signing_key*
184+size_overflow_hash.h
185 sImage
186+slabinfo
187 sm_tbl*
188+sortextable
189 split-include
190 syscalltab.h
191 tables.c
192@@ -224,6 +254,7 @@ tftpboot.img
193 timeconst.h
194 times.h*
195 trix_boot.h
196+user_constants.h
197 utsrelease.h*
198 vdso-syms.lds
199 vdso.lds
200@@ -235,13 +266,17 @@ vdso32.lds
201 vdso32.so.dbg
202 vdso64.lds
203 vdso64.so.dbg
204+vdsox32.lds
205+vdsox32-syms.lds
206 version.h*
207 vmImage
208 vmlinux
209 vmlinux-*
210 vmlinux.aout
211 vmlinux.bin.all
212+vmlinux.bin.bz2
213 vmlinux.lds
214+vmlinux.relocs
215 vmlinuz
216 voffset.h
217 vsyscall.lds
218@@ -249,9 +284,12 @@ vsyscall_32.lds
219 wanxlfw.inc
220 uImage
221 unifdef
222+utsrelease.h
223 wakeup.bin
224 wakeup.elf
225 wakeup.lds
226+x509*
227 zImage*
228 zconf.hash.c
229+zconf.lex.c
230 zoffset.h
231diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
232index 2fe6e76..3dd8184 100644
233--- a/Documentation/kernel-parameters.txt
234+++ b/Documentation/kernel-parameters.txt
235@@ -976,6 +976,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
236 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
237 Default: 1024
238
239+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
240+ ignore grsecurity's /proc restrictions
241+
242+
243 hashdist= [KNL,NUMA] Large hashes allocated during boot
244 are distributed across NUMA nodes. Defaults on
245 for 64-bit NUMA, off otherwise.
246@@ -2195,6 +2199,18 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
247 the specified number of seconds. This is to be used if
248 your oopses keep scrolling off the screen.
249
250+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
251+ virtualization environments that don't cope well with the
252+ expand down segment used by UDEREF on X86-32 or the frequent
253+ page table updates on X86-64.
254+
255+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
256+
257+ pax_extra_latent_entropy
258+ Enable a very simple form of latent entropy extraction
259+ from the first 4GB of memory as the bootmem allocator
260+ passes the memory pages to the buddy allocator.
261+
262 pcbit= [HW,ISDN]
263
264 pcd. [PARIDE]
265diff --git a/Makefile b/Makefile
266index b75cc30..6abd111 100644
267--- a/Makefile
268+++ b/Makefile
269@@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \
270
271 HOSTCC = gcc
272 HOSTCXX = g++
273-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
274-HOSTCXXFLAGS = -O2
275+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
276+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
277+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks
278
279 # Decide whether to build built-in, modular, or both.
280 # Normally, just do built-in.
281@@ -414,8 +415,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
282 # Rules shared between *config targets and build targets
283
284 # Basic helpers built in scripts/
285-PHONY += scripts_basic
286-scripts_basic:
287+PHONY += scripts_basic gcc-plugins
288+scripts_basic: gcc-plugins
289 $(Q)$(MAKE) $(build)=scripts/basic
290 $(Q)rm -f .tmp_quiet_recordmcount
291
292@@ -576,6 +577,65 @@ else
293 KBUILD_CFLAGS += -O2
294 endif
295
296+ifndef DISABLE_PAX_PLUGINS
297+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
298+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
299+else
300+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
301+endif
302+ifneq ($(PLUGINCC),)
303+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
304+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
305+endif
306+ifdef CONFIG_PAX_MEMORY_STACKLEAK
307+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
308+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
309+endif
310+ifdef CONFIG_KALLOCSTAT_PLUGIN
311+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
312+endif
313+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
314+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
315+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
316+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
317+endif
318+ifdef CONFIG_CHECKER_PLUGIN
319+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
320+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
321+endif
322+endif
323+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
324+ifdef CONFIG_PAX_SIZE_OVERFLOW
325+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
326+endif
327+ifdef CONFIG_PAX_LATENT_ENTROPY
328+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
329+endif
330+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
331+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
332+endif
333+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
334+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
335+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
336+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
337+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN
338+ifeq ($(KBUILD_EXTMOD),)
339+gcc-plugins:
340+ $(Q)$(MAKE) $(build)=tools/gcc
341+else
342+gcc-plugins: ;
343+endif
344+else
345+gcc-plugins:
346+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
347+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.))
348+else
349+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
350+endif
351+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
352+endif
353+endif
354+
355 include $(srctree)/arch/$(SRCARCH)/Makefile
356
357 ifdef CONFIG_READABLE_ASM
358@@ -733,7 +793,7 @@ export mod_sign_cmd
359
360
361 ifeq ($(KBUILD_EXTMOD),)
362-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
363+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/
364
365 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
366 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
367@@ -782,6 +842,8 @@ endif
368
369 # The actual objects are generated when descending,
370 # make sure no implicit rule kicks in
371+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
372+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
373 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
374
375 # Handle descending into subdirectories listed in $(vmlinux-dirs)
376@@ -791,7 +853,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
377 # Error messages still appears in the original language
378
379 PHONY += $(vmlinux-dirs)
380-$(vmlinux-dirs): prepare scripts
381+$(vmlinux-dirs): gcc-plugins prepare scripts
382 $(Q)$(MAKE) $(build)=$@
383
384 # Store (new) KERNELRELASE string in include/config/kernel.release
385@@ -835,6 +897,7 @@ prepare0: archprepare FORCE
386 $(Q)$(MAKE) $(build)=.
387
388 # All the preparing..
389+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
390 prepare: prepare0
391
392 # Generate some files
393@@ -942,6 +1005,8 @@ all: modules
394 # using awk while concatenating to the final file.
395
396 PHONY += modules
397+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
398+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
399 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
400 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
401 @$(kecho) ' Building modules, stage 2.';
402@@ -957,7 +1022,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)
403
404 # Target to prepare building external modules
405 PHONY += modules_prepare
406-modules_prepare: prepare scripts
407+modules_prepare: gcc-plugins prepare scripts
408
409 # Target to install modules
410 PHONY += modules_install
411@@ -1023,7 +1088,7 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
412 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
413 signing_key.priv signing_key.x509 x509.genkey \
414 extra_certificates signing_key.x509.keyid \
415- signing_key.x509.signer
416+ signing_key.x509.signer tools/gcc/size_overflow_hash.h
417
418 # clean - Delete most, but leave enough to build external modules
419 #
420@@ -1063,6 +1128,7 @@ distclean: mrproper
421 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
422 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
423 -o -name '.*.rej' \
424+ -o -name '.*.rej' -o -name '*.so' \
425 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
426 -type f -print | xargs rm -f
427
428@@ -1223,6 +1289,8 @@ PHONY += $(module-dirs) modules
429 $(module-dirs): crmodverdir $(objtree)/Module.symvers
430 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)
431
432+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
433+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
434 modules: $(module-dirs)
435 @$(kecho) ' Building modules, stage 2.';
436 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
437@@ -1359,17 +1427,21 @@ else
438 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
439 endif
440
441-%.s: %.c prepare scripts FORCE
442+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
443+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
444+%.s: %.c gcc-plugins prepare scripts FORCE
445 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
446 %.i: %.c prepare scripts FORCE
447 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
448-%.o: %.c prepare scripts FORCE
449+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
450+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
451+%.o: %.c gcc-plugins prepare scripts FORCE
452 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
453 %.lst: %.c prepare scripts FORCE
454 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
455-%.s: %.S prepare scripts FORCE
456+%.s: %.S gcc-plugins prepare scripts FORCE
457 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
458-%.o: %.S prepare scripts FORCE
459+%.o: %.S gcc-plugins prepare scripts FORCE
460 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
461 %.symtypes: %.c prepare scripts FORCE
462 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
463@@ -1379,11 +1451,15 @@ endif
464 $(cmd_crmodverdir)
465 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
466 $(build)=$(build-dir)
467-%/: prepare scripts FORCE
468+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
469+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
470+%/: gcc-plugins prepare scripts FORCE
471 $(cmd_crmodverdir)
472 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
473 $(build)=$(build-dir)
474-%.ko: prepare scripts FORCE
475+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
476+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
477+%.ko: gcc-plugins prepare scripts FORCE
478 $(cmd_crmodverdir)
479 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
480 $(build)=$(build-dir) $(@:.ko=.o)
481diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
482index c2cbe4f..f7264b4 100644
483--- a/arch/alpha/include/asm/atomic.h
484+++ b/arch/alpha/include/asm/atomic.h
485@@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
486 #define atomic_dec(v) atomic_sub(1,(v))
487 #define atomic64_dec(v) atomic64_sub(1,(v))
488
489+#define atomic64_read_unchecked(v) atomic64_read(v)
490+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
491+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
492+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
493+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
494+#define atomic64_inc_unchecked(v) atomic64_inc(v)
495+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
496+#define atomic64_dec_unchecked(v) atomic64_dec(v)
497+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
498+
499 #define smp_mb__before_atomic_dec() smp_mb()
500 #define smp_mb__after_atomic_dec() smp_mb()
501 #define smp_mb__before_atomic_inc() smp_mb()
502diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
503index ad368a9..fbe0f25 100644
504--- a/arch/alpha/include/asm/cache.h
505+++ b/arch/alpha/include/asm/cache.h
506@@ -4,19 +4,19 @@
507 #ifndef __ARCH_ALPHA_CACHE_H
508 #define __ARCH_ALPHA_CACHE_H
509
510+#include <linux/const.h>
511
512 /* Bytes per L1 (data) cache line. */
513 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
514-# define L1_CACHE_BYTES 64
515 # define L1_CACHE_SHIFT 6
516 #else
517 /* Both EV4 and EV5 are write-through, read-allocate,
518 direct-mapped, physical.
519 */
520-# define L1_CACHE_BYTES 32
521 # define L1_CACHE_SHIFT 5
522 #endif
523
524+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
525 #define SMP_CACHE_BYTES L1_CACHE_BYTES
526
527 #endif
528diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
529index 968d999..d36b2df 100644
530--- a/arch/alpha/include/asm/elf.h
531+++ b/arch/alpha/include/asm/elf.h
532@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
533
534 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)
535
536+#ifdef CONFIG_PAX_ASLR
537+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
538+
539+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
540+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
541+#endif
542+
543 /* $0 is set by ld.so to a pointer to a function which might be
544 registered using atexit. This provides a mean for the dynamic
545 linker to call DT_FINI functions for shared libraries that have
546diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
547index bc2a0da..8ad11ee 100644
548--- a/arch/alpha/include/asm/pgalloc.h
549+++ b/arch/alpha/include/asm/pgalloc.h
550@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
551 pgd_set(pgd, pmd);
552 }
553
554+static inline void
555+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
556+{
557+ pgd_populate(mm, pgd, pmd);
558+}
559+
560 extern pgd_t *pgd_alloc(struct mm_struct *mm);
561
562 static inline void
563diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
564index 81a4342..348b927 100644
565--- a/arch/alpha/include/asm/pgtable.h
566+++ b/arch/alpha/include/asm/pgtable.h
567@@ -102,6 +102,17 @@ struct vm_area_struct;
568 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
569 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
570 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
571+
572+#ifdef CONFIG_PAX_PAGEEXEC
573+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
574+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
575+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
576+#else
577+# define PAGE_SHARED_NOEXEC PAGE_SHARED
578+# define PAGE_COPY_NOEXEC PAGE_COPY
579+# define PAGE_READONLY_NOEXEC PAGE_READONLY
580+#endif
581+
582 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)
583
584 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
585diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
586index 2fd00b7..cfd5069 100644
587--- a/arch/alpha/kernel/module.c
588+++ b/arch/alpha/kernel/module.c
589@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
590
591 /* The small sections were sorted to the end of the segment.
592 The following should definitely cover them. */
593- gp = (u64)me->module_core + me->core_size - 0x8000;
594+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
595 got = sechdrs[me->arch.gotsecindex].sh_addr;
596
597 for (i = 0; i < n; i++) {
598diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
599index b9e37ad..44c24e7 100644
600--- a/arch/alpha/kernel/osf_sys.c
601+++ b/arch/alpha/kernel/osf_sys.c
602@@ -1297,10 +1297,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
603 generic version except that we know how to honor ADDR_LIMIT_32BIT. */
604
605 static unsigned long
606-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
607- unsigned long limit)
608+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
609+ unsigned long limit, unsigned long flags)
610 {
611 struct vm_unmapped_area_info info;
612+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
613
614 info.flags = 0;
615 info.length = len;
616@@ -1308,6 +1309,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
617 info.high_limit = limit;
618 info.align_mask = 0;
619 info.align_offset = 0;
620+ info.threadstack_offset = offset;
621 return vm_unmapped_area(&info);
622 }
623
624@@ -1340,20 +1342,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
625 merely specific addresses, but regions of memory -- perhaps
626 this feature should be incorporated into all ports? */
627
628+#ifdef CONFIG_PAX_RANDMMAP
629+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
630+#endif
631+
632 if (addr) {
633- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
634+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
635 if (addr != (unsigned long) -ENOMEM)
636 return addr;
637 }
638
639 /* Next, try allocating at TASK_UNMAPPED_BASE. */
640- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
641- len, limit);
642+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
643+
644 if (addr != (unsigned long) -ENOMEM)
645 return addr;
646
647 /* Finally, try allocating in low memory. */
648- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
649+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);
650
651 return addr;
652 }
653diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
654index 0c4132d..88f0d53 100644
655--- a/arch/alpha/mm/fault.c
656+++ b/arch/alpha/mm/fault.c
657@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
658 __reload_thread(pcb);
659 }
660
661+#ifdef CONFIG_PAX_PAGEEXEC
662+/*
663+ * PaX: decide what to do with offenders (regs->pc = fault address)
664+ *
665+ * returns 1 when task should be killed
666+ * 2 when patched PLT trampoline was detected
667+ * 3 when unpatched PLT trampoline was detected
668+ */
669+static int pax_handle_fetch_fault(struct pt_regs *regs)
670+{
671+
672+#ifdef CONFIG_PAX_EMUPLT
673+ int err;
674+
675+ do { /* PaX: patched PLT emulation #1 */
676+ unsigned int ldah, ldq, jmp;
677+
678+ err = get_user(ldah, (unsigned int *)regs->pc);
679+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
680+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
681+
682+ if (err)
683+ break;
684+
685+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
686+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
687+ jmp == 0x6BFB0000U)
688+ {
689+ unsigned long r27, addr;
690+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
691+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
692+
693+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
694+ err = get_user(r27, (unsigned long *)addr);
695+ if (err)
696+ break;
697+
698+ regs->r27 = r27;
699+ regs->pc = r27;
700+ return 2;
701+ }
702+ } while (0);
703+
704+ do { /* PaX: patched PLT emulation #2 */
705+ unsigned int ldah, lda, br;
706+
707+ err = get_user(ldah, (unsigned int *)regs->pc);
708+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
709+ err |= get_user(br, (unsigned int *)(regs->pc+8));
710+
711+ if (err)
712+ break;
713+
714+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
715+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
716+ (br & 0xFFE00000U) == 0xC3E00000U)
717+ {
718+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
719+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
720+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
721+
722+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
723+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
724+ return 2;
725+ }
726+ } while (0);
727+
728+ do { /* PaX: unpatched PLT emulation */
729+ unsigned int br;
730+
731+ err = get_user(br, (unsigned int *)regs->pc);
732+
733+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
734+ unsigned int br2, ldq, nop, jmp;
735+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
736+
737+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
738+ err = get_user(br2, (unsigned int *)addr);
739+ err |= get_user(ldq, (unsigned int *)(addr+4));
740+ err |= get_user(nop, (unsigned int *)(addr+8));
741+ err |= get_user(jmp, (unsigned int *)(addr+12));
742+ err |= get_user(resolver, (unsigned long *)(addr+16));
743+
744+ if (err)
745+ break;
746+
747+ if (br2 == 0xC3600000U &&
748+ ldq == 0xA77B000CU &&
749+ nop == 0x47FF041FU &&
750+ jmp == 0x6B7B0000U)
751+ {
752+ regs->r28 = regs->pc+4;
753+ regs->r27 = addr+16;
754+ regs->pc = resolver;
755+ return 3;
756+ }
757+ }
758+ } while (0);
759+#endif
760+
761+ return 1;
762+}
763+
764+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
765+{
766+ unsigned long i;
767+
768+ printk(KERN_ERR "PAX: bytes at PC: ");
769+ for (i = 0; i < 5; i++) {
770+ unsigned int c;
771+ if (get_user(c, (unsigned int *)pc+i))
772+ printk(KERN_CONT "???????? ");
773+ else
774+ printk(KERN_CONT "%08x ", c);
775+ }
776+ printk("\n");
777+}
778+#endif
779
780 /*
781 * This routine handles page faults. It determines the address,
782@@ -133,8 +251,29 @@ retry:
783 good_area:
784 si_code = SEGV_ACCERR;
785 if (cause < 0) {
786- if (!(vma->vm_flags & VM_EXEC))
787+ if (!(vma->vm_flags & VM_EXEC)) {
788+
789+#ifdef CONFIG_PAX_PAGEEXEC
790+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
791+ goto bad_area;
792+
793+ up_read(&mm->mmap_sem);
794+ switch (pax_handle_fetch_fault(regs)) {
795+
796+#ifdef CONFIG_PAX_EMUPLT
797+ case 2:
798+ case 3:
799+ return;
800+#endif
801+
802+ }
803+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
804+ do_group_exit(SIGKILL);
805+#else
806 goto bad_area;
807+#endif
808+
809+ }
810 } else if (!cause) {
811 /* Allow reads even for write-only mappings */
812 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
813diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
814index 136f263..f471277 100644
815--- a/arch/arm/Kconfig
816+++ b/arch/arm/Kconfig
817@@ -1766,7 +1766,7 @@ config ALIGNMENT_TRAP
818
819 config UACCESS_WITH_MEMCPY
820 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
821- depends on MMU
822+ depends on MMU && !PAX_MEMORY_UDEREF
823 default y if CPU_FEROCEON
824 help
825 Implement faster copy_to_user and clear_user methods for CPU
826diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
827index da1c77d..2ee6056 100644
828--- a/arch/arm/include/asm/atomic.h
829+++ b/arch/arm/include/asm/atomic.h
830@@ -17,17 +17,35 @@
831 #include <asm/barrier.h>
832 #include <asm/cmpxchg.h>
833
834+#ifdef CONFIG_GENERIC_ATOMIC64
835+#include <asm-generic/atomic64.h>
836+#endif
837+
838 #define ATOMIC_INIT(i) { (i) }
839
840 #ifdef __KERNEL__
841
842+#define _ASM_EXTABLE(from, to) \
843+" .pushsection __ex_table,\"a\"\n"\
844+" .align 3\n" \
845+" .long " #from ", " #to"\n" \
846+" .popsection"
847+
848 /*
849 * On ARM, ordinary assignment (str instruction) doesn't clear the local
850 * strex/ldrex monitor on some implementations. The reason we can use it for
851 * atomic_set() is the clrex or dummy strex done on every exception return.
852 */
853 #define atomic_read(v) (*(volatile int *)&(v)->counter)
854+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
855+{
856+ return v->counter;
857+}
858 #define atomic_set(v,i) (((v)->counter) = (i))
859+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
860+{
861+ v->counter = i;
862+}
863
864 #if __LINUX_ARM_ARCH__ >= 6
865
866@@ -42,6 +60,35 @@ static inline void atomic_add(int i, atomic_t *v)
867 int result;
868
869 __asm__ __volatile__("@ atomic_add\n"
870+"1: ldrex %1, [%3]\n"
871+" adds %0, %1, %4\n"
872+
873+#ifdef CONFIG_PAX_REFCOUNT
874+" bvc 3f\n"
875+"2: bkpt 0xf103\n"
876+"3:\n"
877+#endif
878+
879+" strex %1, %0, [%3]\n"
880+" teq %1, #0\n"
881+" bne 1b"
882+
883+#ifdef CONFIG_PAX_REFCOUNT
884+"\n4:\n"
885+ _ASM_EXTABLE(2b, 4b)
886+#endif
887+
888+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
889+ : "r" (&v->counter), "Ir" (i)
890+ : "cc");
891+}
892+
893+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
894+{
895+ unsigned long tmp;
896+ int result;
897+
898+ __asm__ __volatile__("@ atomic_add_unchecked\n"
899 "1: ldrex %0, [%3]\n"
900 " add %0, %0, %4\n"
901 " strex %1, %0, [%3]\n"
902@@ -60,6 +107,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
903 smp_mb();
904
905 __asm__ __volatile__("@ atomic_add_return\n"
906+"1: ldrex %1, [%3]\n"
907+" adds %0, %1, %4\n"
908+
909+#ifdef CONFIG_PAX_REFCOUNT
910+" bvc 3f\n"
911+" mov %0, %1\n"
912+"2: bkpt 0xf103\n"
913+"3:\n"
914+#endif
915+
916+" strex %1, %0, [%3]\n"
917+" teq %1, #0\n"
918+" bne 1b"
919+
920+#ifdef CONFIG_PAX_REFCOUNT
921+"\n4:\n"
922+ _ASM_EXTABLE(2b, 4b)
923+#endif
924+
925+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
926+ : "r" (&v->counter), "Ir" (i)
927+ : "cc");
928+
929+ smp_mb();
930+
931+ return result;
932+}
933+
934+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
935+{
936+ unsigned long tmp;
937+ int result;
938+
939+ smp_mb();
940+
941+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
942 "1: ldrex %0, [%3]\n"
943 " add %0, %0, %4\n"
944 " strex %1, %0, [%3]\n"
945@@ -80,6 +163,35 @@ static inline void atomic_sub(int i, atomic_t *v)
946 int result;
947
948 __asm__ __volatile__("@ atomic_sub\n"
949+"1: ldrex %1, [%3]\n"
950+" subs %0, %1, %4\n"
951+
952+#ifdef CONFIG_PAX_REFCOUNT
953+" bvc 3f\n"
954+"2: bkpt 0xf103\n"
955+"3:\n"
956+#endif
957+
958+" strex %1, %0, [%3]\n"
959+" teq %1, #0\n"
960+" bne 1b"
961+
962+#ifdef CONFIG_PAX_REFCOUNT
963+"\n4:\n"
964+ _ASM_EXTABLE(2b, 4b)
965+#endif
966+
967+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
968+ : "r" (&v->counter), "Ir" (i)
969+ : "cc");
970+}
971+
972+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
973+{
974+ unsigned long tmp;
975+ int result;
976+
977+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
978 "1: ldrex %0, [%3]\n"
979 " sub %0, %0, %4\n"
980 " strex %1, %0, [%3]\n"
981@@ -98,11 +210,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
982 smp_mb();
983
984 __asm__ __volatile__("@ atomic_sub_return\n"
985-"1: ldrex %0, [%3]\n"
986-" sub %0, %0, %4\n"
987+"1: ldrex %1, [%3]\n"
988+" subs %0, %1, %4\n"
989+
990+#ifdef CONFIG_PAX_REFCOUNT
991+" bvc 3f\n"
992+" mov %0, %1\n"
993+"2: bkpt 0xf103\n"
994+"3:\n"
995+#endif
996+
997 " strex %1, %0, [%3]\n"
998 " teq %1, #0\n"
999 " bne 1b"
1000+
1001+#ifdef CONFIG_PAX_REFCOUNT
1002+"\n4:\n"
1003+ _ASM_EXTABLE(2b, 4b)
1004+#endif
1005+
1006 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1007 : "r" (&v->counter), "Ir" (i)
1008 : "cc");
1009@@ -134,6 +260,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
1010 return oldval;
1011 }
1012
1013+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
1014+{
1015+ unsigned long oldval, res;
1016+
1017+ smp_mb();
1018+
1019+ do {
1020+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
1021+ "ldrex %1, [%3]\n"
1022+ "mov %0, #0\n"
1023+ "teq %1, %4\n"
1024+ "strexeq %0, %5, [%3]\n"
1025+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1026+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
1027+ : "cc");
1028+ } while (res);
1029+
1030+ smp_mb();
1031+
1032+ return oldval;
1033+}
1034+
1035 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1036 {
1037 unsigned long tmp, tmp2;
1038@@ -167,7 +315,17 @@ static inline int atomic_add_return(int i, atomic_t *v)
1039
1040 return val;
1041 }
1042+
1043+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
1044+{
1045+ return atomic_add_return(i, v);
1046+}
1047+
1048 #define atomic_add(i, v) (void) atomic_add_return(i, v)
1049+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
1050+{
1051+ (void) atomic_add_return(i, v);
1052+}
1053
1054 static inline int atomic_sub_return(int i, atomic_t *v)
1055 {
1056@@ -182,6 +340,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
1057 return val;
1058 }
1059 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
1060+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
1061+{
1062+ (void) atomic_sub_return(i, v);
1063+}
1064
1065 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1066 {
1067@@ -197,6 +359,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
1068 return ret;
1069 }
1070
1071+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
1072+{
1073+ return atomic_cmpxchg(v, old, new);
1074+}
1075+
1076 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1077 {
1078 unsigned long flags;
1079@@ -209,6 +376,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
1080 #endif /* __LINUX_ARM_ARCH__ */
1081
1082 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
1083+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
1084+{
1085+ return xchg(&v->counter, new);
1086+}
1087
1088 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1089 {
1090@@ -221,11 +392,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
1091 }
1092
1093 #define atomic_inc(v) atomic_add(1, v)
1094+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
1095+{
1096+ atomic_add_unchecked(1, v);
1097+}
1098 #define atomic_dec(v) atomic_sub(1, v)
1099+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
1100+{
1101+ atomic_sub_unchecked(1, v);
1102+}
1103
1104 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
1105+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
1106+{
1107+ return atomic_add_return_unchecked(1, v) == 0;
1108+}
1109 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
1110 #define atomic_inc_return(v) (atomic_add_return(1, v))
1111+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
1112+{
1113+ return atomic_add_return_unchecked(1, v);
1114+}
1115 #define atomic_dec_return(v) (atomic_sub_return(1, v))
1116 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)
1117
1118@@ -241,6 +428,14 @@ typedef struct {
1119 u64 __aligned(8) counter;
1120 } atomic64_t;
1121
1122+#ifdef CONFIG_PAX_REFCOUNT
1123+typedef struct {
1124+ u64 __aligned(8) counter;
1125+} atomic64_unchecked_t;
1126+#else
1127+typedef atomic64_t atomic64_unchecked_t;
1128+#endif
1129+
1130 #define ATOMIC64_INIT(i) { (i) }
1131
1132 #ifdef CONFIG_ARM_LPAE
1133@@ -257,6 +452,19 @@ static inline u64 atomic64_read(const atomic64_t *v)
1134 return result;
1135 }
1136
1137+static inline u64 atomic64_read_unchecked(const atomic64_unchecked_t *v)
1138+{
1139+ u64 result;
1140+
1141+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1142+" ldrd %0, %H0, [%1]"
1143+ : "=&r" (result)
1144+ : "r" (&v->counter), "Qo" (v->counter)
1145+ );
1146+
1147+ return result;
1148+}
1149+
1150 static inline void atomic64_set(atomic64_t *v, u64 i)
1151 {
1152 __asm__ __volatile__("@ atomic64_set\n"
1153@@ -265,6 +473,15 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
1154 : "r" (&v->counter), "r" (i)
1155 );
1156 }
1157+
1158+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
1159+{
1160+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1161+" strd %2, %H2, [%1]"
1162+ : "=Qo" (v->counter)
1163+ : "r" (&v->counter), "r" (i)
1164+ );
1165+}
1166 #else
1167 static inline u64 atomic64_read(const atomic64_t *v)
1168 {
1169@@ -279,6 +496,19 @@ static inline u64 atomic64_read(const atomic64_t *v)
1170 return result;
1171 }
1172
1173+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
1174+{
1175+ u64 result;
1176+
1177+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
1178+" ldrexd %0, %H0, [%1]"
1179+ : "=&r" (result)
1180+ : "r" (&v->counter), "Qo" (v->counter)
1181+ );
1182+
1183+ return result;
1184+}
1185+
1186 static inline void atomic64_set(atomic64_t *v, u64 i)
1187 {
1188 u64 tmp;
1189@@ -292,6 +522,21 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
1190 : "r" (&v->counter), "r" (i)
1191 : "cc");
1192 }
1193+
1194+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
1195+{
1196+ u64 tmp;
1197+
1198+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
1199+"1: ldrexd %0, %H0, [%2]\n"
1200+" strexd %0, %3, %H3, [%2]\n"
1201+" teq %0, #0\n"
1202+" bne 1b"
1203+ : "=&r" (tmp), "=Qo" (v->counter)
1204+ : "r" (&v->counter), "r" (i)
1205+ : "cc");
1206+}
1207+
1208 #endif
1209
1210 static inline void atomic64_add(u64 i, atomic64_t *v)
1211@@ -302,6 +547,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1212 __asm__ __volatile__("@ atomic64_add\n"
1213 "1: ldrexd %0, %H0, [%3]\n"
1214 " adds %0, %0, %4\n"
1215+" adcs %H0, %H0, %H4\n"
1216+
1217+#ifdef CONFIG_PAX_REFCOUNT
1218+" bvc 3f\n"
1219+"2: bkpt 0xf103\n"
1220+"3:\n"
1221+#endif
1222+
1223+" strexd %1, %0, %H0, [%3]\n"
1224+" teq %1, #0\n"
1225+" bne 1b"
1226+
1227+#ifdef CONFIG_PAX_REFCOUNT
1228+"\n4:\n"
1229+ _ASM_EXTABLE(2b, 4b)
1230+#endif
1231+
1232+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1233+ : "r" (&v->counter), "r" (i)
1234+ : "cc");
1235+}
1236+
1237+static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
1238+{
1239+ u64 result;
1240+ unsigned long tmp;
1241+
1242+ __asm__ __volatile__("@ atomic64_add_unchecked\n"
1243+"1: ldrexd %0, %H0, [%3]\n"
1244+" adds %0, %0, %4\n"
1245 " adc %H0, %H0, %H4\n"
1246 " strexd %1, %0, %H0, [%3]\n"
1247 " teq %1, #0\n"
1248@@ -313,12 +588,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
1249
1250 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
1251 {
1252- u64 result;
1253- unsigned long tmp;
1254+ u64 result, tmp;
1255
1256 smp_mb();
1257
1258 __asm__ __volatile__("@ atomic64_add_return\n"
1259+"1: ldrexd %1, %H1, [%3]\n"
1260+" adds %0, %1, %4\n"
1261+" adcs %H0, %H1, %H4\n"
1262+
1263+#ifdef CONFIG_PAX_REFCOUNT
1264+" bvc 3f\n"
1265+" mov %0, %1\n"
1266+" mov %H0, %H1\n"
1267+"2: bkpt 0xf103\n"
1268+"3:\n"
1269+#endif
1270+
1271+" strexd %1, %0, %H0, [%3]\n"
1272+" teq %1, #0\n"
1273+" bne 1b"
1274+
1275+#ifdef CONFIG_PAX_REFCOUNT
1276+"\n4:\n"
1277+ _ASM_EXTABLE(2b, 4b)
1278+#endif
1279+
1280+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1281+ : "r" (&v->counter), "r" (i)
1282+ : "cc");
1283+
1284+ smp_mb();
1285+
1286+ return result;
1287+}
1288+
1289+static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
1290+{
1291+ u64 result;
1292+ unsigned long tmp;
1293+
1294+ smp_mb();
1295+
1296+ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
1297 "1: ldrexd %0, %H0, [%3]\n"
1298 " adds %0, %0, %4\n"
1299 " adc %H0, %H0, %H4\n"
1300@@ -342,6 +654,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1301 __asm__ __volatile__("@ atomic64_sub\n"
1302 "1: ldrexd %0, %H0, [%3]\n"
1303 " subs %0, %0, %4\n"
1304+" sbcs %H0, %H0, %H4\n"
1305+
1306+#ifdef CONFIG_PAX_REFCOUNT
1307+" bvc 3f\n"
1308+"2: bkpt 0xf103\n"
1309+"3:\n"
1310+#endif
1311+
1312+" strexd %1, %0, %H0, [%3]\n"
1313+" teq %1, #0\n"
1314+" bne 1b"
1315+
1316+#ifdef CONFIG_PAX_REFCOUNT
1317+"\n4:\n"
1318+ _ASM_EXTABLE(2b, 4b)
1319+#endif
1320+
1321+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1322+ : "r" (&v->counter), "r" (i)
1323+ : "cc");
1324+}
1325+
1326+static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
1327+{
1328+ u64 result;
1329+ unsigned long tmp;
1330+
1331+ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
1332+"1: ldrexd %0, %H0, [%3]\n"
1333+" subs %0, %0, %4\n"
1334 " sbc %H0, %H0, %H4\n"
1335 " strexd %1, %0, %H0, [%3]\n"
1336 " teq %1, #0\n"
1337@@ -353,18 +695,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
1338
1339 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
1340 {
1341- u64 result;
1342- unsigned long tmp;
1343+ u64 result, tmp;
1344
1345 smp_mb();
1346
1347 __asm__ __volatile__("@ atomic64_sub_return\n"
1348-"1: ldrexd %0, %H0, [%3]\n"
1349-" subs %0, %0, %4\n"
1350-" sbc %H0, %H0, %H4\n"
1351+"1: ldrexd %1, %H1, [%3]\n"
1352+" subs %0, %1, %4\n"
1353+" sbcs %H0, %H1, %H4\n"
1354+
1355+#ifdef CONFIG_PAX_REFCOUNT
1356+" bvc 3f\n"
1357+" mov %0, %1\n"
1358+" mov %H0, %H1\n"
1359+"2: bkpt 0xf103\n"
1360+"3:\n"
1361+#endif
1362+
1363 " strexd %1, %0, %H0, [%3]\n"
1364 " teq %1, #0\n"
1365 " bne 1b"
1366+
1367+#ifdef CONFIG_PAX_REFCOUNT
1368+"\n4:\n"
1369+ _ASM_EXTABLE(2b, 4b)
1370+#endif
1371+
1372 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1373 : "r" (&v->counter), "r" (i)
1374 : "cc");
1375@@ -398,6 +754,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
1376 return oldval;
1377 }
1378
1379+static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
1380+{
1381+ u64 oldval;
1382+ unsigned long res;
1383+
1384+ smp_mb();
1385+
1386+ do {
1387+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
1388+ "ldrexd %1, %H1, [%3]\n"
1389+ "mov %0, #0\n"
1390+ "teq %1, %4\n"
1391+ "teqeq %H1, %H4\n"
1392+ "strexdeq %0, %5, %H5, [%3]"
1393+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
1394+ : "r" (&ptr->counter), "r" (old), "r" (new)
1395+ : "cc");
1396+ } while (res);
1397+
1398+ smp_mb();
1399+
1400+ return oldval;
1401+}
1402+
1403 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1404 {
1405 u64 result;
1406@@ -421,21 +801,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
1407
1408 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
1409 {
1410- u64 result;
1411- unsigned long tmp;
1412+ u64 result, tmp;
1413
1414 smp_mb();
1415
1416 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
1417-"1: ldrexd %0, %H0, [%3]\n"
1418-" subs %0, %0, #1\n"
1419-" sbc %H0, %H0, #0\n"
1420+"1: ldrexd %1, %H1, [%3]\n"
1421+" subs %0, %1, #1\n"
1422+" sbcs %H0, %H1, #0\n"
1423+
1424+#ifdef CONFIG_PAX_REFCOUNT
1425+" bvc 3f\n"
1426+" mov %0, %1\n"
1427+" mov %H0, %H1\n"
1428+"2: bkpt 0xf103\n"
1429+"3:\n"
1430+#endif
1431+
1432 " teq %H0, #0\n"
1433-" bmi 2f\n"
1434+" bmi 4f\n"
1435 " strexd %1, %0, %H0, [%3]\n"
1436 " teq %1, #0\n"
1437 " bne 1b\n"
1438-"2:"
1439+"4:\n"
1440+
1441+#ifdef CONFIG_PAX_REFCOUNT
1442+ _ASM_EXTABLE(2b, 4b)
1443+#endif
1444+
1445 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
1446 : "r" (&v->counter)
1447 : "cc");
1448@@ -458,13 +851,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1449 " teq %0, %5\n"
1450 " teqeq %H0, %H5\n"
1451 " moveq %1, #0\n"
1452-" beq 2f\n"
1453+" beq 4f\n"
1454 " adds %0, %0, %6\n"
1455-" adc %H0, %H0, %H6\n"
1456+" adcs %H0, %H0, %H6\n"
1457+
1458+#ifdef CONFIG_PAX_REFCOUNT
1459+" bvc 3f\n"
1460+"2: bkpt 0xf103\n"
1461+"3:\n"
1462+#endif
1463+
1464 " strexd %2, %0, %H0, [%4]\n"
1465 " teq %2, #0\n"
1466 " bne 1b\n"
1467-"2:"
1468+"4:\n"
1469+
1470+#ifdef CONFIG_PAX_REFCOUNT
1471+ _ASM_EXTABLE(2b, 4b)
1472+#endif
1473+
1474 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
1475 : "r" (&v->counter), "r" (u), "r" (a)
1476 : "cc");
1477@@ -477,10 +882,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
1478
1479 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
1480 #define atomic64_inc(v) atomic64_add(1LL, (v))
1481+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
1482 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
1483+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
1484 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
1485 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
1486 #define atomic64_dec(v) atomic64_sub(1LL, (v))
1487+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
1488 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
1489 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
1490 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
1491diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
1492index 75fe66b..ba3dee4 100644
1493--- a/arch/arm/include/asm/cache.h
1494+++ b/arch/arm/include/asm/cache.h
1495@@ -4,8 +4,10 @@
1496 #ifndef __ASMARM_CACHE_H
1497 #define __ASMARM_CACHE_H
1498
1499+#include <linux/const.h>
1500+
1501 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
1502-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
1503+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
1504
1505 /*
1506 * Memory returned by kmalloc() may be used for DMA, so we must make
1507@@ -24,5 +26,6 @@
1508 #endif
1509
1510 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
1511+#define __read_only __attribute__ ((__section__(".data..read_only")))
1512
1513 #endif
1514diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
1515index 17d0ae8..014e350 100644
1516--- a/arch/arm/include/asm/cacheflush.h
1517+++ b/arch/arm/include/asm/cacheflush.h
1518@@ -116,7 +116,7 @@ struct cpu_cache_fns {
1519 void (*dma_unmap_area)(const void *, size_t, int);
1520
1521 void (*dma_flush_range)(const void *, const void *);
1522-};
1523+} __no_const;
1524
1525 /*
1526 * Select the calling method
1527diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
1528index 6dcc164..b14d917 100644
1529--- a/arch/arm/include/asm/checksum.h
1530+++ b/arch/arm/include/asm/checksum.h
1531@@ -37,7 +37,19 @@ __wsum
1532 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);
1533
1534 __wsum
1535-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1536+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
1537+
1538+static inline __wsum
1539+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
1540+{
1541+ __wsum ret;
1542+ pax_open_userland();
1543+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
1544+ pax_close_userland();
1545+ return ret;
1546+}
1547+
1548+
1549
1550 /*
1551 * Fold a partial checksum without adding pseudo headers
1552diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
1553index 4f009c1..466c59b 100644
1554--- a/arch/arm/include/asm/cmpxchg.h
1555+++ b/arch/arm/include/asm/cmpxchg.h
1556@@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
1557
1558 #define xchg(ptr,x) \
1559 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1560+#define xchg_unchecked(ptr,x) \
1561+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
1562
1563 #include <asm-generic/cmpxchg-local.h>
1564
1565diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
1566index 6ddbe44..b5e38b1 100644
1567--- a/arch/arm/include/asm/domain.h
1568+++ b/arch/arm/include/asm/domain.h
1569@@ -48,18 +48,37 @@
1570 * Domain types
1571 */
1572 #define DOMAIN_NOACCESS 0
1573-#define DOMAIN_CLIENT 1
1574 #ifdef CONFIG_CPU_USE_DOMAINS
1575+#define DOMAIN_USERCLIENT 1
1576+#define DOMAIN_KERNELCLIENT 1
1577 #define DOMAIN_MANAGER 3
1578+#define DOMAIN_VECTORS DOMAIN_USER
1579 #else
1580+
1581+#ifdef CONFIG_PAX_KERNEXEC
1582 #define DOMAIN_MANAGER 1
1583+#define DOMAIN_KERNEXEC 3
1584+#else
1585+#define DOMAIN_MANAGER 1
1586+#endif
1587+
1588+#ifdef CONFIG_PAX_MEMORY_UDEREF
1589+#define DOMAIN_USERCLIENT 0
1590+#define DOMAIN_UDEREF 1
1591+#define DOMAIN_VECTORS DOMAIN_KERNEL
1592+#else
1593+#define DOMAIN_USERCLIENT 1
1594+#define DOMAIN_VECTORS DOMAIN_USER
1595+#endif
1596+#define DOMAIN_KERNELCLIENT 1
1597+
1598 #endif
1599
1600 #define domain_val(dom,type) ((type) << (2*(dom)))
1601
1602 #ifndef __ASSEMBLY__
1603
1604-#ifdef CONFIG_CPU_USE_DOMAINS
1605+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1606 static inline void set_domain(unsigned val)
1607 {
1608 asm volatile(
1609@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
1610 isb();
1611 }
1612
1613-#define modify_domain(dom,type) \
1614- do { \
1615- struct thread_info *thread = current_thread_info(); \
1616- unsigned int domain = thread->cpu_domain; \
1617- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
1618- thread->cpu_domain = domain | domain_val(dom, type); \
1619- set_domain(thread->cpu_domain); \
1620- } while (0)
1621-
1622+extern void modify_domain(unsigned int dom, unsigned int type);
1623 #else
1624 static inline void set_domain(unsigned val) { }
1625 static inline void modify_domain(unsigned dom, unsigned type) { }
1626diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
1627index 38050b1..9d90e8b 100644
1628--- a/arch/arm/include/asm/elf.h
1629+++ b/arch/arm/include/asm/elf.h
1630@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1631 the loader. We need to make sure that it is out of the way of the program
1632 that it will "exec", and that there is sufficient room for the brk. */
1633
1634-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
1635+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
1636+
1637+#ifdef CONFIG_PAX_ASLR
1638+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
1639+
1640+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1641+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
1642+#endif
1643
1644 /* When the program starts, a1 contains a pointer to a function to be
1645 registered with atexit, as per the SVR4 ABI. A value of 0 means we
1646@@ -126,8 +133,4 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
1647 extern void elf_set_personality(const struct elf32_hdr *);
1648 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))
1649
1650-struct mm_struct;
1651-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
1652-#define arch_randomize_brk arch_randomize_brk
1653-
1654 #endif
1655diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
1656index de53547..52b9a28 100644
1657--- a/arch/arm/include/asm/fncpy.h
1658+++ b/arch/arm/include/asm/fncpy.h
1659@@ -81,7 +81,9 @@
1660 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
1661 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
1662 \
1663+ pax_open_kernel(); \
1664 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
1665+ pax_close_kernel(); \
1666 flush_icache_range((unsigned long)(dest_buf), \
1667 (unsigned long)(dest_buf) + (size)); \
1668 \
1669diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
1670index e42cf59..7b94b8f 100644
1671--- a/arch/arm/include/asm/futex.h
1672+++ b/arch/arm/include/asm/futex.h
1673@@ -50,6 +50,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1674 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1675 return -EFAULT;
1676
1677+ pax_open_userland();
1678+
1679 smp_mb();
1680 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1681 "1: ldrex %1, [%4]\n"
1682@@ -65,6 +67,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1683 : "cc", "memory");
1684 smp_mb();
1685
1686+ pax_close_userland();
1687+
1688 *uval = val;
1689 return ret;
1690 }
1691@@ -95,6 +99,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1692 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
1693 return -EFAULT;
1694
1695+ pax_open_userland();
1696+
1697 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
1698 "1: " TUSER(ldr) " %1, [%4]\n"
1699 " teq %1, %2\n"
1700@@ -105,6 +111,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
1701 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
1702 : "cc", "memory");
1703
1704+ pax_close_userland();
1705+
1706 *uval = val;
1707 return ret;
1708 }
1709@@ -127,6 +135,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1710 return -EFAULT;
1711
1712 pagefault_disable(); /* implies preempt_disable() */
1713+ pax_open_userland();
1714
1715 switch (op) {
1716 case FUTEX_OP_SET:
1717@@ -148,6 +157,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
1718 ret = -ENOSYS;
1719 }
1720
1721+ pax_close_userland();
1722 pagefault_enable(); /* subsumes preempt_enable() */
1723
1724 if (!ret) {
1725diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
1726index 83eb2f7..ed77159 100644
1727--- a/arch/arm/include/asm/kmap_types.h
1728+++ b/arch/arm/include/asm/kmap_types.h
1729@@ -4,6 +4,6 @@
1730 /*
1731 * This is the "bare minimum". AIO seems to require this.
1732 */
1733-#define KM_TYPE_NR 16
1734+#define KM_TYPE_NR 17
1735
1736 #endif
1737diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
1738index 9e614a1..3302cca 100644
1739--- a/arch/arm/include/asm/mach/dma.h
1740+++ b/arch/arm/include/asm/mach/dma.h
1741@@ -22,7 +22,7 @@ struct dma_ops {
1742 int (*residue)(unsigned int, dma_t *); /* optional */
1743 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
1744 const char *type;
1745-};
1746+} __do_const;
1747
1748 struct dma_struct {
1749 void *addr; /* single DMA address */
1750diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
1751index 2fe141f..192dc01 100644
1752--- a/arch/arm/include/asm/mach/map.h
1753+++ b/arch/arm/include/asm/mach/map.h
1754@@ -27,13 +27,16 @@ struct map_desc {
1755 #define MT_MINICLEAN 6
1756 #define MT_LOW_VECTORS 7
1757 #define MT_HIGH_VECTORS 8
1758-#define MT_MEMORY 9
1759+#define MT_MEMORY_RWX 9
1760 #define MT_ROM 10
1761-#define MT_MEMORY_NONCACHED 11
1762+#define MT_MEMORY_NONCACHED_RX 11
1763 #define MT_MEMORY_DTCM 12
1764 #define MT_MEMORY_ITCM 13
1765 #define MT_MEMORY_SO 14
1766 #define MT_MEMORY_DMA_READY 15
1767+#define MT_MEMORY_RW 16
1768+#define MT_MEMORY_RX 17
1769+#define MT_MEMORY_NONCACHED_RW 18
1770
1771 #ifdef CONFIG_MMU
1772 extern void iotable_init(struct map_desc *, int);
1773diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
1774index 12f71a1..04e063c 100644
1775--- a/arch/arm/include/asm/outercache.h
1776+++ b/arch/arm/include/asm/outercache.h
1777@@ -35,7 +35,7 @@ struct outer_cache_fns {
1778 #endif
1779 void (*set_debug)(unsigned long);
1780 void (*resume)(void);
1781-};
1782+} __no_const;
1783
1784 #ifdef CONFIG_OUTER_CACHE
1785
1786diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
1787index 812a494..71fc0b6 100644
1788--- a/arch/arm/include/asm/page.h
1789+++ b/arch/arm/include/asm/page.h
1790@@ -114,7 +114,7 @@ struct cpu_user_fns {
1791 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
1792 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
1793 unsigned long vaddr, struct vm_area_struct *vma);
1794-};
1795+} __no_const;
1796
1797 #ifdef MULTI_USER
1798 extern struct cpu_user_fns cpu_user;
1799diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1800index 943504f..c37a730 100644
1801--- a/arch/arm/include/asm/pgalloc.h
1802+++ b/arch/arm/include/asm/pgalloc.h
1803@@ -17,6 +17,7 @@
1804 #include <asm/processor.h>
1805 #include <asm/cacheflush.h>
1806 #include <asm/tlbflush.h>
1807+#include <asm/system_info.h>
1808
1809 #define check_pgt_cache() do { } while (0)
1810
1811@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1812 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1813 }
1814
1815+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1816+{
1817+ pud_populate(mm, pud, pmd);
1818+}
1819+
1820 #else /* !CONFIG_ARM_LPAE */
1821
1822 /*
1823@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1824 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1825 #define pmd_free(mm, pmd) do { } while (0)
1826 #define pud_populate(mm,pmd,pte) BUG()
1827+#define pud_populate_kernel(mm,pmd,pte) BUG()
1828
1829 #endif /* CONFIG_ARM_LPAE */
1830
1831@@ -126,6 +133,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1832 __free_page(pte);
1833 }
1834
1835+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
1836+{
1837+#ifdef CONFIG_ARM_LPAE
1838+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1839+#else
1840+ if (addr & SECTION_SIZE)
1841+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
1842+ else
1843+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1844+#endif
1845+ flush_pmd_entry(pmdp);
1846+}
1847+
1848 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1849 pmdval_t prot)
1850 {
1851@@ -155,7 +175,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
1852 static inline void
1853 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
1854 {
1855- __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
1856+ __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE | __supported_pmd_mask);
1857 }
1858 #define pmd_pgtable(pmd) pmd_page(pmd)
1859
1860diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1861index 5cfba15..f415e1a 100644
1862--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1863+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1864@@ -20,12 +20,15 @@
1865 #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
1866 #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
1867 #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
1868+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
1869 #define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
1870 #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
1871 #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
1872+
1873 /*
1874 * - section
1875 */
1876+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1877 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1878 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1879 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1880@@ -37,6 +40,7 @@
1881 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
1882 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
1883 #define PMD_SECT_AF (_AT(pmdval_t, 0))
1884+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
1885
1886 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
1887 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
1888@@ -66,6 +70,7 @@
1889 * - extended small page/tiny page
1890 */
1891 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
1892+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
1893 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
1894 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
1895 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
1896diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
1897index f97ee02..07f1be5 100644
1898--- a/arch/arm/include/asm/pgtable-2level.h
1899+++ b/arch/arm/include/asm/pgtable-2level.h
1900@@ -125,6 +125,7 @@
1901 #define L_PTE_XN (_AT(pteval_t, 1) << 9)
1902 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
1903 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
1904+#define L_PTE_PXN (_AT(pteval_t, 1) << 12) /* v7*/
1905
1906 /*
1907 * These are the memory types, defined to be compatible with
1908diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
1909index 18f5cef..25b8f43 100644
1910--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
1911+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
1912@@ -41,6 +41,7 @@
1913 */
1914 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1915 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1916+#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7)
1917 #define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
1918 #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
1919 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 11)
1920@@ -71,6 +72,7 @@
1921 #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1922 #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
1923 #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
1924+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1925 #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
1926
1927 /*
1928diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
1929index 86b8fe3..e25f975 100644
1930--- a/arch/arm/include/asm/pgtable-3level.h
1931+++ b/arch/arm/include/asm/pgtable-3level.h
1932@@ -74,6 +74,7 @@
1933 #define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
1934 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1935 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
1936+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1937 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
1938 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */
1939 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
1940@@ -82,6 +83,7 @@
1941 /*
1942 * To be used in assembly code with the upper page attributes.
1943 */
1944+#define L_PTE_PXN_HIGH (1 << (53 - 32))
1945 #define L_PTE_XN_HIGH (1 << (54 - 32))
1946 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
1947
1948diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
1949index 9bcd262..fba731c 100644
1950--- a/arch/arm/include/asm/pgtable.h
1951+++ b/arch/arm/include/asm/pgtable.h
1952@@ -30,6 +30,9 @@
1953 #include <asm/pgtable-2level.h>
1954 #endif
1955
1956+#define ktla_ktva(addr) (addr)
1957+#define ktva_ktla(addr) (addr)
1958+
1959 /*
1960 * Just any arbitrary offset to the start of the vmalloc VM area: the
1961 * current 8MB value just means that there will be a 8MB "hole" after the
1962@@ -45,6 +48,9 @@
1963 #define LIBRARY_TEXT_START 0x0c000000
1964
1965 #ifndef __ASSEMBLY__
1966+extern pteval_t __supported_pte_mask;
1967+extern pmdval_t __supported_pmd_mask;
1968+
1969 extern void __pte_error(const char *file, int line, pte_t);
1970 extern void __pmd_error(const char *file, int line, pmd_t);
1971 extern void __pgd_error(const char *file, int line, pgd_t);
1972@@ -53,6 +59,50 @@ extern void __pgd_error(const char *file, int line, pgd_t);
1973 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
1974 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
1975
1976+#define __HAVE_ARCH_PAX_OPEN_KERNEL
1977+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
1978+
1979+#ifdef CONFIG_PAX_KERNEXEC
1980+#include <asm/domain.h>
1981+#include <linux/thread_info.h>
1982+#include <linux/preempt.h>
1983+#endif
1984+
1985+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1986+static inline int test_domain(int domain, int domaintype)
1987+{
1988+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
1989+}
1990+#endif
1991+
1992+#ifdef CONFIG_PAX_KERNEXEC
1993+static inline unsigned long pax_open_kernel(void) {
1994+#ifdef CONFIG_ARM_LPAE
1995+ /* TODO */
1996+#else
1997+ preempt_disable();
1998+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
1999+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2000+#endif
2001+ return 0;
2002+}
2003+
2004+static inline unsigned long pax_close_kernel(void) {
2005+#ifdef CONFIG_ARM_LPAE
2006+ /* TODO */
2007+#else
2008+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2009+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
2010+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2011+ preempt_enable_no_resched();
2012+#endif
2013+ return 0;
2014+}
2015+#else
2016+static inline unsigned long pax_open_kernel(void) { return 0; }
2017+static inline unsigned long pax_close_kernel(void) { return 0; }
2018+#endif
2019+
2020 /*
2021 * This is the lowest virtual address we can permit any user space
2022 * mapping to be mapped at. This is particularly important for
2023@@ -72,8 +122,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2024 /*
2025 * The pgprot_* and protection_map entries will be fixed up in runtime
2026 * to include the cachable and bufferable bits based on memory policy,
2027- * as well as any architecture dependent bits like global/ASID and SMP
2028- * shared mapping bits.
2029+ * as well as any architecture dependent bits like global/ASID, PXN,
2030+ * and SMP shared mapping bits.
2031 */
2032 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2033
2034@@ -257,7 +307,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
2035 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2036 {
2037 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2038- L_PTE_NONE | L_PTE_VALID;
2039+ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2040 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2041 return pte;
2042 }
2043diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
2044index f3628fb..a0672dd 100644
2045--- a/arch/arm/include/asm/proc-fns.h
2046+++ b/arch/arm/include/asm/proc-fns.h
2047@@ -75,7 +75,7 @@ extern struct processor {
2048 unsigned int suspend_size;
2049 void (*do_suspend)(void *);
2050 void (*do_resume)(void *);
2051-} processor;
2052+} __do_const processor;
2053
2054 #ifndef MULTI_CPU
2055 extern void cpu_proc_init(void);
2056diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
2057index 06e7d50..8a8e251 100644
2058--- a/arch/arm/include/asm/processor.h
2059+++ b/arch/arm/include/asm/processor.h
2060@@ -65,9 +65,8 @@ struct thread_struct {
2061 regs->ARM_cpsr |= PSR_ENDSTATE; \
2062 regs->ARM_pc = pc & ~1; /* pc */ \
2063 regs->ARM_sp = sp; /* sp */ \
2064- regs->ARM_r2 = stack[2]; /* r2 (envp) */ \
2065- regs->ARM_r1 = stack[1]; /* r1 (argv) */ \
2066- regs->ARM_r0 = stack[0]; /* r0 (argc) */ \
2067+ /* r2 (envp), r1 (argv), r0 (argc) */ \
2068+ (void)copy_from_user(&regs->ARM_r0, (const char __user *)stack, 3 * sizeof(unsigned long)); \
2069 nommu_start_thread(regs); \
2070 })
2071
2072diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
2073index ce0dbe7..c085b6f 100644
2074--- a/arch/arm/include/asm/psci.h
2075+++ b/arch/arm/include/asm/psci.h
2076@@ -29,7 +29,7 @@ struct psci_operations {
2077 int (*cpu_off)(struct psci_power_state state);
2078 int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
2079 int (*migrate)(unsigned long cpuid);
2080-};
2081+} __no_const;
2082
2083 extern struct psci_operations psci_ops;
2084
2085diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2086index d3a22be..3a69ad5 100644
2087--- a/arch/arm/include/asm/smp.h
2088+++ b/arch/arm/include/asm/smp.h
2089@@ -107,7 +107,7 @@ struct smp_operations {
2090 int (*cpu_disable)(unsigned int cpu);
2091 #endif
2092 #endif
2093-};
2094+} __no_const;
2095
2096 /*
2097 * set platform specific SMP operations
2098diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2099index 1995d1a..76693a2 100644
2100--- a/arch/arm/include/asm/thread_info.h
2101+++ b/arch/arm/include/asm/thread_info.h
2102@@ -77,9 +77,9 @@ struct thread_info {
2103 .flags = 0, \
2104 .preempt_count = INIT_PREEMPT_COUNT, \
2105 .addr_limit = KERNEL_DS, \
2106- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2107- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2108- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2109+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2110+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2111+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2112 .restart_block = { \
2113 .fn = do_no_restart_syscall, \
2114 }, \
2115@@ -152,7 +152,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2116 #define TIF_SYSCALL_AUDIT 9
2117 #define TIF_SYSCALL_TRACEPOINT 10
2118 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2119-#define TIF_NOHZ 12 /* in adaptive nohz mode */
2120+/* within 8 bits of TIF_SYSCALL_TRACE
2121+ * to meet flexible second operand requirements
2122+ */
2123+#define TIF_GRSEC_SETXID 12
2124+#define TIF_NOHZ 13 /* in adaptive nohz mode */
2125 #define TIF_USING_IWMMXT 17
2126 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2127 #define TIF_RESTORE_SIGMASK 20
2128@@ -166,10 +170,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2129 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2130 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2131 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2132+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2133
2134 /* Checks for any syscall work in entry-common.S */
2135 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2136- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2137+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2138
2139 /*
2140 * Change these and you break ASM code in entry-common.S
2141diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2142index 7e1f760..d42d7f8 100644
2143--- a/arch/arm/include/asm/uaccess.h
2144+++ b/arch/arm/include/asm/uaccess.h
2145@@ -18,6 +18,7 @@
2146 #include <asm/domain.h>
2147 #include <asm/unified.h>
2148 #include <asm/compiler.h>
2149+#include <asm/pgtable.h>
2150
2151 #define VERIFY_READ 0
2152 #define VERIFY_WRITE 1
2153@@ -63,11 +64,35 @@ extern int __put_user_bad(void);
2154 static inline void set_fs(mm_segment_t fs)
2155 {
2156 current_thread_info()->addr_limit = fs;
2157- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2158+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2159 }
2160
2161 #define segment_eq(a,b) ((a) == (b))
2162
2163+static inline void pax_open_userland(void)
2164+{
2165+
2166+#ifdef CONFIG_PAX_MEMORY_UDEREF
2167+ if (segment_eq(get_fs(), USER_DS)) {
2168+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2169+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2170+ }
2171+#endif
2172+
2173+}
2174+
2175+static inline void pax_close_userland(void)
2176+{
2177+
2178+#ifdef CONFIG_PAX_MEMORY_UDEREF
2179+ if (segment_eq(get_fs(), USER_DS)) {
2180+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2181+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2182+ }
2183+#endif
2184+
2185+}
2186+
2187 #define __addr_ok(addr) ({ \
2188 unsigned long flag; \
2189 __asm__("cmp %2, %0; movlo %0, #0" \
2190@@ -143,8 +168,12 @@ extern int __get_user_4(void *);
2191
2192 #define get_user(x,p) \
2193 ({ \
2194+ int __e; \
2195 might_fault(); \
2196- __get_user_check(x,p); \
2197+ pax_open_userland(); \
2198+ __e = __get_user_check(x,p); \
2199+ pax_close_userland(); \
2200+ __e; \
2201 })
2202
2203 extern int __put_user_1(void *, unsigned int);
2204@@ -188,8 +217,12 @@ extern int __put_user_8(void *, unsigned long long);
2205
2206 #define put_user(x,p) \
2207 ({ \
2208+ int __e; \
2209 might_fault(); \
2210- __put_user_check(x,p); \
2211+ pax_open_userland(); \
2212+ __e = __put_user_check(x,p); \
2213+ pax_close_userland(); \
2214+ __e; \
2215 })
2216
2217 #else /* CONFIG_MMU */
2218@@ -230,13 +263,17 @@ static inline void set_fs(mm_segment_t fs)
2219 #define __get_user(x,ptr) \
2220 ({ \
2221 long __gu_err = 0; \
2222+ pax_open_userland(); \
2223 __get_user_err((x),(ptr),__gu_err); \
2224+ pax_close_userland(); \
2225 __gu_err; \
2226 })
2227
2228 #define __get_user_error(x,ptr,err) \
2229 ({ \
2230+ pax_open_userland(); \
2231 __get_user_err((x),(ptr),err); \
2232+ pax_close_userland(); \
2233 (void) 0; \
2234 })
2235
2236@@ -312,13 +349,17 @@ do { \
2237 #define __put_user(x,ptr) \
2238 ({ \
2239 long __pu_err = 0; \
2240+ pax_open_userland(); \
2241 __put_user_err((x),(ptr),__pu_err); \
2242+ pax_close_userland(); \
2243 __pu_err; \
2244 })
2245
2246 #define __put_user_error(x,ptr,err) \
2247 ({ \
2248+ pax_open_userland(); \
2249 __put_user_err((x),(ptr),err); \
2250+ pax_close_userland(); \
2251 (void) 0; \
2252 })
2253
2254@@ -418,11 +459,44 @@ do { \
2255
2256
2257 #ifdef CONFIG_MMU
2258-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2259-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2260+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2261+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2262+
2263+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2264+{
2265+ unsigned long ret;
2266+
2267+ check_object_size(to, n, false);
2268+ pax_open_userland();
2269+ ret = ___copy_from_user(to, from, n);
2270+ pax_close_userland();
2271+ return ret;
2272+}
2273+
2274+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2275+{
2276+ unsigned long ret;
2277+
2278+ check_object_size(from, n, true);
2279+ pax_open_userland();
2280+ ret = ___copy_to_user(to, from, n);
2281+ pax_close_userland();
2282+ return ret;
2283+}
2284+
2285 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2286-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2287+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2288 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2289+
2290+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2291+{
2292+ unsigned long ret;
2293+ pax_open_userland();
2294+ ret = ___clear_user(addr, n);
2295+ pax_close_userland();
2296+ return ret;
2297+}
2298+
2299 #else
2300 #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
2301 #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
2302@@ -431,6 +505,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2303
2304 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2305 {
2306+ if ((long)n < 0)
2307+ return n;
2308+
2309 if (access_ok(VERIFY_READ, from, n))
2310 n = __copy_from_user(to, from, n);
2311 else /* security hole - plug it */
2312@@ -440,6 +517,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2313
2314 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2315 {
2316+ if ((long)n < 0)
2317+ return n;
2318+
2319 if (access_ok(VERIFY_WRITE, to, n))
2320 n = __copy_to_user(to, from, n);
2321 return n;
2322diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2323index 96ee092..37f1844 100644
2324--- a/arch/arm/include/uapi/asm/ptrace.h
2325+++ b/arch/arm/include/uapi/asm/ptrace.h
2326@@ -73,7 +73,7 @@
2327 * ARMv7 groups of PSR bits
2328 */
2329 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2330-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2331+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2332 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2333 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2334
2335diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2336index 60d3b73..e5a0f22 100644
2337--- a/arch/arm/kernel/armksyms.c
2338+++ b/arch/arm/kernel/armksyms.c
2339@@ -53,7 +53,7 @@ EXPORT_SYMBOL(arm_delay_ops);
2340
2341 /* networking */
2342 EXPORT_SYMBOL(csum_partial);
2343-EXPORT_SYMBOL(csum_partial_copy_from_user);
2344+EXPORT_SYMBOL(__csum_partial_copy_from_user);
2345 EXPORT_SYMBOL(csum_partial_copy_nocheck);
2346 EXPORT_SYMBOL(__csum_ipv6_magic);
2347
2348@@ -89,9 +89,9 @@ EXPORT_SYMBOL(__memzero);
2349 #ifdef CONFIG_MMU
2350 EXPORT_SYMBOL(copy_page);
2351
2352-EXPORT_SYMBOL(__copy_from_user);
2353-EXPORT_SYMBOL(__copy_to_user);
2354-EXPORT_SYMBOL(__clear_user);
2355+EXPORT_SYMBOL(___copy_from_user);
2356+EXPORT_SYMBOL(___copy_to_user);
2357+EXPORT_SYMBOL(___clear_user);
2358
2359 EXPORT_SYMBOL(__get_user_1);
2360 EXPORT_SYMBOL(__get_user_2);
2361diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2362index 582b405..50351b7 100644
2363--- a/arch/arm/kernel/entry-armv.S
2364+++ b/arch/arm/kernel/entry-armv.S
2365@@ -47,6 +47,87 @@
2366 9997:
2367 .endm
2368
2369+ .macro pax_enter_kernel
2370+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2371+ @ make aligned space for saved DACR
2372+ sub sp, sp, #8
2373+ @ save regs
2374+ stmdb sp!, {r1, r2}
2375+ @ read DACR from cpu_domain into r1
2376+ mov r2, sp
2377+ @ assume 8K pages, since we have to split the immediate in two
2378+ bic r2, r2, #(0x1fc0)
2379+ bic r2, r2, #(0x3f)
2380+ ldr r1, [r2, #TI_CPU_DOMAIN]
2381+ @ store old DACR on stack
2382+ str r1, [sp, #8]
2383+#ifdef CONFIG_PAX_KERNEXEC
2384+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2385+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2386+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2387+#endif
2388+#ifdef CONFIG_PAX_MEMORY_UDEREF
2389+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2390+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2391+#endif
2392+ @ write r1 to current_thread_info()->cpu_domain
2393+ str r1, [r2, #TI_CPU_DOMAIN]
2394+ @ write r1 to DACR
2395+ mcr p15, 0, r1, c3, c0, 0
2396+ @ instruction sync
2397+ instr_sync
2398+ @ restore regs
2399+ ldmia sp!, {r1, r2}
2400+#endif
2401+ .endm
2402+
2403+ .macro pax_open_userland
2404+#ifdef CONFIG_PAX_MEMORY_UDEREF
2405+ @ save regs
2406+ stmdb sp!, {r0, r1}
2407+ @ read DACR from cpu_domain into r1
2408+ mov r0, sp
2409+ @ assume 8K pages, since we have to split the immediate in two
2410+ bic r0, r0, #(0x1fc0)
2411+ bic r0, r0, #(0x3f)
2412+ ldr r1, [r0, #TI_CPU_DOMAIN]
2413+ @ set current DOMAIN_USER to DOMAIN_CLIENT
2414+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2415+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2416+ @ write r1 to current_thread_info()->cpu_domain
2417+ str r1, [r0, #TI_CPU_DOMAIN]
2418+ @ write r1 to DACR
2419+ mcr p15, 0, r1, c3, c0, 0
2420+ @ instruction sync
2421+ instr_sync
2422+ @ restore regs
2423+ ldmia sp!, {r0, r1}
2424+#endif
2425+ .endm
2426+
2427+ .macro pax_close_userland
2428+#ifdef CONFIG_PAX_MEMORY_UDEREF
2429+ @ save regs
2430+ stmdb sp!, {r0, r1}
2431+ @ read DACR from cpu_domain into r1
2432+ mov r0, sp
2433+ @ assume 8K pages, since we have to split the immediate in two
2434+ bic r0, r0, #(0x1fc0)
2435+ bic r0, r0, #(0x3f)
2436+ ldr r1, [r0, #TI_CPU_DOMAIN]
2437+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2438+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2439+ @ write r1 to current_thread_info()->cpu_domain
2440+ str r1, [r0, #TI_CPU_DOMAIN]
2441+ @ write r1 to DACR
2442+ mcr p15, 0, r1, c3, c0, 0
2443+ @ instruction sync
2444+ instr_sync
2445+ @ restore regs
2446+ ldmia sp!, {r0, r1}
2447+#endif
2448+ .endm
2449+
2450 .macro pabt_helper
2451 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2452 #ifdef MULTI_PABORT
2453@@ -89,11 +170,15 @@
2454 * Invalid mode handlers
2455 */
2456 .macro inv_entry, reason
2457+
2458+ pax_enter_kernel
2459+
2460 sub sp, sp, #S_FRAME_SIZE
2461 ARM( stmib sp, {r1 - lr} )
2462 THUMB( stmia sp, {r0 - r12} )
2463 THUMB( str sp, [sp, #S_SP] )
2464 THUMB( str lr, [sp, #S_LR] )
2465+
2466 mov r1, #\reason
2467 .endm
2468
2469@@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
2470 .macro svc_entry, stack_hole=0
2471 UNWIND(.fnstart )
2472 UNWIND(.save {r0 - pc} )
2473+
2474+ pax_enter_kernel
2475+
2476 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2477+
2478 #ifdef CONFIG_THUMB2_KERNEL
2479 SPFIX( str r0, [sp] ) @ temporarily saved
2480 SPFIX( mov r0, sp )
2481@@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
2482 ldmia r0, {r3 - r5}
2483 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2484 mov r6, #-1 @ "" "" "" ""
2485+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2486+ @ offset sp by 8 as done in pax_enter_kernel
2487+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2488+#else
2489 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2490+#endif
2491 SPFIX( addeq r2, r2, #4 )
2492 str r3, [sp, #-4]! @ save the "real" r0 copied
2493 @ from the exception stack
2494@@ -316,6 +410,9 @@ ENDPROC(__pabt_svc)
2495 .macro usr_entry
2496 UNWIND(.fnstart )
2497 UNWIND(.cantunwind ) @ don't unwind the user space
2498+
2499+ pax_enter_kernel_user
2500+
2501 sub sp, sp, #S_FRAME_SIZE
2502 ARM( stmib sp, {r1 - r12} )
2503 THUMB( stmia sp, {r0 - r12} )
2504@@ -414,7 +511,9 @@ __und_usr:
2505 tst r3, #PSR_T_BIT @ Thumb mode?
2506 bne __und_usr_thumb
2507 sub r4, r2, #4 @ ARM instr at LR - 4
2508+ pax_open_userland
2509 1: ldrt r0, [r4]
2510+ pax_close_userland
2511 #ifdef CONFIG_CPU_ENDIAN_BE8
2512 rev r0, r0 @ little endian instruction
2513 #endif
2514@@ -449,10 +548,14 @@ __und_usr_thumb:
2515 */
2516 .arch armv6t2
2517 #endif
2518+ pax_open_userland
2519 2: ldrht r5, [r4]
2520+ pax_close_userland
2521 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2522 blo __und_usr_fault_16 @ 16bit undefined instruction
2523+ pax_open_userland
2524 3: ldrht r0, [r2]
2525+ pax_close_userland
2526 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2527 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2528 orr r0, r0, r5, lsl #16
2529@@ -690,7 +793,7 @@ ENTRY(__switch_to)
2530 THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack
2531 THUMB( str sp, [ip], #4 )
2532 THUMB( str lr, [ip], #4 )
2533-#ifdef CONFIG_CPU_USE_DOMAINS
2534+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2535 ldr r6, [r2, #TI_CPU_DOMAIN]
2536 #endif
2537 set_tls r3, r4, r5
2538@@ -699,7 +802,7 @@ ENTRY(__switch_to)
2539 ldr r8, =__stack_chk_guard
2540 ldr r7, [r7, #TSK_STACK_CANARY]
2541 #endif
2542-#ifdef CONFIG_CPU_USE_DOMAINS
2543+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2544 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2545 #endif
2546 mov r5, r0
2547diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2548index bc5bc0a..d0998ca 100644
2549--- a/arch/arm/kernel/entry-common.S
2550+++ b/arch/arm/kernel/entry-common.S
2551@@ -10,18 +10,46 @@
2552
2553 #include <asm/unistd.h>
2554 #include <asm/ftrace.h>
2555+#include <asm/domain.h>
2556 #include <asm/unwind.h>
2557
2558+#include "entry-header.S"
2559+
2560 #ifdef CONFIG_NEED_RET_TO_USER
2561 #include <mach/entry-macro.S>
2562 #else
2563 .macro arch_ret_to_user, tmp1, tmp2
2564+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2565+ @ save regs
2566+ stmdb sp!, {r1, r2}
2567+ @ read DACR from cpu_domain into r1
2568+ mov r2, sp
2569+ @ assume 8K pages, since we have to split the immediate in two
2570+ bic r2, r2, #(0x1fc0)
2571+ bic r2, r2, #(0x3f)
2572+ ldr r1, [r2, #TI_CPU_DOMAIN]
2573+#ifdef CONFIG_PAX_KERNEXEC
2574+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2575+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2576+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2577+#endif
2578+#ifdef CONFIG_PAX_MEMORY_UDEREF
2579+ @ set current DOMAIN_USER to DOMAIN_UDEREF
2580+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2581+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2582+#endif
2583+ @ write r1 to current_thread_info()->cpu_domain
2584+ str r1, [r2, #TI_CPU_DOMAIN]
2585+ @ write r1 to DACR
2586+ mcr p15, 0, r1, c3, c0, 0
2587+ @ instruction sync
2588+ instr_sync
2589+ @ restore regs
2590+ ldmia sp!, {r1, r2}
2591+#endif
2592 .endm
2593 #endif
2594
2595-#include "entry-header.S"
2596-
2597-
2598 .align 5
2599 /*
2600 * This is the fast syscall return path. We do as little as
2601@@ -350,6 +378,7 @@ ENDPROC(ftrace_stub)
2602
2603 .align 5
2604 ENTRY(vector_swi)
2605+
2606 sub sp, sp, #S_FRAME_SIZE
2607 stmia sp, {r0 - r12} @ Calling r0 - r12
2608 ARM( add r8, sp, #S_PC )
2609@@ -399,6 +428,12 @@ ENTRY(vector_swi)
2610 ldr scno, [lr, #-4] @ get SWI instruction
2611 #endif
2612
2613+ /*
2614+ * do this here to avoid a performance hit of wrapping the code above
2615+ * that directly dereferences userland to parse the SWI instruction
2616+ */
2617+ pax_enter_kernel_user
2618+
2619 #ifdef CONFIG_ALIGNMENT_TRAP
2620 ldr ip, __cr_alignment
2621 ldr ip, [ip]
2622diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2623index 160f337..db67ee4 100644
2624--- a/arch/arm/kernel/entry-header.S
2625+++ b/arch/arm/kernel/entry-header.S
2626@@ -73,6 +73,60 @@
2627 msr cpsr_c, \rtemp @ switch back to the SVC mode
2628 .endm
2629
2630+ .macro pax_enter_kernel_user
2631+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2632+ @ save regs
2633+ stmdb sp!, {r0, r1}
2634+ @ read DACR from cpu_domain into r1
2635+ mov r0, sp
2636+ @ assume 8K pages, since we have to split the immediate in two
2637+ bic r0, r0, #(0x1fc0)
2638+ bic r0, r0, #(0x3f)
2639+ ldr r1, [r0, #TI_CPU_DOMAIN]
2640+#ifdef CONFIG_PAX_MEMORY_UDEREF
2641+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2642+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2643+#endif
2644+#ifdef CONFIG_PAX_KERNEXEC
2645+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2646+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2647+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2648+#endif
2649+ @ write r1 to current_thread_info()->cpu_domain
2650+ str r1, [r0, #TI_CPU_DOMAIN]
2651+ @ write r1 to DACR
2652+ mcr p15, 0, r1, c3, c0, 0
2653+ @ instruction sync
2654+ instr_sync
2655+ @ restore regs
2656+ ldmia sp!, {r0, r1}
2657+#endif
2658+ .endm
2659+
2660+ .macro pax_exit_kernel
2661+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2662+ @ save regs
2663+ stmdb sp!, {r0, r1}
2664+ @ read old DACR from stack into r1
2665+ ldr r1, [sp, #(8 + S_SP)]
2666+ sub r1, r1, #8
2667+ ldr r1, [r1]
2668+
2669+ @ write r1 to current_thread_info()->cpu_domain
2670+ mov r0, sp
2671+ @ assume 8K pages, since we have to split the immediate in two
2672+ bic r0, r0, #(0x1fc0)
2673+ bic r0, r0, #(0x3f)
2674+ str r1, [r0, #TI_CPU_DOMAIN]
2675+ @ write r1 to DACR
2676+ mcr p15, 0, r1, c3, c0, 0
2677+ @ instruction sync
2678+ instr_sync
2679+ @ restore regs
2680+ ldmia sp!, {r0, r1}
2681+#endif
2682+ .endm
2683+
2684 #ifndef CONFIG_THUMB2_KERNEL
2685 .macro svc_exit, rpsr, irq = 0
2686 .if \irq != 0
2687@@ -92,6 +146,9 @@
2688 blne trace_hardirqs_off
2689 #endif
2690 .endif
2691+
2692+ pax_exit_kernel
2693+
2694 msr spsr_cxsf, \rpsr
2695 #if defined(CONFIG_CPU_V6)
2696 ldr r0, [sp]
2697@@ -155,6 +212,9 @@
2698 blne trace_hardirqs_off
2699 #endif
2700 .endif
2701+
2702+ pax_exit_kernel
2703+
2704 ldr lr, [sp, #S_SP] @ top of the stack
2705 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2706 clrex @ clear the exclusive monitor
2707diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2708index 2adda11..7fbe958 100644
2709--- a/arch/arm/kernel/fiq.c
2710+++ b/arch/arm/kernel/fiq.c
2711@@ -82,7 +82,9 @@ void set_fiq_handler(void *start, unsigned int length)
2712 #if defined(CONFIG_CPU_USE_DOMAINS)
2713 memcpy((void *)0xffff001c, start, length);
2714 #else
2715+ pax_open_kernel();
2716 memcpy(vectors_page + 0x1c, start, length);
2717+ pax_close_kernel();
2718 #endif
2719 flush_icache_range(0xffff001c, 0xffff001c + length);
2720 if (!vectors_high())
2721diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2722index 8bac553..caee108 100644
2723--- a/arch/arm/kernel/head.S
2724+++ b/arch/arm/kernel/head.S
2725@@ -52,7 +52,9 @@
2726 .equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
2727
2728 .macro pgtbl, rd, phys
2729- add \rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE
2730+ mov \rd, #TEXT_OFFSET
2731+ sub \rd, #PG_DIR_SIZE
2732+ add \rd, \rd, \phys
2733 .endm
2734
2735 /*
2736@@ -434,7 +436,7 @@ __enable_mmu:
2737 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2738 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2739 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2740- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2741+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2742 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2743 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2744 #endif
2745diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
2746index 1fd749e..47adb08 100644
2747--- a/arch/arm/kernel/hw_breakpoint.c
2748+++ b/arch/arm/kernel/hw_breakpoint.c
2749@@ -1029,7 +1029,7 @@ static int __cpuinit dbg_reset_notify(struct notifier_block *self,
2750 return NOTIFY_OK;
2751 }
2752
2753-static struct notifier_block __cpuinitdata dbg_reset_nb = {
2754+static struct notifier_block dbg_reset_nb = {
2755 .notifier_call = dbg_reset_notify,
2756 };
2757
2758diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2759index 1e9be5d..03edbc2 100644
2760--- a/arch/arm/kernel/module.c
2761+++ b/arch/arm/kernel/module.c
2762@@ -37,12 +37,37 @@
2763 #endif
2764
2765 #ifdef CONFIG_MMU
2766-void *module_alloc(unsigned long size)
2767+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2768 {
2769+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2770+ return NULL;
2771 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2772- GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
2773+ GFP_KERNEL, prot, -1,
2774 __builtin_return_address(0));
2775 }
2776+
2777+void *module_alloc(unsigned long size)
2778+{
2779+
2780+#ifdef CONFIG_PAX_KERNEXEC
2781+ return __module_alloc(size, PAGE_KERNEL);
2782+#else
2783+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2784+#endif
2785+
2786+}
2787+
2788+#ifdef CONFIG_PAX_KERNEXEC
2789+void module_free_exec(struct module *mod, void *module_region)
2790+{
2791+ module_free(mod, module_region);
2792+}
2793+
2794+void *module_alloc_exec(unsigned long size)
2795+{
2796+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2797+}
2798+#endif
2799 #endif
2800
2801 int
2802diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2803index 07314af..c46655c 100644
2804--- a/arch/arm/kernel/patch.c
2805+++ b/arch/arm/kernel/patch.c
2806@@ -18,6 +18,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2807 bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
2808 int size;
2809
2810+ pax_open_kernel();
2811 if (thumb2 && __opcode_is_thumb16(insn)) {
2812 *(u16 *)addr = __opcode_to_mem_thumb16(insn);
2813 size = sizeof(u16);
2814@@ -39,6 +40,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2815 *(u32 *)addr = insn;
2816 size = sizeof(u32);
2817 }
2818+ pax_close_kernel();
2819
2820 flush_icache_range((uintptr_t)(addr),
2821 (uintptr_t)(addr) + size);
2822diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
2823index 1f2740e..b36e225 100644
2824--- a/arch/arm/kernel/perf_event_cpu.c
2825+++ b/arch/arm/kernel/perf_event_cpu.c
2826@@ -171,7 +171,7 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
2827 return NOTIFY_OK;
2828 }
2829
2830-static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = {
2831+static struct notifier_block cpu_pmu_hotplug_notifier = {
2832 .notifier_call = cpu_pmu_notify,
2833 };
2834
2835diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2836index 6e8931c..82ec6a5 100644
2837--- a/arch/arm/kernel/process.c
2838+++ b/arch/arm/kernel/process.c
2839@@ -28,7 +28,6 @@
2840 #include <linux/tick.h>
2841 #include <linux/utsname.h>
2842 #include <linux/uaccess.h>
2843-#include <linux/random.h>
2844 #include <linux/hw_breakpoint.h>
2845 #include <linux/cpuidle.h>
2846 #include <linux/leds.h>
2847@@ -223,6 +222,7 @@ void machine_power_off(void)
2848
2849 if (pm_power_off)
2850 pm_power_off();
2851+ BUG();
2852 }
2853
2854 /*
2855@@ -236,7 +236,7 @@ void machine_power_off(void)
2856 * executing pre-reset code, and using RAM that the primary CPU's code wishes
2857 * to use. Implementing such co-ordination would be essentially impossible.
2858 */
2859-void machine_restart(char *cmd)
2860+__noreturn void machine_restart(char *cmd)
2861 {
2862 smp_send_stop();
2863
2864@@ -258,8 +258,8 @@ void __show_regs(struct pt_regs *regs)
2865
2866 show_regs_print_info(KERN_DEFAULT);
2867
2868- print_symbol("PC is at %s\n", instruction_pointer(regs));
2869- print_symbol("LR is at %s\n", regs->ARM_lr);
2870+ printk("PC is at %pA\n", instruction_pointer(regs));
2871+ printk("LR is at %pA\n", regs->ARM_lr);
2872 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
2873 "sp : %08lx ip : %08lx fp : %08lx\n",
2874 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
2875@@ -426,12 +426,6 @@ unsigned long get_wchan(struct task_struct *p)
2876 return 0;
2877 }
2878
2879-unsigned long arch_randomize_brk(struct mm_struct *mm)
2880-{
2881- unsigned long range_end = mm->brk + 0x02000000;
2882- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2883-}
2884-
2885 #ifdef CONFIG_MMU
2886 /*
2887 * The vectors page is always readable from user space for the
2888@@ -441,12 +435,12 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
2889 static struct vm_area_struct gate_vma = {
2890 .vm_start = 0xffff0000,
2891 .vm_end = 0xffff0000 + PAGE_SIZE,
2892- .vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC,
2893+ .vm_flags = VM_NONE,
2894 };
2895
2896 static int __init gate_vma_init(void)
2897 {
2898- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
2899+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
2900 return 0;
2901 }
2902 arch_initcall(gate_vma_init);
2903diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
2904index 3653164..d83e55d 100644
2905--- a/arch/arm/kernel/psci.c
2906+++ b/arch/arm/kernel/psci.c
2907@@ -24,7 +24,7 @@
2908 #include <asm/opcodes-virt.h>
2909 #include <asm/psci.h>
2910
2911-struct psci_operations psci_ops;
2912+struct psci_operations psci_ops __read_only;
2913
2914 static int (*invoke_psci_fn)(u32, u32, u32, u32);
2915
2916diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
2917index 03deeff..741ce88 100644
2918--- a/arch/arm/kernel/ptrace.c
2919+++ b/arch/arm/kernel/ptrace.c
2920@@ -937,10 +937,19 @@ static int tracehook_report_syscall(struct pt_regs *regs,
2921 return current_thread_info()->syscall;
2922 }
2923
2924+#ifdef CONFIG_GRKERNSEC_SETXID
2925+extern void gr_delayed_cred_worker(void);
2926+#endif
2927+
2928 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
2929 {
2930 current_thread_info()->syscall = scno;
2931
2932+#ifdef CONFIG_GRKERNSEC_SETXID
2933+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
2934+ gr_delayed_cred_worker();
2935+#endif
2936+
2937 /* Do the secure computing check first; failures should be fast. */
2938 if (secure_computing(scno) == -1)
2939 return -1;
2940diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
2941index b4b1d39..efdc9be 100644
2942--- a/arch/arm/kernel/setup.c
2943+++ b/arch/arm/kernel/setup.c
2944@@ -97,21 +97,23 @@ EXPORT_SYMBOL(system_serial_high);
2945 unsigned int elf_hwcap __read_mostly;
2946 EXPORT_SYMBOL(elf_hwcap);
2947
2948+pteval_t __supported_pte_mask __read_only;
2949+pmdval_t __supported_pmd_mask __read_only;
2950
2951 #ifdef MULTI_CPU
2952-struct processor processor __read_mostly;
2953+struct processor processor;
2954 #endif
2955 #ifdef MULTI_TLB
2956-struct cpu_tlb_fns cpu_tlb __read_mostly;
2957+struct cpu_tlb_fns cpu_tlb __read_only;
2958 #endif
2959 #ifdef MULTI_USER
2960-struct cpu_user_fns cpu_user __read_mostly;
2961+struct cpu_user_fns cpu_user __read_only;
2962 #endif
2963 #ifdef MULTI_CACHE
2964-struct cpu_cache_fns cpu_cache __read_mostly;
2965+struct cpu_cache_fns cpu_cache __read_only;
2966 #endif
2967 #ifdef CONFIG_OUTER_CACHE
2968-struct outer_cache_fns outer_cache __read_mostly;
2969+struct outer_cache_fns outer_cache __read_only;
2970 EXPORT_SYMBOL(outer_cache);
2971 #endif
2972
2973@@ -236,9 +238,13 @@ static int __get_cpu_architecture(void)
2974 asm("mrc p15, 0, %0, c0, c1, 4"
2975 : "=r" (mmfr0));
2976 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
2977- (mmfr0 & 0x000000f0) >= 0x00000030)
2978+ (mmfr0 & 0x000000f0) >= 0x00000030) {
2979 cpu_arch = CPU_ARCH_ARMv7;
2980- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
2981+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
2982+ __supported_pte_mask |= L_PTE_PXN;
2983+ __supported_pmd_mask |= PMD_PXNTABLE;
2984+ }
2985+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
2986 (mmfr0 & 0x000000f0) == 0x00000020)
2987 cpu_arch = CPU_ARCH_ARMv6;
2988 else
2989@@ -479,7 +485,7 @@ static void __init setup_processor(void)
2990 __cpu_architecture = __get_cpu_architecture();
2991
2992 #ifdef MULTI_CPU
2993- processor = *list->proc;
2994+ memcpy((void *)&processor, list->proc, sizeof processor);
2995 #endif
2996 #ifdef MULTI_TLB
2997 cpu_tlb = *list->tlb;
2998diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
2999index 296786b..a8d4dd5 100644
3000--- a/arch/arm/kernel/signal.c
3001+++ b/arch/arm/kernel/signal.c
3002@@ -396,22 +396,14 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
3003 __put_user(sigreturn_codes[idx+1], rc+1))
3004 return 1;
3005
3006- if (cpsr & MODE32_BIT) {
3007- /*
3008- * 32-bit code can use the new high-page
3009- * signal return code support.
3010- */
3011- retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb;
3012- } else {
3013- /*
3014- * Ensure that the instruction cache sees
3015- * the return code written onto the stack.
3016- */
3017- flush_icache_range((unsigned long)rc,
3018- (unsigned long)(rc + 2));
3019+ /*
3020+ * Ensure that the instruction cache sees
3021+ * the return code written onto the stack.
3022+ */
3023+ flush_icache_range((unsigned long)rc,
3024+ (unsigned long)(rc + 2));
3025
3026- retcode = ((unsigned long)rc) + thumb;
3027- }
3028+ retcode = ((unsigned long)rc) + thumb;
3029 }
3030
3031 regs->ARM_r0 = map_sig(ksig->sig);
3032diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3033index 5919eb4..b5d6dfe 100644
3034--- a/arch/arm/kernel/smp.c
3035+++ b/arch/arm/kernel/smp.c
3036@@ -70,7 +70,7 @@ enum ipi_msg_type {
3037
3038 static DECLARE_COMPLETION(cpu_running);
3039
3040-static struct smp_operations smp_ops;
3041+static struct smp_operations smp_ops __read_only;
3042
3043 void __init smp_set_ops(struct smp_operations *ops)
3044 {
3045diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3046index 18b32e8..b0c8dca 100644
3047--- a/arch/arm/kernel/traps.c
3048+++ b/arch/arm/kernel/traps.c
3049@@ -57,7 +57,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3050 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3051 {
3052 #ifdef CONFIG_KALLSYMS
3053- printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3054+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3055 #else
3056 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3057 #endif
3058@@ -259,6 +259,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3059 static int die_owner = -1;
3060 static unsigned int die_nest_count;
3061
3062+extern void gr_handle_kernel_exploit(void);
3063+
3064 static unsigned long oops_begin(void)
3065 {
3066 int cpu;
3067@@ -301,6 +303,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3068 panic("Fatal exception in interrupt");
3069 if (panic_on_oops)
3070 panic("Fatal exception");
3071+
3072+ gr_handle_kernel_exploit();
3073+
3074 if (signr)
3075 do_exit(signr);
3076 }
3077@@ -594,7 +599,9 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
3078 * The user helper at 0xffff0fe0 must be used instead.
3079 * (see entry-armv.S for details)
3080 */
3081+ pax_open_kernel();
3082 *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
3083+ pax_close_kernel();
3084 }
3085 return 0;
3086
3087@@ -834,13 +841,10 @@ void __init early_trap_init(void *vectors_base)
3088 */
3089 kuser_get_tls_init(vectors);
3090
3091- /*
3092- * Copy signal return handlers into the vector page, and
3093- * set sigreturn to be a pointer to these.
3094- */
3095- memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
3096- sigreturn_codes, sizeof(sigreturn_codes));
3097-
3098 flush_icache_range(vectors, vectors + PAGE_SIZE);
3099- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3100+
3101+#ifndef CONFIG_PAX_MEMORY_UDEREF
3102+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3103+#endif
3104+
3105 }
3106diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3107index a871b8e..123b00a 100644
3108--- a/arch/arm/kernel/vmlinux.lds.S
3109+++ b/arch/arm/kernel/vmlinux.lds.S
3110@@ -8,7 +8,11 @@
3111 #include <asm/thread_info.h>
3112 #include <asm/memory.h>
3113 #include <asm/page.h>
3114-
3115+
3116+#ifdef CONFIG_PAX_KERNEXEC
3117+#include <asm/pgtable.h>
3118+#endif
3119+
3120 #define PROC_INFO \
3121 . = ALIGN(4); \
3122 VMLINUX_SYMBOL(__proc_info_begin) = .; \
3123@@ -94,6 +98,11 @@ SECTIONS
3124 _text = .;
3125 HEAD_TEXT
3126 }
3127+
3128+#ifdef CONFIG_PAX_KERNEXEC
3129+ . = ALIGN(1<<SECTION_SHIFT);
3130+#endif
3131+
3132 .text : { /* Real text segment */
3133 _stext = .; /* Text and read-only data */
3134 __exception_text_start = .;
3135@@ -116,6 +125,8 @@ SECTIONS
3136 ARM_CPU_KEEP(PROC_INFO)
3137 }
3138
3139+ _etext = .; /* End of text section */
3140+
3141 RO_DATA(PAGE_SIZE)
3142
3143 . = ALIGN(4);
3144@@ -146,7 +157,9 @@ SECTIONS
3145
3146 NOTES
3147
3148- _etext = .; /* End of text and rodata section */
3149+#ifdef CONFIG_PAX_KERNEXEC
3150+ . = ALIGN(1<<SECTION_SHIFT);
3151+#endif
3152
3153 #ifndef CONFIG_XIP_KERNEL
3154 . = ALIGN(PAGE_SIZE);
3155@@ -207,6 +220,11 @@ SECTIONS
3156 . = PAGE_OFFSET + TEXT_OFFSET;
3157 #else
3158 __init_end = .;
3159+
3160+#ifdef CONFIG_PAX_KERNEXEC
3161+ . = ALIGN(1<<SECTION_SHIFT);
3162+#endif
3163+
3164 . = ALIGN(THREAD_SIZE);
3165 __data_loc = .;
3166 #endif
3167diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3168index 14a0d98..7771a7d 100644
3169--- a/arch/arm/lib/clear_user.S
3170+++ b/arch/arm/lib/clear_user.S
3171@@ -12,14 +12,14 @@
3172
3173 .text
3174
3175-/* Prototype: int __clear_user(void *addr, size_t sz)
3176+/* Prototype: int ___clear_user(void *addr, size_t sz)
3177 * Purpose : clear some user memory
3178 * Params : addr - user memory address to clear
3179 * : sz - number of bytes to clear
3180 * Returns : number of bytes NOT cleared
3181 */
3182 ENTRY(__clear_user_std)
3183-WEAK(__clear_user)
3184+WEAK(___clear_user)
3185 stmfd sp!, {r1, lr}
3186 mov r2, #0
3187 cmp r1, #4
3188@@ -44,7 +44,7 @@ WEAK(__clear_user)
3189 USER( strnebt r2, [r0])
3190 mov r0, #0
3191 ldmfd sp!, {r1, pc}
3192-ENDPROC(__clear_user)
3193+ENDPROC(___clear_user)
3194 ENDPROC(__clear_user_std)
3195
3196 .pushsection .fixup,"ax"
3197diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3198index 66a477a..bee61d3 100644
3199--- a/arch/arm/lib/copy_from_user.S
3200+++ b/arch/arm/lib/copy_from_user.S
3201@@ -16,7 +16,7 @@
3202 /*
3203 * Prototype:
3204 *
3205- * size_t __copy_from_user(void *to, const void *from, size_t n)
3206+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
3207 *
3208 * Purpose:
3209 *
3210@@ -84,11 +84,11 @@
3211
3212 .text
3213
3214-ENTRY(__copy_from_user)
3215+ENTRY(___copy_from_user)
3216
3217 #include "copy_template.S"
3218
3219-ENDPROC(__copy_from_user)
3220+ENDPROC(___copy_from_user)
3221
3222 .pushsection .fixup,"ax"
3223 .align 0
3224diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3225index 6ee2f67..d1cce76 100644
3226--- a/arch/arm/lib/copy_page.S
3227+++ b/arch/arm/lib/copy_page.S
3228@@ -10,6 +10,7 @@
3229 * ASM optimised string functions
3230 */
3231 #include <linux/linkage.h>
3232+#include <linux/const.h>
3233 #include <asm/assembler.h>
3234 #include <asm/asm-offsets.h>
3235 #include <asm/cache.h>
3236diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3237index d066df6..df28194 100644
3238--- a/arch/arm/lib/copy_to_user.S
3239+++ b/arch/arm/lib/copy_to_user.S
3240@@ -16,7 +16,7 @@
3241 /*
3242 * Prototype:
3243 *
3244- * size_t __copy_to_user(void *to, const void *from, size_t n)
3245+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
3246 *
3247 * Purpose:
3248 *
3249@@ -88,11 +88,11 @@
3250 .text
3251
3252 ENTRY(__copy_to_user_std)
3253-WEAK(__copy_to_user)
3254+WEAK(___copy_to_user)
3255
3256 #include "copy_template.S"
3257
3258-ENDPROC(__copy_to_user)
3259+ENDPROC(___copy_to_user)
3260 ENDPROC(__copy_to_user_std)
3261
3262 .pushsection .fixup,"ax"
3263diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3264index 7d08b43..f7ca7ea 100644
3265--- a/arch/arm/lib/csumpartialcopyuser.S
3266+++ b/arch/arm/lib/csumpartialcopyuser.S
3267@@ -57,8 +57,8 @@
3268 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3269 */
3270
3271-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3272-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3273+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3274+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3275
3276 #include "csumpartialcopygeneric.S"
3277
3278diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3279index 64dbfa5..84a3fd9 100644
3280--- a/arch/arm/lib/delay.c
3281+++ b/arch/arm/lib/delay.c
3282@@ -28,7 +28,7 @@
3283 /*
3284 * Default to the loop-based delay implementation.
3285 */
3286-struct arm_delay_ops arm_delay_ops = {
3287+struct arm_delay_ops arm_delay_ops __read_only = {
3288 .delay = __loop_delay,
3289 .const_udelay = __loop_const_udelay,
3290 .udelay = __loop_udelay,
3291diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3292index 025f742..8432b08 100644
3293--- a/arch/arm/lib/uaccess_with_memcpy.c
3294+++ b/arch/arm/lib/uaccess_with_memcpy.c
3295@@ -104,7 +104,7 @@ out:
3296 }
3297
3298 unsigned long
3299-__copy_to_user(void __user *to, const void *from, unsigned long n)
3300+___copy_to_user(void __user *to, const void *from, unsigned long n)
3301 {
3302 /*
3303 * This test is stubbed out of the main function above to keep
3304diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
3305index f389228..592ef66 100644
3306--- a/arch/arm/mach-kirkwood/common.c
3307+++ b/arch/arm/mach-kirkwood/common.c
3308@@ -149,7 +149,16 @@ static void clk_gate_fn_disable(struct clk_hw *hw)
3309 clk_gate_ops.disable(hw);
3310 }
3311
3312-static struct clk_ops clk_gate_fn_ops;
3313+static int clk_gate_fn_is_enabled(struct clk_hw *hw)
3314+{
3315+ return clk_gate_ops.is_enabled(hw);
3316+}
3317+
3318+static struct clk_ops clk_gate_fn_ops = {
3319+ .enable = clk_gate_fn_enable,
3320+ .disable = clk_gate_fn_disable,
3321+ .is_enabled = clk_gate_fn_is_enabled,
3322+};
3323
3324 static struct clk __init *clk_register_gate_fn(struct device *dev,
3325 const char *name,
3326@@ -183,14 +192,6 @@ static struct clk __init *clk_register_gate_fn(struct device *dev,
3327 gate_fn->fn_en = fn_en;
3328 gate_fn->fn_dis = fn_dis;
3329
3330- /* ops is the gate ops, but with our enable/disable functions */
3331- if (clk_gate_fn_ops.enable != clk_gate_fn_enable ||
3332- clk_gate_fn_ops.disable != clk_gate_fn_disable) {
3333- clk_gate_fn_ops = clk_gate_ops;
3334- clk_gate_fn_ops.enable = clk_gate_fn_enable;
3335- clk_gate_fn_ops.disable = clk_gate_fn_disable;
3336- }
3337-
3338 clk = clk_register(dev, &gate_fn->gate.hw);
3339
3340 if (IS_ERR(clk))
3341diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3342index f6eeb87..cc90868 100644
3343--- a/arch/arm/mach-omap2/board-n8x0.c
3344+++ b/arch/arm/mach-omap2/board-n8x0.c
3345@@ -631,7 +631,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3346 }
3347 #endif
3348
3349-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3350+static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3351 .late_init = n8x0_menelaus_late_init,
3352 };
3353
3354diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
3355index 6c4da12..d9ca72d 100644
3356--- a/arch/arm/mach-omap2/gpmc.c
3357+++ b/arch/arm/mach-omap2/gpmc.c
3358@@ -147,7 +147,6 @@ struct omap3_gpmc_regs {
3359 };
3360
3361 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
3362-static struct irq_chip gpmc_irq_chip;
3363 static unsigned gpmc_irq_start;
3364
3365 static struct resource gpmc_mem_root;
3366@@ -711,6 +710,18 @@ static void gpmc_irq_noop(struct irq_data *data) { }
3367
3368 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
3369
3370+static struct irq_chip gpmc_irq_chip = {
3371+ .name = "gpmc",
3372+ .irq_startup = gpmc_irq_noop_ret,
3373+ .irq_enable = gpmc_irq_enable,
3374+ .irq_disable = gpmc_irq_disable,
3375+ .irq_shutdown = gpmc_irq_noop,
3376+ .irq_ack = gpmc_irq_noop,
3377+ .irq_mask = gpmc_irq_noop,
3378+ .irq_unmask = gpmc_irq_noop,
3379+
3380+};
3381+
3382 static int gpmc_setup_irq(void)
3383 {
3384 int i;
3385@@ -725,15 +736,6 @@ static int gpmc_setup_irq(void)
3386 return gpmc_irq_start;
3387 }
3388
3389- gpmc_irq_chip.name = "gpmc";
3390- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
3391- gpmc_irq_chip.irq_enable = gpmc_irq_enable;
3392- gpmc_irq_chip.irq_disable = gpmc_irq_disable;
3393- gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
3394- gpmc_irq_chip.irq_ack = gpmc_irq_noop;
3395- gpmc_irq_chip.irq_mask = gpmc_irq_noop;
3396- gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
3397-
3398 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
3399 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
3400
3401diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3402index f8bb3b9..831e7b8 100644
3403--- a/arch/arm/mach-omap2/omap-wakeupgen.c
3404+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3405@@ -339,7 +339,7 @@ static int __cpuinit irq_cpu_hotplug_notify(struct notifier_block *self,
3406 return NOTIFY_OK;
3407 }
3408
3409-static struct notifier_block __refdata irq_hotplug_notifier = {
3410+static struct notifier_block irq_hotplug_notifier = {
3411 .notifier_call = irq_cpu_hotplug_notify,
3412 };
3413
3414diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
3415index e6d2307..d057195 100644
3416--- a/arch/arm/mach-omap2/omap_device.c
3417+++ b/arch/arm/mach-omap2/omap_device.c
3418@@ -499,7 +499,7 @@ void omap_device_delete(struct omap_device *od)
3419 struct platform_device __init *omap_device_build(const char *pdev_name,
3420 int pdev_id,
3421 struct omap_hwmod *oh,
3422- void *pdata, int pdata_len)
3423+ const void *pdata, int pdata_len)
3424 {
3425 struct omap_hwmod *ohs[] = { oh };
3426
3427@@ -527,7 +527,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
3428 struct platform_device __init *omap_device_build_ss(const char *pdev_name,
3429 int pdev_id,
3430 struct omap_hwmod **ohs,
3431- int oh_cnt, void *pdata,
3432+ int oh_cnt, const void *pdata,
3433 int pdata_len)
3434 {
3435 int ret = -ENOMEM;
3436diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
3437index 044c31d..2ee0861 100644
3438--- a/arch/arm/mach-omap2/omap_device.h
3439+++ b/arch/arm/mach-omap2/omap_device.h
3440@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
3441 /* Core code interface */
3442
3443 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
3444- struct omap_hwmod *oh, void *pdata,
3445+ struct omap_hwmod *oh, const void *pdata,
3446 int pdata_len);
3447
3448 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
3449 struct omap_hwmod **oh, int oh_cnt,
3450- void *pdata, int pdata_len);
3451+ const void *pdata, int pdata_len);
3452
3453 struct omap_device *omap_device_alloc(struct platform_device *pdev,
3454 struct omap_hwmod **ohs, int oh_cnt);
3455diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3456index 7341eff..fd75e34 100644
3457--- a/arch/arm/mach-omap2/omap_hwmod.c
3458+++ b/arch/arm/mach-omap2/omap_hwmod.c
3459@@ -194,10 +194,10 @@ struct omap_hwmod_soc_ops {
3460 int (*init_clkdm)(struct omap_hwmod *oh);
3461 void (*update_context_lost)(struct omap_hwmod *oh);
3462 int (*get_context_lost)(struct omap_hwmod *oh);
3463-};
3464+} __no_const;
3465
3466 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3467-static struct omap_hwmod_soc_ops soc_ops;
3468+static struct omap_hwmod_soc_ops soc_ops __read_only;
3469
3470 /* omap_hwmod_list contains all registered struct omap_hwmods */
3471 static LIST_HEAD(omap_hwmod_list);
3472diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
3473index d15c7bb..b2d1f0c 100644
3474--- a/arch/arm/mach-omap2/wd_timer.c
3475+++ b/arch/arm/mach-omap2/wd_timer.c
3476@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
3477 struct omap_hwmod *oh;
3478 char *oh_name = "wd_timer2";
3479 char *dev_name = "omap_wdt";
3480- struct omap_wd_timer_platform_data pdata;
3481+ static struct omap_wd_timer_platform_data pdata = {
3482+ .read_reset_sources = prm_read_reset_sources
3483+ };
3484
3485 if (!cpu_class_is_omap2() || of_have_populated_dt())
3486 return 0;
3487@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
3488 return -EINVAL;
3489 }
3490
3491- pdata.read_reset_sources = prm_read_reset_sources;
3492-
3493 pdev = omap_device_build(dev_name, id, oh, &pdata,
3494 sizeof(struct omap_wd_timer_platform_data));
3495 WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
3496diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
3497index 0cdba8d..297993e 100644
3498--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
3499+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
3500@@ -181,7 +181,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
3501 bool entered_lp2 = false;
3502
3503 if (tegra_pending_sgi())
3504- ACCESS_ONCE(abort_flag) = true;
3505+ ACCESS_ONCE_RW(abort_flag) = true;
3506
3507 cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
3508
3509diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
3510index cad3ca86..1d79e0f 100644
3511--- a/arch/arm/mach-ux500/setup.h
3512+++ b/arch/arm/mach-ux500/setup.h
3513@@ -37,13 +37,6 @@ extern void ux500_timer_init(void);
3514 .type = MT_DEVICE, \
3515 }
3516
3517-#define __MEM_DEV_DESC(x, sz) { \
3518- .virtual = IO_ADDRESS(x), \
3519- .pfn = __phys_to_pfn(x), \
3520- .length = sz, \
3521- .type = MT_MEMORY, \
3522-}
3523-
3524 extern struct smp_operations ux500_smp_ops;
3525 extern void ux500_cpu_die(unsigned int cpu);
3526
3527diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
3528index 35955b5..b475042 100644
3529--- a/arch/arm/mm/Kconfig
3530+++ b/arch/arm/mm/Kconfig
3531@@ -432,7 +432,7 @@ config CPU_32v5
3532
3533 config CPU_32v6
3534 bool
3535- select CPU_USE_DOMAINS if CPU_V6 && MMU
3536+ select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
3537 select TLS_REG_EMUL if !CPU_32v6K && !MMU
3538
3539 config CPU_32v6K
3540@@ -581,6 +581,7 @@ config CPU_CP15_MPU
3541
3542 config CPU_USE_DOMAINS
3543 bool
3544+ depends on !ARM_LPAE && !PAX_KERNEXEC
3545 help
3546 This option enables or disables the use of domain switching
3547 via the set_fs() function.
3548diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
3549index 6f4585b..7b6f52b 100644
3550--- a/arch/arm/mm/alignment.c
3551+++ b/arch/arm/mm/alignment.c
3552@@ -211,10 +211,12 @@ union offset_union {
3553 #define __get16_unaligned_check(ins,val,addr) \
3554 do { \
3555 unsigned int err = 0, v, a = addr; \
3556+ pax_open_userland(); \
3557 __get8_unaligned_check(ins,v,a,err); \
3558 val = v << ((BE) ? 8 : 0); \
3559 __get8_unaligned_check(ins,v,a,err); \
3560 val |= v << ((BE) ? 0 : 8); \
3561+ pax_close_userland(); \
3562 if (err) \
3563 goto fault; \
3564 } while (0)
3565@@ -228,6 +230,7 @@ union offset_union {
3566 #define __get32_unaligned_check(ins,val,addr) \
3567 do { \
3568 unsigned int err = 0, v, a = addr; \
3569+ pax_open_userland(); \
3570 __get8_unaligned_check(ins,v,a,err); \
3571 val = v << ((BE) ? 24 : 0); \
3572 __get8_unaligned_check(ins,v,a,err); \
3573@@ -236,6 +239,7 @@ union offset_union {
3574 val |= v << ((BE) ? 8 : 16); \
3575 __get8_unaligned_check(ins,v,a,err); \
3576 val |= v << ((BE) ? 0 : 24); \
3577+ pax_close_userland(); \
3578 if (err) \
3579 goto fault; \
3580 } while (0)
3581@@ -249,6 +253,7 @@ union offset_union {
3582 #define __put16_unaligned_check(ins,val,addr) \
3583 do { \
3584 unsigned int err = 0, v = val, a = addr; \
3585+ pax_open_userland(); \
3586 __asm__( FIRST_BYTE_16 \
3587 ARM( "1: "ins" %1, [%2], #1\n" ) \
3588 THUMB( "1: "ins" %1, [%2]\n" ) \
3589@@ -268,6 +273,7 @@ union offset_union {
3590 " .popsection\n" \
3591 : "=r" (err), "=&r" (v), "=&r" (a) \
3592 : "0" (err), "1" (v), "2" (a)); \
3593+ pax_close_userland(); \
3594 if (err) \
3595 goto fault; \
3596 } while (0)
3597@@ -281,6 +287,7 @@ union offset_union {
3598 #define __put32_unaligned_check(ins,val,addr) \
3599 do { \
3600 unsigned int err = 0, v = val, a = addr; \
3601+ pax_open_userland(); \
3602 __asm__( FIRST_BYTE_32 \
3603 ARM( "1: "ins" %1, [%2], #1\n" ) \
3604 THUMB( "1: "ins" %1, [%2]\n" ) \
3605@@ -310,6 +317,7 @@ union offset_union {
3606 " .popsection\n" \
3607 : "=r" (err), "=&r" (v), "=&r" (a) \
3608 : "0" (err), "1" (v), "2" (a)); \
3609+ pax_close_userland(); \
3610 if (err) \
3611 goto fault; \
3612 } while (0)
3613diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
3614index 5dbf13f..1a60561 100644
3615--- a/arch/arm/mm/fault.c
3616+++ b/arch/arm/mm/fault.c
3617@@ -25,6 +25,7 @@
3618 #include <asm/system_misc.h>
3619 #include <asm/system_info.h>
3620 #include <asm/tlbflush.h>
3621+#include <asm/sections.h>
3622
3623 #include "fault.h"
3624
3625@@ -138,6 +139,20 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
3626 if (fixup_exception(regs))
3627 return;
3628
3629+#ifdef CONFIG_PAX_KERNEXEC
3630+ if ((fsr & FSR_WRITE) &&
3631+ (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
3632+ (MODULES_VADDR <= addr && addr < MODULES_END)))
3633+ {
3634+ if (current->signal->curr_ip)
3635+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3636+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3637+ else
3638+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
3639+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
3640+ }
3641+#endif
3642+
3643 /*
3644 * No handler, we'll have to terminate things with extreme prejudice.
3645 */
3646@@ -174,6 +189,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
3647 }
3648 #endif
3649
3650+#ifdef CONFIG_PAX_PAGEEXEC
3651+ if (fsr & FSR_LNX_PF) {
3652+ pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
3653+ do_group_exit(SIGKILL);
3654+ }
3655+#endif
3656+
3657 tsk->thread.address = addr;
3658 tsk->thread.error_code = fsr;
3659 tsk->thread.trap_no = 14;
3660@@ -398,6 +420,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3661 }
3662 #endif /* CONFIG_MMU */
3663
3664+#ifdef CONFIG_PAX_PAGEEXEC
3665+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
3666+{
3667+ long i;
3668+
3669+ printk(KERN_ERR "PAX: bytes at PC: ");
3670+ for (i = 0; i < 20; i++) {
3671+ unsigned char c;
3672+ if (get_user(c, (__force unsigned char __user *)pc+i))
3673+ printk(KERN_CONT "?? ");
3674+ else
3675+ printk(KERN_CONT "%02x ", c);
3676+ }
3677+ printk("\n");
3678+
3679+ printk(KERN_ERR "PAX: bytes at SP-4: ");
3680+ for (i = -1; i < 20; i++) {
3681+ unsigned long c;
3682+ if (get_user(c, (__force unsigned long __user *)sp+i))
3683+ printk(KERN_CONT "???????? ");
3684+ else
3685+ printk(KERN_CONT "%08lx ", c);
3686+ }
3687+ printk("\n");
3688+}
3689+#endif
3690+
3691 /*
3692 * First Level Translation Fault Handler
3693 *
3694@@ -543,9 +592,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
3695 const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
3696 struct siginfo info;
3697
3698+#ifdef CONFIG_PAX_MEMORY_UDEREF
3699+ if (addr < TASK_SIZE && is_domain_fault(fsr)) {
3700+ if (current->signal->curr_ip)
3701+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3702+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3703+ else
3704+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
3705+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
3706+ goto die;
3707+ }
3708+#endif
3709+
3710 if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
3711 return;
3712
3713+die:
3714 printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
3715 inf->name, fsr, addr);
3716
3717@@ -575,9 +637,49 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
3718 const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
3719 struct siginfo info;
3720
3721+ if (user_mode(regs)) {
3722+ if (addr == 0xffff0fe0UL) {
3723+ /*
3724+ * PaX: __kuser_get_tls emulation
3725+ */
3726+ regs->ARM_r0 = current_thread_info()->tp_value;
3727+ regs->ARM_pc = regs->ARM_lr;
3728+ return;
3729+ }
3730+ }
3731+
3732+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3733+ else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
3734+ if (current->signal->curr_ip)
3735+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
3736+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
3737+ addr >= TASK_SIZE ? "non-executable kernel" : "userland", addr);
3738+ else
3739+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
3740+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
3741+ addr >= TASK_SIZE ? "non-executable kernel" : "userland", addr);
3742+ goto die;
3743+ }
3744+#endif
3745+
3746+#ifdef CONFIG_PAX_REFCOUNT
3747+ if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
3748+ unsigned int bkpt;
3749+
3750+ if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
3751+ current->thread.error_code = ifsr;
3752+ current->thread.trap_no = 0;
3753+ pax_report_refcount_overflow(regs);
3754+ fixup_exception(regs);
3755+ return;
3756+ }
3757+ }
3758+#endif
3759+
3760 if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
3761 return;
3762
3763+die:
3764 printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
3765 inf->name, ifsr, addr);
3766
3767diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
3768index cf08bdf..772656c 100644
3769--- a/arch/arm/mm/fault.h
3770+++ b/arch/arm/mm/fault.h
3771@@ -3,6 +3,7 @@
3772
3773 /*
3774 * Fault status register encodings. We steal bit 31 for our own purposes.
3775+ * Set when the FSR value is from an instruction fault.
3776 */
3777 #define FSR_LNX_PF (1 << 31)
3778 #define FSR_WRITE (1 << 11)
3779@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
3780 }
3781 #endif
3782
3783+/* valid for LPAE and !LPAE */
3784+static inline int is_xn_fault(unsigned int fsr)
3785+{
3786+ return ((fsr_fs(fsr) & 0x3c) == 0xc);
3787+}
3788+
3789+static inline int is_domain_fault(unsigned int fsr)
3790+{
3791+ return ((fsr_fs(fsr) & 0xD) == 0x9);
3792+}
3793+
3794 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
3795 unsigned long search_exception_table(unsigned long addr);
3796
3797diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
3798index 9a5cdc0..a1182cf 100644
3799--- a/arch/arm/mm/init.c
3800+++ b/arch/arm/mm/init.c
3801@@ -30,6 +30,8 @@
3802 #include <asm/setup.h>
3803 #include <asm/tlb.h>
3804 #include <asm/fixmap.h>
3805+#include <asm/system_info.h>
3806+#include <asm/cp15.h>
3807
3808 #include <asm/mach/arch.h>
3809 #include <asm/mach/map.h>
3810@@ -726,7 +728,46 @@ void free_initmem(void)
3811 {
3812 #ifdef CONFIG_HAVE_TCM
3813 extern char __tcm_start, __tcm_end;
3814+#endif
3815
3816+#ifdef CONFIG_PAX_KERNEXEC
3817+ unsigned long addr;
3818+ pgd_t *pgd;
3819+ pud_t *pud;
3820+ pmd_t *pmd;
3821+ int cpu_arch = cpu_architecture();
3822+ unsigned int cr = get_cr();
3823+
3824+ if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
3825+ /* make pages tables, etc before .text NX */
3826+ for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
3827+ pgd = pgd_offset_k(addr);
3828+ pud = pud_offset(pgd, addr);
3829+ pmd = pmd_offset(pud, addr);
3830+ __section_update(pmd, addr, PMD_SECT_XN);
3831+ }
3832+ /* make init NX */
3833+ for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
3834+ pgd = pgd_offset_k(addr);
3835+ pud = pud_offset(pgd, addr);
3836+ pmd = pmd_offset(pud, addr);
3837+ __section_update(pmd, addr, PMD_SECT_XN);
3838+ }
3839+ /* make kernel code/rodata RX */
3840+ for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
3841+ pgd = pgd_offset_k(addr);
3842+ pud = pud_offset(pgd, addr);
3843+ pmd = pmd_offset(pud, addr);
3844+#ifdef CONFIG_ARM_LPAE
3845+ __section_update(pmd, addr, PMD_SECT_RDONLY);
3846+#else
3847+ __section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
3848+#endif
3849+ }
3850+ }
3851+#endif
3852+
3853+#ifdef CONFIG_HAVE_TCM
3854 poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
3855 free_reserved_area(&__tcm_start, &__tcm_end, 0, "TCM link");
3856 #endif
3857diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
3858index 04d9006..c547d85 100644
3859--- a/arch/arm/mm/ioremap.c
3860+++ b/arch/arm/mm/ioremap.c
3861@@ -392,9 +392,9 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
3862 unsigned int mtype;
3863
3864 if (cached)
3865- mtype = MT_MEMORY;
3866+ mtype = MT_MEMORY_RX;
3867 else
3868- mtype = MT_MEMORY_NONCACHED;
3869+ mtype = MT_MEMORY_NONCACHED_RX;
3870
3871 return __arm_ioremap_caller(phys_addr, size, mtype,
3872 __builtin_return_address(0));
3873diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
3874index 10062ce..8695745 100644
3875--- a/arch/arm/mm/mmap.c
3876+++ b/arch/arm/mm/mmap.c
3877@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
3878 struct vm_area_struct *vma;
3879 int do_align = 0;
3880 int aliasing = cache_is_vipt_aliasing();
3881+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
3882 struct vm_unmapped_area_info info;
3883
3884 /*
3885@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
3886 if (len > TASK_SIZE)
3887 return -ENOMEM;
3888
3889+#ifdef CONFIG_PAX_RANDMMAP
3890+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3891+#endif
3892+
3893 if (addr) {
3894 if (do_align)
3895 addr = COLOUR_ALIGN(addr, pgoff);
3896@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
3897 addr = PAGE_ALIGN(addr);
3898
3899 vma = find_vma(mm, addr);
3900- if (TASK_SIZE - len >= addr &&
3901- (!vma || addr + len <= vma->vm_start))
3902+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
3903 return addr;
3904 }
3905
3906@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
3907 info.high_limit = TASK_SIZE;
3908 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
3909 info.align_offset = pgoff << PAGE_SHIFT;
3910+ info.threadstack_offset = offset;
3911 return vm_unmapped_area(&info);
3912 }
3913
3914@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3915 unsigned long addr = addr0;
3916 int do_align = 0;
3917 int aliasing = cache_is_vipt_aliasing();
3918+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
3919 struct vm_unmapped_area_info info;
3920
3921 /*
3922@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3923 return addr;
3924 }
3925
3926+#ifdef CONFIG_PAX_RANDMMAP
3927+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3928+#endif
3929+
3930 /* requesting a specific address */
3931 if (addr) {
3932 if (do_align)
3933@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3934 else
3935 addr = PAGE_ALIGN(addr);
3936 vma = find_vma(mm, addr);
3937- if (TASK_SIZE - len >= addr &&
3938- (!vma || addr + len <= vma->vm_start))
3939+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
3940 return addr;
3941 }
3942
3943@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
3944 info.high_limit = mm->mmap_base;
3945 info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
3946 info.align_offset = pgoff << PAGE_SHIFT;
3947+ info.threadstack_offset = offset;
3948 addr = vm_unmapped_area(&info);
3949
3950 /*
3951@@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3952 {
3953 unsigned long random_factor = 0UL;
3954
3955+#ifdef CONFIG_PAX_RANDMMAP
3956+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
3957+#endif
3958+
3959 /* 8 bits of randomness in 20 address space bits */
3960 if ((current->flags & PF_RANDOMIZE) &&
3961 !(current->personality & ADDR_NO_RANDOMIZE))
3962@@ -180,10 +194,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
3963
3964 if (mmap_is_legacy()) {
3965 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
3966+
3967+#ifdef CONFIG_PAX_RANDMMAP
3968+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3969+ mm->mmap_base += mm->delta_mmap;
3970+#endif
3971+
3972 mm->get_unmapped_area = arch_get_unmapped_area;
3973 mm->unmap_area = arch_unmap_area;
3974 } else {
3975 mm->mmap_base = mmap_base(random_factor);
3976+
3977+#ifdef CONFIG_PAX_RANDMMAP
3978+ if (mm->pax_flags & MF_PAX_RANDMMAP)
3979+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
3980+#endif
3981+
3982 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
3983 mm->unmap_area = arch_unmap_area_topdown;
3984 }
3985diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
3986index 4d409e6..f375351 100644
3987--- a/arch/arm/mm/mmu.c
3988+++ b/arch/arm/mm/mmu.c
3989@@ -36,6 +36,22 @@
3990 #include "mm.h"
3991 #include "tcm.h"
3992
3993+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
3994+void modify_domain(unsigned int dom, unsigned int type)
3995+{
3996+ struct thread_info *thread = current_thread_info();
3997+ unsigned int domain = thread->cpu_domain;
3998+ /*
3999+ * DOMAIN_MANAGER might be defined to some other value,
4000+ * use the arch-defined constant
4001+ */
4002+ domain &= ~domain_val(dom, 3);
4003+ thread->cpu_domain = domain | domain_val(dom, type);
4004+ set_domain(thread->cpu_domain);
4005+}
4006+EXPORT_SYMBOL(modify_domain);
4007+#endif
4008+
4009 /*
4010 * empty_zero_page is a special page that is used for
4011 * zero-initialized data and COW.
4012@@ -228,10 +244,18 @@ __setup("noalign", noalign_setup);
4013
4014 #endif /* ifdef CONFIG_CPU_CP15 / else */
4015
4016-#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
4017+#define PROT_PTE_DEVICE L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY
4018 #define PROT_SECT_DEVICE PMD_TYPE_SECT|PMD_SECT_AP_WRITE
4019
4020-static struct mem_type mem_types[] = {
4021+#ifdef CONFIG_PAX_KERNEXEC
4022+#define L_PTE_KERNEXEC L_PTE_RDONLY
4023+#define PMD_SECT_KERNEXEC PMD_SECT_RDONLY
4024+#else
4025+#define L_PTE_KERNEXEC L_PTE_DIRTY
4026+#define PMD_SECT_KERNEXEC PMD_SECT_AP_WRITE
4027+#endif
4028+
4029+static struct mem_type mem_types[] __read_only = {
4030 [MT_DEVICE] = { /* Strongly ordered / ARMv6 shared device */
4031 .prot_pte = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
4032 L_PTE_SHARED,
4033@@ -260,16 +284,16 @@ static struct mem_type mem_types[] = {
4034 [MT_UNCACHED] = {
4035 .prot_pte = PROT_PTE_DEVICE,
4036 .prot_l1 = PMD_TYPE_TABLE,
4037- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4038+ .prot_sect = PROT_SECT_DEVICE,
4039 .domain = DOMAIN_IO,
4040 },
4041 [MT_CACHECLEAN] = {
4042- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4043+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4044 .domain = DOMAIN_KERNEL,
4045 },
4046 #ifndef CONFIG_ARM_LPAE
4047 [MT_MINICLEAN] = {
4048- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
4049+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_RDONLY,
4050 .domain = DOMAIN_KERNEL,
4051 },
4052 #endif
4053@@ -277,36 +301,65 @@ static struct mem_type mem_types[] = {
4054 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4055 L_PTE_RDONLY,
4056 .prot_l1 = PMD_TYPE_TABLE,
4057- .domain = DOMAIN_USER,
4058+ .domain = DOMAIN_VECTORS,
4059 },
4060 [MT_HIGH_VECTORS] = {
4061- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4062- L_PTE_USER | L_PTE_RDONLY,
4063+ /* we always want the vector page to be noaccess for userland on archs with
4064+ XN where we can enforce some reasonable measure of security
4065+ therefore, when kernexec is disabled, instead of L_PTE_USER | L_PTE_RDONLY
4066+ which turns into supervisor rwx, userland rx, we instead omit that entirely,
4067+ leaving it as supervisor rwx only
4068+ */
4069+#ifdef CONFIG_PAX_KERNEXEC
4070+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_RDONLY,
4071+#elif __LINUX_ARM_ARCH__ >= 6
4072+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4073+#else
4074+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_USER | L_PTE_RDONLY,
4075+#endif
4076 .prot_l1 = PMD_TYPE_TABLE,
4077- .domain = DOMAIN_USER,
4078+ .domain = DOMAIN_VECTORS,
4079 },
4080- [MT_MEMORY] = {
4081+ [MT_MEMORY_RWX] = {
4082 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4083 .prot_l1 = PMD_TYPE_TABLE,
4084 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4085 .domain = DOMAIN_KERNEL,
4086 },
4087+ [MT_MEMORY_RW] = {
4088+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4089+ .prot_l1 = PMD_TYPE_TABLE,
4090+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4091+ .domain = DOMAIN_KERNEL,
4092+ },
4093+ [MT_MEMORY_RX] = {
4094+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
4095+ .prot_l1 = PMD_TYPE_TABLE,
4096+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4097+ .domain = DOMAIN_KERNEL,
4098+ },
4099 [MT_ROM] = {
4100- .prot_sect = PMD_TYPE_SECT,
4101+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4102 .domain = DOMAIN_KERNEL,
4103 },
4104- [MT_MEMORY_NONCACHED] = {
4105+ [MT_MEMORY_NONCACHED_RW] = {
4106 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4107 L_PTE_MT_BUFFERABLE,
4108 .prot_l1 = PMD_TYPE_TABLE,
4109 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
4110 .domain = DOMAIN_KERNEL,
4111 },
4112+ [MT_MEMORY_NONCACHED_RX] = {
4113+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
4114+ L_PTE_MT_BUFFERABLE,
4115+ .prot_l1 = PMD_TYPE_TABLE,
4116+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
4117+ .domain = DOMAIN_KERNEL,
4118+ },
4119 [MT_MEMORY_DTCM] = {
4120- .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4121- L_PTE_XN,
4122+ .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
4123 .prot_l1 = PMD_TYPE_TABLE,
4124- .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
4125+ .prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
4126 .domain = DOMAIN_KERNEL,
4127 },
4128 [MT_MEMORY_ITCM] = {
4129@@ -316,10 +369,10 @@ static struct mem_type mem_types[] = {
4130 },
4131 [MT_MEMORY_SO] = {
4132 .prot_pte = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
4133- L_PTE_MT_UNCACHED | L_PTE_XN,
4134+ L_PTE_MT_UNCACHED,
4135 .prot_l1 = PMD_TYPE_TABLE,
4136 .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
4137- PMD_SECT_UNCACHED | PMD_SECT_XN,
4138+ PMD_SECT_UNCACHED,
4139 .domain = DOMAIN_KERNEL,
4140 },
4141 [MT_MEMORY_DMA_READY] = {
4142@@ -405,9 +458,35 @@ static void __init build_mem_type_table(void)
4143 * to prevent speculative instruction fetches.
4144 */
4145 mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
4146+ mem_types[MT_DEVICE].prot_pte |= L_PTE_XN;
4147 mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
4148+ mem_types[MT_DEVICE_NONSHARED].prot_pte |= L_PTE_XN;
4149 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
4150+ mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_XN;
4151 mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
4152+ mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_XN;
4153+
4154+ /* Mark other regions on ARMv6+ as execute-never */
4155+
4156+#ifdef CONFIG_PAX_KERNEXEC
4157+ mem_types[MT_UNCACHED].prot_sect |= PMD_SECT_XN;
4158+ mem_types[MT_UNCACHED].prot_pte |= L_PTE_XN;
4159+ mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_XN;
4160+ mem_types[MT_CACHECLEAN].prot_pte |= L_PTE_XN;
4161+#ifndef CONFIG_ARM_LPAE
4162+ mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_XN;
4163+ mem_types[MT_MINICLEAN].prot_pte |= L_PTE_XN;
4164+#endif
4165+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
4166+ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_XN;
4167+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_XN;
4168+ mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= PMD_SECT_XN;
4169+ mem_types[MT_MEMORY_DTCM].prot_sect |= PMD_SECT_XN;
4170+ mem_types[MT_MEMORY_DTCM].prot_pte |= L_PTE_XN;
4171+#endif
4172+
4173+ mem_types[MT_MEMORY_SO].prot_sect |= PMD_SECT_XN;
4174+ mem_types[MT_MEMORY_SO].prot_pte |= L_PTE_XN;
4175 }
4176 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4177 /*
4178@@ -468,6 +547,9 @@ static void __init build_mem_type_table(void)
4179 * from SVC mode and no access from userspace.
4180 */
4181 mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4182+#ifdef CONFIG_PAX_KERNEXEC
4183+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4184+#endif
4185 mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4186 mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
4187 #endif
4188@@ -485,11 +567,17 @@ static void __init build_mem_type_table(void)
4189 mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
4190 mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
4191 mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
4192- mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
4193- mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
4194+ mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
4195+ mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
4196+ mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
4197+ mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
4198+ mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
4199+ mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
4200 mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
4201- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
4202- mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
4203+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_S;
4204+ mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= L_PTE_SHARED;
4205+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_S;
4206+ mem_types[MT_MEMORY_NONCACHED_RX].prot_pte |= L_PTE_SHARED;
4207 }
4208 }
4209
4210@@ -500,15 +588,20 @@ static void __init build_mem_type_table(void)
4211 if (cpu_arch >= CPU_ARCH_ARMv6) {
4212 if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
4213 /* Non-cacheable Normal is XCB = 001 */
4214- mem_types[MT_MEMORY_NONCACHED].prot_sect |=
4215+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
4216+ PMD_SECT_BUFFERED;
4217+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
4218 PMD_SECT_BUFFERED;
4219 } else {
4220 /* For both ARMv6 and non-TEX-remapping ARMv7 */
4221- mem_types[MT_MEMORY_NONCACHED].prot_sect |=
4222+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
4223+ PMD_SECT_TEX(1);
4224+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
4225 PMD_SECT_TEX(1);
4226 }
4227 } else {
4228- mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
4229+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_BUFFERABLE;
4230+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_BUFFERABLE;
4231 }
4232
4233 #ifdef CONFIG_ARM_LPAE
4234@@ -524,6 +617,8 @@ static void __init build_mem_type_table(void)
4235 vecs_pgprot |= PTE_EXT_AF;
4236 #endif
4237
4238+ user_pgprot |= __supported_pte_mask;
4239+
4240 for (i = 0; i < 16; i++) {
4241 pteval_t v = pgprot_val(protection_map[i]);
4242 protection_map[i] = __pgprot(v | user_pgprot);
4243@@ -541,10 +636,15 @@ static void __init build_mem_type_table(void)
4244
4245 mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
4246 mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
4247- mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
4248- mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
4249+ mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
4250+ mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
4251+ mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
4252+ mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
4253+ mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
4254+ mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
4255 mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
4256- mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
4257+ mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= ecc_mask;
4258+ mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= ecc_mask;
4259 mem_types[MT_ROM].prot_sect |= cp->pmd;
4260
4261 switch (cp->pmd) {
4262@@ -1166,18 +1266,15 @@ void __init arm_mm_memblock_reserve(void)
4263 * called function. This means you can't use any function or debugging
4264 * method which may touch any device, otherwise the kernel _will_ crash.
4265 */
4266+
4267+static char vectors[PAGE_SIZE] __read_only __aligned(PAGE_SIZE);
4268+
4269 static void __init devicemaps_init(struct machine_desc *mdesc)
4270 {
4271 struct map_desc map;
4272 unsigned long addr;
4273- void *vectors;
4274
4275- /*
4276- * Allocate the vector page early.
4277- */
4278- vectors = early_alloc(PAGE_SIZE);
4279-
4280- early_trap_init(vectors);
4281+ early_trap_init(&vectors);
4282
4283 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
4284 pmd_clear(pmd_off_k(addr));
4285@@ -1217,7 +1314,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
4286 * location (0xffff0000). If we aren't using high-vectors, also
4287 * create a mapping at the low-vectors virtual address.
4288 */
4289- map.pfn = __phys_to_pfn(virt_to_phys(vectors));
4290+ map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
4291 map.virtual = 0xffff0000;
4292 map.length = PAGE_SIZE;
4293 map.type = MT_HIGH_VECTORS;
4294@@ -1275,8 +1372,39 @@ static void __init map_lowmem(void)
4295 map.pfn = __phys_to_pfn(start);
4296 map.virtual = __phys_to_virt(start);
4297 map.length = end - start;
4298- map.type = MT_MEMORY;
4299
4300+#ifdef CONFIG_PAX_KERNEXEC
4301+ if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
4302+ struct map_desc kernel;
4303+ struct map_desc initmap;
4304+
4305+ /* when freeing initmem we will make this RW */
4306+ initmap.pfn = __phys_to_pfn(__pa(__init_begin));
4307+ initmap.virtual = (unsigned long)__init_begin;
4308+ initmap.length = _sdata - __init_begin;
4309+ initmap.type = MT_MEMORY_RWX;
4310+ create_mapping(&initmap);
4311+
4312+ /* when freeing initmem we will make this RX */
4313+ kernel.pfn = __phys_to_pfn(__pa(_stext));
4314+ kernel.virtual = (unsigned long)_stext;
4315+ kernel.length = __init_begin - _stext;
4316+ kernel.type = MT_MEMORY_RWX;
4317+ create_mapping(&kernel);
4318+
4319+ if (map.virtual < (unsigned long)_stext) {
4320+ map.length = (unsigned long)_stext - map.virtual;
4321+ map.type = MT_MEMORY_RWX;
4322+ create_mapping(&map);
4323+ }
4324+
4325+ map.pfn = __phys_to_pfn(__pa(_sdata));
4326+ map.virtual = (unsigned long)_sdata;
4327+ map.length = end - __pa(_sdata);
4328+ }
4329+#endif
4330+
4331+ map.type = MT_MEMORY_RW;
4332 create_mapping(&map);
4333 }
4334 }
4335diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
4336index 9704097..3e36dde 100644
4337--- a/arch/arm/mm/proc-v7-2level.S
4338+++ b/arch/arm/mm/proc-v7-2level.S
4339@@ -99,6 +99,9 @@ ENTRY(cpu_v7_set_pte_ext)
4340 tst r1, #L_PTE_XN
4341 orrne r3, r3, #PTE_EXT_XN
4342
4343+ tst r1, #L_PTE_PXN
4344+ orrne r3, r3, #PTE_EXT_PXN
4345+
4346 tst r1, #L_PTE_YOUNG
4347 tstne r1, #L_PTE_VALID
4348 #ifndef CONFIG_CPU_USE_DOMAINS
4349diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
4350index a5bc92d..0bb4730 100644
4351--- a/arch/arm/plat-omap/sram.c
4352+++ b/arch/arm/plat-omap/sram.c
4353@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
4354 * Looks like we need to preserve some bootloader code at the
4355 * beginning of SRAM for jumping to flash for reboot to work...
4356 */
4357+ pax_open_kernel();
4358 memset_io(omap_sram_base + omap_sram_skip, 0,
4359 omap_sram_size - omap_sram_skip);
4360+ pax_close_kernel();
4361 }
4362diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
4363index ce6d763..cfea917 100644
4364--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
4365+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
4366@@ -47,7 +47,7 @@ struct samsung_dma_ops {
4367 int (*started)(unsigned ch);
4368 int (*flush)(unsigned ch);
4369 int (*stop)(unsigned ch);
4370-};
4371+} __no_const;
4372
4373 extern void *samsung_dmadev_get_ops(void);
4374 extern void *s3c_dma_get_ops(void);
4375diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
4376index f4726dc..39ed646 100644
4377--- a/arch/arm64/kernel/debug-monitors.c
4378+++ b/arch/arm64/kernel/debug-monitors.c
4379@@ -149,7 +149,7 @@ static int __cpuinit os_lock_notify(struct notifier_block *self,
4380 return NOTIFY_OK;
4381 }
4382
4383-static struct notifier_block __cpuinitdata os_lock_nb = {
4384+static struct notifier_block os_lock_nb = {
4385 .notifier_call = os_lock_notify,
4386 };
4387
4388diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
4389index 5ab825c..96aaec8 100644
4390--- a/arch/arm64/kernel/hw_breakpoint.c
4391+++ b/arch/arm64/kernel/hw_breakpoint.c
4392@@ -831,7 +831,7 @@ static int __cpuinit hw_breakpoint_reset_notify(struct notifier_block *self,
4393 return NOTIFY_OK;
4394 }
4395
4396-static struct notifier_block __cpuinitdata hw_breakpoint_reset_nb = {
4397+static struct notifier_block hw_breakpoint_reset_nb = {
4398 .notifier_call = hw_breakpoint_reset_notify,
4399 };
4400
4401diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
4402index c3a58a1..78fbf54 100644
4403--- a/arch/avr32/include/asm/cache.h
4404+++ b/arch/avr32/include/asm/cache.h
4405@@ -1,8 +1,10 @@
4406 #ifndef __ASM_AVR32_CACHE_H
4407 #define __ASM_AVR32_CACHE_H
4408
4409+#include <linux/const.h>
4410+
4411 #define L1_CACHE_SHIFT 5
4412-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4413+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4414
4415 /*
4416 * Memory returned by kmalloc() may be used for DMA, so we must make
4417diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
4418index d232888..87c8df1 100644
4419--- a/arch/avr32/include/asm/elf.h
4420+++ b/arch/avr32/include/asm/elf.h
4421@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
4422 the loader. We need to make sure that it is out of the way of the program
4423 that it will "exec", and that there is sufficient room for the brk. */
4424
4425-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
4426+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
4427
4428+#ifdef CONFIG_PAX_ASLR
4429+#define PAX_ELF_ET_DYN_BASE 0x00001000UL
4430+
4431+#define PAX_DELTA_MMAP_LEN 15
4432+#define PAX_DELTA_STACK_LEN 15
4433+#endif
4434
4435 /* This yields a mask that user programs can use to figure out what
4436 instruction set this CPU supports. This could be done in user space,
4437diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
4438index 479330b..53717a8 100644
4439--- a/arch/avr32/include/asm/kmap_types.h
4440+++ b/arch/avr32/include/asm/kmap_types.h
4441@@ -2,9 +2,9 @@
4442 #define __ASM_AVR32_KMAP_TYPES_H
4443
4444 #ifdef CONFIG_DEBUG_HIGHMEM
4445-# define KM_TYPE_NR 29
4446+# define KM_TYPE_NR 30
4447 #else
4448-# define KM_TYPE_NR 14
4449+# define KM_TYPE_NR 15
4450 #endif
4451
4452 #endif /* __ASM_AVR32_KMAP_TYPES_H */
4453diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
4454index b2f2d2d..d1c85cb 100644
4455--- a/arch/avr32/mm/fault.c
4456+++ b/arch/avr32/mm/fault.c
4457@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
4458
4459 int exception_trace = 1;
4460
4461+#ifdef CONFIG_PAX_PAGEEXEC
4462+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
4463+{
4464+ unsigned long i;
4465+
4466+ printk(KERN_ERR "PAX: bytes at PC: ");
4467+ for (i = 0; i < 20; i++) {
4468+ unsigned char c;
4469+ if (get_user(c, (unsigned char *)pc+i))
4470+ printk(KERN_CONT "???????? ");
4471+ else
4472+ printk(KERN_CONT "%02x ", c);
4473+ }
4474+ printk("\n");
4475+}
4476+#endif
4477+
4478 /*
4479 * This routine handles page faults. It determines the address and the
4480 * problem, and then passes it off to one of the appropriate routines.
4481@@ -174,6 +191,16 @@ bad_area:
4482 up_read(&mm->mmap_sem);
4483
4484 if (user_mode(regs)) {
4485+
4486+#ifdef CONFIG_PAX_PAGEEXEC
4487+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
4488+ if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
4489+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
4490+ do_group_exit(SIGKILL);
4491+ }
4492+ }
4493+#endif
4494+
4495 if (exception_trace && printk_ratelimit())
4496 printk("%s%s[%d]: segfault at %08lx pc %08lx "
4497 "sp %08lx ecr %lu\n",
4498diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
4499index 568885a..f8008df 100644
4500--- a/arch/blackfin/include/asm/cache.h
4501+++ b/arch/blackfin/include/asm/cache.h
4502@@ -7,6 +7,7 @@
4503 #ifndef __ARCH_BLACKFIN_CACHE_H
4504 #define __ARCH_BLACKFIN_CACHE_H
4505
4506+#include <linux/const.h>
4507 #include <linux/linkage.h> /* for asmlinkage */
4508
4509 /*
4510@@ -14,7 +15,7 @@
4511 * Blackfin loads 32 bytes for cache
4512 */
4513 #define L1_CACHE_SHIFT 5
4514-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4515+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4516 #define SMP_CACHE_BYTES L1_CACHE_BYTES
4517
4518 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
4519diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
4520index aea2718..3639a60 100644
4521--- a/arch/cris/include/arch-v10/arch/cache.h
4522+++ b/arch/cris/include/arch-v10/arch/cache.h
4523@@ -1,8 +1,9 @@
4524 #ifndef _ASM_ARCH_CACHE_H
4525 #define _ASM_ARCH_CACHE_H
4526
4527+#include <linux/const.h>
4528 /* Etrax 100LX have 32-byte cache-lines. */
4529-#define L1_CACHE_BYTES 32
4530 #define L1_CACHE_SHIFT 5
4531+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4532
4533 #endif /* _ASM_ARCH_CACHE_H */
4534diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
4535index 7caf25d..ee65ac5 100644
4536--- a/arch/cris/include/arch-v32/arch/cache.h
4537+++ b/arch/cris/include/arch-v32/arch/cache.h
4538@@ -1,11 +1,12 @@
4539 #ifndef _ASM_CRIS_ARCH_CACHE_H
4540 #define _ASM_CRIS_ARCH_CACHE_H
4541
4542+#include <linux/const.h>
4543 #include <arch/hwregs/dma.h>
4544
4545 /* A cache-line is 32 bytes. */
4546-#define L1_CACHE_BYTES 32
4547 #define L1_CACHE_SHIFT 5
4548+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4549
4550 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
4551
4552diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
4553index b86329d..6709906 100644
4554--- a/arch/frv/include/asm/atomic.h
4555+++ b/arch/frv/include/asm/atomic.h
4556@@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v)
4557 #define atomic64_cmpxchg(v, old, new) (__cmpxchg_64(old, new, &(v)->counter))
4558 #define atomic64_xchg(v, new) (__xchg_64(new, &(v)->counter))
4559
4560+#define atomic64_read_unchecked(v) atomic64_read(v)
4561+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4562+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4563+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4564+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4565+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4566+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4567+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4568+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4569+
4570 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
4571 {
4572 int c, old;
4573diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
4574index 2797163..c2a401d 100644
4575--- a/arch/frv/include/asm/cache.h
4576+++ b/arch/frv/include/asm/cache.h
4577@@ -12,10 +12,11 @@
4578 #ifndef __ASM_CACHE_H
4579 #define __ASM_CACHE_H
4580
4581+#include <linux/const.h>
4582
4583 /* bytes per L1 cache line */
4584 #define L1_CACHE_SHIFT (CONFIG_FRV_L1_CACHE_SHIFT)
4585-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4586+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4587
4588 #define __cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4589 #define ____cacheline_aligned __attribute__((aligned(L1_CACHE_BYTES)))
4590diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
4591index 43901f2..0d8b865 100644
4592--- a/arch/frv/include/asm/kmap_types.h
4593+++ b/arch/frv/include/asm/kmap_types.h
4594@@ -2,6 +2,6 @@
4595 #ifndef _ASM_KMAP_TYPES_H
4596 #define _ASM_KMAP_TYPES_H
4597
4598-#define KM_TYPE_NR 17
4599+#define KM_TYPE_NR 18
4600
4601 #endif
4602diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
4603index 836f147..4cf23f5 100644
4604--- a/arch/frv/mm/elf-fdpic.c
4605+++ b/arch/frv/mm/elf-fdpic.c
4606@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4607 {
4608 struct vm_area_struct *vma;
4609 struct vm_unmapped_area_info info;
4610+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
4611
4612 if (len > TASK_SIZE)
4613 return -ENOMEM;
4614@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4615 if (addr) {
4616 addr = PAGE_ALIGN(addr);
4617 vma = find_vma(current->mm, addr);
4618- if (TASK_SIZE - len >= addr &&
4619- (!vma || addr + len <= vma->vm_start))
4620+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
4621 goto success;
4622 }
4623
4624@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
4625 info.high_limit = (current->mm->start_stack - 0x00200000);
4626 info.align_mask = 0;
4627 info.align_offset = 0;
4628+ info.threadstack_offset = offset;
4629 addr = vm_unmapped_area(&info);
4630 if (!(addr & ~PAGE_MASK))
4631 goto success;
4632diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
4633index f4ca594..adc72fd6 100644
4634--- a/arch/hexagon/include/asm/cache.h
4635+++ b/arch/hexagon/include/asm/cache.h
4636@@ -21,9 +21,11 @@
4637 #ifndef __ASM_CACHE_H
4638 #define __ASM_CACHE_H
4639
4640+#include <linux/const.h>
4641+
4642 /* Bytes per L1 cache line */
4643-#define L1_CACHE_SHIFT (5)
4644-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4645+#define L1_CACHE_SHIFT 5
4646+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4647
4648 #define __cacheline_aligned __aligned(L1_CACHE_BYTES)
4649 #define ____cacheline_aligned __aligned(L1_CACHE_BYTES)
4650diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
4651index 6e6fe18..a6ae668 100644
4652--- a/arch/ia64/include/asm/atomic.h
4653+++ b/arch/ia64/include/asm/atomic.h
4654@@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
4655 #define atomic64_inc(v) atomic64_add(1, (v))
4656 #define atomic64_dec(v) atomic64_sub(1, (v))
4657
4658+#define atomic64_read_unchecked(v) atomic64_read(v)
4659+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
4660+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
4661+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
4662+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
4663+#define atomic64_inc_unchecked(v) atomic64_inc(v)
4664+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
4665+#define atomic64_dec_unchecked(v) atomic64_dec(v)
4666+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
4667+
4668 /* Atomic operations are already serializing */
4669 #define smp_mb__before_atomic_dec() barrier()
4670 #define smp_mb__after_atomic_dec() barrier()
4671diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
4672index 988254a..e1ee885 100644
4673--- a/arch/ia64/include/asm/cache.h
4674+++ b/arch/ia64/include/asm/cache.h
4675@@ -1,6 +1,7 @@
4676 #ifndef _ASM_IA64_CACHE_H
4677 #define _ASM_IA64_CACHE_H
4678
4679+#include <linux/const.h>
4680
4681 /*
4682 * Copyright (C) 1998-2000 Hewlett-Packard Co
4683@@ -9,7 +10,7 @@
4684
4685 /* Bytes per L1 (data) cache line. */
4686 #define L1_CACHE_SHIFT CONFIG_IA64_L1_CACHE_SHIFT
4687-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
4688+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
4689
4690 #ifdef CONFIG_SMP
4691 # define SMP_CACHE_SHIFT L1_CACHE_SHIFT
4692diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
4693index 5a83c5c..4d7f553 100644
4694--- a/arch/ia64/include/asm/elf.h
4695+++ b/arch/ia64/include/asm/elf.h
4696@@ -42,6 +42,13 @@
4697 */
4698 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x800000000UL)
4699
4700+#ifdef CONFIG_PAX_ASLR
4701+#define PAX_ELF_ET_DYN_BASE (current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
4702+
4703+#define PAX_DELTA_MMAP_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
4704+#define PAX_DELTA_STACK_LEN (current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
4705+#endif
4706+
4707 #define PT_IA_64_UNWIND 0x70000001
4708
4709 /* IA-64 relocations: */
4710diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
4711index 96a8d92..617a1cf 100644
4712--- a/arch/ia64/include/asm/pgalloc.h
4713+++ b/arch/ia64/include/asm/pgalloc.h
4714@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
4715 pgd_val(*pgd_entry) = __pa(pud);
4716 }
4717
4718+static inline void
4719+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
4720+{
4721+ pgd_populate(mm, pgd_entry, pud);
4722+}
4723+
4724 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
4725 {
4726 return quicklist_alloc(0, GFP_KERNEL, NULL);
4727@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
4728 pud_val(*pud_entry) = __pa(pmd);
4729 }
4730
4731+static inline void
4732+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
4733+{
4734+ pud_populate(mm, pud_entry, pmd);
4735+}
4736+
4737 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
4738 {
4739 return quicklist_alloc(0, GFP_KERNEL, NULL);
4740diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
4741index 815810c..d60bd4c 100644
4742--- a/arch/ia64/include/asm/pgtable.h
4743+++ b/arch/ia64/include/asm/pgtable.h
4744@@ -12,7 +12,7 @@
4745 * David Mosberger-Tang <davidm@hpl.hp.com>
4746 */
4747
4748-
4749+#include <linux/const.h>
4750 #include <asm/mman.h>
4751 #include <asm/page.h>
4752 #include <asm/processor.h>
4753@@ -142,6 +142,17 @@
4754 #define PAGE_READONLY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4755 #define PAGE_COPY __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4756 #define PAGE_COPY_EXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
4757+
4758+#ifdef CONFIG_PAX_PAGEEXEC
4759+# define PAGE_SHARED_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
4760+# define PAGE_READONLY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4761+# define PAGE_COPY_NOEXEC __pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
4762+#else
4763+# define PAGE_SHARED_NOEXEC PAGE_SHARED
4764+# define PAGE_READONLY_NOEXEC PAGE_READONLY
4765+# define PAGE_COPY_NOEXEC PAGE_COPY
4766+#endif
4767+
4768 #define PAGE_GATE __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
4769 #define PAGE_KERNEL __pgprot(__DIRTY_BITS | _PAGE_PL_0 | _PAGE_AR_RWX)
4770 #define PAGE_KERNELRX __pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
4771diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
4772index 54ff557..70c88b7 100644
4773--- a/arch/ia64/include/asm/spinlock.h
4774+++ b/arch/ia64/include/asm/spinlock.h
4775@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
4776 unsigned short *p = (unsigned short *)&lock->lock + 1, tmp;
4777
4778 asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
4779- ACCESS_ONCE(*p) = (tmp + 2) & ~1;
4780+ ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
4781 }
4782
4783 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
4784diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
4785index 449c8c0..18965fb 100644
4786--- a/arch/ia64/include/asm/uaccess.h
4787+++ b/arch/ia64/include/asm/uaccess.h
4788@@ -240,12 +240,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
4789 static inline unsigned long
4790 __copy_to_user (void __user *to, const void *from, unsigned long count)
4791 {
4792+ if (count > INT_MAX)
4793+ return count;
4794+
4795+ if (!__builtin_constant_p(count))
4796+ check_object_size(from, count, true);
4797+
4798 return __copy_user(to, (__force void __user *) from, count);
4799 }
4800
4801 static inline unsigned long
4802 __copy_from_user (void *to, const void __user *from, unsigned long count)
4803 {
4804+ if (count > INT_MAX)
4805+ return count;
4806+
4807+ if (!__builtin_constant_p(count))
4808+ check_object_size(to, count, false);
4809+
4810 return __copy_user((__force void __user *) to, from, count);
4811 }
4812
4813@@ -255,10 +267,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
4814 ({ \
4815 void __user *__cu_to = (to); \
4816 const void *__cu_from = (from); \
4817- long __cu_len = (n); \
4818+ unsigned long __cu_len = (n); \
4819 \
4820- if (__access_ok(__cu_to, __cu_len, get_fs())) \
4821+ if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) { \
4822+ if (!__builtin_constant_p(n)) \
4823+ check_object_size(__cu_from, __cu_len, true); \
4824 __cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len); \
4825+ } \
4826 __cu_len; \
4827 })
4828
4829@@ -266,11 +281,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
4830 ({ \
4831 void *__cu_to = (to); \
4832 const void __user *__cu_from = (from); \
4833- long __cu_len = (n); \
4834+ unsigned long __cu_len = (n); \
4835 \
4836 __chk_user_ptr(__cu_from); \
4837- if (__access_ok(__cu_from, __cu_len, get_fs())) \
4838+ if (__cu_len <= INT_MAX && __access_ok(__cu_from, __cu_len, get_fs())) { \
4839+ if (!__builtin_constant_p(n)) \
4840+ check_object_size(__cu_to, __cu_len, false); \
4841 __cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len); \
4842+ } \
4843 __cu_len; \
4844 })
4845
4846diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
4847index 2d67317..07d8bfa 100644
4848--- a/arch/ia64/kernel/err_inject.c
4849+++ b/arch/ia64/kernel/err_inject.c
4850@@ -256,7 +256,7 @@ static int __cpuinit err_inject_cpu_callback(struct notifier_block *nfb,
4851 return NOTIFY_OK;
4852 }
4853
4854-static struct notifier_block __cpuinitdata err_inject_cpu_notifier =
4855+static struct notifier_block err_inject_cpu_notifier =
4856 {
4857 .notifier_call = err_inject_cpu_callback,
4858 };
4859diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
4860index d7396db..b33e873 100644
4861--- a/arch/ia64/kernel/mca.c
4862+++ b/arch/ia64/kernel/mca.c
4863@@ -1922,7 +1922,7 @@ static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
4864 return NOTIFY_OK;
4865 }
4866
4867-static struct notifier_block mca_cpu_notifier __cpuinitdata = {
4868+static struct notifier_block mca_cpu_notifier = {
4869 .notifier_call = mca_cpu_callback
4870 };
4871
4872diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
4873index 24603be..948052d 100644
4874--- a/arch/ia64/kernel/module.c
4875+++ b/arch/ia64/kernel/module.c
4876@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
4877 void
4878 module_free (struct module *mod, void *module_region)
4879 {
4880- if (mod && mod->arch.init_unw_table &&
4881- module_region == mod->module_init) {
4882+ if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
4883 unw_remove_unwind_table(mod->arch.init_unw_table);
4884 mod->arch.init_unw_table = NULL;
4885 }
4886@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
4887 }
4888
4889 static inline int
4890+in_init_rx (const struct module *mod, uint64_t addr)
4891+{
4892+ return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
4893+}
4894+
4895+static inline int
4896+in_init_rw (const struct module *mod, uint64_t addr)
4897+{
4898+ return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
4899+}
4900+
4901+static inline int
4902 in_init (const struct module *mod, uint64_t addr)
4903 {
4904- return addr - (uint64_t) mod->module_init < mod->init_size;
4905+ return in_init_rx(mod, addr) || in_init_rw(mod, addr);
4906+}
4907+
4908+static inline int
4909+in_core_rx (const struct module *mod, uint64_t addr)
4910+{
4911+ return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
4912+}
4913+
4914+static inline int
4915+in_core_rw (const struct module *mod, uint64_t addr)
4916+{
4917+ return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
4918 }
4919
4920 static inline int
4921 in_core (const struct module *mod, uint64_t addr)
4922 {
4923- return addr - (uint64_t) mod->module_core < mod->core_size;
4924+ return in_core_rx(mod, addr) || in_core_rw(mod, addr);
4925 }
4926
4927 static inline int
4928@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
4929 break;
4930
4931 case RV_BDREL:
4932- val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
4933+ if (in_init_rx(mod, val))
4934+ val -= (uint64_t) mod->module_init_rx;
4935+ else if (in_init_rw(mod, val))
4936+ val -= (uint64_t) mod->module_init_rw;
4937+ else if (in_core_rx(mod, val))
4938+ val -= (uint64_t) mod->module_core_rx;
4939+ else if (in_core_rw(mod, val))
4940+ val -= (uint64_t) mod->module_core_rw;
4941 break;
4942
4943 case RV_LTV:
4944@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
4945 * addresses have been selected...
4946 */
4947 uint64_t gp;
4948- if (mod->core_size > MAX_LTOFF)
4949+ if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
4950 /*
4951 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
4952 * at the end of the module.
4953 */
4954- gp = mod->core_size - MAX_LTOFF / 2;
4955+ gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
4956 else
4957- gp = mod->core_size / 2;
4958- gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
4959+ gp = (mod->core_size_rx + mod->core_size_rw) / 2;
4960+ gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
4961 mod->arch.gp = gp;
4962 DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
4963 }
4964diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
4965index 2b3c2d7..a318d84 100644
4966--- a/arch/ia64/kernel/palinfo.c
4967+++ b/arch/ia64/kernel/palinfo.c
4968@@ -980,7 +980,7 @@ static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
4969 return NOTIFY_OK;
4970 }
4971
4972-static struct notifier_block __refdata palinfo_cpu_notifier =
4973+static struct notifier_block palinfo_cpu_notifier =
4974 {
4975 .notifier_call = palinfo_cpu_callback,
4976 .priority = 0,
4977diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
4978index 4bc580a..7767f24 100644
4979--- a/arch/ia64/kernel/salinfo.c
4980+++ b/arch/ia64/kernel/salinfo.c
4981@@ -609,7 +609,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu
4982 return NOTIFY_OK;
4983 }
4984
4985-static struct notifier_block salinfo_cpu_notifier __cpuinitdata =
4986+static struct notifier_block salinfo_cpu_notifier =
4987 {
4988 .notifier_call = salinfo_cpu_callback,
4989 .priority = 0,
4990diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
4991index 41e33f8..65180b2 100644
4992--- a/arch/ia64/kernel/sys_ia64.c
4993+++ b/arch/ia64/kernel/sys_ia64.c
4994@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
4995 unsigned long align_mask = 0;
4996 struct mm_struct *mm = current->mm;
4997 struct vm_unmapped_area_info info;
4998+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
4999
5000 if (len > RGN_MAP_LIMIT)
5001 return -ENOMEM;
5002@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5003 if (REGION_NUMBER(addr) == RGN_HPAGE)
5004 addr = 0;
5005 #endif
5006+
5007+#ifdef CONFIG_PAX_RANDMMAP
5008+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5009+ addr = mm->free_area_cache;
5010+ else
5011+#endif
5012+
5013 if (!addr)
5014 addr = TASK_UNMAPPED_BASE;
5015
5016@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
5017 info.high_limit = TASK_SIZE;
5018 info.align_mask = align_mask;
5019 info.align_offset = 0;
5020+ info.threadstack_offset = offset;
5021 return vm_unmapped_area(&info);
5022 }
5023
5024diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
5025index dc00b2c..cce53c2 100644
5026--- a/arch/ia64/kernel/topology.c
5027+++ b/arch/ia64/kernel/topology.c
5028@@ -445,7 +445,7 @@ static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
5029 return NOTIFY_OK;
5030 }
5031
5032-static struct notifier_block __cpuinitdata cache_cpu_notifier =
5033+static struct notifier_block cache_cpu_notifier =
5034 {
5035 .notifier_call = cache_cpu_callback
5036 };
5037diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
5038index 0ccb28f..8992469 100644
5039--- a/arch/ia64/kernel/vmlinux.lds.S
5040+++ b/arch/ia64/kernel/vmlinux.lds.S
5041@@ -198,7 +198,7 @@ SECTIONS {
5042 /* Per-cpu data: */
5043 . = ALIGN(PERCPU_PAGE_SIZE);
5044 PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
5045- __phys_per_cpu_start = __per_cpu_load;
5046+ __phys_per_cpu_start = per_cpu_load;
5047 /*
5048 * ensure percpu data fits
5049 * into percpu page size
5050diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
5051index 6cf0341..d352594 100644
5052--- a/arch/ia64/mm/fault.c
5053+++ b/arch/ia64/mm/fault.c
5054@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
5055 return pte_present(pte);
5056 }
5057
5058+#ifdef CONFIG_PAX_PAGEEXEC
5059+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5060+{
5061+ unsigned long i;
5062+
5063+ printk(KERN_ERR "PAX: bytes at PC: ");
5064+ for (i = 0; i < 8; i++) {
5065+ unsigned int c;
5066+ if (get_user(c, (unsigned int *)pc+i))
5067+ printk(KERN_CONT "???????? ");
5068+ else
5069+ printk(KERN_CONT "%08x ", c);
5070+ }
5071+ printk("\n");
5072+}
5073+#endif
5074+
5075 # define VM_READ_BIT 0
5076 # define VM_WRITE_BIT 1
5077 # define VM_EXEC_BIT 2
5078@@ -149,8 +166,21 @@ retry:
5079 if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
5080 goto bad_area;
5081
5082- if ((vma->vm_flags & mask) != mask)
5083+ if ((vma->vm_flags & mask) != mask) {
5084+
5085+#ifdef CONFIG_PAX_PAGEEXEC
5086+ if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
5087+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
5088+ goto bad_area;
5089+
5090+ up_read(&mm->mmap_sem);
5091+ pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
5092+ do_group_exit(SIGKILL);
5093+ }
5094+#endif
5095+
5096 goto bad_area;
5097+ }
5098
5099 /*
5100 * If for any reason at all we couldn't handle the fault, make
5101diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
5102index 76069c1..c2aa816 100644
5103--- a/arch/ia64/mm/hugetlbpage.c
5104+++ b/arch/ia64/mm/hugetlbpage.c
5105@@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5106 unsigned long pgoff, unsigned long flags)
5107 {
5108 struct vm_unmapped_area_info info;
5109+ unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
5110
5111 if (len > RGN_MAP_LIMIT)
5112 return -ENOMEM;
5113@@ -172,6 +173,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
5114 info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
5115 info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
5116 info.align_offset = 0;
5117+ info.threadstack_offset = offset;
5118 return vm_unmapped_area(&info);
5119 }
5120
5121diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
5122index d1fe4b4..2628f37 100644
5123--- a/arch/ia64/mm/init.c
5124+++ b/arch/ia64/mm/init.c
5125@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
5126 vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
5127 vma->vm_end = vma->vm_start + PAGE_SIZE;
5128 vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
5129+
5130+#ifdef CONFIG_PAX_PAGEEXEC
5131+ if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
5132+ vma->vm_flags &= ~VM_EXEC;
5133+
5134+#ifdef CONFIG_PAX_MPROTECT
5135+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
5136+ vma->vm_flags &= ~VM_MAYEXEC;
5137+#endif
5138+
5139+ }
5140+#endif
5141+
5142 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
5143 down_write(&current->mm->mmap_sem);
5144 if (insert_vm_struct(current->mm, vma)) {
5145diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
5146index 40b3ee9..8c2c112 100644
5147--- a/arch/m32r/include/asm/cache.h
5148+++ b/arch/m32r/include/asm/cache.h
5149@@ -1,8 +1,10 @@
5150 #ifndef _ASM_M32R_CACHE_H
5151 #define _ASM_M32R_CACHE_H
5152
5153+#include <linux/const.h>
5154+
5155 /* L1 cache line size */
5156 #define L1_CACHE_SHIFT 4
5157-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5158+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5159
5160 #endif /* _ASM_M32R_CACHE_H */
5161diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
5162index 82abd15..d95ae5d 100644
5163--- a/arch/m32r/lib/usercopy.c
5164+++ b/arch/m32r/lib/usercopy.c
5165@@ -14,6 +14,9 @@
5166 unsigned long
5167 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5168 {
5169+ if ((long)n < 0)
5170+ return n;
5171+
5172 prefetch(from);
5173 if (access_ok(VERIFY_WRITE, to, n))
5174 __copy_user(to,from,n);
5175@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5176 unsigned long
5177 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
5178 {
5179+ if ((long)n < 0)
5180+ return n;
5181+
5182 prefetchw(to);
5183 if (access_ok(VERIFY_READ, from, n))
5184 __copy_user_zeroing(to,from,n);
5185diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
5186index 0395c51..5f26031 100644
5187--- a/arch/m68k/include/asm/cache.h
5188+++ b/arch/m68k/include/asm/cache.h
5189@@ -4,9 +4,11 @@
5190 #ifndef __ARCH_M68K_CACHE_H
5191 #define __ARCH_M68K_CACHE_H
5192
5193+#include <linux/const.h>
5194+
5195 /* bytes per L1 cache line */
5196 #define L1_CACHE_SHIFT 4
5197-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
5198+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5199
5200 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5201
5202diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
5203index 3c52fa6..11b2ad8 100644
5204--- a/arch/metag/mm/hugetlbpage.c
5205+++ b/arch/metag/mm/hugetlbpage.c
5206@@ -200,6 +200,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
5207 info.high_limit = TASK_SIZE;
5208 info.align_mask = PAGE_MASK & HUGEPT_MASK;
5209 info.align_offset = 0;
5210+ info.threadstack_offset = 0;
5211 return vm_unmapped_area(&info);
5212 }
5213
5214diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
5215index 4efe96a..60e8699 100644
5216--- a/arch/microblaze/include/asm/cache.h
5217+++ b/arch/microblaze/include/asm/cache.h
5218@@ -13,11 +13,12 @@
5219 #ifndef _ASM_MICROBLAZE_CACHE_H
5220 #define _ASM_MICROBLAZE_CACHE_H
5221
5222+#include <linux/const.h>
5223 #include <asm/registers.h>
5224
5225 #define L1_CACHE_SHIFT 5
5226 /* word-granular cache in microblaze */
5227-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5228+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5229
5230 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5231
5232diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
5233index 08b6079..eb272cf 100644
5234--- a/arch/mips/include/asm/atomic.h
5235+++ b/arch/mips/include/asm/atomic.h
5236@@ -21,6 +21,10 @@
5237 #include <asm/cmpxchg.h>
5238 #include <asm/war.h>
5239
5240+#ifdef CONFIG_GENERIC_ATOMIC64
5241+#include <asm-generic/atomic64.h>
5242+#endif
5243+
5244 #define ATOMIC_INIT(i) { (i) }
5245
5246 /*
5247@@ -759,6 +763,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
5248 */
5249 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
5250
5251+#define atomic64_read_unchecked(v) atomic64_read(v)
5252+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5253+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5254+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5255+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5256+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5257+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5258+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5259+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5260+
5261 #endif /* CONFIG_64BIT */
5262
5263 /*
5264diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
5265index b4db69f..8f3b093 100644
5266--- a/arch/mips/include/asm/cache.h
5267+++ b/arch/mips/include/asm/cache.h
5268@@ -9,10 +9,11 @@
5269 #ifndef _ASM_CACHE_H
5270 #define _ASM_CACHE_H
5271
5272+#include <linux/const.h>
5273 #include <kmalloc.h>
5274
5275 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
5276-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5277+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5278
5279 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
5280 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5281diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
5282index cf3ae24..238d22f 100644
5283--- a/arch/mips/include/asm/elf.h
5284+++ b/arch/mips/include/asm/elf.h
5285@@ -372,13 +372,16 @@ extern const char *__elf_platform;
5286 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
5287 #endif
5288
5289+#ifdef CONFIG_PAX_ASLR
5290+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
5291+
5292+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5293+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5294+#endif
5295+
5296 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
5297 struct linux_binprm;
5298 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
5299 int uses_interp);
5300
5301-struct mm_struct;
5302-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
5303-#define arch_randomize_brk arch_randomize_brk
5304-
5305 #endif /* _ASM_ELF_H */
5306diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
5307index c1f6afa..38cc6e9 100644
5308--- a/arch/mips/include/asm/exec.h
5309+++ b/arch/mips/include/asm/exec.h
5310@@ -12,6 +12,6 @@
5311 #ifndef _ASM_EXEC_H
5312 #define _ASM_EXEC_H
5313
5314-extern unsigned long arch_align_stack(unsigned long sp);
5315+#define arch_align_stack(x) ((x) & ~0xfUL)
5316
5317 #endif /* _ASM_EXEC_H */
5318diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
5319index f59552f..3abe9b9 100644
5320--- a/arch/mips/include/asm/page.h
5321+++ b/arch/mips/include/asm/page.h
5322@@ -95,7 +95,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
5323 #ifdef CONFIG_CPU_MIPS32
5324 typedef struct { unsigned long pte_low, pte_high; } pte_t;
5325 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
5326- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
5327+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
5328 #else
5329 typedef struct { unsigned long long pte; } pte_t;
5330 #define pte_val(x) ((x).pte)
5331diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
5332index 881d18b..cea38bc 100644
5333--- a/arch/mips/include/asm/pgalloc.h
5334+++ b/arch/mips/include/asm/pgalloc.h
5335@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
5336 {
5337 set_pud(pud, __pud((unsigned long)pmd));
5338 }
5339+
5340+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
5341+{
5342+ pud_populate(mm, pud, pmd);
5343+}
5344 #endif
5345
5346 /*
5347diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
5348index 895320e..bf63e10 100644
5349--- a/arch/mips/include/asm/thread_info.h
5350+++ b/arch/mips/include/asm/thread_info.h
5351@@ -115,6 +115,8 @@ static inline struct thread_info *current_thread_info(void)
5352 #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
5353 #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
5354 #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
5355+/* li takes a 32bit immediate */
5356+#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
5357 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
5358
5359 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
5360@@ -130,15 +132,18 @@ static inline struct thread_info *current_thread_info(void)
5361 #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
5362 #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
5363 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
5364+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
5365+
5366+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
5367
5368 /* work to do in syscall_trace_leave() */
5369-#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
5370+#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
5371
5372 /* work to do on interrupt/exception return */
5373 #define _TIF_WORK_MASK \
5374 (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME)
5375 /* work to do on any return to u-space */
5376-#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT)
5377+#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT | _TIF_GRSEC_SETXID)
5378
5379 #endif /* __KERNEL__ */
5380
5381diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
5382index 1188e00..41cf144 100644
5383--- a/arch/mips/kernel/binfmt_elfn32.c
5384+++ b/arch/mips/kernel/binfmt_elfn32.c
5385@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
5386 #undef ELF_ET_DYN_BASE
5387 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
5388
5389+#ifdef CONFIG_PAX_ASLR
5390+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
5391+
5392+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5393+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5394+#endif
5395+
5396 #include <asm/processor.h>
5397 #include <linux/module.h>
5398 #include <linux/elfcore.h>
5399diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
5400index 202e581..689ca79 100644
5401--- a/arch/mips/kernel/binfmt_elfo32.c
5402+++ b/arch/mips/kernel/binfmt_elfo32.c
5403@@ -56,6 +56,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
5404 #undef ELF_ET_DYN_BASE
5405 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
5406
5407+#ifdef CONFIG_PAX_ASLR
5408+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
5409+
5410+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5411+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5412+#endif
5413+
5414 #include <asm/processor.h>
5415
5416 /*
5417diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
5418index c6a041d..b3e7318 100644
5419--- a/arch/mips/kernel/process.c
5420+++ b/arch/mips/kernel/process.c
5421@@ -563,15 +563,3 @@ unsigned long get_wchan(struct task_struct *task)
5422 out:
5423 return pc;
5424 }
5425-
5426-/*
5427- * Don't forget that the stack pointer must be aligned on a 8 bytes
5428- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
5429- */
5430-unsigned long arch_align_stack(unsigned long sp)
5431-{
5432- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5433- sp -= get_random_int() & ~PAGE_MASK;
5434-
5435- return sp & ALMASK;
5436-}
5437diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
5438index 9c6299c..2fb4c22 100644
5439--- a/arch/mips/kernel/ptrace.c
5440+++ b/arch/mips/kernel/ptrace.c
5441@@ -528,6 +528,10 @@ static inline int audit_arch(void)
5442 return arch;
5443 }
5444
5445+#ifdef CONFIG_GRKERNSEC_SETXID
5446+extern void gr_delayed_cred_worker(void);
5447+#endif
5448+
5449 /*
5450 * Notification of system call entry/exit
5451 * - triggered by current->work.syscall_trace
5452@@ -537,6 +541,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
5453 /* do the secure computing check first */
5454 secure_computing_strict(regs->regs[2]);
5455
5456+#ifdef CONFIG_GRKERNSEC_SETXID
5457+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5458+ gr_delayed_cred_worker();
5459+#endif
5460+
5461 if (!(current->ptrace & PT_PTRACED))
5462 goto out;
5463
5464diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
5465index 9b36424..e7f4154 100644
5466--- a/arch/mips/kernel/scall32-o32.S
5467+++ b/arch/mips/kernel/scall32-o32.S
5468@@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
5469
5470 stack_done:
5471 lw t0, TI_FLAGS($28) # syscall tracing enabled?
5472- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5473+ li t1, _TIF_SYSCALL_WORK
5474 and t0, t1
5475 bnez t0, syscall_trace_entry # -> yes
5476
5477diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
5478index 97a5909..59622f8 100644
5479--- a/arch/mips/kernel/scall64-64.S
5480+++ b/arch/mips/kernel/scall64-64.S
5481@@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
5482
5483 sd a3, PT_R26(sp) # save a3 for syscall restarting
5484
5485- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5486+ li t1, _TIF_SYSCALL_WORK
5487 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
5488 and t0, t1, t0
5489 bnez t0, syscall_trace_entry
5490diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
5491index edcb659..fb2ab09 100644
5492--- a/arch/mips/kernel/scall64-n32.S
5493+++ b/arch/mips/kernel/scall64-n32.S
5494@@ -47,7 +47,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
5495
5496 sd a3, PT_R26(sp) # save a3 for syscall restarting
5497
5498- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5499+ li t1, _TIF_SYSCALL_WORK
5500 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
5501 and t0, t1, t0
5502 bnez t0, n32_syscall_trace_entry
5503diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
5504index 74f485d..47d2c38 100644
5505--- a/arch/mips/kernel/scall64-o32.S
5506+++ b/arch/mips/kernel/scall64-o32.S
5507@@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
5508 PTR 4b, bad_stack
5509 .previous
5510
5511- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5512+ li t1, _TIF_SYSCALL_WORK
5513 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
5514 and t0, t1, t0
5515 bnez t0, trace_a_syscall
5516diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
5517index 0fead53..a2c0fb5 100644
5518--- a/arch/mips/mm/fault.c
5519+++ b/arch/mips/mm/fault.c
5520@@ -27,6 +27,23 @@
5521 #include <asm/highmem.h> /* For VMALLOC_END */
5522 #include <linux/kdebug.h>
5523
5524+#ifdef CONFIG_PAX_PAGEEXEC
5525+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5526+{
5527+ unsigned long i;
5528+
5529+ printk(KERN_ERR "PAX: bytes at PC: ");
5530+ for (i = 0; i < 5; i++) {
5531+ unsigned int c;
5532+ if (get_user(c, (unsigned int *)pc+i))
5533+ printk(KERN_CONT "???????? ");
5534+ else
5535+ printk(KERN_CONT "%08x ", c);
5536+ }
5537+ printk("\n");
5538+}
5539+#endif
5540+
5541 /*
5542 * This routine handles page faults. It determines the address,
5543 * and the problem, and then passes it off to one of the appropriate
5544diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
5545index 7e5fe27..9656513 100644
5546--- a/arch/mips/mm/mmap.c
5547+++ b/arch/mips/mm/mmap.c
5548@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
5549 struct vm_area_struct *vma;
5550 unsigned long addr = addr0;
5551 int do_color_align;
5552+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5553 struct vm_unmapped_area_info info;
5554
5555 if (unlikely(len > TASK_SIZE))
5556@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
5557 do_color_align = 1;
5558
5559 /* requesting a specific address */
5560+
5561+#ifdef CONFIG_PAX_RANDMMAP
5562+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
5563+#endif
5564+
5565 if (addr) {
5566 if (do_color_align)
5567 addr = COLOUR_ALIGN(addr, pgoff);
5568@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
5569 addr = PAGE_ALIGN(addr);
5570
5571 vma = find_vma(mm, addr);
5572- if (TASK_SIZE - len >= addr &&
5573- (!vma || addr + len <= vma->vm_start))
5574+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vmm, addr, len, offset))
5575 return addr;
5576 }
5577
5578 info.length = len;
5579 info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
5580 info.align_offset = pgoff << PAGE_SHIFT;
5581+ info.threadstack_offset = offset;
5582
5583 if (dir == DOWN) {
5584 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
5585@@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5586 {
5587 unsigned long random_factor = 0UL;
5588
5589+#ifdef CONFIG_PAX_RANDMMAP
5590+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5591+#endif
5592+
5593 if (current->flags & PF_RANDOMIZE) {
5594 random_factor = get_random_int();
5595 random_factor = random_factor << PAGE_SHIFT;
5596@@ -157,42 +167,27 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5597
5598 if (mmap_is_legacy()) {
5599 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5600+
5601+#ifdef CONFIG_PAX_RANDMMAP
5602+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5603+ mm->mmap_base += mm->delta_mmap;
5604+#endif
5605+
5606 mm->get_unmapped_area = arch_get_unmapped_area;
5607 mm->unmap_area = arch_unmap_area;
5608 } else {
5609 mm->mmap_base = mmap_base(random_factor);
5610+
5611+#ifdef CONFIG_PAX_RANDMMAP
5612+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5613+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5614+#endif
5615+
5616 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5617 mm->unmap_area = arch_unmap_area_topdown;
5618 }
5619 }
5620
5621-static inline unsigned long brk_rnd(void)
5622-{
5623- unsigned long rnd = get_random_int();
5624-
5625- rnd = rnd << PAGE_SHIFT;
5626- /* 8MB for 32bit, 256MB for 64bit */
5627- if (TASK_IS_32BIT_ADDR)
5628- rnd = rnd & 0x7ffffful;
5629- else
5630- rnd = rnd & 0xffffffful;
5631-
5632- return rnd;
5633-}
5634-
5635-unsigned long arch_randomize_brk(struct mm_struct *mm)
5636-{
5637- unsigned long base = mm->brk;
5638- unsigned long ret;
5639-
5640- ret = PAGE_ALIGN(base + brk_rnd());
5641-
5642- if (ret < mm->brk)
5643- return mm->brk;
5644-
5645- return ret;
5646-}
5647-
5648 int __virt_addr_valid(const volatile void *kaddr)
5649 {
5650 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
5651diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
5652index 967d144..db12197 100644
5653--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
5654+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
5655@@ -11,12 +11,14 @@
5656 #ifndef _ASM_PROC_CACHE_H
5657 #define _ASM_PROC_CACHE_H
5658
5659+#include <linux/const.h>
5660+
5661 /* L1 cache */
5662
5663 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
5664 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
5665-#define L1_CACHE_BYTES 16 /* bytes per entry */
5666 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
5667+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
5668 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
5669
5670 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
5671diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
5672index bcb5df2..84fabd2 100644
5673--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
5674+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
5675@@ -16,13 +16,15 @@
5676 #ifndef _ASM_PROC_CACHE_H
5677 #define _ASM_PROC_CACHE_H
5678
5679+#include <linux/const.h>
5680+
5681 /*
5682 * L1 cache
5683 */
5684 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
5685 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
5686-#define L1_CACHE_BYTES 32 /* bytes per entry */
5687 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
5688+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
5689 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
5690
5691 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
5692diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
5693index 4ce7a01..449202a 100644
5694--- a/arch/openrisc/include/asm/cache.h
5695+++ b/arch/openrisc/include/asm/cache.h
5696@@ -19,11 +19,13 @@
5697 #ifndef __ASM_OPENRISC_CACHE_H
5698 #define __ASM_OPENRISC_CACHE_H
5699
5700+#include <linux/const.h>
5701+
5702 /* FIXME: How can we replace these with values from the CPU...
5703 * they shouldn't be hard-coded!
5704 */
5705
5706-#define L1_CACHE_BYTES 16
5707 #define L1_CACHE_SHIFT 4
5708+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5709
5710 #endif /* __ASM_OPENRISC_CACHE_H */
5711diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
5712index 472886c..00e7df9 100644
5713--- a/arch/parisc/include/asm/atomic.h
5714+++ b/arch/parisc/include/asm/atomic.h
5715@@ -252,6 +252,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
5716 return dec;
5717 }
5718
5719+#define atomic64_read_unchecked(v) atomic64_read(v)
5720+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5721+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5722+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5723+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5724+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5725+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5726+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5727+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5728+
5729 #endif /* !CONFIG_64BIT */
5730
5731
5732diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
5733index 47f11c7..3420df2 100644
5734--- a/arch/parisc/include/asm/cache.h
5735+++ b/arch/parisc/include/asm/cache.h
5736@@ -5,6 +5,7 @@
5737 #ifndef __ARCH_PARISC_CACHE_H
5738 #define __ARCH_PARISC_CACHE_H
5739
5740+#include <linux/const.h>
5741
5742 /*
5743 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
5744@@ -15,13 +16,13 @@
5745 * just ruin performance.
5746 */
5747 #ifdef CONFIG_PA20
5748-#define L1_CACHE_BYTES 64
5749 #define L1_CACHE_SHIFT 6
5750 #else
5751-#define L1_CACHE_BYTES 32
5752 #define L1_CACHE_SHIFT 5
5753 #endif
5754
5755+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5756+
5757 #ifndef __ASSEMBLY__
5758
5759 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5760diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
5761index ad2b503..bdf1651 100644
5762--- a/arch/parisc/include/asm/elf.h
5763+++ b/arch/parisc/include/asm/elf.h
5764@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
5765
5766 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
5767
5768+#ifdef CONFIG_PAX_ASLR
5769+#define PAX_ELF_ET_DYN_BASE 0x10000UL
5770+
5771+#define PAX_DELTA_MMAP_LEN 16
5772+#define PAX_DELTA_STACK_LEN 16
5773+#endif
5774+
5775 /* This yields a mask that user programs can use to figure out what
5776 instruction set this CPU supports. This could be done in user space,
5777 but it's not easy, and we've already done it here. */
5778diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
5779index fc987a1..6e068ef 100644
5780--- a/arch/parisc/include/asm/pgalloc.h
5781+++ b/arch/parisc/include/asm/pgalloc.h
5782@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
5783 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
5784 }
5785
5786+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
5787+{
5788+ pgd_populate(mm, pgd, pmd);
5789+}
5790+
5791 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
5792 {
5793 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
5794@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
5795 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
5796 #define pmd_free(mm, x) do { } while (0)
5797 #define pgd_populate(mm, pmd, pte) BUG()
5798+#define pgd_populate_kernel(mm, pmd, pte) BUG()
5799
5800 #endif
5801
5802diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
5803index 1e40d7f..a3eb445 100644
5804--- a/arch/parisc/include/asm/pgtable.h
5805+++ b/arch/parisc/include/asm/pgtable.h
5806@@ -223,6 +223,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
5807 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
5808 #define PAGE_COPY PAGE_EXECREAD
5809 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
5810+
5811+#ifdef CONFIG_PAX_PAGEEXEC
5812+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
5813+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
5814+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
5815+#else
5816+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5817+# define PAGE_COPY_NOEXEC PAGE_COPY
5818+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5819+#endif
5820+
5821 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
5822 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
5823 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
5824diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
5825index e0a8235..ce2f1e1 100644
5826--- a/arch/parisc/include/asm/uaccess.h
5827+++ b/arch/parisc/include/asm/uaccess.h
5828@@ -245,10 +245,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
5829 const void __user *from,
5830 unsigned long n)
5831 {
5832- int sz = __compiletime_object_size(to);
5833+ size_t sz = __compiletime_object_size(to);
5834 int ret = -EFAULT;
5835
5836- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
5837+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
5838 ret = __copy_from_user(to, from, n);
5839 else
5840 copy_from_user_overflow();
5841diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
5842index 2a625fb..9908930 100644
5843--- a/arch/parisc/kernel/module.c
5844+++ b/arch/parisc/kernel/module.c
5845@@ -98,16 +98,38 @@
5846
5847 /* three functions to determine where in the module core
5848 * or init pieces the location is */
5849+static inline int in_init_rx(struct module *me, void *loc)
5850+{
5851+ return (loc >= me->module_init_rx &&
5852+ loc < (me->module_init_rx + me->init_size_rx));
5853+}
5854+
5855+static inline int in_init_rw(struct module *me, void *loc)
5856+{
5857+ return (loc >= me->module_init_rw &&
5858+ loc < (me->module_init_rw + me->init_size_rw));
5859+}
5860+
5861 static inline int in_init(struct module *me, void *loc)
5862 {
5863- return (loc >= me->module_init &&
5864- loc <= (me->module_init + me->init_size));
5865+ return in_init_rx(me, loc) || in_init_rw(me, loc);
5866+}
5867+
5868+static inline int in_core_rx(struct module *me, void *loc)
5869+{
5870+ return (loc >= me->module_core_rx &&
5871+ loc < (me->module_core_rx + me->core_size_rx));
5872+}
5873+
5874+static inline int in_core_rw(struct module *me, void *loc)
5875+{
5876+ return (loc >= me->module_core_rw &&
5877+ loc < (me->module_core_rw + me->core_size_rw));
5878 }
5879
5880 static inline int in_core(struct module *me, void *loc)
5881 {
5882- return (loc >= me->module_core &&
5883- loc <= (me->module_core + me->core_size));
5884+ return in_core_rx(me, loc) || in_core_rw(me, loc);
5885 }
5886
5887 static inline int in_local(struct module *me, void *loc)
5888@@ -371,13 +393,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
5889 }
5890
5891 /* align things a bit */
5892- me->core_size = ALIGN(me->core_size, 16);
5893- me->arch.got_offset = me->core_size;
5894- me->core_size += gots * sizeof(struct got_entry);
5895+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
5896+ me->arch.got_offset = me->core_size_rw;
5897+ me->core_size_rw += gots * sizeof(struct got_entry);
5898
5899- me->core_size = ALIGN(me->core_size, 16);
5900- me->arch.fdesc_offset = me->core_size;
5901- me->core_size += fdescs * sizeof(Elf_Fdesc);
5902+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
5903+ me->arch.fdesc_offset = me->core_size_rw;
5904+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
5905
5906 me->arch.got_max = gots;
5907 me->arch.fdesc_max = fdescs;
5908@@ -395,7 +417,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
5909
5910 BUG_ON(value == 0);
5911
5912- got = me->module_core + me->arch.got_offset;
5913+ got = me->module_core_rw + me->arch.got_offset;
5914 for (i = 0; got[i].addr; i++)
5915 if (got[i].addr == value)
5916 goto out;
5917@@ -413,7 +435,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
5918 #ifdef CONFIG_64BIT
5919 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
5920 {
5921- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
5922+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
5923
5924 if (!value) {
5925 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
5926@@ -431,7 +453,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
5927
5928 /* Create new one */
5929 fdesc->addr = value;
5930- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
5931+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
5932 return (Elf_Addr)fdesc;
5933 }
5934 #endif /* CONFIG_64BIT */
5935@@ -843,7 +865,7 @@ register_unwind_table(struct module *me,
5936
5937 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
5938 end = table + sechdrs[me->arch.unwind_section].sh_size;
5939- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
5940+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
5941
5942 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
5943 me->arch.unwind_section, table, end, gp);
5944diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
5945index 5dfd248..64914ac 100644
5946--- a/arch/parisc/kernel/sys_parisc.c
5947+++ b/arch/parisc/kernel/sys_parisc.c
5948@@ -33,9 +33,11 @@
5949 #include <linux/utsname.h>
5950 #include <linux/personality.h>
5951
5952-static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
5953+static unsigned long get_unshared_area(struct file *filp, unsigned long addr, unsigned long len,
5954+ unsigned long flags)
5955 {
5956 struct vm_unmapped_area_info info;
5957+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
5958
5959 info.flags = 0;
5960 info.length = len;
5961@@ -43,6 +45,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
5962 info.high_limit = TASK_SIZE;
5963 info.align_mask = 0;
5964 info.align_offset = 0;
5965+ info.threadstack_offset = offset;
5966 return vm_unmapped_area(&info);
5967 }
5968
5969@@ -61,10 +64,11 @@ static int get_offset(struct address_space *mapping)
5970 return (unsigned long) mapping >> 8;
5971 }
5972
5973-static unsigned long get_shared_area(struct address_space *mapping,
5974- unsigned long addr, unsigned long len, unsigned long pgoff)
5975+static unsigned long get_shared_area(struct file *filp, struct address_space *mapping,
5976+ unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
5977 {
5978 struct vm_unmapped_area_info info;
5979+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
5980
5981 info.flags = 0;
5982 info.length = len;
5983@@ -72,6 +76,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
5984 info.high_limit = TASK_SIZE;
5985 info.align_mask = PAGE_MASK & (SHMLBA - 1);
5986 info.align_offset = (get_offset(mapping) + pgoff) << PAGE_SHIFT;
5987+ info.threadstack_offset = offset;
5988 return vm_unmapped_area(&info);
5989 }
5990
5991@@ -86,15 +91,22 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
5992 return -EINVAL;
5993 return addr;
5994 }
5995- if (!addr)
5996+ if (!addr) {
5997 addr = TASK_UNMAPPED_BASE;
5998
5999+#ifdef CONFIG_PAX_RANDMMAP
6000+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
6001+ addr += current->mm->delta_mmap;
6002+#endif
6003+
6004+ }
6005+
6006 if (filp) {
6007- addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
6008+ addr = get_shared_area(filp, filp->f_mapping, addr, len, pgoff, flags);
6009 } else if(flags & MAP_SHARED) {
6010- addr = get_shared_area(NULL, addr, len, pgoff);
6011+ addr = get_shared_area(filp, NULL, addr, len, pgoff, flags);
6012 } else {
6013- addr = get_unshared_area(addr, len);
6014+ addr = get_unshared_area(filp, addr, len, flags);
6015 }
6016 return addr;
6017 }
6018diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
6019index 04e47c6..7a8faf6 100644
6020--- a/arch/parisc/kernel/traps.c
6021+++ b/arch/parisc/kernel/traps.c
6022@@ -727,9 +727,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
6023
6024 down_read(&current->mm->mmap_sem);
6025 vma = find_vma(current->mm,regs->iaoq[0]);
6026- if (vma && (regs->iaoq[0] >= vma->vm_start)
6027- && (vma->vm_flags & VM_EXEC)) {
6028-
6029+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
6030 fault_address = regs->iaoq[0];
6031 fault_space = regs->iasq[0];
6032
6033diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
6034index f247a34..dc0f219 100644
6035--- a/arch/parisc/mm/fault.c
6036+++ b/arch/parisc/mm/fault.c
6037@@ -15,6 +15,7 @@
6038 #include <linux/sched.h>
6039 #include <linux/interrupt.h>
6040 #include <linux/module.h>
6041+#include <linux/unistd.h>
6042
6043 #include <asm/uaccess.h>
6044 #include <asm/traps.h>
6045@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
6046 static unsigned long
6047 parisc_acctyp(unsigned long code, unsigned int inst)
6048 {
6049- if (code == 6 || code == 16)
6050+ if (code == 6 || code == 7 || code == 16)
6051 return VM_EXEC;
6052
6053 switch (inst & 0xf0000000) {
6054@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
6055 }
6056 #endif
6057
6058+#ifdef CONFIG_PAX_PAGEEXEC
6059+/*
6060+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
6061+ *
6062+ * returns 1 when task should be killed
6063+ * 2 when rt_sigreturn trampoline was detected
6064+ * 3 when unpatched PLT trampoline was detected
6065+ */
6066+static int pax_handle_fetch_fault(struct pt_regs *regs)
6067+{
6068+
6069+#ifdef CONFIG_PAX_EMUPLT
6070+ int err;
6071+
6072+ do { /* PaX: unpatched PLT emulation */
6073+ unsigned int bl, depwi;
6074+
6075+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
6076+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
6077+
6078+ if (err)
6079+ break;
6080+
6081+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
6082+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
6083+
6084+ err = get_user(ldw, (unsigned int *)addr);
6085+ err |= get_user(bv, (unsigned int *)(addr+4));
6086+ err |= get_user(ldw2, (unsigned int *)(addr+8));
6087+
6088+ if (err)
6089+ break;
6090+
6091+ if (ldw == 0x0E801096U &&
6092+ bv == 0xEAC0C000U &&
6093+ ldw2 == 0x0E881095U)
6094+ {
6095+ unsigned int resolver, map;
6096+
6097+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
6098+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
6099+ if (err)
6100+ break;
6101+
6102+ regs->gr[20] = instruction_pointer(regs)+8;
6103+ regs->gr[21] = map;
6104+ regs->gr[22] = resolver;
6105+ regs->iaoq[0] = resolver | 3UL;
6106+ regs->iaoq[1] = regs->iaoq[0] + 4;
6107+ return 3;
6108+ }
6109+ }
6110+ } while (0);
6111+#endif
6112+
6113+#ifdef CONFIG_PAX_EMUTRAMP
6114+
6115+#ifndef CONFIG_PAX_EMUSIGRT
6116+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
6117+ return 1;
6118+#endif
6119+
6120+ do { /* PaX: rt_sigreturn emulation */
6121+ unsigned int ldi1, ldi2, bel, nop;
6122+
6123+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
6124+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
6125+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
6126+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
6127+
6128+ if (err)
6129+ break;
6130+
6131+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
6132+ ldi2 == 0x3414015AU &&
6133+ bel == 0xE4008200U &&
6134+ nop == 0x08000240U)
6135+ {
6136+ regs->gr[25] = (ldi1 & 2) >> 1;
6137+ regs->gr[20] = __NR_rt_sigreturn;
6138+ regs->gr[31] = regs->iaoq[1] + 16;
6139+ regs->sr[0] = regs->iasq[1];
6140+ regs->iaoq[0] = 0x100UL;
6141+ regs->iaoq[1] = regs->iaoq[0] + 4;
6142+ regs->iasq[0] = regs->sr[2];
6143+ regs->iasq[1] = regs->sr[2];
6144+ return 2;
6145+ }
6146+ } while (0);
6147+#endif
6148+
6149+ return 1;
6150+}
6151+
6152+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6153+{
6154+ unsigned long i;
6155+
6156+ printk(KERN_ERR "PAX: bytes at PC: ");
6157+ for (i = 0; i < 5; i++) {
6158+ unsigned int c;
6159+ if (get_user(c, (unsigned int *)pc+i))
6160+ printk(KERN_CONT "???????? ");
6161+ else
6162+ printk(KERN_CONT "%08x ", c);
6163+ }
6164+ printk("\n");
6165+}
6166+#endif
6167+
6168 int fixup_exception(struct pt_regs *regs)
6169 {
6170 const struct exception_table_entry *fix;
6171@@ -194,8 +305,33 @@ good_area:
6172
6173 acc_type = parisc_acctyp(code,regs->iir);
6174
6175- if ((vma->vm_flags & acc_type) != acc_type)
6176+ if ((vma->vm_flags & acc_type) != acc_type) {
6177+
6178+#ifdef CONFIG_PAX_PAGEEXEC
6179+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
6180+ (address & ~3UL) == instruction_pointer(regs))
6181+ {
6182+ up_read(&mm->mmap_sem);
6183+ switch (pax_handle_fetch_fault(regs)) {
6184+
6185+#ifdef CONFIG_PAX_EMUPLT
6186+ case 3:
6187+ return;
6188+#endif
6189+
6190+#ifdef CONFIG_PAX_EMUTRAMP
6191+ case 2:
6192+ return;
6193+#endif
6194+
6195+ }
6196+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
6197+ do_group_exit(SIGKILL);
6198+ }
6199+#endif
6200+
6201 goto bad_area;
6202+ }
6203
6204 /*
6205 * If for any reason at all we couldn't handle the fault, make
6206diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
6207index e3b1d41..8e81edf 100644
6208--- a/arch/powerpc/include/asm/atomic.h
6209+++ b/arch/powerpc/include/asm/atomic.h
6210@@ -523,6 +523,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
6211 return t1;
6212 }
6213
6214+#define atomic64_read_unchecked(v) atomic64_read(v)
6215+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
6216+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
6217+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
6218+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
6219+#define atomic64_inc_unchecked(v) atomic64_inc(v)
6220+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
6221+#define atomic64_dec_unchecked(v) atomic64_dec(v)
6222+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
6223+
6224 #endif /* __powerpc64__ */
6225
6226 #endif /* __KERNEL__ */
6227diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
6228index 9e495c9..b6878e5 100644
6229--- a/arch/powerpc/include/asm/cache.h
6230+++ b/arch/powerpc/include/asm/cache.h
6231@@ -3,6 +3,7 @@
6232
6233 #ifdef __KERNEL__
6234
6235+#include <linux/const.h>
6236
6237 /* bytes per L1 cache line */
6238 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
6239@@ -22,7 +23,7 @@
6240 #define L1_CACHE_SHIFT 7
6241 #endif
6242
6243-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6244+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6245
6246 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6247
6248diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
6249index cc0655a..13eac2e 100644
6250--- a/arch/powerpc/include/asm/elf.h
6251+++ b/arch/powerpc/include/asm/elf.h
6252@@ -28,8 +28,19 @@
6253 the loader. We need to make sure that it is out of the way of the program
6254 that it will "exec", and that there is sufficient room for the brk. */
6255
6256-extern unsigned long randomize_et_dyn(unsigned long base);
6257-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
6258+#define ELF_ET_DYN_BASE (0x20000000)
6259+
6260+#ifdef CONFIG_PAX_ASLR
6261+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
6262+
6263+#ifdef __powerpc64__
6264+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
6265+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
6266+#else
6267+#define PAX_DELTA_MMAP_LEN 15
6268+#define PAX_DELTA_STACK_LEN 15
6269+#endif
6270+#endif
6271
6272 /*
6273 * Our registers are always unsigned longs, whether we're a 32 bit
6274@@ -123,10 +134,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6275 (0x7ff >> (PAGE_SHIFT - 12)) : \
6276 (0x3ffff >> (PAGE_SHIFT - 12)))
6277
6278-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
6279-#define arch_randomize_brk arch_randomize_brk
6280-
6281-
6282 #ifdef CONFIG_SPU_BASE
6283 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
6284 #define NT_SPU 1
6285diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
6286index 8196e9c..d83a9f3 100644
6287--- a/arch/powerpc/include/asm/exec.h
6288+++ b/arch/powerpc/include/asm/exec.h
6289@@ -4,6 +4,6 @@
6290 #ifndef _ASM_POWERPC_EXEC_H
6291 #define _ASM_POWERPC_EXEC_H
6292
6293-extern unsigned long arch_align_stack(unsigned long sp);
6294+#define arch_align_stack(x) ((x) & ~0xfUL)
6295
6296 #endif /* _ASM_POWERPC_EXEC_H */
6297diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
6298index 5acabbd..7ea14fa 100644
6299--- a/arch/powerpc/include/asm/kmap_types.h
6300+++ b/arch/powerpc/include/asm/kmap_types.h
6301@@ -10,7 +10,7 @@
6302 * 2 of the License, or (at your option) any later version.
6303 */
6304
6305-#define KM_TYPE_NR 16
6306+#define KM_TYPE_NR 17
6307
6308 #endif /* __KERNEL__ */
6309 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
6310diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
6311index 8565c25..2865190 100644
6312--- a/arch/powerpc/include/asm/mman.h
6313+++ b/arch/powerpc/include/asm/mman.h
6314@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
6315 }
6316 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
6317
6318-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
6319+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
6320 {
6321 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
6322 }
6323diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
6324index 988c812..63c7d70 100644
6325--- a/arch/powerpc/include/asm/page.h
6326+++ b/arch/powerpc/include/asm/page.h
6327@@ -220,8 +220,9 @@ extern long long virt_phys_offset;
6328 * and needs to be executable. This means the whole heap ends
6329 * up being executable.
6330 */
6331-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
6332- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6333+#define VM_DATA_DEFAULT_FLAGS32 \
6334+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
6335+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6336
6337 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
6338 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6339@@ -249,6 +250,9 @@ extern long long virt_phys_offset;
6340 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
6341 #endif
6342
6343+#define ktla_ktva(addr) (addr)
6344+#define ktva_ktla(addr) (addr)
6345+
6346 #ifndef CONFIG_PPC_BOOK3S_64
6347 /*
6348 * Use the top bit of the higher-level page table entries to indicate whether
6349diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
6350index 88693ce..ac6f9ab 100644
6351--- a/arch/powerpc/include/asm/page_64.h
6352+++ b/arch/powerpc/include/asm/page_64.h
6353@@ -153,15 +153,18 @@ do { \
6354 * stack by default, so in the absence of a PT_GNU_STACK program header
6355 * we turn execute permission off.
6356 */
6357-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
6358- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6359+#define VM_STACK_DEFAULT_FLAGS32 \
6360+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
6361+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6362
6363 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
6364 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6365
6366+#ifndef CONFIG_PAX_PAGEEXEC
6367 #define VM_STACK_DEFAULT_FLAGS \
6368 (is_32bit_task() ? \
6369 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
6370+#endif
6371
6372 #include <asm-generic/getorder.h>
6373
6374diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
6375index b66ae72..4a378cd 100644
6376--- a/arch/powerpc/include/asm/pgalloc-64.h
6377+++ b/arch/powerpc/include/asm/pgalloc-64.h
6378@@ -53,6 +53,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
6379 #ifndef CONFIG_PPC_64K_PAGES
6380
6381 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
6382+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
6383
6384 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
6385 {
6386@@ -70,6 +71,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6387 pud_set(pud, (unsigned long)pmd);
6388 }
6389
6390+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6391+{
6392+ pud_populate(mm, pud, pmd);
6393+}
6394+
6395 #define pmd_populate(mm, pmd, pte_page) \
6396 pmd_populate_kernel(mm, pmd, page_address(pte_page))
6397 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
6398@@ -171,6 +177,7 @@ extern void __tlb_remove_table(void *_table);
6399 #endif
6400
6401 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
6402+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
6403
6404 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
6405 pte_t *pte)
6406diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
6407index 7aeb955..19f748e 100644
6408--- a/arch/powerpc/include/asm/pgtable.h
6409+++ b/arch/powerpc/include/asm/pgtable.h
6410@@ -2,6 +2,7 @@
6411 #define _ASM_POWERPC_PGTABLE_H
6412 #ifdef __KERNEL__
6413
6414+#include <linux/const.h>
6415 #ifndef __ASSEMBLY__
6416 #include <asm/processor.h> /* For TASK_SIZE */
6417 #include <asm/mmu.h>
6418diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
6419index 4aad413..85d86bf 100644
6420--- a/arch/powerpc/include/asm/pte-hash32.h
6421+++ b/arch/powerpc/include/asm/pte-hash32.h
6422@@ -21,6 +21,7 @@
6423 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
6424 #define _PAGE_USER 0x004 /* usermode access allowed */
6425 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
6426+#define _PAGE_EXEC _PAGE_GUARDED
6427 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
6428 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
6429 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
6430diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
6431index 4a9e408..724aa59 100644
6432--- a/arch/powerpc/include/asm/reg.h
6433+++ b/arch/powerpc/include/asm/reg.h
6434@@ -234,6 +234,7 @@
6435 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
6436 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
6437 #define DSISR_NOHPTE 0x40000000 /* no translation found */
6438+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
6439 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
6440 #define DSISR_ISSTORE 0x02000000 /* access was a store */
6441 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
6442diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
6443index ffbaabe..eabe843 100644
6444--- a/arch/powerpc/include/asm/smp.h
6445+++ b/arch/powerpc/include/asm/smp.h
6446@@ -50,7 +50,7 @@ struct smp_ops_t {
6447 int (*cpu_disable)(void);
6448 void (*cpu_die)(unsigned int nr);
6449 int (*cpu_bootable)(unsigned int nr);
6450-};
6451+} __no_const;
6452
6453 extern void smp_send_debugger_break(void);
6454 extern void start_secondary_resume(void);
6455diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
6456index ba7b197..d292e26 100644
6457--- a/arch/powerpc/include/asm/thread_info.h
6458+++ b/arch/powerpc/include/asm/thread_info.h
6459@@ -93,7 +93,6 @@ static inline struct thread_info *current_thread_info(void)
6460 #define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling
6461 TIF_NEED_RESCHED */
6462 #define TIF_32BIT 4 /* 32 bit binary */
6463-#define TIF_PERFMON_WORK 5 /* work for pfm_handle_work() */
6464 #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
6465 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
6466 #define TIF_SINGLESTEP 8 /* singlestepping active */
6467@@ -107,6 +106,9 @@ static inline struct thread_info *current_thread_info(void)
6468 #define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation
6469 for stack store? */
6470 #define TIF_MEMDIE 17 /* is terminating due to OOM killer */
6471+#define TIF_PERFMON_WORK 18 /* work for pfm_handle_work() */
6472+/* mask must be expressable within 16 bits to satisfy 'andi' instruction reqs */
6473+#define TIF_GRSEC_SETXID 5 /* update credentials on syscall entry/exit */
6474
6475 /* as above, but as bit values */
6476 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
6477@@ -126,9 +128,10 @@ static inline struct thread_info *current_thread_info(void)
6478 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6479 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
6480 #define _TIF_NOHZ (1<<TIF_NOHZ)
6481+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6482 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
6483 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
6484- _TIF_NOHZ)
6485+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
6486
6487 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
6488 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
6489diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
6490index 4db4959..aba5c41 100644
6491--- a/arch/powerpc/include/asm/uaccess.h
6492+++ b/arch/powerpc/include/asm/uaccess.h
6493@@ -318,52 +318,6 @@ do { \
6494 extern unsigned long __copy_tofrom_user(void __user *to,
6495 const void __user *from, unsigned long size);
6496
6497-#ifndef __powerpc64__
6498-
6499-static inline unsigned long copy_from_user(void *to,
6500- const void __user *from, unsigned long n)
6501-{
6502- unsigned long over;
6503-
6504- if (access_ok(VERIFY_READ, from, n))
6505- return __copy_tofrom_user((__force void __user *)to, from, n);
6506- if ((unsigned long)from < TASK_SIZE) {
6507- over = (unsigned long)from + n - TASK_SIZE;
6508- return __copy_tofrom_user((__force void __user *)to, from,
6509- n - over) + over;
6510- }
6511- return n;
6512-}
6513-
6514-static inline unsigned long copy_to_user(void __user *to,
6515- const void *from, unsigned long n)
6516-{
6517- unsigned long over;
6518-
6519- if (access_ok(VERIFY_WRITE, to, n))
6520- return __copy_tofrom_user(to, (__force void __user *)from, n);
6521- if ((unsigned long)to < TASK_SIZE) {
6522- over = (unsigned long)to + n - TASK_SIZE;
6523- return __copy_tofrom_user(to, (__force void __user *)from,
6524- n - over) + over;
6525- }
6526- return n;
6527-}
6528-
6529-#else /* __powerpc64__ */
6530-
6531-#define __copy_in_user(to, from, size) \
6532- __copy_tofrom_user((to), (from), (size))
6533-
6534-extern unsigned long copy_from_user(void *to, const void __user *from,
6535- unsigned long n);
6536-extern unsigned long copy_to_user(void __user *to, const void *from,
6537- unsigned long n);
6538-extern unsigned long copy_in_user(void __user *to, const void __user *from,
6539- unsigned long n);
6540-
6541-#endif /* __powerpc64__ */
6542-
6543 static inline unsigned long __copy_from_user_inatomic(void *to,
6544 const void __user *from, unsigned long n)
6545 {
6546@@ -387,6 +341,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
6547 if (ret == 0)
6548 return 0;
6549 }
6550+
6551+ if (!__builtin_constant_p(n))
6552+ check_object_size(to, n, false);
6553+
6554 return __copy_tofrom_user((__force void __user *)to, from, n);
6555 }
6556
6557@@ -413,6 +371,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
6558 if (ret == 0)
6559 return 0;
6560 }
6561+
6562+ if (!__builtin_constant_p(n))
6563+ check_object_size(from, n, true);
6564+
6565 return __copy_tofrom_user(to, (__force const void __user *)from, n);
6566 }
6567
6568@@ -430,6 +392,92 @@ static inline unsigned long __copy_to_user(void __user *to,
6569 return __copy_to_user_inatomic(to, from, size);
6570 }
6571
6572+#ifndef __powerpc64__
6573+
6574+static inline unsigned long __must_check copy_from_user(void *to,
6575+ const void __user *from, unsigned long n)
6576+{
6577+ unsigned long over;
6578+
6579+ if ((long)n < 0)
6580+ return n;
6581+
6582+ if (access_ok(VERIFY_READ, from, n)) {
6583+ if (!__builtin_constant_p(n))
6584+ check_object_size(to, n, false);
6585+ return __copy_tofrom_user((__force void __user *)to, from, n);
6586+ }
6587+ if ((unsigned long)from < TASK_SIZE) {
6588+ over = (unsigned long)from + n - TASK_SIZE;
6589+ if (!__builtin_constant_p(n - over))
6590+ check_object_size(to, n - over, false);
6591+ return __copy_tofrom_user((__force void __user *)to, from,
6592+ n - over) + over;
6593+ }
6594+ return n;
6595+}
6596+
6597+static inline unsigned long __must_check copy_to_user(void __user *to,
6598+ const void *from, unsigned long n)
6599+{
6600+ unsigned long over;
6601+
6602+ if ((long)n < 0)
6603+ return n;
6604+
6605+ if (access_ok(VERIFY_WRITE, to, n)) {
6606+ if (!__builtin_constant_p(n))
6607+ check_object_size(from, n, true);
6608+ return __copy_tofrom_user(to, (__force void __user *)from, n);
6609+ }
6610+ if ((unsigned long)to < TASK_SIZE) {
6611+ over = (unsigned long)to + n - TASK_SIZE;
6612+ if (!__builtin_constant_p(n))
6613+ check_object_size(from, n - over, true);
6614+ return __copy_tofrom_user(to, (__force void __user *)from,
6615+ n - over) + over;
6616+ }
6617+ return n;
6618+}
6619+
6620+#else /* __powerpc64__ */
6621+
6622+#define __copy_in_user(to, from, size) \
6623+ __copy_tofrom_user((to), (from), (size))
6624+
6625+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
6626+{
6627+ if ((long)n < 0 || n > INT_MAX)
6628+ return n;
6629+
6630+ if (!__builtin_constant_p(n))
6631+ check_object_size(to, n, false);
6632+
6633+ if (likely(access_ok(VERIFY_READ, from, n)))
6634+ n = __copy_from_user(to, from, n);
6635+ else
6636+ memset(to, 0, n);
6637+ return n;
6638+}
6639+
6640+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
6641+{
6642+ if ((long)n < 0 || n > INT_MAX)
6643+ return n;
6644+
6645+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
6646+ if (!__builtin_constant_p(n))
6647+ check_object_size(from, n, true);
6648+ n = __copy_to_user(to, from, n);
6649+ }
6650+ return n;
6651+}
6652+
6653+extern unsigned long copy_in_user(void __user *to, const void __user *from,
6654+ unsigned long n);
6655+
6656+#endif /* __powerpc64__ */
6657+
6658 extern unsigned long __clear_user(void __user *addr, unsigned long size);
6659
6660 static inline unsigned long clear_user(void __user *addr, unsigned long size)
6661diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
6662index 645170a..6cf0271 100644
6663--- a/arch/powerpc/kernel/exceptions-64e.S
6664+++ b/arch/powerpc/kernel/exceptions-64e.S
6665@@ -757,6 +757,7 @@ storage_fault_common:
6666 std r14,_DAR(r1)
6667 std r15,_DSISR(r1)
6668 addi r3,r1,STACK_FRAME_OVERHEAD
6669+ bl .save_nvgprs
6670 mr r4,r14
6671 mr r5,r15
6672 ld r14,PACA_EXGEN+EX_R14(r13)
6673@@ -765,8 +766,7 @@ storage_fault_common:
6674 cmpdi r3,0
6675 bne- 1f
6676 b .ret_from_except_lite
6677-1: bl .save_nvgprs
6678- mr r5,r3
6679+1: mr r5,r3
6680 addi r3,r1,STACK_FRAME_OVERHEAD
6681 ld r4,_DAR(r1)
6682 bl .bad_page_fault
6683diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
6684index 40e4a17..5a84b37 100644
6685--- a/arch/powerpc/kernel/exceptions-64s.S
6686+++ b/arch/powerpc/kernel/exceptions-64s.S
6687@@ -1362,10 +1362,10 @@ handle_page_fault:
6688 11: ld r4,_DAR(r1)
6689 ld r5,_DSISR(r1)
6690 addi r3,r1,STACK_FRAME_OVERHEAD
6691+ bl .save_nvgprs
6692 bl .do_page_fault
6693 cmpdi r3,0
6694 beq+ 12f
6695- bl .save_nvgprs
6696 mr r5,r3
6697 addi r3,r1,STACK_FRAME_OVERHEAD
6698 lwz r4,_DAR(r1)
6699diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
6700index 2e3200c..72095ce 100644
6701--- a/arch/powerpc/kernel/module_32.c
6702+++ b/arch/powerpc/kernel/module_32.c
6703@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
6704 me->arch.core_plt_section = i;
6705 }
6706 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
6707- printk("Module doesn't contain .plt or .init.plt sections.\n");
6708+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
6709 return -ENOEXEC;
6710 }
6711
6712@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
6713
6714 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
6715 /* Init, or core PLT? */
6716- if (location >= mod->module_core
6717- && location < mod->module_core + mod->core_size)
6718+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
6719+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
6720 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
6721- else
6722+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
6723+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
6724 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
6725+ else {
6726+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
6727+ return ~0UL;
6728+ }
6729
6730 /* Find this entry, or if that fails, the next avail. entry */
6731 while (entry->jump[0]) {
6732diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
6733index 076d124..6cb2cbf 100644
6734--- a/arch/powerpc/kernel/process.c
6735+++ b/arch/powerpc/kernel/process.c
6736@@ -874,8 +874,8 @@ void show_regs(struct pt_regs * regs)
6737 * Lookup NIP late so we have the best change of getting the
6738 * above info out without failing
6739 */
6740- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
6741- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
6742+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
6743+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
6744 #endif
6745 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
6746 printk("PACATMSCRATCH [%llx]\n", get_paca()->tm_scratch);
6747@@ -1335,10 +1335,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
6748 newsp = stack[0];
6749 ip = stack[STACK_FRAME_LR_SAVE];
6750 if (!firstframe || ip != lr) {
6751- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
6752+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
6753 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6754 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
6755- printk(" (%pS)",
6756+ printk(" (%pA)",
6757 (void *)current->ret_stack[curr_frame].ret);
6758 curr_frame--;
6759 }
6760@@ -1358,7 +1358,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
6761 struct pt_regs *regs = (struct pt_regs *)
6762 (sp + STACK_FRAME_OVERHEAD);
6763 lr = regs->link;
6764- printk("--- Exception: %lx at %pS\n LR = %pS\n",
6765+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
6766 regs->trap, (void *)regs->nip, (void *)lr);
6767 firstframe = 1;
6768 }
6769@@ -1394,58 +1394,3 @@ void notrace __ppc64_runlatch_off(void)
6770 mtspr(SPRN_CTRLT, ctrl);
6771 }
6772 #endif /* CONFIG_PPC64 */
6773-
6774-unsigned long arch_align_stack(unsigned long sp)
6775-{
6776- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6777- sp -= get_random_int() & ~PAGE_MASK;
6778- return sp & ~0xf;
6779-}
6780-
6781-static inline unsigned long brk_rnd(void)
6782-{
6783- unsigned long rnd = 0;
6784-
6785- /* 8MB for 32bit, 1GB for 64bit */
6786- if (is_32bit_task())
6787- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
6788- else
6789- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
6790-
6791- return rnd << PAGE_SHIFT;
6792-}
6793-
6794-unsigned long arch_randomize_brk(struct mm_struct *mm)
6795-{
6796- unsigned long base = mm->brk;
6797- unsigned long ret;
6798-
6799-#ifdef CONFIG_PPC_STD_MMU_64
6800- /*
6801- * If we are using 1TB segments and we are allowed to randomise
6802- * the heap, we can put it above 1TB so it is backed by a 1TB
6803- * segment. Otherwise the heap will be in the bottom 1TB
6804- * which always uses 256MB segments and this may result in a
6805- * performance penalty.
6806- */
6807- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
6808- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
6809-#endif
6810-
6811- ret = PAGE_ALIGN(base + brk_rnd());
6812-
6813- if (ret < mm->brk)
6814- return mm->brk;
6815-
6816- return ret;
6817-}
6818-
6819-unsigned long randomize_et_dyn(unsigned long base)
6820-{
6821- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
6822-
6823- if (ret < base)
6824- return base;
6825-
6826- return ret;
6827-}
6828diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
6829index 98c2fc1..b73a4ca 100644
6830--- a/arch/powerpc/kernel/ptrace.c
6831+++ b/arch/powerpc/kernel/ptrace.c
6832@@ -1781,6 +1781,10 @@ long arch_ptrace(struct task_struct *child, long request,
6833 return ret;
6834 }
6835
6836+#ifdef CONFIG_GRKERNSEC_SETXID
6837+extern void gr_delayed_cred_worker(void);
6838+#endif
6839+
6840 /*
6841 * We must return the syscall number to actually look up in the table.
6842 * This can be -1L to skip running any syscall at all.
6843@@ -1793,6 +1797,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
6844
6845 secure_computing_strict(regs->gpr[0]);
6846
6847+#ifdef CONFIG_GRKERNSEC_SETXID
6848+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6849+ gr_delayed_cred_worker();
6850+#endif
6851+
6852 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
6853 tracehook_report_syscall_entry(regs))
6854 /*
6855@@ -1827,6 +1836,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
6856 {
6857 int step;
6858
6859+#ifdef CONFIG_GRKERNSEC_SETXID
6860+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6861+ gr_delayed_cred_worker();
6862+#endif
6863+
6864 audit_syscall_exit(regs);
6865
6866 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
6867diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
6868index 201385c..0f01828 100644
6869--- a/arch/powerpc/kernel/signal_32.c
6870+++ b/arch/powerpc/kernel/signal_32.c
6871@@ -976,7 +976,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
6872 /* Save user registers on the stack */
6873 frame = &rt_sf->uc.uc_mcontext;
6874 addr = frame;
6875- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
6876+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
6877 sigret = 0;
6878 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
6879 } else {
6880diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
6881index 3459473..2d40783 100644
6882--- a/arch/powerpc/kernel/signal_64.c
6883+++ b/arch/powerpc/kernel/signal_64.c
6884@@ -749,7 +749,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
6885 #endif
6886
6887 /* Set up to return from userspace. */
6888- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
6889+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
6890 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
6891 } else {
6892 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
6893diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
6894index e68a845..8b140e6 100644
6895--- a/arch/powerpc/kernel/sysfs.c
6896+++ b/arch/powerpc/kernel/sysfs.c
6897@@ -522,7 +522,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
6898 return NOTIFY_OK;
6899 }
6900
6901-static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
6902+static struct notifier_block sysfs_cpu_nb = {
6903 .notifier_call = sysfs_cpu_notify,
6904 };
6905
6906diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
6907index c0e5caf..68e8305 100644
6908--- a/arch/powerpc/kernel/traps.c
6909+++ b/arch/powerpc/kernel/traps.c
6910@@ -143,6 +143,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
6911 return flags;
6912 }
6913
6914+extern void gr_handle_kernel_exploit(void);
6915+
6916 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
6917 int signr)
6918 {
6919@@ -192,6 +194,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
6920 panic("Fatal exception in interrupt");
6921 if (panic_on_oops)
6922 panic("Fatal exception");
6923+
6924+ gr_handle_kernel_exploit();
6925+
6926 do_exit(signr);
6927 }
6928
6929diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
6930index d4f463a..8fb7431 100644
6931--- a/arch/powerpc/kernel/vdso.c
6932+++ b/arch/powerpc/kernel/vdso.c
6933@@ -34,6 +34,7 @@
6934 #include <asm/firmware.h>
6935 #include <asm/vdso.h>
6936 #include <asm/vdso_datapage.h>
6937+#include <asm/mman.h>
6938
6939 #include "setup.h"
6940
6941@@ -222,7 +223,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
6942 vdso_base = VDSO32_MBASE;
6943 #endif
6944
6945- current->mm->context.vdso_base = 0;
6946+ current->mm->context.vdso_base = ~0UL;
6947
6948 /* vDSO has a problem and was disabled, just don't "enable" it for the
6949 * process
6950@@ -242,7 +243,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
6951 vdso_base = get_unmapped_area(NULL, vdso_base,
6952 (vdso_pages << PAGE_SHIFT) +
6953 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
6954- 0, 0);
6955+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
6956 if (IS_ERR_VALUE(vdso_base)) {
6957 rc = vdso_base;
6958 goto fail_mmapsem;
6959diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
6960index 5eea6f3..5d10396 100644
6961--- a/arch/powerpc/lib/usercopy_64.c
6962+++ b/arch/powerpc/lib/usercopy_64.c
6963@@ -9,22 +9,6 @@
6964 #include <linux/module.h>
6965 #include <asm/uaccess.h>
6966
6967-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
6968-{
6969- if (likely(access_ok(VERIFY_READ, from, n)))
6970- n = __copy_from_user(to, from, n);
6971- else
6972- memset(to, 0, n);
6973- return n;
6974-}
6975-
6976-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
6977-{
6978- if (likely(access_ok(VERIFY_WRITE, to, n)))
6979- n = __copy_to_user(to, from, n);
6980- return n;
6981-}
6982-
6983 unsigned long copy_in_user(void __user *to, const void __user *from,
6984 unsigned long n)
6985 {
6986@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
6987 return n;
6988 }
6989
6990-EXPORT_SYMBOL(copy_from_user);
6991-EXPORT_SYMBOL(copy_to_user);
6992 EXPORT_SYMBOL(copy_in_user);
6993
6994diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
6995index 8726779..a33c512 100644
6996--- a/arch/powerpc/mm/fault.c
6997+++ b/arch/powerpc/mm/fault.c
6998@@ -33,6 +33,10 @@
6999 #include <linux/magic.h>
7000 #include <linux/ratelimit.h>
7001 #include <linux/context_tracking.h>
7002+#include <linux/slab.h>
7003+#include <linux/pagemap.h>
7004+#include <linux/compiler.h>
7005+#include <linux/unistd.h>
7006
7007 #include <asm/firmware.h>
7008 #include <asm/page.h>
7009@@ -69,6 +73,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
7010 }
7011 #endif
7012
7013+#ifdef CONFIG_PAX_PAGEEXEC
7014+/*
7015+ * PaX: decide what to do with offenders (regs->nip = fault address)
7016+ *
7017+ * returns 1 when task should be killed
7018+ */
7019+static int pax_handle_fetch_fault(struct pt_regs *regs)
7020+{
7021+ return 1;
7022+}
7023+
7024+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7025+{
7026+ unsigned long i;
7027+
7028+ printk(KERN_ERR "PAX: bytes at PC: ");
7029+ for (i = 0; i < 5; i++) {
7030+ unsigned int c;
7031+ if (get_user(c, (unsigned int __user *)pc+i))
7032+ printk(KERN_CONT "???????? ");
7033+ else
7034+ printk(KERN_CONT "%08x ", c);
7035+ }
7036+ printk("\n");
7037+}
7038+#endif
7039+
7040 /*
7041 * Check whether the instruction at regs->nip is a store using
7042 * an update addressing form which will update r1.
7043@@ -216,7 +247,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
7044 * indicate errors in DSISR but can validly be set in SRR1.
7045 */
7046 if (trap == 0x400)
7047- error_code &= 0x48200000;
7048+ error_code &= 0x58200000;
7049 else
7050 is_write = error_code & DSISR_ISSTORE;
7051 #else
7052@@ -371,7 +402,7 @@ good_area:
7053 * "undefined". Of those that can be set, this is the only
7054 * one which seems bad.
7055 */
7056- if (error_code & 0x10000000)
7057+ if (error_code & DSISR_GUARDED)
7058 /* Guarded storage error. */
7059 goto bad_area;
7060 #endif /* CONFIG_8xx */
7061@@ -386,7 +417,7 @@ good_area:
7062 * processors use the same I/D cache coherency mechanism
7063 * as embedded.
7064 */
7065- if (error_code & DSISR_PROTFAULT)
7066+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
7067 goto bad_area;
7068 #endif /* CONFIG_PPC_STD_MMU */
7069
7070@@ -471,6 +502,23 @@ bad_area:
7071 bad_area_nosemaphore:
7072 /* User mode accesses cause a SIGSEGV */
7073 if (user_mode(regs)) {
7074+
7075+#ifdef CONFIG_PAX_PAGEEXEC
7076+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
7077+#ifdef CONFIG_PPC_STD_MMU
7078+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
7079+#else
7080+ if (is_exec && regs->nip == address) {
7081+#endif
7082+ switch (pax_handle_fetch_fault(regs)) {
7083+ }
7084+
7085+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
7086+ do_group_exit(SIGKILL);
7087+ }
7088+ }
7089+#endif
7090+
7091 _exception(SIGSEGV, regs, code, address);
7092 goto bail;
7093 }
7094diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
7095index 67a42ed..cd463e0 100644
7096--- a/arch/powerpc/mm/mmap_64.c
7097+++ b/arch/powerpc/mm/mmap_64.c
7098@@ -57,6 +57,10 @@ static unsigned long mmap_rnd(void)
7099 {
7100 unsigned long rnd = 0;
7101
7102+#ifdef CONFIG_PAX_RANDMMAP
7103+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7104+#endif
7105+
7106 if (current->flags & PF_RANDOMIZE) {
7107 /* 8MB for 32bit, 1GB for 64bit */
7108 if (is_32bit_task())
7109@@ -91,10 +95,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7110 */
7111 if (mmap_is_legacy()) {
7112 mm->mmap_base = TASK_UNMAPPED_BASE;
7113+
7114+#ifdef CONFIG_PAX_RANDMMAP
7115+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7116+ mm->mmap_base += mm->delta_mmap;
7117+#endif
7118+
7119 mm->get_unmapped_area = arch_get_unmapped_area;
7120 mm->unmap_area = arch_unmap_area;
7121 } else {
7122 mm->mmap_base = mmap_base();
7123+
7124+#ifdef CONFIG_PAX_RANDMMAP
7125+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7126+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7127+#endif
7128+
7129 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7130 mm->unmap_area = arch_unmap_area_topdown;
7131 }
7132diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
7133index e779642..e5bb889 100644
7134--- a/arch/powerpc/mm/mmu_context_nohash.c
7135+++ b/arch/powerpc/mm/mmu_context_nohash.c
7136@@ -363,7 +363,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
7137 return NOTIFY_OK;
7138 }
7139
7140-static struct notifier_block __cpuinitdata mmu_context_cpu_nb = {
7141+static struct notifier_block mmu_context_cpu_nb = {
7142 .notifier_call = mmu_context_cpu_notify,
7143 };
7144
7145diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
7146index 88c0425..717feb8 100644
7147--- a/arch/powerpc/mm/numa.c
7148+++ b/arch/powerpc/mm/numa.c
7149@@ -919,7 +919,7 @@ static void __init *careful_zallocation(int nid, unsigned long size,
7150 return ret;
7151 }
7152
7153-static struct notifier_block __cpuinitdata ppc64_numa_nb = {
7154+static struct notifier_block ppc64_numa_nb = {
7155 .notifier_call = cpu_numa_callback,
7156 .priority = 1 /* Must run before sched domains notifier. */
7157 };
7158diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
7159index 3e99c14..f00953c 100644
7160--- a/arch/powerpc/mm/slice.c
7161+++ b/arch/powerpc/mm/slice.c
7162@@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
7163 if ((mm->task_size - len) < addr)
7164 return 0;
7165 vma = find_vma(mm, addr);
7166- return (!vma || (addr + len) <= vma->vm_start);
7167+ return check_heap_stack_gap(vma, addr, len, 0);
7168 }
7169
7170 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
7171@@ -277,6 +277,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
7172 info.align_offset = 0;
7173
7174 addr = TASK_UNMAPPED_BASE;
7175+
7176+#ifdef CONFIG_PAX_RANDMMAP
7177+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7178+ addr += mm->delta_mmap;
7179+#endif
7180+
7181 while (addr < TASK_SIZE) {
7182 info.low_limit = addr;
7183 if (!slice_scan_available(addr, available, 1, &addr))
7184@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
7185 if (fixed && addr > (mm->task_size - len))
7186 return -EINVAL;
7187
7188+#ifdef CONFIG_PAX_RANDMMAP
7189+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
7190+ addr = 0;
7191+#endif
7192+
7193 /* If hint, make sure it matches our alignment restrictions */
7194 if (!fixed && addr) {
7195 addr = _ALIGN_UP(addr, 1ul << pshift);
7196diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
7197index 9098692..3d54cd1 100644
7198--- a/arch/powerpc/platforms/cell/spufs/file.c
7199+++ b/arch/powerpc/platforms/cell/spufs/file.c
7200@@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
7201 return VM_FAULT_NOPAGE;
7202 }
7203
7204-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
7205+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
7206 unsigned long address,
7207- void *buf, int len, int write)
7208+ void *buf, size_t len, int write)
7209 {
7210 struct spu_context *ctx = vma->vm_file->private_data;
7211 unsigned long offset = address - vma->vm_start;
7212diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
7213index bdb738a..49c9f95 100644
7214--- a/arch/powerpc/platforms/powermac/smp.c
7215+++ b/arch/powerpc/platforms/powermac/smp.c
7216@@ -885,7 +885,7 @@ static int smp_core99_cpu_notify(struct notifier_block *self,
7217 return NOTIFY_OK;
7218 }
7219
7220-static struct notifier_block __cpuinitdata smp_core99_cpu_nb = {
7221+static struct notifier_block smp_core99_cpu_nb = {
7222 .notifier_call = smp_core99_cpu_notify,
7223 };
7224 #endif /* CONFIG_HOTPLUG_CPU */
7225diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
7226index c797832..ce575c8 100644
7227--- a/arch/s390/include/asm/atomic.h
7228+++ b/arch/s390/include/asm/atomic.h
7229@@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
7230 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
7231 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
7232
7233+#define atomic64_read_unchecked(v) atomic64_read(v)
7234+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7235+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7236+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7237+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7238+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7239+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7240+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7241+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7242+
7243 #define smp_mb__before_atomic_dec() smp_mb()
7244 #define smp_mb__after_atomic_dec() smp_mb()
7245 #define smp_mb__before_atomic_inc() smp_mb()
7246diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
7247index 4d7ccac..d03d0ad 100644
7248--- a/arch/s390/include/asm/cache.h
7249+++ b/arch/s390/include/asm/cache.h
7250@@ -9,8 +9,10 @@
7251 #ifndef __ARCH_S390_CACHE_H
7252 #define __ARCH_S390_CACHE_H
7253
7254-#define L1_CACHE_BYTES 256
7255+#include <linux/const.h>
7256+
7257 #define L1_CACHE_SHIFT 8
7258+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7259 #define NET_SKB_PAD 32
7260
7261 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
7262diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
7263index 78f4f87..598ce39 100644
7264--- a/arch/s390/include/asm/elf.h
7265+++ b/arch/s390/include/asm/elf.h
7266@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
7267 the loader. We need to make sure that it is out of the way of the program
7268 that it will "exec", and that there is sufficient room for the brk. */
7269
7270-extern unsigned long randomize_et_dyn(unsigned long base);
7271-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
7272+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
7273+
7274+#ifdef CONFIG_PAX_ASLR
7275+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
7276+
7277+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
7278+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
7279+#endif
7280
7281 /* This yields a mask that user programs can use to figure out what
7282 instruction set this CPU supports. */
7283@@ -222,9 +228,6 @@ struct linux_binprm;
7284 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
7285 int arch_setup_additional_pages(struct linux_binprm *, int);
7286
7287-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7288-#define arch_randomize_brk arch_randomize_brk
7289-
7290 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
7291
7292 #endif
7293diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
7294index c4a93d6..4d2a9b4 100644
7295--- a/arch/s390/include/asm/exec.h
7296+++ b/arch/s390/include/asm/exec.h
7297@@ -7,6 +7,6 @@
7298 #ifndef __ASM_EXEC_H
7299 #define __ASM_EXEC_H
7300
7301-extern unsigned long arch_align_stack(unsigned long sp);
7302+#define arch_align_stack(x) ((x) & ~0xfUL)
7303
7304 #endif /* __ASM_EXEC_H */
7305diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
7306index 9c33ed4..e40cbef 100644
7307--- a/arch/s390/include/asm/uaccess.h
7308+++ b/arch/s390/include/asm/uaccess.h
7309@@ -252,6 +252,10 @@ static inline unsigned long __must_check
7310 copy_to_user(void __user *to, const void *from, unsigned long n)
7311 {
7312 might_fault();
7313+
7314+ if ((long)n < 0)
7315+ return n;
7316+
7317 return __copy_to_user(to, from, n);
7318 }
7319
7320@@ -275,6 +279,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
7321 static inline unsigned long __must_check
7322 __copy_from_user(void *to, const void __user *from, unsigned long n)
7323 {
7324+ if ((long)n < 0)
7325+ return n;
7326+
7327 if (__builtin_constant_p(n) && (n <= 256))
7328 return uaccess.copy_from_user_small(n, from, to);
7329 else
7330@@ -306,10 +313,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
7331 static inline unsigned long __must_check
7332 copy_from_user(void *to, const void __user *from, unsigned long n)
7333 {
7334- unsigned int sz = __compiletime_object_size(to);
7335+ size_t sz = __compiletime_object_size(to);
7336
7337 might_fault();
7338- if (unlikely(sz != -1 && sz < n)) {
7339+
7340+ if ((long)n < 0)
7341+ return n;
7342+
7343+ if (unlikely(sz != (size_t)-1 && sz < n)) {
7344 copy_from_user_overflow();
7345 return n;
7346 }
7347diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
7348index 7845e15..59c4353 100644
7349--- a/arch/s390/kernel/module.c
7350+++ b/arch/s390/kernel/module.c
7351@@ -169,11 +169,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
7352
7353 /* Increase core size by size of got & plt and set start
7354 offsets for got and plt. */
7355- me->core_size = ALIGN(me->core_size, 4);
7356- me->arch.got_offset = me->core_size;
7357- me->core_size += me->arch.got_size;
7358- me->arch.plt_offset = me->core_size;
7359- me->core_size += me->arch.plt_size;
7360+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
7361+ me->arch.got_offset = me->core_size_rw;
7362+ me->core_size_rw += me->arch.got_size;
7363+ me->arch.plt_offset = me->core_size_rx;
7364+ me->core_size_rx += me->arch.plt_size;
7365 return 0;
7366 }
7367
7368@@ -289,7 +289,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7369 if (info->got_initialized == 0) {
7370 Elf_Addr *gotent;
7371
7372- gotent = me->module_core + me->arch.got_offset +
7373+ gotent = me->module_core_rw + me->arch.got_offset +
7374 info->got_offset;
7375 *gotent = val;
7376 info->got_initialized = 1;
7377@@ -312,7 +312,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7378 rc = apply_rela_bits(loc, val, 0, 64, 0);
7379 else if (r_type == R_390_GOTENT ||
7380 r_type == R_390_GOTPLTENT) {
7381- val += (Elf_Addr) me->module_core - loc;
7382+ val += (Elf_Addr) me->module_core_rw - loc;
7383 rc = apply_rela_bits(loc, val, 1, 32, 1);
7384 }
7385 break;
7386@@ -325,7 +325,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7387 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
7388 if (info->plt_initialized == 0) {
7389 unsigned int *ip;
7390- ip = me->module_core + me->arch.plt_offset +
7391+ ip = me->module_core_rx + me->arch.plt_offset +
7392 info->plt_offset;
7393 #ifndef CONFIG_64BIT
7394 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
7395@@ -350,7 +350,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7396 val - loc + 0xffffUL < 0x1ffffeUL) ||
7397 (r_type == R_390_PLT32DBL &&
7398 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
7399- val = (Elf_Addr) me->module_core +
7400+ val = (Elf_Addr) me->module_core_rx +
7401 me->arch.plt_offset +
7402 info->plt_offset;
7403 val += rela->r_addend - loc;
7404@@ -372,7 +372,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7405 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
7406 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
7407 val = val + rela->r_addend -
7408- ((Elf_Addr) me->module_core + me->arch.got_offset);
7409+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
7410 if (r_type == R_390_GOTOFF16)
7411 rc = apply_rela_bits(loc, val, 0, 16, 0);
7412 else if (r_type == R_390_GOTOFF32)
7413@@ -382,7 +382,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7414 break;
7415 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
7416 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
7417- val = (Elf_Addr) me->module_core + me->arch.got_offset +
7418+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
7419 rela->r_addend - loc;
7420 if (r_type == R_390_GOTPC)
7421 rc = apply_rela_bits(loc, val, 1, 32, 0);
7422diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
7423index 2bc3edd..ab9d598 100644
7424--- a/arch/s390/kernel/process.c
7425+++ b/arch/s390/kernel/process.c
7426@@ -236,39 +236,3 @@ unsigned long get_wchan(struct task_struct *p)
7427 }
7428 return 0;
7429 }
7430-
7431-unsigned long arch_align_stack(unsigned long sp)
7432-{
7433- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7434- sp -= get_random_int() & ~PAGE_MASK;
7435- return sp & ~0xf;
7436-}
7437-
7438-static inline unsigned long brk_rnd(void)
7439-{
7440- /* 8MB for 32bit, 1GB for 64bit */
7441- if (is_32bit_task())
7442- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
7443- else
7444- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
7445-}
7446-
7447-unsigned long arch_randomize_brk(struct mm_struct *mm)
7448-{
7449- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
7450-
7451- if (ret < mm->brk)
7452- return mm->brk;
7453- return ret;
7454-}
7455-
7456-unsigned long randomize_et_dyn(unsigned long base)
7457-{
7458- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
7459-
7460- if (!(current->flags & PF_RANDOMIZE))
7461- return base;
7462- if (ret < base)
7463- return base;
7464- return ret;
7465-}
7466diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
7467index 06bafec..2bca531 100644
7468--- a/arch/s390/mm/mmap.c
7469+++ b/arch/s390/mm/mmap.c
7470@@ -90,10 +90,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7471 */
7472 if (mmap_is_legacy()) {
7473 mm->mmap_base = TASK_UNMAPPED_BASE;
7474+
7475+#ifdef CONFIG_PAX_RANDMMAP
7476+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7477+ mm->mmap_base += mm->delta_mmap;
7478+#endif
7479+
7480 mm->get_unmapped_area = arch_get_unmapped_area;
7481 mm->unmap_area = arch_unmap_area;
7482 } else {
7483 mm->mmap_base = mmap_base();
7484+
7485+#ifdef CONFIG_PAX_RANDMMAP
7486+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7487+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7488+#endif
7489+
7490 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7491 mm->unmap_area = arch_unmap_area_topdown;
7492 }
7493@@ -175,10 +187,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7494 */
7495 if (mmap_is_legacy()) {
7496 mm->mmap_base = TASK_UNMAPPED_BASE;
7497+
7498+#ifdef CONFIG_PAX_RANDMMAP
7499+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7500+ mm->mmap_base += mm->delta_mmap;
7501+#endif
7502+
7503 mm->get_unmapped_area = s390_get_unmapped_area;
7504 mm->unmap_area = arch_unmap_area;
7505 } else {
7506 mm->mmap_base = mmap_base();
7507+
7508+#ifdef CONFIG_PAX_RANDMMAP
7509+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7510+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7511+#endif
7512+
7513 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
7514 mm->unmap_area = arch_unmap_area_topdown;
7515 }
7516diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
7517index ae3d59f..f65f075 100644
7518--- a/arch/score/include/asm/cache.h
7519+++ b/arch/score/include/asm/cache.h
7520@@ -1,7 +1,9 @@
7521 #ifndef _ASM_SCORE_CACHE_H
7522 #define _ASM_SCORE_CACHE_H
7523
7524+#include <linux/const.h>
7525+
7526 #define L1_CACHE_SHIFT 4
7527-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7528+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7529
7530 #endif /* _ASM_SCORE_CACHE_H */
7531diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
7532index f9f3cd5..58ff438 100644
7533--- a/arch/score/include/asm/exec.h
7534+++ b/arch/score/include/asm/exec.h
7535@@ -1,6 +1,6 @@
7536 #ifndef _ASM_SCORE_EXEC_H
7537 #define _ASM_SCORE_EXEC_H
7538
7539-extern unsigned long arch_align_stack(unsigned long sp);
7540+#define arch_align_stack(x) (x)
7541
7542 #endif /* _ASM_SCORE_EXEC_H */
7543diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
7544index f4c6d02..e9355c3 100644
7545--- a/arch/score/kernel/process.c
7546+++ b/arch/score/kernel/process.c
7547@@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_struct *task)
7548
7549 return task_pt_regs(task)->cp0_epc;
7550 }
7551-
7552-unsigned long arch_align_stack(unsigned long sp)
7553-{
7554- return sp;
7555-}
7556diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
7557index ef9e555..331bd29 100644
7558--- a/arch/sh/include/asm/cache.h
7559+++ b/arch/sh/include/asm/cache.h
7560@@ -9,10 +9,11 @@
7561 #define __ASM_SH_CACHE_H
7562 #ifdef __KERNEL__
7563
7564+#include <linux/const.h>
7565 #include <linux/init.h>
7566 #include <cpu/cache.h>
7567
7568-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7569+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7570
7571 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
7572
7573diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
7574index 03f2b55..b0270327 100644
7575--- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
7576+++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
7577@@ -143,7 +143,7 @@ shx3_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
7578 return NOTIFY_OK;
7579 }
7580
7581-static struct notifier_block __cpuinitdata shx3_cpu_notifier = {
7582+static struct notifier_block shx3_cpu_notifier = {
7583 .notifier_call = shx3_cpu_callback,
7584 };
7585
7586diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
7587index 6777177..cb5e44f 100644
7588--- a/arch/sh/mm/mmap.c
7589+++ b/arch/sh/mm/mmap.c
7590@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7591 struct mm_struct *mm = current->mm;
7592 struct vm_area_struct *vma;
7593 int do_colour_align;
7594+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
7595 struct vm_unmapped_area_info info;
7596
7597 if (flags & MAP_FIXED) {
7598@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7599 if (filp || (flags & MAP_SHARED))
7600 do_colour_align = 1;
7601
7602+#ifdef CONFIG_PAX_RANDMMAP
7603+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7604+#endif
7605+
7606 if (addr) {
7607 if (do_colour_align)
7608 addr = COLOUR_ALIGN(addr, pgoff);
7609@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7610 addr = PAGE_ALIGN(addr);
7611
7612 vma = find_vma(mm, addr);
7613- if (TASK_SIZE - len >= addr &&
7614- (!vma || addr + len <= vma->vm_start))
7615+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
7616 return addr;
7617 }
7618
7619 info.flags = 0;
7620 info.length = len;
7621- info.low_limit = TASK_UNMAPPED_BASE;
7622+ info.low_limit = mm->mmap_base;
7623 info.high_limit = TASK_SIZE;
7624 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
7625 info.align_offset = pgoff << PAGE_SHIFT;
7626@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7627 struct mm_struct *mm = current->mm;
7628 unsigned long addr = addr0;
7629 int do_colour_align;
7630+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
7631 struct vm_unmapped_area_info info;
7632
7633 if (flags & MAP_FIXED) {
7634@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7635 if (filp || (flags & MAP_SHARED))
7636 do_colour_align = 1;
7637
7638+#ifdef CONFIG_PAX_RANDMMAP
7639+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7640+#endif
7641+
7642 /* requesting a specific address */
7643 if (addr) {
7644 if (do_colour_align)
7645@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7646 addr = PAGE_ALIGN(addr);
7647
7648 vma = find_vma(mm, addr);
7649- if (TASK_SIZE - len >= addr &&
7650- (!vma || addr + len <= vma->vm_start))
7651+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
7652 return addr;
7653 }
7654
7655@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7656 VM_BUG_ON(addr != -ENOMEM);
7657 info.flags = 0;
7658 info.low_limit = TASK_UNMAPPED_BASE;
7659+
7660+#ifdef CONFIG_PAX_RANDMMAP
7661+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7662+ info.low_limit += mm->delta_mmap;
7663+#endif
7664+
7665 info.high_limit = TASK_SIZE;
7666 addr = vm_unmapped_area(&info);
7667 }
7668diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
7669index be56a24..443328f 100644
7670--- a/arch/sparc/include/asm/atomic_64.h
7671+++ b/arch/sparc/include/asm/atomic_64.h
7672@@ -14,18 +14,40 @@
7673 #define ATOMIC64_INIT(i) { (i) }
7674
7675 #define atomic_read(v) (*(volatile int *)&(v)->counter)
7676+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7677+{
7678+ return v->counter;
7679+}
7680 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
7681+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7682+{
7683+ return v->counter;
7684+}
7685
7686 #define atomic_set(v, i) (((v)->counter) = i)
7687+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7688+{
7689+ v->counter = i;
7690+}
7691 #define atomic64_set(v, i) (((v)->counter) = i)
7692+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7693+{
7694+ v->counter = i;
7695+}
7696
7697 extern void atomic_add(int, atomic_t *);
7698+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
7699 extern void atomic64_add(long, atomic64_t *);
7700+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
7701 extern void atomic_sub(int, atomic_t *);
7702+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
7703 extern void atomic64_sub(long, atomic64_t *);
7704+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
7705
7706 extern int atomic_add_ret(int, atomic_t *);
7707+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
7708 extern long atomic64_add_ret(long, atomic64_t *);
7709+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
7710 extern int atomic_sub_ret(int, atomic_t *);
7711 extern long atomic64_sub_ret(long, atomic64_t *);
7712
7713@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
7714 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
7715
7716 #define atomic_inc_return(v) atomic_add_ret(1, v)
7717+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7718+{
7719+ return atomic_add_ret_unchecked(1, v);
7720+}
7721 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
7722+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7723+{
7724+ return atomic64_add_ret_unchecked(1, v);
7725+}
7726
7727 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
7728 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
7729
7730 #define atomic_add_return(i, v) atomic_add_ret(i, v)
7731+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7732+{
7733+ return atomic_add_ret_unchecked(i, v);
7734+}
7735 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
7736+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
7737+{
7738+ return atomic64_add_ret_unchecked(i, v);
7739+}
7740
7741 /*
7742 * atomic_inc_and_test - increment and test
7743@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
7744 * other cases.
7745 */
7746 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
7747+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7748+{
7749+ return atomic_inc_return_unchecked(v) == 0;
7750+}
7751 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
7752
7753 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
7754@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
7755 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
7756
7757 #define atomic_inc(v) atomic_add(1, v)
7758+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7759+{
7760+ atomic_add_unchecked(1, v);
7761+}
7762 #define atomic64_inc(v) atomic64_add(1, v)
7763+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7764+{
7765+ atomic64_add_unchecked(1, v);
7766+}
7767
7768 #define atomic_dec(v) atomic_sub(1, v)
7769+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7770+{
7771+ atomic_sub_unchecked(1, v);
7772+}
7773 #define atomic64_dec(v) atomic64_sub(1, v)
7774+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7775+{
7776+ atomic64_sub_unchecked(1, v);
7777+}
7778
7779 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
7780 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
7781
7782 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
7783+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7784+{
7785+ return cmpxchg(&v->counter, old, new);
7786+}
7787 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
7788+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7789+{
7790+ return xchg(&v->counter, new);
7791+}
7792
7793 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7794 {
7795- int c, old;
7796+ int c, old, new;
7797 c = atomic_read(v);
7798 for (;;) {
7799- if (unlikely(c == (u)))
7800+ if (unlikely(c == u))
7801 break;
7802- old = atomic_cmpxchg((v), c, c + (a));
7803+
7804+ asm volatile("addcc %2, %0, %0\n"
7805+
7806+#ifdef CONFIG_PAX_REFCOUNT
7807+ "tvs %%icc, 6\n"
7808+#endif
7809+
7810+ : "=r" (new)
7811+ : "0" (c), "ir" (a)
7812+ : "cc");
7813+
7814+ old = atomic_cmpxchg(v, c, new);
7815 if (likely(old == c))
7816 break;
7817 c = old;
7818@@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7819 #define atomic64_cmpxchg(v, o, n) \
7820 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
7821 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
7822+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
7823+{
7824+ return xchg(&v->counter, new);
7825+}
7826
7827 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
7828 {
7829- long c, old;
7830+ long c, old, new;
7831 c = atomic64_read(v);
7832 for (;;) {
7833- if (unlikely(c == (u)))
7834+ if (unlikely(c == u))
7835 break;
7836- old = atomic64_cmpxchg((v), c, c + (a));
7837+
7838+ asm volatile("addcc %2, %0, %0\n"
7839+
7840+#ifdef CONFIG_PAX_REFCOUNT
7841+ "tvs %%xcc, 6\n"
7842+#endif
7843+
7844+ : "=r" (new)
7845+ : "0" (c), "ir" (a)
7846+ : "cc");
7847+
7848+ old = atomic64_cmpxchg(v, c, new);
7849 if (likely(old == c))
7850 break;
7851 c = old;
7852 }
7853- return c != (u);
7854+ return c != u;
7855 }
7856
7857 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
7858diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
7859index 5bb6991..5c2132e 100644
7860--- a/arch/sparc/include/asm/cache.h
7861+++ b/arch/sparc/include/asm/cache.h
7862@@ -7,10 +7,12 @@
7863 #ifndef _SPARC_CACHE_H
7864 #define _SPARC_CACHE_H
7865
7866+#include <linux/const.h>
7867+
7868 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
7869
7870 #define L1_CACHE_SHIFT 5
7871-#define L1_CACHE_BYTES 32
7872+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7873
7874 #ifdef CONFIG_SPARC32
7875 #define SMP_CACHE_BYTES_SHIFT 5
7876diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
7877index a24e41f..47677ff 100644
7878--- a/arch/sparc/include/asm/elf_32.h
7879+++ b/arch/sparc/include/asm/elf_32.h
7880@@ -114,6 +114,13 @@ typedef struct {
7881
7882 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
7883
7884+#ifdef CONFIG_PAX_ASLR
7885+#define PAX_ELF_ET_DYN_BASE 0x10000UL
7886+
7887+#define PAX_DELTA_MMAP_LEN 16
7888+#define PAX_DELTA_STACK_LEN 16
7889+#endif
7890+
7891 /* This yields a mask that user programs can use to figure out what
7892 instruction set this cpu supports. This can NOT be done in userspace
7893 on Sparc. */
7894diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
7895index 370ca1e..d4f4a98 100644
7896--- a/arch/sparc/include/asm/elf_64.h
7897+++ b/arch/sparc/include/asm/elf_64.h
7898@@ -189,6 +189,13 @@ typedef struct {
7899 #define ELF_ET_DYN_BASE 0x0000010000000000UL
7900 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
7901
7902+#ifdef CONFIG_PAX_ASLR
7903+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
7904+
7905+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
7906+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
7907+#endif
7908+
7909 extern unsigned long sparc64_elf_hwcap;
7910 #define ELF_HWCAP sparc64_elf_hwcap
7911
7912diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
7913index 9b1c36d..209298b 100644
7914--- a/arch/sparc/include/asm/pgalloc_32.h
7915+++ b/arch/sparc/include/asm/pgalloc_32.h
7916@@ -33,6 +33,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
7917 }
7918
7919 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
7920+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
7921
7922 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
7923 unsigned long address)
7924diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
7925index bcfe063..b333142 100644
7926--- a/arch/sparc/include/asm/pgalloc_64.h
7927+++ b/arch/sparc/include/asm/pgalloc_64.h
7928@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
7929 }
7930
7931 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
7932+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
7933
7934 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
7935 {
7936diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
7937index 6fc1348..390c50a 100644
7938--- a/arch/sparc/include/asm/pgtable_32.h
7939+++ b/arch/sparc/include/asm/pgtable_32.h
7940@@ -50,6 +50,9 @@ extern unsigned long calc_highpages(void);
7941 #define PAGE_SHARED SRMMU_PAGE_SHARED
7942 #define PAGE_COPY SRMMU_PAGE_COPY
7943 #define PAGE_READONLY SRMMU_PAGE_RDONLY
7944+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
7945+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
7946+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
7947 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
7948
7949 /* Top-level page directory - dummy used by init-mm.
7950@@ -62,18 +65,18 @@ extern unsigned long ptr_in_current_pgd;
7951
7952 /* xwr */
7953 #define __P000 PAGE_NONE
7954-#define __P001 PAGE_READONLY
7955-#define __P010 PAGE_COPY
7956-#define __P011 PAGE_COPY
7957+#define __P001 PAGE_READONLY_NOEXEC
7958+#define __P010 PAGE_COPY_NOEXEC
7959+#define __P011 PAGE_COPY_NOEXEC
7960 #define __P100 PAGE_READONLY
7961 #define __P101 PAGE_READONLY
7962 #define __P110 PAGE_COPY
7963 #define __P111 PAGE_COPY
7964
7965 #define __S000 PAGE_NONE
7966-#define __S001 PAGE_READONLY
7967-#define __S010 PAGE_SHARED
7968-#define __S011 PAGE_SHARED
7969+#define __S001 PAGE_READONLY_NOEXEC
7970+#define __S010 PAGE_SHARED_NOEXEC
7971+#define __S011 PAGE_SHARED_NOEXEC
7972 #define __S100 PAGE_READONLY
7973 #define __S101 PAGE_READONLY
7974 #define __S110 PAGE_SHARED
7975diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
7976index 79da178..c2eede8 100644
7977--- a/arch/sparc/include/asm/pgtsrmmu.h
7978+++ b/arch/sparc/include/asm/pgtsrmmu.h
7979@@ -115,6 +115,11 @@
7980 SRMMU_EXEC | SRMMU_REF)
7981 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
7982 SRMMU_EXEC | SRMMU_REF)
7983+
7984+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
7985+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
7986+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
7987+
7988 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
7989 SRMMU_DIRTY | SRMMU_REF)
7990
7991diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
7992index 9689176..63c18ea 100644
7993--- a/arch/sparc/include/asm/spinlock_64.h
7994+++ b/arch/sparc/include/asm/spinlock_64.h
7995@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
7996
7997 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
7998
7999-static void inline arch_read_lock(arch_rwlock_t *lock)
8000+static inline void arch_read_lock(arch_rwlock_t *lock)
8001 {
8002 unsigned long tmp1, tmp2;
8003
8004 __asm__ __volatile__ (
8005 "1: ldsw [%2], %0\n"
8006 " brlz,pn %0, 2f\n"
8007-"4: add %0, 1, %1\n"
8008+"4: addcc %0, 1, %1\n"
8009+
8010+#ifdef CONFIG_PAX_REFCOUNT
8011+" tvs %%icc, 6\n"
8012+#endif
8013+
8014 " cas [%2], %0, %1\n"
8015 " cmp %0, %1\n"
8016 " bne,pn %%icc, 1b\n"
8017@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
8018 " .previous"
8019 : "=&r" (tmp1), "=&r" (tmp2)
8020 : "r" (lock)
8021- : "memory");
8022+ : "memory", "cc");
8023 }
8024
8025-static int inline arch_read_trylock(arch_rwlock_t *lock)
8026+static inline int arch_read_trylock(arch_rwlock_t *lock)
8027 {
8028 int tmp1, tmp2;
8029
8030@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
8031 "1: ldsw [%2], %0\n"
8032 " brlz,a,pn %0, 2f\n"
8033 " mov 0, %0\n"
8034-" add %0, 1, %1\n"
8035+" addcc %0, 1, %1\n"
8036+
8037+#ifdef CONFIG_PAX_REFCOUNT
8038+" tvs %%icc, 6\n"
8039+#endif
8040+
8041 " cas [%2], %0, %1\n"
8042 " cmp %0, %1\n"
8043 " bne,pn %%icc, 1b\n"
8044@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
8045 return tmp1;
8046 }
8047
8048-static void inline arch_read_unlock(arch_rwlock_t *lock)
8049+static inline void arch_read_unlock(arch_rwlock_t *lock)
8050 {
8051 unsigned long tmp1, tmp2;
8052
8053 __asm__ __volatile__(
8054 "1: lduw [%2], %0\n"
8055-" sub %0, 1, %1\n"
8056+" subcc %0, 1, %1\n"
8057+
8058+#ifdef CONFIG_PAX_REFCOUNT
8059+" tvs %%icc, 6\n"
8060+#endif
8061+
8062 " cas [%2], %0, %1\n"
8063 " cmp %0, %1\n"
8064 " bne,pn %%xcc, 1b\n"
8065@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
8066 : "memory");
8067 }
8068
8069-static void inline arch_write_lock(arch_rwlock_t *lock)
8070+static inline void arch_write_lock(arch_rwlock_t *lock)
8071 {
8072 unsigned long mask, tmp1, tmp2;
8073
8074@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
8075 : "memory");
8076 }
8077
8078-static void inline arch_write_unlock(arch_rwlock_t *lock)
8079+static inline void arch_write_unlock(arch_rwlock_t *lock)
8080 {
8081 __asm__ __volatile__(
8082 " stw %%g0, [%0]"
8083@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
8084 : "memory");
8085 }
8086
8087-static int inline arch_write_trylock(arch_rwlock_t *lock)
8088+static inline int arch_write_trylock(arch_rwlock_t *lock)
8089 {
8090 unsigned long mask, tmp1, tmp2, result;
8091
8092diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
8093index dd38075..e7cac83 100644
8094--- a/arch/sparc/include/asm/thread_info_32.h
8095+++ b/arch/sparc/include/asm/thread_info_32.h
8096@@ -49,6 +49,8 @@ struct thread_info {
8097 unsigned long w_saved;
8098
8099 struct restart_block restart_block;
8100+
8101+ unsigned long lowest_stack;
8102 };
8103
8104 /*
8105diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
8106index d5e5042..9bfee76 100644
8107--- a/arch/sparc/include/asm/thread_info_64.h
8108+++ b/arch/sparc/include/asm/thread_info_64.h
8109@@ -63,6 +63,8 @@ struct thread_info {
8110 struct pt_regs *kern_una_regs;
8111 unsigned int kern_una_insn;
8112
8113+ unsigned long lowest_stack;
8114+
8115 unsigned long fpregs[0] __attribute__ ((aligned(64)));
8116 };
8117
8118@@ -192,10 +194,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
8119 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
8120 /* flag bit 6 is available */
8121 #define TIF_32BIT 7 /* 32-bit binary */
8122-/* flag bit 8 is available */
8123+#define TIF_GRSEC_SETXID 8 /* update credentials on syscall entry/exit */
8124 #define TIF_SECCOMP 9 /* secure computing */
8125 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
8126 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
8127+
8128 /* NOTE: Thread flags >= 12 should be ones we have no interest
8129 * in using in assembly, else we can't use the mask as
8130 * an immediate value in instructions such as andcc.
8131@@ -214,12 +217,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
8132 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
8133 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
8134 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
8135+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
8136
8137 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
8138 _TIF_DO_NOTIFY_RESUME_MASK | \
8139 _TIF_NEED_RESCHED)
8140 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
8141
8142+#define _TIF_WORK_SYSCALL \
8143+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
8144+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
8145+
8146+
8147 /*
8148 * Thread-synchronous status.
8149 *
8150diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
8151index 0167d26..767bb0c 100644
8152--- a/arch/sparc/include/asm/uaccess.h
8153+++ b/arch/sparc/include/asm/uaccess.h
8154@@ -1,5 +1,6 @@
8155 #ifndef ___ASM_SPARC_UACCESS_H
8156 #define ___ASM_SPARC_UACCESS_H
8157+
8158 #if defined(__sparc__) && defined(__arch64__)
8159 #include <asm/uaccess_64.h>
8160 #else
8161diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
8162index 53a28dd..50c38c3 100644
8163--- a/arch/sparc/include/asm/uaccess_32.h
8164+++ b/arch/sparc/include/asm/uaccess_32.h
8165@@ -250,27 +250,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
8166
8167 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
8168 {
8169- if (n && __access_ok((unsigned long) to, n))
8170+ if ((long)n < 0)
8171+ return n;
8172+
8173+ if (n && __access_ok((unsigned long) to, n)) {
8174+ if (!__builtin_constant_p(n))
8175+ check_object_size(from, n, true);
8176 return __copy_user(to, (__force void __user *) from, n);
8177- else
8178+ } else
8179 return n;
8180 }
8181
8182 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
8183 {
8184+ if ((long)n < 0)
8185+ return n;
8186+
8187+ if (!__builtin_constant_p(n))
8188+ check_object_size(from, n, true);
8189+
8190 return __copy_user(to, (__force void __user *) from, n);
8191 }
8192
8193 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
8194 {
8195- if (n && __access_ok((unsigned long) from, n))
8196+ if ((long)n < 0)
8197+ return n;
8198+
8199+ if (n && __access_ok((unsigned long) from, n)) {
8200+ if (!__builtin_constant_p(n))
8201+ check_object_size(to, n, false);
8202 return __copy_user((__force void __user *) to, from, n);
8203- else
8204+ } else
8205 return n;
8206 }
8207
8208 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
8209 {
8210+ if ((long)n < 0)
8211+ return n;
8212+
8213 return __copy_user((__force void __user *) to, from, n);
8214 }
8215
8216diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
8217index e562d3c..191f176 100644
8218--- a/arch/sparc/include/asm/uaccess_64.h
8219+++ b/arch/sparc/include/asm/uaccess_64.h
8220@@ -10,6 +10,7 @@
8221 #include <linux/compiler.h>
8222 #include <linux/string.h>
8223 #include <linux/thread_info.h>
8224+#include <linux/kernel.h>
8225 #include <asm/asi.h>
8226 #include <asm/spitfire.h>
8227 #include <asm-generic/uaccess-unaligned.h>
8228@@ -214,8 +215,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
8229 static inline unsigned long __must_check
8230 copy_from_user(void *to, const void __user *from, unsigned long size)
8231 {
8232- unsigned long ret = ___copy_from_user(to, from, size);
8233+ unsigned long ret;
8234
8235+ if ((long)size < 0 || size > INT_MAX)
8236+ return size;
8237+
8238+ if (!__builtin_constant_p(size))
8239+ check_object_size(to, size, false);
8240+
8241+ ret = ___copy_from_user(to, from, size);
8242 if (unlikely(ret))
8243 ret = copy_from_user_fixup(to, from, size);
8244
8245@@ -231,8 +239,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
8246 static inline unsigned long __must_check
8247 copy_to_user(void __user *to, const void *from, unsigned long size)
8248 {
8249- unsigned long ret = ___copy_to_user(to, from, size);
8250+ unsigned long ret;
8251
8252+ if ((long)size < 0 || size > INT_MAX)
8253+ return size;
8254+
8255+ if (!__builtin_constant_p(size))
8256+ check_object_size(from, size, true);
8257+
8258+ ret = ___copy_to_user(to, from, size);
8259 if (unlikely(ret))
8260 ret = copy_to_user_fixup(to, from, size);
8261 return ret;
8262diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
8263index d432fb2..6056af1 100644
8264--- a/arch/sparc/kernel/Makefile
8265+++ b/arch/sparc/kernel/Makefile
8266@@ -3,7 +3,7 @@
8267 #
8268
8269 asflags-y := -ansi
8270-ccflags-y := -Werror
8271+#ccflags-y := -Werror
8272
8273 extra-y := head_$(BITS).o
8274
8275diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
8276index 5ef48da..11d460f 100644
8277--- a/arch/sparc/kernel/ds.c
8278+++ b/arch/sparc/kernel/ds.c
8279@@ -783,6 +783,16 @@ void ldom_set_var(const char *var, const char *value)
8280 char *base, *p;
8281 int msg_len, loops;
8282
8283+ if (strlen(var) + strlen(value) + 2 >
8284+ sizeof(pkt) - sizeof(pkt.header)) {
8285+ printk(KERN_ERR PFX
8286+ "contents length: %zu, which more than max: %lu,"
8287+ "so could not set (%s) variable to (%s).\n",
8288+ strlen(var) + strlen(value) + 2,
8289+ sizeof(pkt) - sizeof(pkt.header), var, value);
8290+ return;
8291+ }
8292+
8293 memset(&pkt, 0, sizeof(pkt));
8294 pkt.header.data.tag.type = DS_DATA;
8295 pkt.header.data.handle = cp->handle;
8296diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
8297index fdd819d..5af08c8 100644
8298--- a/arch/sparc/kernel/process_32.c
8299+++ b/arch/sparc/kernel/process_32.c
8300@@ -116,14 +116,14 @@ void show_regs(struct pt_regs *r)
8301
8302 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
8303 r->psr, r->pc, r->npc, r->y, print_tainted());
8304- printk("PC: <%pS>\n", (void *) r->pc);
8305+ printk("PC: <%pA>\n", (void *) r->pc);
8306 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
8307 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
8308 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
8309 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
8310 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
8311 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
8312- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
8313+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
8314
8315 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
8316 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
8317@@ -160,7 +160,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
8318 rw = (struct reg_window32 *) fp;
8319 pc = rw->ins[7];
8320 printk("[%08lx : ", pc);
8321- printk("%pS ] ", (void *) pc);
8322+ printk("%pA ] ", (void *) pc);
8323 fp = rw->ins[6];
8324 } while (++count < 16);
8325 printk("\n");
8326diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
8327index baebab2..9cd13b1 100644
8328--- a/arch/sparc/kernel/process_64.c
8329+++ b/arch/sparc/kernel/process_64.c
8330@@ -158,7 +158,7 @@ static void show_regwindow(struct pt_regs *regs)
8331 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
8332 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
8333 if (regs->tstate & TSTATE_PRIV)
8334- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
8335+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
8336 }
8337
8338 void show_regs(struct pt_regs *regs)
8339@@ -167,7 +167,7 @@ void show_regs(struct pt_regs *regs)
8340
8341 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
8342 regs->tpc, regs->tnpc, regs->y, print_tainted());
8343- printk("TPC: <%pS>\n", (void *) regs->tpc);
8344+ printk("TPC: <%pA>\n", (void *) regs->tpc);
8345 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
8346 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
8347 regs->u_regs[3]);
8348@@ -180,7 +180,7 @@ void show_regs(struct pt_regs *regs)
8349 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
8350 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
8351 regs->u_regs[15]);
8352- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
8353+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
8354 show_regwindow(regs);
8355 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
8356 }
8357@@ -269,7 +269,7 @@ void arch_trigger_all_cpu_backtrace(void)
8358 ((tp && tp->task) ? tp->task->pid : -1));
8359
8360 if (gp->tstate & TSTATE_PRIV) {
8361- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
8362+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
8363 (void *) gp->tpc,
8364 (void *) gp->o7,
8365 (void *) gp->i7,
8366diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
8367index 79cc0d1..ec62734 100644
8368--- a/arch/sparc/kernel/prom_common.c
8369+++ b/arch/sparc/kernel/prom_common.c
8370@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
8371
8372 unsigned int prom_early_allocated __initdata;
8373
8374-static struct of_pdt_ops prom_sparc_ops __initdata = {
8375+static struct of_pdt_ops prom_sparc_ops __initconst = {
8376 .nextprop = prom_common_nextprop,
8377 .getproplen = prom_getproplen,
8378 .getproperty = prom_getproperty,
8379diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
8380index 7ff45e4..a58f271 100644
8381--- a/arch/sparc/kernel/ptrace_64.c
8382+++ b/arch/sparc/kernel/ptrace_64.c
8383@@ -1057,6 +1057,10 @@ long arch_ptrace(struct task_struct *child, long request,
8384 return ret;
8385 }
8386
8387+#ifdef CONFIG_GRKERNSEC_SETXID
8388+extern void gr_delayed_cred_worker(void);
8389+#endif
8390+
8391 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
8392 {
8393 int ret = 0;
8394@@ -1064,6 +1068,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
8395 /* do the secure computing check first */
8396 secure_computing_strict(regs->u_regs[UREG_G1]);
8397
8398+#ifdef CONFIG_GRKERNSEC_SETXID
8399+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
8400+ gr_delayed_cred_worker();
8401+#endif
8402+
8403 if (test_thread_flag(TIF_SYSCALL_TRACE))
8404 ret = tracehook_report_syscall_entry(regs);
8405
8406@@ -1084,6 +1093,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
8407
8408 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
8409 {
8410+#ifdef CONFIG_GRKERNSEC_SETXID
8411+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
8412+ gr_delayed_cred_worker();
8413+#endif
8414+
8415 audit_syscall_exit(regs);
8416
8417 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
8418diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
8419index 3a8d184..49498a8 100644
8420--- a/arch/sparc/kernel/sys_sparc_32.c
8421+++ b/arch/sparc/kernel/sys_sparc_32.c
8422@@ -52,7 +52,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8423 if (len > TASK_SIZE - PAGE_SIZE)
8424 return -ENOMEM;
8425 if (!addr)
8426- addr = TASK_UNMAPPED_BASE;
8427+ addr = current->mm->mmap_base;
8428
8429 info.flags = 0;
8430 info.length = len;
8431diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
8432index 2daaaa6..d29bb82 100644
8433--- a/arch/sparc/kernel/sys_sparc_64.c
8434+++ b/arch/sparc/kernel/sys_sparc_64.c
8435@@ -90,13 +90,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8436 struct vm_area_struct * vma;
8437 unsigned long task_size = TASK_SIZE;
8438 int do_color_align;
8439+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8440 struct vm_unmapped_area_info info;
8441
8442 if (flags & MAP_FIXED) {
8443 /* We do not accept a shared mapping if it would violate
8444 * cache aliasing constraints.
8445 */
8446- if ((flags & MAP_SHARED) &&
8447+ if ((filp || (flags & MAP_SHARED)) &&
8448 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
8449 return -EINVAL;
8450 return addr;
8451@@ -111,6 +112,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8452 if (filp || (flags & MAP_SHARED))
8453 do_color_align = 1;
8454
8455+#ifdef CONFIG_PAX_RANDMMAP
8456+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8457+#endif
8458+
8459 if (addr) {
8460 if (do_color_align)
8461 addr = COLOR_ALIGN(addr, pgoff);
8462@@ -118,22 +123,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8463 addr = PAGE_ALIGN(addr);
8464
8465 vma = find_vma(mm, addr);
8466- if (task_size - len >= addr &&
8467- (!vma || addr + len <= vma->vm_start))
8468+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8469 return addr;
8470 }
8471
8472 info.flags = 0;
8473 info.length = len;
8474- info.low_limit = TASK_UNMAPPED_BASE;
8475+ info.low_limit = mm->mmap_base;
8476 info.high_limit = min(task_size, VA_EXCLUDE_START);
8477 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
8478 info.align_offset = pgoff << PAGE_SHIFT;
8479+ info.threadstack_offset = offset;
8480 addr = vm_unmapped_area(&info);
8481
8482 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
8483 VM_BUG_ON(addr != -ENOMEM);
8484 info.low_limit = VA_EXCLUDE_END;
8485+
8486+#ifdef CONFIG_PAX_RANDMMAP
8487+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8488+ info.low_limit += mm->delta_mmap;
8489+#endif
8490+
8491 info.high_limit = task_size;
8492 addr = vm_unmapped_area(&info);
8493 }
8494@@ -151,6 +162,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8495 unsigned long task_size = STACK_TOP32;
8496 unsigned long addr = addr0;
8497 int do_color_align;
8498+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8499 struct vm_unmapped_area_info info;
8500
8501 /* This should only ever run for 32-bit processes. */
8502@@ -160,7 +172,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8503 /* We do not accept a shared mapping if it would violate
8504 * cache aliasing constraints.
8505 */
8506- if ((flags & MAP_SHARED) &&
8507+ if ((filp || (flags & MAP_SHARED)) &&
8508 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
8509 return -EINVAL;
8510 return addr;
8511@@ -173,6 +185,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8512 if (filp || (flags & MAP_SHARED))
8513 do_color_align = 1;
8514
8515+#ifdef CONFIG_PAX_RANDMMAP
8516+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8517+#endif
8518+
8519 /* requesting a specific address */
8520 if (addr) {
8521 if (do_color_align)
8522@@ -181,8 +197,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8523 addr = PAGE_ALIGN(addr);
8524
8525 vma = find_vma(mm, addr);
8526- if (task_size - len >= addr &&
8527- (!vma || addr + len <= vma->vm_start))
8528+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8529 return addr;
8530 }
8531
8532@@ -192,6 +207,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8533 info.high_limit = mm->mmap_base;
8534 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
8535 info.align_offset = pgoff << PAGE_SHIFT;
8536+ info.threadstack_offset = offset;
8537 addr = vm_unmapped_area(&info);
8538
8539 /*
8540@@ -204,6 +220,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8541 VM_BUG_ON(addr != -ENOMEM);
8542 info.flags = 0;
8543 info.low_limit = TASK_UNMAPPED_BASE;
8544+
8545+#ifdef CONFIG_PAX_RANDMMAP
8546+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8547+ info.low_limit += mm->delta_mmap;
8548+#endif
8549+
8550 info.high_limit = STACK_TOP32;
8551 addr = vm_unmapped_area(&info);
8552 }
8553@@ -264,6 +286,10 @@ static unsigned long mmap_rnd(void)
8554 {
8555 unsigned long rnd = 0UL;
8556
8557+#ifdef CONFIG_PAX_RANDMMAP
8558+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8559+#endif
8560+
8561 if (current->flags & PF_RANDOMIZE) {
8562 unsigned long val = get_random_int();
8563 if (test_thread_flag(TIF_32BIT))
8564@@ -289,6 +315,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8565 gap == RLIM_INFINITY ||
8566 sysctl_legacy_va_layout) {
8567 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
8568+
8569+#ifdef CONFIG_PAX_RANDMMAP
8570+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8571+ mm->mmap_base += mm->delta_mmap;
8572+#endif
8573+
8574 mm->get_unmapped_area = arch_get_unmapped_area;
8575 mm->unmap_area = arch_unmap_area;
8576 } else {
8577@@ -301,6 +333,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8578 gap = (task_size / 6 * 5);
8579
8580 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
8581+
8582+#ifdef CONFIG_PAX_RANDMMAP
8583+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8584+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
8585+#endif
8586+
8587 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
8588 mm->unmap_area = arch_unmap_area_topdown;
8589 }
8590diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
8591index 22a1098..6255eb9 100644
8592--- a/arch/sparc/kernel/syscalls.S
8593+++ b/arch/sparc/kernel/syscalls.S
8594@@ -52,7 +52,7 @@ sys32_rt_sigreturn:
8595 #endif
8596 .align 32
8597 1: ldx [%g6 + TI_FLAGS], %l5
8598- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
8599+ andcc %l5, _TIF_WORK_SYSCALL, %g0
8600 be,pt %icc, rtrap
8601 nop
8602 call syscall_trace_leave
8603@@ -184,7 +184,7 @@ linux_sparc_syscall32:
8604
8605 srl %i5, 0, %o5 ! IEU1
8606 srl %i2, 0, %o2 ! IEU0 Group
8607- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
8608+ andcc %l0, _TIF_WORK_SYSCALL, %g0
8609 bne,pn %icc, linux_syscall_trace32 ! CTI
8610 mov %i0, %l5 ! IEU1
8611 call %l7 ! CTI Group brk forced
8612@@ -207,7 +207,7 @@ linux_sparc_syscall:
8613
8614 mov %i3, %o3 ! IEU1
8615 mov %i4, %o4 ! IEU0 Group
8616- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
8617+ andcc %l0, _TIF_WORK_SYSCALL, %g0
8618 bne,pn %icc, linux_syscall_trace ! CTI Group
8619 mov %i0, %l5 ! IEU0
8620 2: call %l7 ! CTI Group brk forced
8621@@ -223,7 +223,7 @@ ret_sys_call:
8622
8623 cmp %o0, -ERESTART_RESTARTBLOCK
8624 bgeu,pn %xcc, 1f
8625- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
8626+ andcc %l0, _TIF_WORK_SYSCALL, %g0
8627 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
8628
8629 2:
8630diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
8631index 654e8aa..45f431b 100644
8632--- a/arch/sparc/kernel/sysfs.c
8633+++ b/arch/sparc/kernel/sysfs.c
8634@@ -266,7 +266,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
8635 return NOTIFY_OK;
8636 }
8637
8638-static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
8639+static struct notifier_block sysfs_cpu_nb = {
8640 .notifier_call = sysfs_cpu_notify,
8641 };
8642
8643diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
8644index 6629829..036032d 100644
8645--- a/arch/sparc/kernel/traps_32.c
8646+++ b/arch/sparc/kernel/traps_32.c
8647@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
8648 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
8649 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
8650
8651+extern void gr_handle_kernel_exploit(void);
8652+
8653 void die_if_kernel(char *str, struct pt_regs *regs)
8654 {
8655 static int die_counter;
8656@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
8657 count++ < 30 &&
8658 (((unsigned long) rw) >= PAGE_OFFSET) &&
8659 !(((unsigned long) rw) & 0x7)) {
8660- printk("Caller[%08lx]: %pS\n", rw->ins[7],
8661+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
8662 (void *) rw->ins[7]);
8663 rw = (struct reg_window32 *)rw->ins[6];
8664 }
8665 }
8666 printk("Instruction DUMP:");
8667 instruction_dump ((unsigned long *) regs->pc);
8668- if(regs->psr & PSR_PS)
8669+ if(regs->psr & PSR_PS) {
8670+ gr_handle_kernel_exploit();
8671 do_exit(SIGKILL);
8672+ }
8673 do_exit(SIGSEGV);
8674 }
8675
8676diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
8677index b3f833a..ac74b2d 100644
8678--- a/arch/sparc/kernel/traps_64.c
8679+++ b/arch/sparc/kernel/traps_64.c
8680@@ -76,7 +76,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
8681 i + 1,
8682 p->trapstack[i].tstate, p->trapstack[i].tpc,
8683 p->trapstack[i].tnpc, p->trapstack[i].tt);
8684- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
8685+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
8686 }
8687 }
8688
8689@@ -96,6 +96,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
8690
8691 lvl -= 0x100;
8692 if (regs->tstate & TSTATE_PRIV) {
8693+
8694+#ifdef CONFIG_PAX_REFCOUNT
8695+ if (lvl == 6)
8696+ pax_report_refcount_overflow(regs);
8697+#endif
8698+
8699 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
8700 die_if_kernel(buffer, regs);
8701 }
8702@@ -114,11 +120,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
8703 void bad_trap_tl1(struct pt_regs *regs, long lvl)
8704 {
8705 char buffer[32];
8706-
8707+
8708 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
8709 0, lvl, SIGTRAP) == NOTIFY_STOP)
8710 return;
8711
8712+#ifdef CONFIG_PAX_REFCOUNT
8713+ if (lvl == 6)
8714+ pax_report_refcount_overflow(regs);
8715+#endif
8716+
8717 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
8718
8719 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
8720@@ -1142,7 +1153,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
8721 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
8722 printk("%s" "ERROR(%d): ",
8723 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
8724- printk("TPC<%pS>\n", (void *) regs->tpc);
8725+ printk("TPC<%pA>\n", (void *) regs->tpc);
8726 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
8727 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
8728 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
8729@@ -1749,7 +1760,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
8730 smp_processor_id(),
8731 (type & 0x1) ? 'I' : 'D',
8732 regs->tpc);
8733- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
8734+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
8735 panic("Irrecoverable Cheetah+ parity error.");
8736 }
8737
8738@@ -1757,7 +1768,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
8739 smp_processor_id(),
8740 (type & 0x1) ? 'I' : 'D',
8741 regs->tpc);
8742- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
8743+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
8744 }
8745
8746 struct sun4v_error_entry {
8747@@ -2104,9 +2115,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
8748
8749 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
8750 regs->tpc, tl);
8751- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
8752+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
8753 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
8754- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
8755+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
8756 (void *) regs->u_regs[UREG_I7]);
8757 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
8758 "pte[%lx] error[%lx]\n",
8759@@ -2128,9 +2139,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
8760
8761 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
8762 regs->tpc, tl);
8763- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
8764+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
8765 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
8766- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
8767+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
8768 (void *) regs->u_regs[UREG_I7]);
8769 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
8770 "pte[%lx] error[%lx]\n",
8771@@ -2336,13 +2347,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
8772 fp = (unsigned long)sf->fp + STACK_BIAS;
8773 }
8774
8775- printk(" [%016lx] %pS\n", pc, (void *) pc);
8776+ printk(" [%016lx] %pA\n", pc, (void *) pc);
8777 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
8778 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
8779 int index = tsk->curr_ret_stack;
8780 if (tsk->ret_stack && index >= graph) {
8781 pc = tsk->ret_stack[index - graph].ret;
8782- printk(" [%016lx] %pS\n", pc, (void *) pc);
8783+ printk(" [%016lx] %pA\n", pc, (void *) pc);
8784 graph++;
8785 }
8786 }
8787@@ -2360,6 +2371,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
8788 return (struct reg_window *) (fp + STACK_BIAS);
8789 }
8790
8791+extern void gr_handle_kernel_exploit(void);
8792+
8793 void die_if_kernel(char *str, struct pt_regs *regs)
8794 {
8795 static int die_counter;
8796@@ -2388,7 +2401,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
8797 while (rw &&
8798 count++ < 30 &&
8799 kstack_valid(tp, (unsigned long) rw)) {
8800- printk("Caller[%016lx]: %pS\n", rw->ins[7],
8801+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
8802 (void *) rw->ins[7]);
8803
8804 rw = kernel_stack_up(rw);
8805@@ -2401,8 +2414,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
8806 }
8807 user_instruction_dump ((unsigned int __user *) regs->tpc);
8808 }
8809- if (regs->tstate & TSTATE_PRIV)
8810+ if (regs->tstate & TSTATE_PRIV) {
8811+ gr_handle_kernel_exploit();
8812 do_exit(SIGKILL);
8813+ }
8814 do_exit(SIGSEGV);
8815 }
8816 EXPORT_SYMBOL(die_if_kernel);
8817diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
8818index 8201c25e..072a2a7 100644
8819--- a/arch/sparc/kernel/unaligned_64.c
8820+++ b/arch/sparc/kernel/unaligned_64.c
8821@@ -286,7 +286,7 @@ static void log_unaligned(struct pt_regs *regs)
8822 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
8823
8824 if (__ratelimit(&ratelimit)) {
8825- printk("Kernel unaligned access at TPC[%lx] %pS\n",
8826+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
8827 regs->tpc, (void *) regs->tpc);
8828 }
8829 }
8830diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
8831index dbe119b..089c7c1 100644
8832--- a/arch/sparc/lib/Makefile
8833+++ b/arch/sparc/lib/Makefile
8834@@ -2,7 +2,7 @@
8835 #
8836
8837 asflags-y := -ansi -DST_DIV0=0x02
8838-ccflags-y := -Werror
8839+#ccflags-y := -Werror
8840
8841 lib-$(CONFIG_SPARC32) += ashrdi3.o
8842 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
8843diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
8844index 85c233d..68500e0 100644
8845--- a/arch/sparc/lib/atomic_64.S
8846+++ b/arch/sparc/lib/atomic_64.S
8847@@ -17,7 +17,12 @@
8848 ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
8849 BACKOFF_SETUP(%o2)
8850 1: lduw [%o1], %g1
8851- add %g1, %o0, %g7
8852+ addcc %g1, %o0, %g7
8853+
8854+#ifdef CONFIG_PAX_REFCOUNT
8855+ tvs %icc, 6
8856+#endif
8857+
8858 cas [%o1], %g1, %g7
8859 cmp %g1, %g7
8860 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
8861@@ -27,10 +32,28 @@ ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
8862 2: BACKOFF_SPIN(%o2, %o3, 1b)
8863 ENDPROC(atomic_add)
8864
8865+ENTRY(atomic_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
8866+ BACKOFF_SETUP(%o2)
8867+1: lduw [%o1], %g1
8868+ add %g1, %o0, %g7
8869+ cas [%o1], %g1, %g7
8870+ cmp %g1, %g7
8871+ bne,pn %icc, 2f
8872+ nop
8873+ retl
8874+ nop
8875+2: BACKOFF_SPIN(%o2, %o3, 1b)
8876+ENDPROC(atomic_add_unchecked)
8877+
8878 ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
8879 BACKOFF_SETUP(%o2)
8880 1: lduw [%o1], %g1
8881- sub %g1, %o0, %g7
8882+ subcc %g1, %o0, %g7
8883+
8884+#ifdef CONFIG_PAX_REFCOUNT
8885+ tvs %icc, 6
8886+#endif
8887+
8888 cas [%o1], %g1, %g7
8889 cmp %g1, %g7
8890 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
8891@@ -40,10 +63,28 @@ ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
8892 2: BACKOFF_SPIN(%o2, %o3, 1b)
8893 ENDPROC(atomic_sub)
8894
8895+ENTRY(atomic_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
8896+ BACKOFF_SETUP(%o2)
8897+1: lduw [%o1], %g1
8898+ sub %g1, %o0, %g7
8899+ cas [%o1], %g1, %g7
8900+ cmp %g1, %g7
8901+ bne,pn %icc, 2f
8902+ nop
8903+ retl
8904+ nop
8905+2: BACKOFF_SPIN(%o2, %o3, 1b)
8906+ENDPROC(atomic_sub_unchecked)
8907+
8908 ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
8909 BACKOFF_SETUP(%o2)
8910 1: lduw [%o1], %g1
8911- add %g1, %o0, %g7
8912+ addcc %g1, %o0, %g7
8913+
8914+#ifdef CONFIG_PAX_REFCOUNT
8915+ tvs %icc, 6
8916+#endif
8917+
8918 cas [%o1], %g1, %g7
8919 cmp %g1, %g7
8920 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
8921@@ -53,10 +94,29 @@ ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
8922 2: BACKOFF_SPIN(%o2, %o3, 1b)
8923 ENDPROC(atomic_add_ret)
8924
8925+ENTRY(atomic_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
8926+ BACKOFF_SETUP(%o2)
8927+1: lduw [%o1], %g1
8928+ addcc %g1, %o0, %g7
8929+ cas [%o1], %g1, %g7
8930+ cmp %g1, %g7
8931+ bne,pn %icc, 2f
8932+ add %g7, %o0, %g7
8933+ sra %g7, 0, %o0
8934+ retl
8935+ nop
8936+2: BACKOFF_SPIN(%o2, %o3, 1b)
8937+ENDPROC(atomic_add_ret_unchecked)
8938+
8939 ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
8940 BACKOFF_SETUP(%o2)
8941 1: lduw [%o1], %g1
8942- sub %g1, %o0, %g7
8943+ subcc %g1, %o0, %g7
8944+
8945+#ifdef CONFIG_PAX_REFCOUNT
8946+ tvs %icc, 6
8947+#endif
8948+
8949 cas [%o1], %g1, %g7
8950 cmp %g1, %g7
8951 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
8952@@ -69,7 +129,12 @@ ENDPROC(atomic_sub_ret)
8953 ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
8954 BACKOFF_SETUP(%o2)
8955 1: ldx [%o1], %g1
8956- add %g1, %o0, %g7
8957+ addcc %g1, %o0, %g7
8958+
8959+#ifdef CONFIG_PAX_REFCOUNT
8960+ tvs %xcc, 6
8961+#endif
8962+
8963 casx [%o1], %g1, %g7
8964 cmp %g1, %g7
8965 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
8966@@ -79,10 +144,28 @@ ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
8967 2: BACKOFF_SPIN(%o2, %o3, 1b)
8968 ENDPROC(atomic64_add)
8969
8970+ENTRY(atomic64_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
8971+ BACKOFF_SETUP(%o2)
8972+1: ldx [%o1], %g1
8973+ addcc %g1, %o0, %g7
8974+ casx [%o1], %g1, %g7
8975+ cmp %g1, %g7
8976+ bne,pn %xcc, 2f
8977+ nop
8978+ retl
8979+ nop
8980+2: BACKOFF_SPIN(%o2, %o3, 1b)
8981+ENDPROC(atomic64_add_unchecked)
8982+
8983 ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
8984 BACKOFF_SETUP(%o2)
8985 1: ldx [%o1], %g1
8986- sub %g1, %o0, %g7
8987+ subcc %g1, %o0, %g7
8988+
8989+#ifdef CONFIG_PAX_REFCOUNT
8990+ tvs %xcc, 6
8991+#endif
8992+
8993 casx [%o1], %g1, %g7
8994 cmp %g1, %g7
8995 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
8996@@ -92,10 +175,28 @@ ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
8997 2: BACKOFF_SPIN(%o2, %o3, 1b)
8998 ENDPROC(atomic64_sub)
8999
9000+ENTRY(atomic64_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
9001+ BACKOFF_SETUP(%o2)
9002+1: ldx [%o1], %g1
9003+ subcc %g1, %o0, %g7
9004+ casx [%o1], %g1, %g7
9005+ cmp %g1, %g7
9006+ bne,pn %xcc, 2f
9007+ nop
9008+ retl
9009+ nop
9010+2: BACKOFF_SPIN(%o2, %o3, 1b)
9011+ENDPROC(atomic64_sub_unchecked)
9012+
9013 ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
9014 BACKOFF_SETUP(%o2)
9015 1: ldx [%o1], %g1
9016- add %g1, %o0, %g7
9017+ addcc %g1, %o0, %g7
9018+
9019+#ifdef CONFIG_PAX_REFCOUNT
9020+ tvs %xcc, 6
9021+#endif
9022+
9023 casx [%o1], %g1, %g7
9024 cmp %g1, %g7
9025 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
9026@@ -105,10 +206,29 @@ ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
9027 2: BACKOFF_SPIN(%o2, %o3, 1b)
9028 ENDPROC(atomic64_add_ret)
9029
9030+ENTRY(atomic64_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
9031+ BACKOFF_SETUP(%o2)
9032+1: ldx [%o1], %g1
9033+ addcc %g1, %o0, %g7
9034+ casx [%o1], %g1, %g7
9035+ cmp %g1, %g7
9036+ bne,pn %xcc, 2f
9037+ add %g7, %o0, %g7
9038+ mov %g7, %o0
9039+ retl
9040+ nop
9041+2: BACKOFF_SPIN(%o2, %o3, 1b)
9042+ENDPROC(atomic64_add_ret_unchecked)
9043+
9044 ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
9045 BACKOFF_SETUP(%o2)
9046 1: ldx [%o1], %g1
9047- sub %g1, %o0, %g7
9048+ subcc %g1, %o0, %g7
9049+
9050+#ifdef CONFIG_PAX_REFCOUNT
9051+ tvs %xcc, 6
9052+#endif
9053+
9054 casx [%o1], %g1, %g7
9055 cmp %g1, %g7
9056 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
9057diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
9058index 0c4e35e..745d3e4 100644
9059--- a/arch/sparc/lib/ksyms.c
9060+++ b/arch/sparc/lib/ksyms.c
9061@@ -109,12 +109,18 @@ EXPORT_SYMBOL(__downgrade_write);
9062
9063 /* Atomic counter implementation. */
9064 EXPORT_SYMBOL(atomic_add);
9065+EXPORT_SYMBOL(atomic_add_unchecked);
9066 EXPORT_SYMBOL(atomic_add_ret);
9067+EXPORT_SYMBOL(atomic_add_ret_unchecked);
9068 EXPORT_SYMBOL(atomic_sub);
9069+EXPORT_SYMBOL(atomic_sub_unchecked);
9070 EXPORT_SYMBOL(atomic_sub_ret);
9071 EXPORT_SYMBOL(atomic64_add);
9072+EXPORT_SYMBOL(atomic64_add_unchecked);
9073 EXPORT_SYMBOL(atomic64_add_ret);
9074+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
9075 EXPORT_SYMBOL(atomic64_sub);
9076+EXPORT_SYMBOL(atomic64_sub_unchecked);
9077 EXPORT_SYMBOL(atomic64_sub_ret);
9078 EXPORT_SYMBOL(atomic64_dec_if_positive);
9079
9080diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
9081index 30c3ecc..736f015 100644
9082--- a/arch/sparc/mm/Makefile
9083+++ b/arch/sparc/mm/Makefile
9084@@ -2,7 +2,7 @@
9085 #
9086
9087 asflags-y := -ansi
9088-ccflags-y := -Werror
9089+#ccflags-y := -Werror
9090
9091 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
9092 obj-y += fault_$(BITS).o
9093diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
9094index e98bfda..ea8d221 100644
9095--- a/arch/sparc/mm/fault_32.c
9096+++ b/arch/sparc/mm/fault_32.c
9097@@ -21,6 +21,9 @@
9098 #include <linux/perf_event.h>
9099 #include <linux/interrupt.h>
9100 #include <linux/kdebug.h>
9101+#include <linux/slab.h>
9102+#include <linux/pagemap.h>
9103+#include <linux/compiler.h>
9104
9105 #include <asm/page.h>
9106 #include <asm/pgtable.h>
9107@@ -159,6 +162,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
9108 return safe_compute_effective_address(regs, insn);
9109 }
9110
9111+#ifdef CONFIG_PAX_PAGEEXEC
9112+#ifdef CONFIG_PAX_DLRESOLVE
9113+static void pax_emuplt_close(struct vm_area_struct *vma)
9114+{
9115+ vma->vm_mm->call_dl_resolve = 0UL;
9116+}
9117+
9118+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9119+{
9120+ unsigned int *kaddr;
9121+
9122+ vmf->page = alloc_page(GFP_HIGHUSER);
9123+ if (!vmf->page)
9124+ return VM_FAULT_OOM;
9125+
9126+ kaddr = kmap(vmf->page);
9127+ memset(kaddr, 0, PAGE_SIZE);
9128+ kaddr[0] = 0x9DE3BFA8U; /* save */
9129+ flush_dcache_page(vmf->page);
9130+ kunmap(vmf->page);
9131+ return VM_FAULT_MAJOR;
9132+}
9133+
9134+static const struct vm_operations_struct pax_vm_ops = {
9135+ .close = pax_emuplt_close,
9136+ .fault = pax_emuplt_fault
9137+};
9138+
9139+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
9140+{
9141+ int ret;
9142+
9143+ INIT_LIST_HEAD(&vma->anon_vma_chain);
9144+ vma->vm_mm = current->mm;
9145+ vma->vm_start = addr;
9146+ vma->vm_end = addr + PAGE_SIZE;
9147+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
9148+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
9149+ vma->vm_ops = &pax_vm_ops;
9150+
9151+ ret = insert_vm_struct(current->mm, vma);
9152+ if (ret)
9153+ return ret;
9154+
9155+ ++current->mm->total_vm;
9156+ return 0;
9157+}
9158+#endif
9159+
9160+/*
9161+ * PaX: decide what to do with offenders (regs->pc = fault address)
9162+ *
9163+ * returns 1 when task should be killed
9164+ * 2 when patched PLT trampoline was detected
9165+ * 3 when unpatched PLT trampoline was detected
9166+ */
9167+static int pax_handle_fetch_fault(struct pt_regs *regs)
9168+{
9169+
9170+#ifdef CONFIG_PAX_EMUPLT
9171+ int err;
9172+
9173+ do { /* PaX: patched PLT emulation #1 */
9174+ unsigned int sethi1, sethi2, jmpl;
9175+
9176+ err = get_user(sethi1, (unsigned int *)regs->pc);
9177+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
9178+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
9179+
9180+ if (err)
9181+ break;
9182+
9183+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
9184+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
9185+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
9186+ {
9187+ unsigned int addr;
9188+
9189+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
9190+ addr = regs->u_regs[UREG_G1];
9191+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
9192+ regs->pc = addr;
9193+ regs->npc = addr+4;
9194+ return 2;
9195+ }
9196+ } while (0);
9197+
9198+ do { /* PaX: patched PLT emulation #2 */
9199+ unsigned int ba;
9200+
9201+ err = get_user(ba, (unsigned int *)regs->pc);
9202+
9203+ if (err)
9204+ break;
9205+
9206+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
9207+ unsigned int addr;
9208+
9209+ if ((ba & 0xFFC00000U) == 0x30800000U)
9210+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
9211+ else
9212+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
9213+ regs->pc = addr;
9214+ regs->npc = addr+4;
9215+ return 2;
9216+ }
9217+ } while (0);
9218+
9219+ do { /* PaX: patched PLT emulation #3 */
9220+ unsigned int sethi, bajmpl, nop;
9221+
9222+ err = get_user(sethi, (unsigned int *)regs->pc);
9223+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
9224+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
9225+
9226+ if (err)
9227+ break;
9228+
9229+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9230+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
9231+ nop == 0x01000000U)
9232+ {
9233+ unsigned int addr;
9234+
9235+ addr = (sethi & 0x003FFFFFU) << 10;
9236+ regs->u_regs[UREG_G1] = addr;
9237+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
9238+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
9239+ else
9240+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
9241+ regs->pc = addr;
9242+ regs->npc = addr+4;
9243+ return 2;
9244+ }
9245+ } while (0);
9246+
9247+ do { /* PaX: unpatched PLT emulation step 1 */
9248+ unsigned int sethi, ba, nop;
9249+
9250+ err = get_user(sethi, (unsigned int *)regs->pc);
9251+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
9252+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
9253+
9254+ if (err)
9255+ break;
9256+
9257+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9258+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
9259+ nop == 0x01000000U)
9260+ {
9261+ unsigned int addr, save, call;
9262+
9263+ if ((ba & 0xFFC00000U) == 0x30800000U)
9264+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
9265+ else
9266+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
9267+
9268+ err = get_user(save, (unsigned int *)addr);
9269+ err |= get_user(call, (unsigned int *)(addr+4));
9270+ err |= get_user(nop, (unsigned int *)(addr+8));
9271+ if (err)
9272+ break;
9273+
9274+#ifdef CONFIG_PAX_DLRESOLVE
9275+ if (save == 0x9DE3BFA8U &&
9276+ (call & 0xC0000000U) == 0x40000000U &&
9277+ nop == 0x01000000U)
9278+ {
9279+ struct vm_area_struct *vma;
9280+ unsigned long call_dl_resolve;
9281+
9282+ down_read(&current->mm->mmap_sem);
9283+ call_dl_resolve = current->mm->call_dl_resolve;
9284+ up_read(&current->mm->mmap_sem);
9285+ if (likely(call_dl_resolve))
9286+ goto emulate;
9287+
9288+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
9289+
9290+ down_write(&current->mm->mmap_sem);
9291+ if (current->mm->call_dl_resolve) {
9292+ call_dl_resolve = current->mm->call_dl_resolve;
9293+ up_write(&current->mm->mmap_sem);
9294+ if (vma)
9295+ kmem_cache_free(vm_area_cachep, vma);
9296+ goto emulate;
9297+ }
9298+
9299+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
9300+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
9301+ up_write(&current->mm->mmap_sem);
9302+ if (vma)
9303+ kmem_cache_free(vm_area_cachep, vma);
9304+ return 1;
9305+ }
9306+
9307+ if (pax_insert_vma(vma, call_dl_resolve)) {
9308+ up_write(&current->mm->mmap_sem);
9309+ kmem_cache_free(vm_area_cachep, vma);
9310+ return 1;
9311+ }
9312+
9313+ current->mm->call_dl_resolve = call_dl_resolve;
9314+ up_write(&current->mm->mmap_sem);
9315+
9316+emulate:
9317+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9318+ regs->pc = call_dl_resolve;
9319+ regs->npc = addr+4;
9320+ return 3;
9321+ }
9322+#endif
9323+
9324+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
9325+ if ((save & 0xFFC00000U) == 0x05000000U &&
9326+ (call & 0xFFFFE000U) == 0x85C0A000U &&
9327+ nop == 0x01000000U)
9328+ {
9329+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9330+ regs->u_regs[UREG_G2] = addr + 4;
9331+ addr = (save & 0x003FFFFFU) << 10;
9332+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
9333+ regs->pc = addr;
9334+ regs->npc = addr+4;
9335+ return 3;
9336+ }
9337+ }
9338+ } while (0);
9339+
9340+ do { /* PaX: unpatched PLT emulation step 2 */
9341+ unsigned int save, call, nop;
9342+
9343+ err = get_user(save, (unsigned int *)(regs->pc-4));
9344+ err |= get_user(call, (unsigned int *)regs->pc);
9345+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
9346+ if (err)
9347+ break;
9348+
9349+ if (save == 0x9DE3BFA8U &&
9350+ (call & 0xC0000000U) == 0x40000000U &&
9351+ nop == 0x01000000U)
9352+ {
9353+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
9354+
9355+ regs->u_regs[UREG_RETPC] = regs->pc;
9356+ regs->pc = dl_resolve;
9357+ regs->npc = dl_resolve+4;
9358+ return 3;
9359+ }
9360+ } while (0);
9361+#endif
9362+
9363+ return 1;
9364+}
9365+
9366+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9367+{
9368+ unsigned long i;
9369+
9370+ printk(KERN_ERR "PAX: bytes at PC: ");
9371+ for (i = 0; i < 8; i++) {
9372+ unsigned int c;
9373+ if (get_user(c, (unsigned int *)pc+i))
9374+ printk(KERN_CONT "???????? ");
9375+ else
9376+ printk(KERN_CONT "%08x ", c);
9377+ }
9378+ printk("\n");
9379+}
9380+#endif
9381+
9382 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
9383 int text_fault)
9384 {
9385@@ -230,6 +504,24 @@ good_area:
9386 if (!(vma->vm_flags & VM_WRITE))
9387 goto bad_area;
9388 } else {
9389+
9390+#ifdef CONFIG_PAX_PAGEEXEC
9391+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
9392+ up_read(&mm->mmap_sem);
9393+ switch (pax_handle_fetch_fault(regs)) {
9394+
9395+#ifdef CONFIG_PAX_EMUPLT
9396+ case 2:
9397+ case 3:
9398+ return;
9399+#endif
9400+
9401+ }
9402+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
9403+ do_group_exit(SIGKILL);
9404+ }
9405+#endif
9406+
9407 /* Allow reads even for write-only mappings */
9408 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
9409 goto bad_area;
9410diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
9411index 5062ff3..e0b75f3 100644
9412--- a/arch/sparc/mm/fault_64.c
9413+++ b/arch/sparc/mm/fault_64.c
9414@@ -21,6 +21,9 @@
9415 #include <linux/kprobes.h>
9416 #include <linux/kdebug.h>
9417 #include <linux/percpu.h>
9418+#include <linux/slab.h>
9419+#include <linux/pagemap.h>
9420+#include <linux/compiler.h>
9421
9422 #include <asm/page.h>
9423 #include <asm/pgtable.h>
9424@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
9425 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
9426 regs->tpc);
9427 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
9428- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
9429+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
9430 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
9431 dump_stack();
9432 unhandled_fault(regs->tpc, current, regs);
9433@@ -270,6 +273,466 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
9434 show_regs(regs);
9435 }
9436
9437+#ifdef CONFIG_PAX_PAGEEXEC
9438+#ifdef CONFIG_PAX_DLRESOLVE
9439+static void pax_emuplt_close(struct vm_area_struct *vma)
9440+{
9441+ vma->vm_mm->call_dl_resolve = 0UL;
9442+}
9443+
9444+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9445+{
9446+ unsigned int *kaddr;
9447+
9448+ vmf->page = alloc_page(GFP_HIGHUSER);
9449+ if (!vmf->page)
9450+ return VM_FAULT_OOM;
9451+
9452+ kaddr = kmap(vmf->page);
9453+ memset(kaddr, 0, PAGE_SIZE);
9454+ kaddr[0] = 0x9DE3BFA8U; /* save */
9455+ flush_dcache_page(vmf->page);
9456+ kunmap(vmf->page);
9457+ return VM_FAULT_MAJOR;
9458+}
9459+
9460+static const struct vm_operations_struct pax_vm_ops = {
9461+ .close = pax_emuplt_close,
9462+ .fault = pax_emuplt_fault
9463+};
9464+
9465+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
9466+{
9467+ int ret;
9468+
9469+ INIT_LIST_HEAD(&vma->anon_vma_chain);
9470+ vma->vm_mm = current->mm;
9471+ vma->vm_start = addr;
9472+ vma->vm_end = addr + PAGE_SIZE;
9473+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
9474+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
9475+ vma->vm_ops = &pax_vm_ops;
9476+
9477+ ret = insert_vm_struct(current->mm, vma);
9478+ if (ret)
9479+ return ret;
9480+
9481+ ++current->mm->total_vm;
9482+ return 0;
9483+}
9484+#endif
9485+
9486+/*
9487+ * PaX: decide what to do with offenders (regs->tpc = fault address)
9488+ *
9489+ * returns 1 when task should be killed
9490+ * 2 when patched PLT trampoline was detected
9491+ * 3 when unpatched PLT trampoline was detected
9492+ */
9493+static int pax_handle_fetch_fault(struct pt_regs *regs)
9494+{
9495+
9496+#ifdef CONFIG_PAX_EMUPLT
9497+ int err;
9498+
9499+ do { /* PaX: patched PLT emulation #1 */
9500+ unsigned int sethi1, sethi2, jmpl;
9501+
9502+ err = get_user(sethi1, (unsigned int *)regs->tpc);
9503+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
9504+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
9505+
9506+ if (err)
9507+ break;
9508+
9509+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
9510+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
9511+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
9512+ {
9513+ unsigned long addr;
9514+
9515+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
9516+ addr = regs->u_regs[UREG_G1];
9517+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
9518+
9519+ if (test_thread_flag(TIF_32BIT))
9520+ addr &= 0xFFFFFFFFUL;
9521+
9522+ regs->tpc = addr;
9523+ regs->tnpc = addr+4;
9524+ return 2;
9525+ }
9526+ } while (0);
9527+
9528+ do { /* PaX: patched PLT emulation #2 */
9529+ unsigned int ba;
9530+
9531+ err = get_user(ba, (unsigned int *)regs->tpc);
9532+
9533+ if (err)
9534+ break;
9535+
9536+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
9537+ unsigned long addr;
9538+
9539+ if ((ba & 0xFFC00000U) == 0x30800000U)
9540+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
9541+ else
9542+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
9543+
9544+ if (test_thread_flag(TIF_32BIT))
9545+ addr &= 0xFFFFFFFFUL;
9546+
9547+ regs->tpc = addr;
9548+ regs->tnpc = addr+4;
9549+ return 2;
9550+ }
9551+ } while (0);
9552+
9553+ do { /* PaX: patched PLT emulation #3 */
9554+ unsigned int sethi, bajmpl, nop;
9555+
9556+ err = get_user(sethi, (unsigned int *)regs->tpc);
9557+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
9558+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
9559+
9560+ if (err)
9561+ break;
9562+
9563+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9564+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
9565+ nop == 0x01000000U)
9566+ {
9567+ unsigned long addr;
9568+
9569+ addr = (sethi & 0x003FFFFFU) << 10;
9570+ regs->u_regs[UREG_G1] = addr;
9571+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
9572+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
9573+ else
9574+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
9575+
9576+ if (test_thread_flag(TIF_32BIT))
9577+ addr &= 0xFFFFFFFFUL;
9578+
9579+ regs->tpc = addr;
9580+ regs->tnpc = addr+4;
9581+ return 2;
9582+ }
9583+ } while (0);
9584+
9585+ do { /* PaX: patched PLT emulation #4 */
9586+ unsigned int sethi, mov1, call, mov2;
9587+
9588+ err = get_user(sethi, (unsigned int *)regs->tpc);
9589+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
9590+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
9591+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
9592+
9593+ if (err)
9594+ break;
9595+
9596+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9597+ mov1 == 0x8210000FU &&
9598+ (call & 0xC0000000U) == 0x40000000U &&
9599+ mov2 == 0x9E100001U)
9600+ {
9601+ unsigned long addr;
9602+
9603+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
9604+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
9605+
9606+ if (test_thread_flag(TIF_32BIT))
9607+ addr &= 0xFFFFFFFFUL;
9608+
9609+ regs->tpc = addr;
9610+ regs->tnpc = addr+4;
9611+ return 2;
9612+ }
9613+ } while (0);
9614+
9615+ do { /* PaX: patched PLT emulation #5 */
9616+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
9617+
9618+ err = get_user(sethi, (unsigned int *)regs->tpc);
9619+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
9620+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
9621+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
9622+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
9623+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
9624+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
9625+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
9626+
9627+ if (err)
9628+ break;
9629+
9630+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9631+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
9632+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
9633+ (or1 & 0xFFFFE000U) == 0x82106000U &&
9634+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
9635+ sllx == 0x83287020U &&
9636+ jmpl == 0x81C04005U &&
9637+ nop == 0x01000000U)
9638+ {
9639+ unsigned long addr;
9640+
9641+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
9642+ regs->u_regs[UREG_G1] <<= 32;
9643+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
9644+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
9645+ regs->tpc = addr;
9646+ regs->tnpc = addr+4;
9647+ return 2;
9648+ }
9649+ } while (0);
9650+
9651+ do { /* PaX: patched PLT emulation #6 */
9652+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
9653+
9654+ err = get_user(sethi, (unsigned int *)regs->tpc);
9655+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
9656+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
9657+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
9658+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
9659+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
9660+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
9661+
9662+ if (err)
9663+ break;
9664+
9665+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9666+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
9667+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
9668+ sllx == 0x83287020U &&
9669+ (or & 0xFFFFE000U) == 0x8A116000U &&
9670+ jmpl == 0x81C04005U &&
9671+ nop == 0x01000000U)
9672+ {
9673+ unsigned long addr;
9674+
9675+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
9676+ regs->u_regs[UREG_G1] <<= 32;
9677+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
9678+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
9679+ regs->tpc = addr;
9680+ regs->tnpc = addr+4;
9681+ return 2;
9682+ }
9683+ } while (0);
9684+
9685+ do { /* PaX: unpatched PLT emulation step 1 */
9686+ unsigned int sethi, ba, nop;
9687+
9688+ err = get_user(sethi, (unsigned int *)regs->tpc);
9689+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
9690+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
9691+
9692+ if (err)
9693+ break;
9694+
9695+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9696+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
9697+ nop == 0x01000000U)
9698+ {
9699+ unsigned long addr;
9700+ unsigned int save, call;
9701+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
9702+
9703+ if ((ba & 0xFFC00000U) == 0x30800000U)
9704+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
9705+ else
9706+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
9707+
9708+ if (test_thread_flag(TIF_32BIT))
9709+ addr &= 0xFFFFFFFFUL;
9710+
9711+ err = get_user(save, (unsigned int *)addr);
9712+ err |= get_user(call, (unsigned int *)(addr+4));
9713+ err |= get_user(nop, (unsigned int *)(addr+8));
9714+ if (err)
9715+ break;
9716+
9717+#ifdef CONFIG_PAX_DLRESOLVE
9718+ if (save == 0x9DE3BFA8U &&
9719+ (call & 0xC0000000U) == 0x40000000U &&
9720+ nop == 0x01000000U)
9721+ {
9722+ struct vm_area_struct *vma;
9723+ unsigned long call_dl_resolve;
9724+
9725+ down_read(&current->mm->mmap_sem);
9726+ call_dl_resolve = current->mm->call_dl_resolve;
9727+ up_read(&current->mm->mmap_sem);
9728+ if (likely(call_dl_resolve))
9729+ goto emulate;
9730+
9731+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
9732+
9733+ down_write(&current->mm->mmap_sem);
9734+ if (current->mm->call_dl_resolve) {
9735+ call_dl_resolve = current->mm->call_dl_resolve;
9736+ up_write(&current->mm->mmap_sem);
9737+ if (vma)
9738+ kmem_cache_free(vm_area_cachep, vma);
9739+ goto emulate;
9740+ }
9741+
9742+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
9743+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
9744+ up_write(&current->mm->mmap_sem);
9745+ if (vma)
9746+ kmem_cache_free(vm_area_cachep, vma);
9747+ return 1;
9748+ }
9749+
9750+ if (pax_insert_vma(vma, call_dl_resolve)) {
9751+ up_write(&current->mm->mmap_sem);
9752+ kmem_cache_free(vm_area_cachep, vma);
9753+ return 1;
9754+ }
9755+
9756+ current->mm->call_dl_resolve = call_dl_resolve;
9757+ up_write(&current->mm->mmap_sem);
9758+
9759+emulate:
9760+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9761+ regs->tpc = call_dl_resolve;
9762+ regs->tnpc = addr+4;
9763+ return 3;
9764+ }
9765+#endif
9766+
9767+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
9768+ if ((save & 0xFFC00000U) == 0x05000000U &&
9769+ (call & 0xFFFFE000U) == 0x85C0A000U &&
9770+ nop == 0x01000000U)
9771+ {
9772+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9773+ regs->u_regs[UREG_G2] = addr + 4;
9774+ addr = (save & 0x003FFFFFU) << 10;
9775+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
9776+
9777+ if (test_thread_flag(TIF_32BIT))
9778+ addr &= 0xFFFFFFFFUL;
9779+
9780+ regs->tpc = addr;
9781+ regs->tnpc = addr+4;
9782+ return 3;
9783+ }
9784+
9785+ /* PaX: 64-bit PLT stub */
9786+ err = get_user(sethi1, (unsigned int *)addr);
9787+ err |= get_user(sethi2, (unsigned int *)(addr+4));
9788+ err |= get_user(or1, (unsigned int *)(addr+8));
9789+ err |= get_user(or2, (unsigned int *)(addr+12));
9790+ err |= get_user(sllx, (unsigned int *)(addr+16));
9791+ err |= get_user(add, (unsigned int *)(addr+20));
9792+ err |= get_user(jmpl, (unsigned int *)(addr+24));
9793+ err |= get_user(nop, (unsigned int *)(addr+28));
9794+ if (err)
9795+ break;
9796+
9797+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
9798+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
9799+ (or1 & 0xFFFFE000U) == 0x88112000U &&
9800+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
9801+ sllx == 0x89293020U &&
9802+ add == 0x8A010005U &&
9803+ jmpl == 0x89C14000U &&
9804+ nop == 0x01000000U)
9805+ {
9806+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9807+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
9808+ regs->u_regs[UREG_G4] <<= 32;
9809+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
9810+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
9811+ regs->u_regs[UREG_G4] = addr + 24;
9812+ addr = regs->u_regs[UREG_G5];
9813+ regs->tpc = addr;
9814+ regs->tnpc = addr+4;
9815+ return 3;
9816+ }
9817+ }
9818+ } while (0);
9819+
9820+#ifdef CONFIG_PAX_DLRESOLVE
9821+ do { /* PaX: unpatched PLT emulation step 2 */
9822+ unsigned int save, call, nop;
9823+
9824+ err = get_user(save, (unsigned int *)(regs->tpc-4));
9825+ err |= get_user(call, (unsigned int *)regs->tpc);
9826+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
9827+ if (err)
9828+ break;
9829+
9830+ if (save == 0x9DE3BFA8U &&
9831+ (call & 0xC0000000U) == 0x40000000U &&
9832+ nop == 0x01000000U)
9833+ {
9834+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
9835+
9836+ if (test_thread_flag(TIF_32BIT))
9837+ dl_resolve &= 0xFFFFFFFFUL;
9838+
9839+ regs->u_regs[UREG_RETPC] = regs->tpc;
9840+ regs->tpc = dl_resolve;
9841+ regs->tnpc = dl_resolve+4;
9842+ return 3;
9843+ }
9844+ } while (0);
9845+#endif
9846+
9847+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
9848+ unsigned int sethi, ba, nop;
9849+
9850+ err = get_user(sethi, (unsigned int *)regs->tpc);
9851+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
9852+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
9853+
9854+ if (err)
9855+ break;
9856+
9857+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9858+ (ba & 0xFFF00000U) == 0x30600000U &&
9859+ nop == 0x01000000U)
9860+ {
9861+ unsigned long addr;
9862+
9863+ addr = (sethi & 0x003FFFFFU) << 10;
9864+ regs->u_regs[UREG_G1] = addr;
9865+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
9866+
9867+ if (test_thread_flag(TIF_32BIT))
9868+ addr &= 0xFFFFFFFFUL;
9869+
9870+ regs->tpc = addr;
9871+ regs->tnpc = addr+4;
9872+ return 2;
9873+ }
9874+ } while (0);
9875+
9876+#endif
9877+
9878+ return 1;
9879+}
9880+
9881+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9882+{
9883+ unsigned long i;
9884+
9885+ printk(KERN_ERR "PAX: bytes at PC: ");
9886+ for (i = 0; i < 8; i++) {
9887+ unsigned int c;
9888+ if (get_user(c, (unsigned int *)pc+i))
9889+ printk(KERN_CONT "???????? ");
9890+ else
9891+ printk(KERN_CONT "%08x ", c);
9892+ }
9893+ printk("\n");
9894+}
9895+#endif
9896+
9897 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
9898 {
9899 struct mm_struct *mm = current->mm;
9900@@ -341,6 +804,29 @@ retry:
9901 if (!vma)
9902 goto bad_area;
9903
9904+#ifdef CONFIG_PAX_PAGEEXEC
9905+ /* PaX: detect ITLB misses on non-exec pages */
9906+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
9907+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
9908+ {
9909+ if (address != regs->tpc)
9910+ goto good_area;
9911+
9912+ up_read(&mm->mmap_sem);
9913+ switch (pax_handle_fetch_fault(regs)) {
9914+
9915+#ifdef CONFIG_PAX_EMUPLT
9916+ case 2:
9917+ case 3:
9918+ return;
9919+#endif
9920+
9921+ }
9922+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
9923+ do_group_exit(SIGKILL);
9924+ }
9925+#endif
9926+
9927 /* Pure DTLB misses do not tell us whether the fault causing
9928 * load/store/atomic was a write or not, it only says that there
9929 * was no match. So in such a case we (carefully) read the
9930diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
9931index d2b5944..bd813f2 100644
9932--- a/arch/sparc/mm/hugetlbpage.c
9933+++ b/arch/sparc/mm/hugetlbpage.c
9934@@ -38,7 +38,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
9935
9936 info.flags = 0;
9937 info.length = len;
9938- info.low_limit = TASK_UNMAPPED_BASE;
9939+ info.low_limit = mm->mmap_base;
9940 info.high_limit = min(task_size, VA_EXCLUDE_START);
9941 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
9942 info.align_offset = 0;
9943@@ -47,6 +47,12 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
9944 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
9945 VM_BUG_ON(addr != -ENOMEM);
9946 info.low_limit = VA_EXCLUDE_END;
9947+
9948+#ifdef CONFIG_PAX_RANDMMAP
9949+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9950+ info.low_limit += mm->delta_mmap;
9951+#endif
9952+
9953 info.high_limit = task_size;
9954 addr = vm_unmapped_area(&info);
9955 }
9956@@ -85,6 +91,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9957 VM_BUG_ON(addr != -ENOMEM);
9958 info.flags = 0;
9959 info.low_limit = TASK_UNMAPPED_BASE;
9960+
9961+#ifdef CONFIG_PAX_RANDMMAP
9962+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9963+ info.low_limit += mm->delta_mmap;
9964+#endif
9965+
9966 info.high_limit = STACK_TOP32;
9967 addr = vm_unmapped_area(&info);
9968 }
9969@@ -99,6 +111,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
9970 struct mm_struct *mm = current->mm;
9971 struct vm_area_struct *vma;
9972 unsigned long task_size = TASK_SIZE;
9973+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
9974
9975 if (test_thread_flag(TIF_32BIT))
9976 task_size = STACK_TOP32;
9977@@ -114,11 +127,14 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
9978 return addr;
9979 }
9980
9981+#ifdef CONFIG_PAX_RANDMMAP
9982+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
9983+#endif
9984+
9985 if (addr) {
9986 addr = ALIGN(addr, HPAGE_SIZE);
9987 vma = find_vma(mm, addr);
9988- if (task_size - len >= addr &&
9989- (!vma || addr + len <= vma->vm_start))
9990+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
9991 return addr;
9992 }
9993 if (mm->get_unmapped_area == arch_get_unmapped_area)
9994diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
9995index f4500c6..889656c 100644
9996--- a/arch/tile/include/asm/atomic_64.h
9997+++ b/arch/tile/include/asm/atomic_64.h
9998@@ -143,6 +143,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
9999
10000 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
10001
10002+#define atomic64_read_unchecked(v) atomic64_read(v)
10003+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
10004+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
10005+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
10006+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
10007+#define atomic64_inc_unchecked(v) atomic64_inc(v)
10008+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
10009+#define atomic64_dec_unchecked(v) atomic64_dec(v)
10010+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
10011+
10012 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
10013 #define smp_mb__before_atomic_dec() smp_mb()
10014 #define smp_mb__after_atomic_dec() smp_mb()
10015diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
10016index a9a5299..0fce79e 100644
10017--- a/arch/tile/include/asm/cache.h
10018+++ b/arch/tile/include/asm/cache.h
10019@@ -15,11 +15,12 @@
10020 #ifndef _ASM_TILE_CACHE_H
10021 #define _ASM_TILE_CACHE_H
10022
10023+#include <linux/const.h>
10024 #include <arch/chip.h>
10025
10026 /* bytes per L1 data cache line */
10027 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
10028-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10029+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10030
10031 /* bytes per L2 cache line */
10032 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
10033diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
10034index 8a082bc..7a6bf87 100644
10035--- a/arch/tile/include/asm/uaccess.h
10036+++ b/arch/tile/include/asm/uaccess.h
10037@@ -408,9 +408,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
10038 const void __user *from,
10039 unsigned long n)
10040 {
10041- int sz = __compiletime_object_size(to);
10042+ size_t sz = __compiletime_object_size(to);
10043
10044- if (likely(sz == -1 || sz >= n))
10045+ if (likely(sz == (size_t)-1 || sz >= n))
10046 n = _copy_from_user(to, from, n);
10047 else
10048 copy_from_user_overflow();
10049diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
10050index 650ccff..45fe2d6 100644
10051--- a/arch/tile/mm/hugetlbpage.c
10052+++ b/arch/tile/mm/hugetlbpage.c
10053@@ -239,6 +239,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
10054 info.high_limit = TASK_SIZE;
10055 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
10056 info.align_offset = 0;
10057+ info.threadstack_offset = 0;
10058 return vm_unmapped_area(&info);
10059 }
10060
10061@@ -256,6 +257,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
10062 info.high_limit = current->mm->mmap_base;
10063 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
10064 info.align_offset = 0;
10065+ info.threadstack_offset = 0;
10066 addr = vm_unmapped_area(&info);
10067
10068 /*
10069diff --git a/arch/um/Makefile b/arch/um/Makefile
10070index 133f7de..1d6f2f1 100644
10071--- a/arch/um/Makefile
10072+++ b/arch/um/Makefile
10073@@ -62,6 +62,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
10074 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
10075 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
10076
10077+ifdef CONSTIFY_PLUGIN
10078+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
10079+endif
10080+
10081 #This will adjust *FLAGS accordingly to the platform.
10082 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
10083
10084diff --git a/arch/um/defconfig b/arch/um/defconfig
10085index 08107a7..ab22afe 100644
10086--- a/arch/um/defconfig
10087+++ b/arch/um/defconfig
10088@@ -51,7 +51,6 @@ CONFIG_X86_CMPXCHG=y
10089 CONFIG_X86_L1_CACHE_SHIFT=5
10090 CONFIG_X86_XADD=y
10091 CONFIG_X86_PPRO_FENCE=y
10092-CONFIG_X86_WP_WORKS_OK=y
10093 CONFIG_X86_INVLPG=y
10094 CONFIG_X86_BSWAP=y
10095 CONFIG_X86_POPAD_OK=y
10096diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
10097index 19e1bdd..3665b77 100644
10098--- a/arch/um/include/asm/cache.h
10099+++ b/arch/um/include/asm/cache.h
10100@@ -1,6 +1,7 @@
10101 #ifndef __UM_CACHE_H
10102 #define __UM_CACHE_H
10103
10104+#include <linux/const.h>
10105
10106 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
10107 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
10108@@ -12,6 +13,6 @@
10109 # define L1_CACHE_SHIFT 5
10110 #endif
10111
10112-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10113+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10114
10115 #endif
10116diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
10117index 2e0a6b1..a64d0f5 100644
10118--- a/arch/um/include/asm/kmap_types.h
10119+++ b/arch/um/include/asm/kmap_types.h
10120@@ -8,6 +8,6 @@
10121
10122 /* No more #include "asm/arch/kmap_types.h" ! */
10123
10124-#define KM_TYPE_NR 14
10125+#define KM_TYPE_NR 15
10126
10127 #endif
10128diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
10129index 5ff53d9..5850cdf 100644
10130--- a/arch/um/include/asm/page.h
10131+++ b/arch/um/include/asm/page.h
10132@@ -14,6 +14,9 @@
10133 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
10134 #define PAGE_MASK (~(PAGE_SIZE-1))
10135
10136+#define ktla_ktva(addr) (addr)
10137+#define ktva_ktla(addr) (addr)
10138+
10139 #ifndef __ASSEMBLY__
10140
10141 struct page;
10142diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
10143index 0032f92..cd151e0 100644
10144--- a/arch/um/include/asm/pgtable-3level.h
10145+++ b/arch/um/include/asm/pgtable-3level.h
10146@@ -58,6 +58,7 @@
10147 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
10148 #define pud_populate(mm, pud, pmd) \
10149 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
10150+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
10151
10152 #ifdef CONFIG_64BIT
10153 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
10154diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
10155index bbcef52..6a2a483 100644
10156--- a/arch/um/kernel/process.c
10157+++ b/arch/um/kernel/process.c
10158@@ -367,22 +367,6 @@ int singlestepping(void * t)
10159 return 2;
10160 }
10161
10162-/*
10163- * Only x86 and x86_64 have an arch_align_stack().
10164- * All other arches have "#define arch_align_stack(x) (x)"
10165- * in their asm/system.h
10166- * As this is included in UML from asm-um/system-generic.h,
10167- * we can use it to behave as the subarch does.
10168- */
10169-#ifndef arch_align_stack
10170-unsigned long arch_align_stack(unsigned long sp)
10171-{
10172- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
10173- sp -= get_random_int() % 8192;
10174- return sp & ~0xf;
10175-}
10176-#endif
10177-
10178 unsigned long get_wchan(struct task_struct *p)
10179 {
10180 unsigned long stack_page, sp, ip;
10181diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
10182index ad8f795..2c7eec6 100644
10183--- a/arch/unicore32/include/asm/cache.h
10184+++ b/arch/unicore32/include/asm/cache.h
10185@@ -12,8 +12,10 @@
10186 #ifndef __UNICORE_CACHE_H__
10187 #define __UNICORE_CACHE_H__
10188
10189-#define L1_CACHE_SHIFT (5)
10190-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10191+#include <linux/const.h>
10192+
10193+#define L1_CACHE_SHIFT 5
10194+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10195
10196 /*
10197 * Memory returned by kmalloc() may be used for DMA, so we must make
10198diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
10199index fe120da..24177f7 100644
10200--- a/arch/x86/Kconfig
10201+++ b/arch/x86/Kconfig
10202@@ -239,7 +239,7 @@ config X86_HT
10203
10204 config X86_32_LAZY_GS
10205 def_bool y
10206- depends on X86_32 && !CC_STACKPROTECTOR
10207+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
10208
10209 config ARCH_HWEIGHT_CFLAGS
10210 string
10211@@ -1073,6 +1073,7 @@ config MICROCODE_EARLY
10212
10213 config X86_MSR
10214 tristate "/dev/cpu/*/msr - Model-specific register support"
10215+ depends on !GRKERNSEC_KMEM
10216 ---help---
10217 This device gives privileged processes access to the x86
10218 Model-Specific Registers (MSRs). It is a character device with
10219@@ -1096,7 +1097,7 @@ choice
10220
10221 config NOHIGHMEM
10222 bool "off"
10223- depends on !X86_NUMAQ
10224+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10225 ---help---
10226 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
10227 However, the address space of 32-bit x86 processors is only 4
10228@@ -1133,7 +1134,7 @@ config NOHIGHMEM
10229
10230 config HIGHMEM4G
10231 bool "4GB"
10232- depends on !X86_NUMAQ
10233+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10234 ---help---
10235 Select this if you have a 32-bit processor and between 1 and 4
10236 gigabytes of physical RAM.
10237@@ -1186,7 +1187,7 @@ config PAGE_OFFSET
10238 hex
10239 default 0xB0000000 if VMSPLIT_3G_OPT
10240 default 0x80000000 if VMSPLIT_2G
10241- default 0x78000000 if VMSPLIT_2G_OPT
10242+ default 0x70000000 if VMSPLIT_2G_OPT
10243 default 0x40000000 if VMSPLIT_1G
10244 default 0xC0000000
10245 depends on X86_32
10246@@ -1584,6 +1585,7 @@ config SECCOMP
10247
10248 config CC_STACKPROTECTOR
10249 bool "Enable -fstack-protector buffer overflow detection"
10250+ depends on X86_64 || !PAX_MEMORY_UDEREF
10251 ---help---
10252 This option turns on the -fstack-protector GCC feature. This
10253 feature puts, at the beginning of functions, a canary value on
10254@@ -1703,6 +1705,8 @@ config X86_NEED_RELOCS
10255 config PHYSICAL_ALIGN
10256 hex "Alignment value to which kernel should be aligned" if X86_32
10257 default "0x1000000"
10258+ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
10259+ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
10260 range 0x2000 0x1000000
10261 ---help---
10262 This value puts the alignment restrictions on physical address
10263@@ -1778,9 +1782,10 @@ config DEBUG_HOTPLUG_CPU0
10264 If unsure, say N.
10265
10266 config COMPAT_VDSO
10267- def_bool y
10268+ def_bool n
10269 prompt "Compat VDSO support"
10270 depends on X86_32 || IA32_EMULATION
10271+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
10272 ---help---
10273 Map the 32-bit VDSO to the predictable old-style address too.
10274
10275diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
10276index c026cca..14657ae 100644
10277--- a/arch/x86/Kconfig.cpu
10278+++ b/arch/x86/Kconfig.cpu
10279@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
10280
10281 config X86_F00F_BUG
10282 def_bool y
10283- depends on M586MMX || M586TSC || M586 || M486
10284+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
10285
10286 config X86_INVD_BUG
10287 def_bool y
10288@@ -327,7 +327,7 @@ config X86_INVD_BUG
10289
10290 config X86_ALIGNMENT_16
10291 def_bool y
10292- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10293+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10294
10295 config X86_INTEL_USERCOPY
10296 def_bool y
10297@@ -373,7 +373,7 @@ config X86_CMPXCHG64
10298 # generates cmov.
10299 config X86_CMOV
10300 def_bool y
10301- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10302+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10303
10304 config X86_MINIMUM_CPU_FAMILY
10305 int
10306diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
10307index c198b7e..63eea60 100644
10308--- a/arch/x86/Kconfig.debug
10309+++ b/arch/x86/Kconfig.debug
10310@@ -84,7 +84,7 @@ config X86_PTDUMP
10311 config DEBUG_RODATA
10312 bool "Write protect kernel read-only data structures"
10313 default y
10314- depends on DEBUG_KERNEL
10315+ depends on DEBUG_KERNEL && BROKEN
10316 ---help---
10317 Mark the kernel read-only data as write-protected in the pagetables,
10318 in order to catch accidental (and incorrect) writes to such const
10319@@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
10320
10321 config DEBUG_SET_MODULE_RONX
10322 bool "Set loadable kernel module data as NX and text as RO"
10323- depends on MODULES
10324+ depends on MODULES && BROKEN
10325 ---help---
10326 This option helps catch unintended modifications to loadable
10327 kernel module's text and read-only data. It also prevents execution
10328diff --git a/arch/x86/Makefile b/arch/x86/Makefile
10329index 5c47726..8c4fa67 100644
10330--- a/arch/x86/Makefile
10331+++ b/arch/x86/Makefile
10332@@ -54,6 +54,7 @@ else
10333 UTS_MACHINE := x86_64
10334 CHECKFLAGS += -D__x86_64__ -m64
10335
10336+ biarch := $(call cc-option,-m64)
10337 KBUILD_AFLAGS += -m64
10338 KBUILD_CFLAGS += -m64
10339
10340@@ -234,3 +235,12 @@ define archhelp
10341 echo ' FDARGS="..." arguments for the booted kernel'
10342 echo ' FDINITRD=file initrd for the booted kernel'
10343 endef
10344+
10345+define OLD_LD
10346+
10347+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
10348+*** Please upgrade your binutils to 2.18 or newer
10349+endef
10350+
10351+archprepare:
10352+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
10353diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
10354index 379814b..add62ce 100644
10355--- a/arch/x86/boot/Makefile
10356+++ b/arch/x86/boot/Makefile
10357@@ -65,6 +65,9 @@ KBUILD_CFLAGS := $(USERINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
10358 $(call cc-option, -fno-stack-protector) \
10359 $(call cc-option, -mpreferred-stack-boundary=2)
10360 KBUILD_CFLAGS += $(call cc-option, -m32)
10361+ifdef CONSTIFY_PLUGIN
10362+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
10363+endif
10364 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10365 GCOV_PROFILE := n
10366
10367diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
10368index 878e4b9..20537ab 100644
10369--- a/arch/x86/boot/bitops.h
10370+++ b/arch/x86/boot/bitops.h
10371@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
10372 u8 v;
10373 const u32 *p = (const u32 *)addr;
10374
10375- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
10376+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
10377 return v;
10378 }
10379
10380@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
10381
10382 static inline void set_bit(int nr, void *addr)
10383 {
10384- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
10385+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
10386 }
10387
10388 #endif /* BOOT_BITOPS_H */
10389diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
10390index 5b75319..331a4ca 100644
10391--- a/arch/x86/boot/boot.h
10392+++ b/arch/x86/boot/boot.h
10393@@ -85,7 +85,7 @@ static inline void io_delay(void)
10394 static inline u16 ds(void)
10395 {
10396 u16 seg;
10397- asm("movw %%ds,%0" : "=rm" (seg));
10398+ asm volatile("movw %%ds,%0" : "=rm" (seg));
10399 return seg;
10400 }
10401
10402@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
10403 static inline int memcmp(const void *s1, const void *s2, size_t len)
10404 {
10405 u8 diff;
10406- asm("repe; cmpsb; setnz %0"
10407+ asm volatile("repe; cmpsb; setnz %0"
10408 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
10409 return diff;
10410 }
10411diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
10412index 5ef205c..342191d 100644
10413--- a/arch/x86/boot/compressed/Makefile
10414+++ b/arch/x86/boot/compressed/Makefile
10415@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
10416 KBUILD_CFLAGS += $(cflags-y)
10417 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
10418 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
10419+ifdef CONSTIFY_PLUGIN
10420+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
10421+endif
10422
10423 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10424 GCOV_PROFILE := n
10425diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
10426index c205035..5853587 100644
10427--- a/arch/x86/boot/compressed/eboot.c
10428+++ b/arch/x86/boot/compressed/eboot.c
10429@@ -150,7 +150,6 @@ again:
10430 *addr = max_addr;
10431 }
10432
10433-free_pool:
10434 efi_call_phys1(sys_table->boottime->free_pool, map);
10435
10436 fail:
10437@@ -214,7 +213,6 @@ static efi_status_t low_alloc(unsigned long size, unsigned long align,
10438 if (i == map_size / desc_size)
10439 status = EFI_NOT_FOUND;
10440
10441-free_pool:
10442 efi_call_phys1(sys_table->boottime->free_pool, map);
10443 fail:
10444 return status;
10445diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
10446index a53440e..c3dbf1e 100644
10447--- a/arch/x86/boot/compressed/efi_stub_32.S
10448+++ b/arch/x86/boot/compressed/efi_stub_32.S
10449@@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
10450 * parameter 2, ..., param n. To make things easy, we save the return
10451 * address of efi_call_phys in a global variable.
10452 */
10453- popl %ecx
10454- movl %ecx, saved_return_addr(%edx)
10455- /* get the function pointer into ECX*/
10456- popl %ecx
10457- movl %ecx, efi_rt_function_ptr(%edx)
10458+ popl saved_return_addr(%edx)
10459+ popl efi_rt_function_ptr(%edx)
10460
10461 /*
10462 * 3. Call the physical function.
10463 */
10464- call *%ecx
10465+ call *efi_rt_function_ptr(%edx)
10466
10467 /*
10468 * 4. Balance the stack. And because EAX contain the return value,
10469@@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
10470 1: popl %edx
10471 subl $1b, %edx
10472
10473- movl efi_rt_function_ptr(%edx), %ecx
10474- pushl %ecx
10475+ pushl efi_rt_function_ptr(%edx)
10476
10477 /*
10478 * 10. Push the saved return address onto the stack and return.
10479 */
10480- movl saved_return_addr(%edx), %ecx
10481- pushl %ecx
10482- ret
10483+ jmpl *saved_return_addr(%edx)
10484 ENDPROC(efi_call_phys)
10485 .previous
10486
10487diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
10488index 1e3184f..0d11e2e 100644
10489--- a/arch/x86/boot/compressed/head_32.S
10490+++ b/arch/x86/boot/compressed/head_32.S
10491@@ -118,7 +118,7 @@ preferred_addr:
10492 notl %eax
10493 andl %eax, %ebx
10494 #else
10495- movl $LOAD_PHYSICAL_ADDR, %ebx
10496+ movl $____LOAD_PHYSICAL_ADDR, %ebx
10497 #endif
10498
10499 /* Target address to relocate to for decompression */
10500@@ -204,7 +204,7 @@ relocated:
10501 * and where it was actually loaded.
10502 */
10503 movl %ebp, %ebx
10504- subl $LOAD_PHYSICAL_ADDR, %ebx
10505+ subl $____LOAD_PHYSICAL_ADDR, %ebx
10506 jz 2f /* Nothing to be done if loaded at compiled addr. */
10507 /*
10508 * Process relocations.
10509@@ -212,8 +212,7 @@ relocated:
10510
10511 1: subl $4, %edi
10512 movl (%edi), %ecx
10513- testl %ecx, %ecx
10514- jz 2f
10515+ jecxz 2f
10516 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
10517 jmp 1b
10518 2:
10519diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
10520index 16f24e6..47491a3 100644
10521--- a/arch/x86/boot/compressed/head_64.S
10522+++ b/arch/x86/boot/compressed/head_64.S
10523@@ -97,7 +97,7 @@ ENTRY(startup_32)
10524 notl %eax
10525 andl %eax, %ebx
10526 #else
10527- movl $LOAD_PHYSICAL_ADDR, %ebx
10528+ movl $____LOAD_PHYSICAL_ADDR, %ebx
10529 #endif
10530
10531 /* Target address to relocate to for decompression */
10532@@ -272,7 +272,7 @@ preferred_addr:
10533 notq %rax
10534 andq %rax, %rbp
10535 #else
10536- movq $LOAD_PHYSICAL_ADDR, %rbp
10537+ movq $____LOAD_PHYSICAL_ADDR, %rbp
10538 #endif
10539
10540 /* Target address to relocate to for decompression */
10541@@ -363,8 +363,8 @@ gdt:
10542 .long gdt
10543 .word 0
10544 .quad 0x0000000000000000 /* NULL descriptor */
10545- .quad 0x00af9a000000ffff /* __KERNEL_CS */
10546- .quad 0x00cf92000000ffff /* __KERNEL_DS */
10547+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
10548+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
10549 .quad 0x0080890000000000 /* TS descriptor */
10550 .quad 0x0000000000000000 /* TS continued */
10551 gdt_end:
10552diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
10553index 7cb56c6..d382d84 100644
10554--- a/arch/x86/boot/compressed/misc.c
10555+++ b/arch/x86/boot/compressed/misc.c
10556@@ -303,7 +303,7 @@ static void parse_elf(void *output)
10557 case PT_LOAD:
10558 #ifdef CONFIG_RELOCATABLE
10559 dest = output;
10560- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
10561+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
10562 #else
10563 dest = (void *)(phdr->p_paddr);
10564 #endif
10565@@ -354,7 +354,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
10566 error("Destination address too large");
10567 #endif
10568 #ifndef CONFIG_RELOCATABLE
10569- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
10570+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
10571 error("Wrong destination address");
10572 #endif
10573
10574diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
10575index 4d3ff03..e4972ff 100644
10576--- a/arch/x86/boot/cpucheck.c
10577+++ b/arch/x86/boot/cpucheck.c
10578@@ -74,7 +74,7 @@ static int has_fpu(void)
10579 u16 fcw = -1, fsw = -1;
10580 u32 cr0;
10581
10582- asm("movl %%cr0,%0" : "=r" (cr0));
10583+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
10584 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
10585 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
10586 asm volatile("movl %0,%%cr0" : : "r" (cr0));
10587@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
10588 {
10589 u32 f0, f1;
10590
10591- asm("pushfl ; "
10592+ asm volatile("pushfl ; "
10593 "pushfl ; "
10594 "popl %0 ; "
10595 "movl %0,%1 ; "
10596@@ -115,7 +115,7 @@ static void get_flags(void)
10597 set_bit(X86_FEATURE_FPU, cpu.flags);
10598
10599 if (has_eflag(X86_EFLAGS_ID)) {
10600- asm("cpuid"
10601+ asm volatile("cpuid"
10602 : "=a" (max_intel_level),
10603 "=b" (cpu_vendor[0]),
10604 "=d" (cpu_vendor[1]),
10605@@ -124,7 +124,7 @@ static void get_flags(void)
10606
10607 if (max_intel_level >= 0x00000001 &&
10608 max_intel_level <= 0x0000ffff) {
10609- asm("cpuid"
10610+ asm volatile("cpuid"
10611 : "=a" (tfms),
10612 "=c" (cpu.flags[4]),
10613 "=d" (cpu.flags[0])
10614@@ -136,7 +136,7 @@ static void get_flags(void)
10615 cpu.model += ((tfms >> 16) & 0xf) << 4;
10616 }
10617
10618- asm("cpuid"
10619+ asm volatile("cpuid"
10620 : "=a" (max_amd_level)
10621 : "a" (0x80000000)
10622 : "ebx", "ecx", "edx");
10623@@ -144,7 +144,7 @@ static void get_flags(void)
10624 if (max_amd_level >= 0x80000001 &&
10625 max_amd_level <= 0x8000ffff) {
10626 u32 eax = 0x80000001;
10627- asm("cpuid"
10628+ asm volatile("cpuid"
10629 : "+a" (eax),
10630 "=c" (cpu.flags[6]),
10631 "=d" (cpu.flags[1])
10632@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
10633 u32 ecx = MSR_K7_HWCR;
10634 u32 eax, edx;
10635
10636- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10637+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10638 eax &= ~(1 << 15);
10639- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10640+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10641
10642 get_flags(); /* Make sure it really did something */
10643 err = check_flags();
10644@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
10645 u32 ecx = MSR_VIA_FCR;
10646 u32 eax, edx;
10647
10648- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10649+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10650 eax |= (1<<1)|(1<<7);
10651- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10652+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10653
10654 set_bit(X86_FEATURE_CX8, cpu.flags);
10655 err = check_flags();
10656@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
10657 u32 eax, edx;
10658 u32 level = 1;
10659
10660- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10661- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
10662- asm("cpuid"
10663+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10664+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
10665+ asm volatile("cpuid"
10666 : "+a" (level), "=d" (cpu.flags[0])
10667 : : "ecx", "ebx");
10668- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10669+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10670
10671 err = check_flags();
10672 }
10673diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
10674index 9ec06a1..2c25e79 100644
10675--- a/arch/x86/boot/header.S
10676+++ b/arch/x86/boot/header.S
10677@@ -409,10 +409,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
10678 # single linked list of
10679 # struct setup_data
10680
10681-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
10682+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
10683
10684 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
10685+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
10686+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
10687+#else
10688 #define VO_INIT_SIZE (VO__end - VO__text)
10689+#endif
10690 #if ZO_INIT_SIZE > VO_INIT_SIZE
10691 #define INIT_SIZE ZO_INIT_SIZE
10692 #else
10693diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
10694index db75d07..8e6d0af 100644
10695--- a/arch/x86/boot/memory.c
10696+++ b/arch/x86/boot/memory.c
10697@@ -19,7 +19,7 @@
10698
10699 static int detect_memory_e820(void)
10700 {
10701- int count = 0;
10702+ unsigned int count = 0;
10703 struct biosregs ireg, oreg;
10704 struct e820entry *desc = boot_params.e820_map;
10705 static struct e820entry buf; /* static so it is zeroed */
10706diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
10707index 11e8c6e..fdbb1ed 100644
10708--- a/arch/x86/boot/video-vesa.c
10709+++ b/arch/x86/boot/video-vesa.c
10710@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
10711
10712 boot_params.screen_info.vesapm_seg = oreg.es;
10713 boot_params.screen_info.vesapm_off = oreg.di;
10714+ boot_params.screen_info.vesapm_size = oreg.cx;
10715 }
10716
10717 /*
10718diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
10719index 43eda28..5ab5fdb 100644
10720--- a/arch/x86/boot/video.c
10721+++ b/arch/x86/boot/video.c
10722@@ -96,7 +96,7 @@ static void store_mode_params(void)
10723 static unsigned int get_entry(void)
10724 {
10725 char entry_buf[4];
10726- int i, len = 0;
10727+ unsigned int i, len = 0;
10728 int key;
10729 unsigned int v;
10730
10731diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
10732index 9105655..5e37f27 100644
10733--- a/arch/x86/crypto/aes-x86_64-asm_64.S
10734+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
10735@@ -8,6 +8,8 @@
10736 * including this sentence is retained in full.
10737 */
10738
10739+#include <asm/alternative-asm.h>
10740+
10741 .extern crypto_ft_tab
10742 .extern crypto_it_tab
10743 .extern crypto_fl_tab
10744@@ -70,6 +72,8 @@
10745 je B192; \
10746 leaq 32(r9),r9;
10747
10748+#define ret pax_force_retaddr 0, 1; ret
10749+
10750 #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
10751 movq r1,r2; \
10752 movq r3,r4; \
10753diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
10754index 477e9d7..3ab339f 100644
10755--- a/arch/x86/crypto/aesni-intel_asm.S
10756+++ b/arch/x86/crypto/aesni-intel_asm.S
10757@@ -31,6 +31,7 @@
10758
10759 #include <linux/linkage.h>
10760 #include <asm/inst.h>
10761+#include <asm/alternative-asm.h>
10762
10763 #ifdef __x86_64__
10764 .data
10765@@ -1441,6 +1442,7 @@ _return_T_done_decrypt:
10766 pop %r14
10767 pop %r13
10768 pop %r12
10769+ pax_force_retaddr 0, 1
10770 ret
10771 ENDPROC(aesni_gcm_dec)
10772
10773@@ -1705,6 +1707,7 @@ _return_T_done_encrypt:
10774 pop %r14
10775 pop %r13
10776 pop %r12
10777+ pax_force_retaddr 0, 1
10778 ret
10779 ENDPROC(aesni_gcm_enc)
10780
10781@@ -1722,6 +1725,7 @@ _key_expansion_256a:
10782 pxor %xmm1, %xmm0
10783 movaps %xmm0, (TKEYP)
10784 add $0x10, TKEYP
10785+ pax_force_retaddr_bts
10786 ret
10787 ENDPROC(_key_expansion_128)
10788 ENDPROC(_key_expansion_256a)
10789@@ -1748,6 +1752,7 @@ _key_expansion_192a:
10790 shufps $0b01001110, %xmm2, %xmm1
10791 movaps %xmm1, 0x10(TKEYP)
10792 add $0x20, TKEYP
10793+ pax_force_retaddr_bts
10794 ret
10795 ENDPROC(_key_expansion_192a)
10796
10797@@ -1768,6 +1773,7 @@ _key_expansion_192b:
10798
10799 movaps %xmm0, (TKEYP)
10800 add $0x10, TKEYP
10801+ pax_force_retaddr_bts
10802 ret
10803 ENDPROC(_key_expansion_192b)
10804
10805@@ -1781,6 +1787,7 @@ _key_expansion_256b:
10806 pxor %xmm1, %xmm2
10807 movaps %xmm2, (TKEYP)
10808 add $0x10, TKEYP
10809+ pax_force_retaddr_bts
10810 ret
10811 ENDPROC(_key_expansion_256b)
10812
10813@@ -1894,6 +1901,7 @@ ENTRY(aesni_set_key)
10814 #ifndef __x86_64__
10815 popl KEYP
10816 #endif
10817+ pax_force_retaddr 0, 1
10818 ret
10819 ENDPROC(aesni_set_key)
10820
10821@@ -1916,6 +1924,7 @@ ENTRY(aesni_enc)
10822 popl KLEN
10823 popl KEYP
10824 #endif
10825+ pax_force_retaddr 0, 1
10826 ret
10827 ENDPROC(aesni_enc)
10828
10829@@ -1974,6 +1983,7 @@ _aesni_enc1:
10830 AESENC KEY STATE
10831 movaps 0x70(TKEYP), KEY
10832 AESENCLAST KEY STATE
10833+ pax_force_retaddr_bts
10834 ret
10835 ENDPROC(_aesni_enc1)
10836
10837@@ -2083,6 +2093,7 @@ _aesni_enc4:
10838 AESENCLAST KEY STATE2
10839 AESENCLAST KEY STATE3
10840 AESENCLAST KEY STATE4
10841+ pax_force_retaddr_bts
10842 ret
10843 ENDPROC(_aesni_enc4)
10844
10845@@ -2106,6 +2117,7 @@ ENTRY(aesni_dec)
10846 popl KLEN
10847 popl KEYP
10848 #endif
10849+ pax_force_retaddr 0, 1
10850 ret
10851 ENDPROC(aesni_dec)
10852
10853@@ -2164,6 +2176,7 @@ _aesni_dec1:
10854 AESDEC KEY STATE
10855 movaps 0x70(TKEYP), KEY
10856 AESDECLAST KEY STATE
10857+ pax_force_retaddr_bts
10858 ret
10859 ENDPROC(_aesni_dec1)
10860
10861@@ -2273,6 +2286,7 @@ _aesni_dec4:
10862 AESDECLAST KEY STATE2
10863 AESDECLAST KEY STATE3
10864 AESDECLAST KEY STATE4
10865+ pax_force_retaddr_bts
10866 ret
10867 ENDPROC(_aesni_dec4)
10868
10869@@ -2331,6 +2345,7 @@ ENTRY(aesni_ecb_enc)
10870 popl KEYP
10871 popl LEN
10872 #endif
10873+ pax_force_retaddr 0, 1
10874 ret
10875 ENDPROC(aesni_ecb_enc)
10876
10877@@ -2390,6 +2405,7 @@ ENTRY(aesni_ecb_dec)
10878 popl KEYP
10879 popl LEN
10880 #endif
10881+ pax_force_retaddr 0, 1
10882 ret
10883 ENDPROC(aesni_ecb_dec)
10884
10885@@ -2432,6 +2448,7 @@ ENTRY(aesni_cbc_enc)
10886 popl LEN
10887 popl IVP
10888 #endif
10889+ pax_force_retaddr 0, 1
10890 ret
10891 ENDPROC(aesni_cbc_enc)
10892
10893@@ -2523,6 +2540,7 @@ ENTRY(aesni_cbc_dec)
10894 popl LEN
10895 popl IVP
10896 #endif
10897+ pax_force_retaddr 0, 1
10898 ret
10899 ENDPROC(aesni_cbc_dec)
10900
10901@@ -2550,6 +2568,7 @@ _aesni_inc_init:
10902 mov $1, TCTR_LOW
10903 MOVQ_R64_XMM TCTR_LOW INC
10904 MOVQ_R64_XMM CTR TCTR_LOW
10905+ pax_force_retaddr_bts
10906 ret
10907 ENDPROC(_aesni_inc_init)
10908
10909@@ -2579,6 +2598,7 @@ _aesni_inc:
10910 .Linc_low:
10911 movaps CTR, IV
10912 PSHUFB_XMM BSWAP_MASK IV
10913+ pax_force_retaddr_bts
10914 ret
10915 ENDPROC(_aesni_inc)
10916
10917@@ -2640,6 +2660,7 @@ ENTRY(aesni_ctr_enc)
10918 .Lctr_enc_ret:
10919 movups IV, (IVP)
10920 .Lctr_enc_just_ret:
10921+ pax_force_retaddr 0, 1
10922 ret
10923 ENDPROC(aesni_ctr_enc)
10924
10925@@ -2766,6 +2787,7 @@ ENTRY(aesni_xts_crypt8)
10926 pxor INC, STATE4
10927 movdqu STATE4, 0x70(OUTP)
10928
10929+ pax_force_retaddr 0, 1
10930 ret
10931 ENDPROC(aesni_xts_crypt8)
10932
10933diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
10934index 246c670..4d1ed00 100644
10935--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
10936+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
10937@@ -21,6 +21,7 @@
10938 */
10939
10940 #include <linux/linkage.h>
10941+#include <asm/alternative-asm.h>
10942
10943 .file "blowfish-x86_64-asm.S"
10944 .text
10945@@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
10946 jnz .L__enc_xor;
10947
10948 write_block();
10949+ pax_force_retaddr 0, 1
10950 ret;
10951 .L__enc_xor:
10952 xor_block();
10953+ pax_force_retaddr 0, 1
10954 ret;
10955 ENDPROC(__blowfish_enc_blk)
10956
10957@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
10958
10959 movq %r11, %rbp;
10960
10961+ pax_force_retaddr 0, 1
10962 ret;
10963 ENDPROC(blowfish_dec_blk)
10964
10965@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
10966
10967 popq %rbx;
10968 popq %rbp;
10969+ pax_force_retaddr 0, 1
10970 ret;
10971
10972 .L__enc_xor4:
10973@@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)
10974
10975 popq %rbx;
10976 popq %rbp;
10977+ pax_force_retaddr 0, 1
10978 ret;
10979 ENDPROC(__blowfish_enc_blk_4way)
10980
10981@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
10982 popq %rbx;
10983 popq %rbp;
10984
10985+ pax_force_retaddr 0, 1
10986 ret;
10987 ENDPROC(blowfish_dec_blk_4way)
10988diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
10989index 310319c..ce174a4 100644
10990--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
10991+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
10992@@ -21,6 +21,7 @@
10993 */
10994
10995 #include <linux/linkage.h>
10996+#include <asm/alternative-asm.h>
10997
10998 .file "camellia-x86_64-asm_64.S"
10999 .text
11000@@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
11001 enc_outunpack(mov, RT1);
11002
11003 movq RRBP, %rbp;
11004+ pax_force_retaddr 0, 1
11005 ret;
11006
11007 .L__enc_xor:
11008 enc_outunpack(xor, RT1);
11009
11010 movq RRBP, %rbp;
11011+ pax_force_retaddr 0, 1
11012 ret;
11013 ENDPROC(__camellia_enc_blk)
11014
11015@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
11016 dec_outunpack();
11017
11018 movq RRBP, %rbp;
11019+ pax_force_retaddr 0, 1
11020 ret;
11021 ENDPROC(camellia_dec_blk)
11022
11023@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
11024
11025 movq RRBP, %rbp;
11026 popq %rbx;
11027+ pax_force_retaddr 0, 1
11028 ret;
11029
11030 .L__enc2_xor:
11031@@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)
11032
11033 movq RRBP, %rbp;
11034 popq %rbx;
11035+ pax_force_retaddr 0, 1
11036 ret;
11037 ENDPROC(__camellia_enc_blk_2way)
11038
11039@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
11040
11041 movq RRBP, %rbp;
11042 movq RXOR, %rbx;
11043+ pax_force_retaddr 0, 1
11044 ret;
11045 ENDPROC(camellia_dec_blk_2way)
11046diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
11047index c35fd5d..c1ee236 100644
11048--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
11049+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
11050@@ -24,6 +24,7 @@
11051 */
11052
11053 #include <linux/linkage.h>
11054+#include <asm/alternative-asm.h>
11055
11056 .file "cast5-avx-x86_64-asm_64.S"
11057
11058@@ -281,6 +282,7 @@ __cast5_enc_blk16:
11059 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
11060 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
11061
11062+ pax_force_retaddr 0, 1
11063 ret;
11064 ENDPROC(__cast5_enc_blk16)
11065
11066@@ -352,6 +354,7 @@ __cast5_dec_blk16:
11067 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
11068 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
11069
11070+ pax_force_retaddr 0, 1
11071 ret;
11072
11073 .L__skip_dec:
11074@@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
11075 vmovdqu RR4, (6*4*4)(%r11);
11076 vmovdqu RL4, (7*4*4)(%r11);
11077
11078+ pax_force_retaddr
11079 ret;
11080 ENDPROC(cast5_ecb_enc_16way)
11081
11082@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
11083 vmovdqu RR4, (6*4*4)(%r11);
11084 vmovdqu RL4, (7*4*4)(%r11);
11085
11086+ pax_force_retaddr
11087 ret;
11088 ENDPROC(cast5_ecb_dec_16way)
11089
11090@@ -469,6 +474,7 @@ ENTRY(cast5_cbc_dec_16way)
11091
11092 popq %r12;
11093
11094+ pax_force_retaddr
11095 ret;
11096 ENDPROC(cast5_cbc_dec_16way)
11097
11098@@ -542,5 +548,6 @@ ENTRY(cast5_ctr_16way)
11099
11100 popq %r12;
11101
11102+ pax_force_retaddr
11103 ret;
11104 ENDPROC(cast5_ctr_16way)
11105diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
11106index e3531f8..18ded3a 100644
11107--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
11108+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
11109@@ -24,6 +24,7 @@
11110 */
11111
11112 #include <linux/linkage.h>
11113+#include <asm/alternative-asm.h>
11114 #include "glue_helper-asm-avx.S"
11115
11116 .file "cast6-avx-x86_64-asm_64.S"
11117@@ -295,6 +296,7 @@ __cast6_enc_blk8:
11118 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
11119 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
11120
11121+ pax_force_retaddr 0, 1
11122 ret;
11123 ENDPROC(__cast6_enc_blk8)
11124
11125@@ -340,6 +342,7 @@ __cast6_dec_blk8:
11126 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
11127 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
11128
11129+ pax_force_retaddr 0, 1
11130 ret;
11131 ENDPROC(__cast6_dec_blk8)
11132
11133@@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way)
11134
11135 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11136
11137+ pax_force_retaddr
11138 ret;
11139 ENDPROC(cast6_ecb_enc_8way)
11140
11141@@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)
11142
11143 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11144
11145+ pax_force_retaddr
11146 ret;
11147 ENDPROC(cast6_ecb_dec_8way)
11148
11149@@ -399,6 +404,7 @@ ENTRY(cast6_cbc_dec_8way)
11150
11151 popq %r12;
11152
11153+ pax_force_retaddr
11154 ret;
11155 ENDPROC(cast6_cbc_dec_8way)
11156
11157@@ -424,6 +430,7 @@ ENTRY(cast6_ctr_8way)
11158
11159 popq %r12;
11160
11161+ pax_force_retaddr
11162 ret;
11163 ENDPROC(cast6_ctr_8way)
11164
11165@@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way)
11166 /* dst <= regs xor IVs(in dst) */
11167 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11168
11169+ pax_force_retaddr
11170 ret;
11171 ENDPROC(cast6_xts_enc_8way)
11172
11173@@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way)
11174 /* dst <= regs xor IVs(in dst) */
11175 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11176
11177+ pax_force_retaddr
11178 ret;
11179 ENDPROC(cast6_xts_dec_8way)
11180diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
11181index 9279e0b..9270820 100644
11182--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
11183+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
11184@@ -1,4 +1,5 @@
11185 #include <linux/linkage.h>
11186+#include <asm/alternative-asm.h>
11187
11188 # enter salsa20_encrypt_bytes
11189 ENTRY(salsa20_encrypt_bytes)
11190@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
11191 add %r11,%rsp
11192 mov %rdi,%rax
11193 mov %rsi,%rdx
11194+ pax_force_retaddr 0, 1
11195 ret
11196 # bytesatleast65:
11197 ._bytesatleast65:
11198@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
11199 add %r11,%rsp
11200 mov %rdi,%rax
11201 mov %rsi,%rdx
11202+ pax_force_retaddr
11203 ret
11204 ENDPROC(salsa20_keysetup)
11205
11206@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
11207 add %r11,%rsp
11208 mov %rdi,%rax
11209 mov %rsi,%rdx
11210+ pax_force_retaddr
11211 ret
11212 ENDPROC(salsa20_ivsetup)
11213diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
11214index 2f202f4..d9164d6 100644
11215--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
11216+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
11217@@ -24,6 +24,7 @@
11218 */
11219
11220 #include <linux/linkage.h>
11221+#include <asm/alternative-asm.h>
11222 #include "glue_helper-asm-avx.S"
11223
11224 .file "serpent-avx-x86_64-asm_64.S"
11225@@ -618,6 +619,7 @@ __serpent_enc_blk8_avx:
11226 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
11227 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
11228
11229+ pax_force_retaddr
11230 ret;
11231 ENDPROC(__serpent_enc_blk8_avx)
11232
11233@@ -672,6 +674,7 @@ __serpent_dec_blk8_avx:
11234 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
11235 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
11236
11237+ pax_force_retaddr
11238 ret;
11239 ENDPROC(__serpent_dec_blk8_avx)
11240
11241@@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
11242
11243 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11244
11245+ pax_force_retaddr
11246 ret;
11247 ENDPROC(serpent_ecb_enc_8way_avx)
11248
11249@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
11250
11251 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
11252
11253+ pax_force_retaddr
11254 ret;
11255 ENDPROC(serpent_ecb_dec_8way_avx)
11256
11257@@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
11258
11259 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
11260
11261+ pax_force_retaddr
11262 ret;
11263 ENDPROC(serpent_cbc_dec_8way_avx)
11264
11265@@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx)
11266
11267 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11268
11269+ pax_force_retaddr
11270 ret;
11271 ENDPROC(serpent_ctr_8way_avx)
11272
11273@@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx)
11274 /* dst <= regs xor IVs(in dst) */
11275 store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11276
11277+ pax_force_retaddr
11278 ret;
11279 ENDPROC(serpent_xts_enc_8way_avx)
11280
11281@@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx)
11282 /* dst <= regs xor IVs(in dst) */
11283 store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
11284
11285+ pax_force_retaddr
11286 ret;
11287 ENDPROC(serpent_xts_dec_8way_avx)
11288diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
11289index acc066c..1559cc4 100644
11290--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
11291+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
11292@@ -25,6 +25,7 @@
11293 */
11294
11295 #include <linux/linkage.h>
11296+#include <asm/alternative-asm.h>
11297
11298 .file "serpent-sse2-x86_64-asm_64.S"
11299 .text
11300@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
11301 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
11302 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
11303
11304+ pax_force_retaddr
11305 ret;
11306
11307 .L__enc_xor8:
11308 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
11309 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
11310
11311+ pax_force_retaddr
11312 ret;
11313 ENDPROC(__serpent_enc_blk_8way)
11314
11315@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
11316 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
11317 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
11318
11319+ pax_force_retaddr
11320 ret;
11321 ENDPROC(serpent_dec_blk_8way)
11322diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
11323index a410950..3356d42 100644
11324--- a/arch/x86/crypto/sha1_ssse3_asm.S
11325+++ b/arch/x86/crypto/sha1_ssse3_asm.S
11326@@ -29,6 +29,7 @@
11327 */
11328
11329 #include <linux/linkage.h>
11330+#include <asm/alternative-asm.h>
11331
11332 #define CTX %rdi // arg1
11333 #define BUF %rsi // arg2
11334@@ -104,6 +105,7 @@
11335 pop %r12
11336 pop %rbp
11337 pop %rbx
11338+ pax_force_retaddr 0, 1
11339 ret
11340
11341 ENDPROC(\name)
11342diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
11343index 0505813..63b1d00 100644
11344--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
11345+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
11346@@ -24,6 +24,7 @@
11347 */
11348
11349 #include <linux/linkage.h>
11350+#include <asm/alternative-asm.h>
11351 #include "glue_helper-asm-avx.S"
11352
11353 .file "twofish-avx-x86_64-asm_64.S"
11354@@ -284,6 +285,7 @@ __twofish_enc_blk8:
11355 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
11356 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
11357
11358+ pax_force_retaddr 0, 1
11359 ret;
11360 ENDPROC(__twofish_enc_blk8)
11361
11362@@ -324,6 +326,7 @@ __twofish_dec_blk8:
11363 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
11364 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
11365
11366+ pax_force_retaddr 0, 1
11367 ret;
11368 ENDPROC(__twofish_dec_blk8)
11369
11370@@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way)
11371
11372 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
11373
11374+ pax_force_retaddr 0, 1
11375 ret;
11376 ENDPROC(twofish_ecb_enc_8way)
11377
11378@@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)
11379
11380 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11381
11382+ pax_force_retaddr 0, 1
11383 ret;
11384 ENDPROC(twofish_ecb_dec_8way)
11385
11386@@ -383,6 +388,7 @@ ENTRY(twofish_cbc_dec_8way)
11387
11388 popq %r12;
11389
11390+ pax_force_retaddr 0, 1
11391 ret;
11392 ENDPROC(twofish_cbc_dec_8way)
11393
11394@@ -408,6 +414,7 @@ ENTRY(twofish_ctr_8way)
11395
11396 popq %r12;
11397
11398+ pax_force_retaddr 0, 1
11399 ret;
11400 ENDPROC(twofish_ctr_8way)
11401
11402@@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way)
11403 /* dst <= regs xor IVs(in dst) */
11404 store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
11405
11406+ pax_force_retaddr 0, 1
11407 ret;
11408 ENDPROC(twofish_xts_enc_8way)
11409
11410@@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way)
11411 /* dst <= regs xor IVs(in dst) */
11412 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11413
11414+ pax_force_retaddr 0, 1
11415 ret;
11416 ENDPROC(twofish_xts_dec_8way)
11417diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
11418index 1c3b7ce..b365c5e 100644
11419--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
11420+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
11421@@ -21,6 +21,7 @@
11422 */
11423
11424 #include <linux/linkage.h>
11425+#include <asm/alternative-asm.h>
11426
11427 .file "twofish-x86_64-asm-3way.S"
11428 .text
11429@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
11430 popq %r13;
11431 popq %r14;
11432 popq %r15;
11433+ pax_force_retaddr 0, 1
11434 ret;
11435
11436 .L__enc_xor3:
11437@@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
11438 popq %r13;
11439 popq %r14;
11440 popq %r15;
11441+ pax_force_retaddr 0, 1
11442 ret;
11443 ENDPROC(__twofish_enc_blk_3way)
11444
11445@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
11446 popq %r13;
11447 popq %r14;
11448 popq %r15;
11449+ pax_force_retaddr 0, 1
11450 ret;
11451 ENDPROC(twofish_dec_blk_3way)
11452diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
11453index a039d21..29e7615 100644
11454--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
11455+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
11456@@ -22,6 +22,7 @@
11457
11458 #include <linux/linkage.h>
11459 #include <asm/asm-offsets.h>
11460+#include <asm/alternative-asm.h>
11461
11462 #define a_offset 0
11463 #define b_offset 4
11464@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
11465
11466 popq R1
11467 movq $1,%rax
11468+ pax_force_retaddr 0, 1
11469 ret
11470 ENDPROC(twofish_enc_blk)
11471
11472@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
11473
11474 popq R1
11475 movq $1,%rax
11476+ pax_force_retaddr 0, 1
11477 ret
11478 ENDPROC(twofish_dec_blk)
11479diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
11480index 52ff81c..98af645 100644
11481--- a/arch/x86/ia32/ia32_aout.c
11482+++ b/arch/x86/ia32/ia32_aout.c
11483@@ -159,6 +159,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
11484 unsigned long dump_start, dump_size;
11485 struct user32 dump;
11486
11487+ memset(&dump, 0, sizeof(dump));
11488+
11489 fs = get_fs();
11490 set_fs(KERNEL_DS);
11491 has_dumped = 1;
11492diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
11493index cf1a471..3bc4cf8 100644
11494--- a/arch/x86/ia32/ia32_signal.c
11495+++ b/arch/x86/ia32/ia32_signal.c
11496@@ -340,7 +340,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
11497 sp -= frame_size;
11498 /* Align the stack pointer according to the i386 ABI,
11499 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
11500- sp = ((sp + 4) & -16ul) - 4;
11501+ sp = ((sp - 12) & -16ul) - 4;
11502 return (void __user *) sp;
11503 }
11504
11505@@ -398,7 +398,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
11506 * These are actually not used anymore, but left because some
11507 * gdb versions depend on them as a marker.
11508 */
11509- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
11510+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
11511 } put_user_catch(err);
11512
11513 if (err)
11514@@ -440,7 +440,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
11515 0xb8,
11516 __NR_ia32_rt_sigreturn,
11517 0x80cd,
11518- 0,
11519+ 0
11520 };
11521
11522 frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
11523@@ -463,16 +463,18 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
11524
11525 if (ksig->ka.sa.sa_flags & SA_RESTORER)
11526 restorer = ksig->ka.sa.sa_restorer;
11527+ else if (current->mm->context.vdso)
11528+ /* Return stub is in 32bit vsyscall page */
11529+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
11530 else
11531- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
11532- rt_sigreturn);
11533+ restorer = &frame->retcode;
11534 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
11535
11536 /*
11537 * Not actually used anymore, but left because some gdb
11538 * versions need it.
11539 */
11540- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
11541+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
11542 } put_user_catch(err);
11543
11544 err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
11545diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
11546index 474dc1b..24aaa3e 100644
11547--- a/arch/x86/ia32/ia32entry.S
11548+++ b/arch/x86/ia32/ia32entry.S
11549@@ -15,8 +15,10 @@
11550 #include <asm/irqflags.h>
11551 #include <asm/asm.h>
11552 #include <asm/smap.h>
11553+#include <asm/pgtable.h>
11554 #include <linux/linkage.h>
11555 #include <linux/err.h>
11556+#include <asm/alternative-asm.h>
11557
11558 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
11559 #include <linux/elf-em.h>
11560@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
11561 ENDPROC(native_irq_enable_sysexit)
11562 #endif
11563
11564+ .macro pax_enter_kernel_user
11565+ pax_set_fptr_mask
11566+#ifdef CONFIG_PAX_MEMORY_UDEREF
11567+ call pax_enter_kernel_user
11568+#endif
11569+ .endm
11570+
11571+ .macro pax_exit_kernel_user
11572+#ifdef CONFIG_PAX_MEMORY_UDEREF
11573+ call pax_exit_kernel_user
11574+#endif
11575+#ifdef CONFIG_PAX_RANDKSTACK
11576+ pushq %rax
11577+ pushq %r11
11578+ call pax_randomize_kstack
11579+ popq %r11
11580+ popq %rax
11581+#endif
11582+ .endm
11583+
11584+ .macro pax_erase_kstack
11585+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11586+ call pax_erase_kstack
11587+#endif
11588+ .endm
11589+
11590 /*
11591 * 32bit SYSENTER instruction entry.
11592 *
11593@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
11594 CFI_REGISTER rsp,rbp
11595 SWAPGS_UNSAFE_STACK
11596 movq PER_CPU_VAR(kernel_stack), %rsp
11597- addq $(KERNEL_STACK_OFFSET),%rsp
11598- /*
11599- * No need to follow this irqs on/off section: the syscall
11600- * disabled irqs, here we enable it straight after entry:
11601- */
11602- ENABLE_INTERRUPTS(CLBR_NONE)
11603 movl %ebp,%ebp /* zero extension */
11604 pushq_cfi $__USER32_DS
11605 /*CFI_REL_OFFSET ss,0*/
11606@@ -135,24 +157,44 @@ ENTRY(ia32_sysenter_target)
11607 CFI_REL_OFFSET rsp,0
11608 pushfq_cfi
11609 /*CFI_REL_OFFSET rflags,0*/
11610- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
11611- CFI_REGISTER rip,r10
11612+ orl $X86_EFLAGS_IF,(%rsp)
11613+ GET_THREAD_INFO(%r11)
11614+ movl TI_sysenter_return(%r11), %r11d
11615+ CFI_REGISTER rip,r11
11616 pushq_cfi $__USER32_CS
11617 /*CFI_REL_OFFSET cs,0*/
11618 movl %eax, %eax
11619- pushq_cfi %r10
11620+ pushq_cfi %r11
11621 CFI_REL_OFFSET rip,0
11622 pushq_cfi %rax
11623 cld
11624 SAVE_ARGS 0,1,0
11625+ pax_enter_kernel_user
11626+
11627+#ifdef CONFIG_PAX_RANDKSTACK
11628+ pax_erase_kstack
11629+#endif
11630+
11631+ /*
11632+ * No need to follow this irqs on/off section: the syscall
11633+ * disabled irqs, here we enable it straight after entry:
11634+ */
11635+ ENABLE_INTERRUPTS(CLBR_NONE)
11636 /* no need to do an access_ok check here because rbp has been
11637 32bit zero extended */
11638+
11639+#ifdef CONFIG_PAX_MEMORY_UDEREF
11640+ mov pax_user_shadow_base,%r11
11641+ add %r11,%rbp
11642+#endif
11643+
11644 ASM_STAC
11645 1: movl (%rbp),%ebp
11646 _ASM_EXTABLE(1b,ia32_badarg)
11647 ASM_CLAC
11648- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11649- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11650+ GET_THREAD_INFO(%r11)
11651+ orl $TS_COMPAT,TI_status(%r11)
11652+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
11653 CFI_REMEMBER_STATE
11654 jnz sysenter_tracesys
11655 cmpq $(IA32_NR_syscalls-1),%rax
11656@@ -162,12 +204,15 @@ sysenter_do_call:
11657 sysenter_dispatch:
11658 call *ia32_sys_call_table(,%rax,8)
11659 movq %rax,RAX-ARGOFFSET(%rsp)
11660+ GET_THREAD_INFO(%r11)
11661 DISABLE_INTERRUPTS(CLBR_NONE)
11662 TRACE_IRQS_OFF
11663- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11664+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
11665 jnz sysexit_audit
11666 sysexit_from_sys_call:
11667- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11668+ pax_exit_kernel_user
11669+ pax_erase_kstack
11670+ andl $~TS_COMPAT,TI_status(%r11)
11671 /* clear IF, that popfq doesn't enable interrupts early */
11672 andl $~0x200,EFLAGS-R11(%rsp)
11673 movl RIP-R11(%rsp),%edx /* User %eip */
11674@@ -193,6 +238,9 @@ sysexit_from_sys_call:
11675 movl %eax,%esi /* 2nd arg: syscall number */
11676 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
11677 call __audit_syscall_entry
11678+
11679+ pax_erase_kstack
11680+
11681 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
11682 cmpq $(IA32_NR_syscalls-1),%rax
11683 ja ia32_badsys
11684@@ -204,7 +252,7 @@ sysexit_from_sys_call:
11685 .endm
11686
11687 .macro auditsys_exit exit
11688- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11689+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
11690 jnz ia32_ret_from_sys_call
11691 TRACE_IRQS_ON
11692 ENABLE_INTERRUPTS(CLBR_NONE)
11693@@ -215,11 +263,12 @@ sysexit_from_sys_call:
11694 1: setbe %al /* 1 if error, 0 if not */
11695 movzbl %al,%edi /* zero-extend that into %edi */
11696 call __audit_syscall_exit
11697+ GET_THREAD_INFO(%r11)
11698 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
11699 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
11700 DISABLE_INTERRUPTS(CLBR_NONE)
11701 TRACE_IRQS_OFF
11702- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11703+ testl %edi,TI_flags(%r11)
11704 jz \exit
11705 CLEAR_RREGS -ARGOFFSET
11706 jmp int_with_check
11707@@ -237,7 +286,7 @@ sysexit_audit:
11708
11709 sysenter_tracesys:
11710 #ifdef CONFIG_AUDITSYSCALL
11711- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11712+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
11713 jz sysenter_auditsys
11714 #endif
11715 SAVE_REST
11716@@ -249,6 +298,9 @@ sysenter_tracesys:
11717 RESTORE_REST
11718 cmpq $(IA32_NR_syscalls-1),%rax
11719 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
11720+
11721+ pax_erase_kstack
11722+
11723 jmp sysenter_do_call
11724 CFI_ENDPROC
11725 ENDPROC(ia32_sysenter_target)
11726@@ -276,19 +328,25 @@ ENDPROC(ia32_sysenter_target)
11727 ENTRY(ia32_cstar_target)
11728 CFI_STARTPROC32 simple
11729 CFI_SIGNAL_FRAME
11730- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
11731+ CFI_DEF_CFA rsp,0
11732 CFI_REGISTER rip,rcx
11733 /*CFI_REGISTER rflags,r11*/
11734 SWAPGS_UNSAFE_STACK
11735 movl %esp,%r8d
11736 CFI_REGISTER rsp,r8
11737 movq PER_CPU_VAR(kernel_stack),%rsp
11738+ SAVE_ARGS 8*6,0,0
11739+ pax_enter_kernel_user
11740+
11741+#ifdef CONFIG_PAX_RANDKSTACK
11742+ pax_erase_kstack
11743+#endif
11744+
11745 /*
11746 * No need to follow this irqs on/off section: the syscall
11747 * disabled irqs and here we enable it straight after entry:
11748 */
11749 ENABLE_INTERRUPTS(CLBR_NONE)
11750- SAVE_ARGS 8,0,0
11751 movl %eax,%eax /* zero extension */
11752 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
11753 movq %rcx,RIP-ARGOFFSET(%rsp)
11754@@ -304,12 +362,19 @@ ENTRY(ia32_cstar_target)
11755 /* no need to do an access_ok check here because r8 has been
11756 32bit zero extended */
11757 /* hardware stack frame is complete now */
11758+
11759+#ifdef CONFIG_PAX_MEMORY_UDEREF
11760+ mov pax_user_shadow_base,%r11
11761+ add %r11,%r8
11762+#endif
11763+
11764 ASM_STAC
11765 1: movl (%r8),%r9d
11766 _ASM_EXTABLE(1b,ia32_badarg)
11767 ASM_CLAC
11768- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11769- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11770+ GET_THREAD_INFO(%r11)
11771+ orl $TS_COMPAT,TI_status(%r11)
11772+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
11773 CFI_REMEMBER_STATE
11774 jnz cstar_tracesys
11775 cmpq $IA32_NR_syscalls-1,%rax
11776@@ -319,12 +384,15 @@ cstar_do_call:
11777 cstar_dispatch:
11778 call *ia32_sys_call_table(,%rax,8)
11779 movq %rax,RAX-ARGOFFSET(%rsp)
11780+ GET_THREAD_INFO(%r11)
11781 DISABLE_INTERRUPTS(CLBR_NONE)
11782 TRACE_IRQS_OFF
11783- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11784+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
11785 jnz sysretl_audit
11786 sysretl_from_sys_call:
11787- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11788+ pax_exit_kernel_user
11789+ pax_erase_kstack
11790+ andl $~TS_COMPAT,TI_status(%r11)
11791 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
11792 movl RIP-ARGOFFSET(%rsp),%ecx
11793 CFI_REGISTER rip,rcx
11794@@ -352,7 +420,7 @@ sysretl_audit:
11795
11796 cstar_tracesys:
11797 #ifdef CONFIG_AUDITSYSCALL
11798- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11799+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
11800 jz cstar_auditsys
11801 #endif
11802 xchgl %r9d,%ebp
11803@@ -366,6 +434,9 @@ cstar_tracesys:
11804 xchgl %ebp,%r9d
11805 cmpq $(IA32_NR_syscalls-1),%rax
11806 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
11807+
11808+ pax_erase_kstack
11809+
11810 jmp cstar_do_call
11811 END(ia32_cstar_target)
11812
11813@@ -407,19 +478,26 @@ ENTRY(ia32_syscall)
11814 CFI_REL_OFFSET rip,RIP-RIP
11815 PARAVIRT_ADJUST_EXCEPTION_FRAME
11816 SWAPGS
11817- /*
11818- * No need to follow this irqs on/off section: the syscall
11819- * disabled irqs and here we enable it straight after entry:
11820- */
11821- ENABLE_INTERRUPTS(CLBR_NONE)
11822 movl %eax,%eax
11823 pushq_cfi %rax
11824 cld
11825 /* note the registers are not zero extended to the sf.
11826 this could be a problem. */
11827 SAVE_ARGS 0,1,0
11828- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11829- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11830+ pax_enter_kernel_user
11831+
11832+#ifdef CONFIG_PAX_RANDKSTACK
11833+ pax_erase_kstack
11834+#endif
11835+
11836+ /*
11837+ * No need to follow this irqs on/off section: the syscall
11838+ * disabled irqs and here we enable it straight after entry:
11839+ */
11840+ ENABLE_INTERRUPTS(CLBR_NONE)
11841+ GET_THREAD_INFO(%r11)
11842+ orl $TS_COMPAT,TI_status(%r11)
11843+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
11844 jnz ia32_tracesys
11845 cmpq $(IA32_NR_syscalls-1),%rax
11846 ja ia32_badsys
11847@@ -442,6 +520,9 @@ ia32_tracesys:
11848 RESTORE_REST
11849 cmpq $(IA32_NR_syscalls-1),%rax
11850 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
11851+
11852+ pax_erase_kstack
11853+
11854 jmp ia32_do_call
11855 END(ia32_syscall)
11856
11857diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
11858index 8e0ceec..af13504 100644
11859--- a/arch/x86/ia32/sys_ia32.c
11860+++ b/arch/x86/ia32/sys_ia32.c
11861@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
11862 */
11863 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
11864 {
11865- typeof(ubuf->st_uid) uid = 0;
11866- typeof(ubuf->st_gid) gid = 0;
11867+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
11868+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
11869 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
11870 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
11871 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
11872diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
11873index 372231c..a5aa1a1 100644
11874--- a/arch/x86/include/asm/alternative-asm.h
11875+++ b/arch/x86/include/asm/alternative-asm.h
11876@@ -18,6 +18,45 @@
11877 .endm
11878 #endif
11879
11880+#ifdef KERNEXEC_PLUGIN
11881+ .macro pax_force_retaddr_bts rip=0
11882+ btsq $63,\rip(%rsp)
11883+ .endm
11884+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
11885+ .macro pax_force_retaddr rip=0, reload=0
11886+ btsq $63,\rip(%rsp)
11887+ .endm
11888+ .macro pax_force_fptr ptr
11889+ btsq $63,\ptr
11890+ .endm
11891+ .macro pax_set_fptr_mask
11892+ .endm
11893+#endif
11894+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
11895+ .macro pax_force_retaddr rip=0, reload=0
11896+ .if \reload
11897+ pax_set_fptr_mask
11898+ .endif
11899+ orq %r10,\rip(%rsp)
11900+ .endm
11901+ .macro pax_force_fptr ptr
11902+ orq %r10,\ptr
11903+ .endm
11904+ .macro pax_set_fptr_mask
11905+ movabs $0x8000000000000000,%r10
11906+ .endm
11907+#endif
11908+#else
11909+ .macro pax_force_retaddr rip=0, reload=0
11910+ .endm
11911+ .macro pax_force_fptr ptr
11912+ .endm
11913+ .macro pax_force_retaddr_bts rip=0
11914+ .endm
11915+ .macro pax_set_fptr_mask
11916+ .endm
11917+#endif
11918+
11919 .macro altinstruction_entry orig alt feature orig_len alt_len
11920 .long \orig - .
11921 .long \alt - .
11922diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
11923index 58ed6d9..f1cbe58 100644
11924--- a/arch/x86/include/asm/alternative.h
11925+++ b/arch/x86/include/asm/alternative.h
11926@@ -105,7 +105,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
11927 ".pushsection .discard,\"aw\",@progbits\n" \
11928 DISCARD_ENTRY(1) \
11929 ".popsection\n" \
11930- ".pushsection .altinstr_replacement, \"ax\"\n" \
11931+ ".pushsection .altinstr_replacement, \"a\"\n" \
11932 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
11933 ".popsection"
11934
11935@@ -119,7 +119,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
11936 DISCARD_ENTRY(1) \
11937 DISCARD_ENTRY(2) \
11938 ".popsection\n" \
11939- ".pushsection .altinstr_replacement, \"ax\"\n" \
11940+ ".pushsection .altinstr_replacement, \"a\"\n" \
11941 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
11942 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
11943 ".popsection"
11944diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
11945index 3388034..050f0b9 100644
11946--- a/arch/x86/include/asm/apic.h
11947+++ b/arch/x86/include/asm/apic.h
11948@@ -44,7 +44,7 @@ static inline void generic_apic_probe(void)
11949
11950 #ifdef CONFIG_X86_LOCAL_APIC
11951
11952-extern unsigned int apic_verbosity;
11953+extern int apic_verbosity;
11954 extern int local_apic_timer_c2_ok;
11955
11956 extern int disable_apic;
11957diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
11958index 20370c6..a2eb9b0 100644
11959--- a/arch/x86/include/asm/apm.h
11960+++ b/arch/x86/include/asm/apm.h
11961@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
11962 __asm__ __volatile__(APM_DO_ZERO_SEGS
11963 "pushl %%edi\n\t"
11964 "pushl %%ebp\n\t"
11965- "lcall *%%cs:apm_bios_entry\n\t"
11966+ "lcall *%%ss:apm_bios_entry\n\t"
11967 "setc %%al\n\t"
11968 "popl %%ebp\n\t"
11969 "popl %%edi\n\t"
11970@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
11971 __asm__ __volatile__(APM_DO_ZERO_SEGS
11972 "pushl %%edi\n\t"
11973 "pushl %%ebp\n\t"
11974- "lcall *%%cs:apm_bios_entry\n\t"
11975+ "lcall *%%ss:apm_bios_entry\n\t"
11976 "setc %%bl\n\t"
11977 "popl %%ebp\n\t"
11978 "popl %%edi\n\t"
11979diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
11980index 722aa3b..3a0bb27 100644
11981--- a/arch/x86/include/asm/atomic.h
11982+++ b/arch/x86/include/asm/atomic.h
11983@@ -22,7 +22,18 @@
11984 */
11985 static inline int atomic_read(const atomic_t *v)
11986 {
11987- return (*(volatile int *)&(v)->counter);
11988+ return (*(volatile const int *)&(v)->counter);
11989+}
11990+
11991+/**
11992+ * atomic_read_unchecked - read atomic variable
11993+ * @v: pointer of type atomic_unchecked_t
11994+ *
11995+ * Atomically reads the value of @v.
11996+ */
11997+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
11998+{
11999+ return (*(volatile const int *)&(v)->counter);
12000 }
12001
12002 /**
12003@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
12004 }
12005
12006 /**
12007+ * atomic_set_unchecked - set atomic variable
12008+ * @v: pointer of type atomic_unchecked_t
12009+ * @i: required value
12010+ *
12011+ * Atomically sets the value of @v to @i.
12012+ */
12013+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
12014+{
12015+ v->counter = i;
12016+}
12017+
12018+/**
12019 * atomic_add - add integer to atomic variable
12020 * @i: integer value to add
12021 * @v: pointer of type atomic_t
12022@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
12023 */
12024 static inline void atomic_add(int i, atomic_t *v)
12025 {
12026- asm volatile(LOCK_PREFIX "addl %1,%0"
12027+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
12028+
12029+#ifdef CONFIG_PAX_REFCOUNT
12030+ "jno 0f\n"
12031+ LOCK_PREFIX "subl %1,%0\n"
12032+ "int $4\n0:\n"
12033+ _ASM_EXTABLE(0b, 0b)
12034+#endif
12035+
12036+ : "+m" (v->counter)
12037+ : "ir" (i));
12038+}
12039+
12040+/**
12041+ * atomic_add_unchecked - add integer to atomic variable
12042+ * @i: integer value to add
12043+ * @v: pointer of type atomic_unchecked_t
12044+ *
12045+ * Atomically adds @i to @v.
12046+ */
12047+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
12048+{
12049+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
12050 : "+m" (v->counter)
12051 : "ir" (i));
12052 }
12053@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
12054 */
12055 static inline void atomic_sub(int i, atomic_t *v)
12056 {
12057- asm volatile(LOCK_PREFIX "subl %1,%0"
12058+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
12059+
12060+#ifdef CONFIG_PAX_REFCOUNT
12061+ "jno 0f\n"
12062+ LOCK_PREFIX "addl %1,%0\n"
12063+ "int $4\n0:\n"
12064+ _ASM_EXTABLE(0b, 0b)
12065+#endif
12066+
12067+ : "+m" (v->counter)
12068+ : "ir" (i));
12069+}
12070+
12071+/**
12072+ * atomic_sub_unchecked - subtract integer from atomic variable
12073+ * @i: integer value to subtract
12074+ * @v: pointer of type atomic_unchecked_t
12075+ *
12076+ * Atomically subtracts @i from @v.
12077+ */
12078+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
12079+{
12080+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
12081 : "+m" (v->counter)
12082 : "ir" (i));
12083 }
12084@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
12085 {
12086 unsigned char c;
12087
12088- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
12089+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
12090+
12091+#ifdef CONFIG_PAX_REFCOUNT
12092+ "jno 0f\n"
12093+ LOCK_PREFIX "addl %2,%0\n"
12094+ "int $4\n0:\n"
12095+ _ASM_EXTABLE(0b, 0b)
12096+#endif
12097+
12098+ "sete %1\n"
12099 : "+m" (v->counter), "=qm" (c)
12100 : "ir" (i) : "memory");
12101 return c;
12102@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
12103 */
12104 static inline void atomic_inc(atomic_t *v)
12105 {
12106- asm volatile(LOCK_PREFIX "incl %0"
12107+ asm volatile(LOCK_PREFIX "incl %0\n"
12108+
12109+#ifdef CONFIG_PAX_REFCOUNT
12110+ "jno 0f\n"
12111+ LOCK_PREFIX "decl %0\n"
12112+ "int $4\n0:\n"
12113+ _ASM_EXTABLE(0b, 0b)
12114+#endif
12115+
12116+ : "+m" (v->counter));
12117+}
12118+
12119+/**
12120+ * atomic_inc_unchecked - increment atomic variable
12121+ * @v: pointer of type atomic_unchecked_t
12122+ *
12123+ * Atomically increments @v by 1.
12124+ */
12125+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
12126+{
12127+ asm volatile(LOCK_PREFIX "incl %0\n"
12128 : "+m" (v->counter));
12129 }
12130
12131@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
12132 */
12133 static inline void atomic_dec(atomic_t *v)
12134 {
12135- asm volatile(LOCK_PREFIX "decl %0"
12136+ asm volatile(LOCK_PREFIX "decl %0\n"
12137+
12138+#ifdef CONFIG_PAX_REFCOUNT
12139+ "jno 0f\n"
12140+ LOCK_PREFIX "incl %0\n"
12141+ "int $4\n0:\n"
12142+ _ASM_EXTABLE(0b, 0b)
12143+#endif
12144+
12145+ : "+m" (v->counter));
12146+}
12147+
12148+/**
12149+ * atomic_dec_unchecked - decrement atomic variable
12150+ * @v: pointer of type atomic_unchecked_t
12151+ *
12152+ * Atomically decrements @v by 1.
12153+ */
12154+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
12155+{
12156+ asm volatile(LOCK_PREFIX "decl %0\n"
12157 : "+m" (v->counter));
12158 }
12159
12160@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
12161 {
12162 unsigned char c;
12163
12164- asm volatile(LOCK_PREFIX "decl %0; sete %1"
12165+ asm volatile(LOCK_PREFIX "decl %0\n"
12166+
12167+#ifdef CONFIG_PAX_REFCOUNT
12168+ "jno 0f\n"
12169+ LOCK_PREFIX "incl %0\n"
12170+ "int $4\n0:\n"
12171+ _ASM_EXTABLE(0b, 0b)
12172+#endif
12173+
12174+ "sete %1\n"
12175 : "+m" (v->counter), "=qm" (c)
12176 : : "memory");
12177 return c != 0;
12178@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
12179 {
12180 unsigned char c;
12181
12182- asm volatile(LOCK_PREFIX "incl %0; sete %1"
12183+ asm volatile(LOCK_PREFIX "incl %0\n"
12184+
12185+#ifdef CONFIG_PAX_REFCOUNT
12186+ "jno 0f\n"
12187+ LOCK_PREFIX "decl %0\n"
12188+ "int $4\n0:\n"
12189+ _ASM_EXTABLE(0b, 0b)
12190+#endif
12191+
12192+ "sete %1\n"
12193+ : "+m" (v->counter), "=qm" (c)
12194+ : : "memory");
12195+ return c != 0;
12196+}
12197+
12198+/**
12199+ * atomic_inc_and_test_unchecked - increment and test
12200+ * @v: pointer of type atomic_unchecked_t
12201+ *
12202+ * Atomically increments @v by 1
12203+ * and returns true if the result is zero, or false for all
12204+ * other cases.
12205+ */
12206+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
12207+{
12208+ unsigned char c;
12209+
12210+ asm volatile(LOCK_PREFIX "incl %0\n"
12211+ "sete %1\n"
12212 : "+m" (v->counter), "=qm" (c)
12213 : : "memory");
12214 return c != 0;
12215@@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
12216 {
12217 unsigned char c;
12218
12219- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
12220+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
12221+
12222+#ifdef CONFIG_PAX_REFCOUNT
12223+ "jno 0f\n"
12224+ LOCK_PREFIX "subl %2,%0\n"
12225+ "int $4\n0:\n"
12226+ _ASM_EXTABLE(0b, 0b)
12227+#endif
12228+
12229+ "sets %1\n"
12230 : "+m" (v->counter), "=qm" (c)
12231 : "ir" (i) : "memory");
12232 return c;
12233@@ -172,6 +334,18 @@ static inline int atomic_add_negative(int i, atomic_t *v)
12234 */
12235 static inline int atomic_add_return(int i, atomic_t *v)
12236 {
12237+ return i + xadd_check_overflow(&v->counter, i);
12238+}
12239+
12240+/**
12241+ * atomic_add_return_unchecked - add integer and return
12242+ * @i: integer value to add
12243+ * @v: pointer of type atomic_unchecked_t
12244+ *
12245+ * Atomically adds @i to @v and returns @i + @v
12246+ */
12247+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
12248+{
12249 return i + xadd(&v->counter, i);
12250 }
12251
12252@@ -188,6 +362,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
12253 }
12254
12255 #define atomic_inc_return(v) (atomic_add_return(1, v))
12256+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
12257+{
12258+ return atomic_add_return_unchecked(1, v);
12259+}
12260 #define atomic_dec_return(v) (atomic_sub_return(1, v))
12261
12262 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
12263@@ -195,11 +373,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
12264 return cmpxchg(&v->counter, old, new);
12265 }
12266
12267+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
12268+{
12269+ return cmpxchg(&v->counter, old, new);
12270+}
12271+
12272 static inline int atomic_xchg(atomic_t *v, int new)
12273 {
12274 return xchg(&v->counter, new);
12275 }
12276
12277+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
12278+{
12279+ return xchg(&v->counter, new);
12280+}
12281+
12282 /**
12283 * __atomic_add_unless - add unless the number is already a given value
12284 * @v: pointer of type atomic_t
12285@@ -211,12 +399,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
12286 */
12287 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
12288 {
12289- int c, old;
12290+ int c, old, new;
12291 c = atomic_read(v);
12292 for (;;) {
12293- if (unlikely(c == (u)))
12294+ if (unlikely(c == u))
12295 break;
12296- old = atomic_cmpxchg((v), c, c + (a));
12297+
12298+ asm volatile("addl %2,%0\n"
12299+
12300+#ifdef CONFIG_PAX_REFCOUNT
12301+ "jno 0f\n"
12302+ "subl %2,%0\n"
12303+ "int $4\n0:\n"
12304+ _ASM_EXTABLE(0b, 0b)
12305+#endif
12306+
12307+ : "=r" (new)
12308+ : "0" (c), "ir" (a));
12309+
12310+ old = atomic_cmpxchg(v, c, new);
12311 if (likely(old == c))
12312 break;
12313 c = old;
12314@@ -225,6 +426,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
12315 }
12316
12317 /**
12318+ * atomic_inc_not_zero_hint - increment if not null
12319+ * @v: pointer of type atomic_t
12320+ * @hint: probable value of the atomic before the increment
12321+ *
12322+ * This version of atomic_inc_not_zero() gives a hint of probable
12323+ * value of the atomic. This helps processor to not read the memory
12324+ * before doing the atomic read/modify/write cycle, lowering
12325+ * number of bus transactions on some arches.
12326+ *
12327+ * Returns: 0 if increment was not done, 1 otherwise.
12328+ */
12329+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
12330+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
12331+{
12332+ int val, c = hint, new;
12333+
12334+ /* sanity test, should be removed by compiler if hint is a constant */
12335+ if (!hint)
12336+ return __atomic_add_unless(v, 1, 0);
12337+
12338+ do {
12339+ asm volatile("incl %0\n"
12340+
12341+#ifdef CONFIG_PAX_REFCOUNT
12342+ "jno 0f\n"
12343+ "decl %0\n"
12344+ "int $4\n0:\n"
12345+ _ASM_EXTABLE(0b, 0b)
12346+#endif
12347+
12348+ : "=r" (new)
12349+ : "0" (c));
12350+
12351+ val = atomic_cmpxchg(v, c, new);
12352+ if (val == c)
12353+ return 1;
12354+ c = val;
12355+ } while (c);
12356+
12357+ return 0;
12358+}
12359+
12360+/**
12361 * atomic_inc_short - increment of a short integer
12362 * @v: pointer to type int
12363 *
12364@@ -253,14 +497,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
12365 #endif
12366
12367 /* These are x86-specific, used by some header files */
12368-#define atomic_clear_mask(mask, addr) \
12369- asm volatile(LOCK_PREFIX "andl %0,%1" \
12370- : : "r" (~(mask)), "m" (*(addr)) : "memory")
12371+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
12372+{
12373+ asm volatile(LOCK_PREFIX "andl %1,%0"
12374+ : "+m" (v->counter)
12375+ : "r" (~(mask))
12376+ : "memory");
12377+}
12378
12379-#define atomic_set_mask(mask, addr) \
12380- asm volatile(LOCK_PREFIX "orl %0,%1" \
12381- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
12382- : "memory")
12383+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
12384+{
12385+ asm volatile(LOCK_PREFIX "andl %1,%0"
12386+ : "+m" (v->counter)
12387+ : "r" (~(mask))
12388+ : "memory");
12389+}
12390+
12391+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
12392+{
12393+ asm volatile(LOCK_PREFIX "orl %1,%0"
12394+ : "+m" (v->counter)
12395+ : "r" (mask)
12396+ : "memory");
12397+}
12398+
12399+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
12400+{
12401+ asm volatile(LOCK_PREFIX "orl %1,%0"
12402+ : "+m" (v->counter)
12403+ : "r" (mask)
12404+ : "memory");
12405+}
12406
12407 /* Atomic operations are already serializing on x86 */
12408 #define smp_mb__before_atomic_dec() barrier()
12409diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
12410index b154de7..aadebd8 100644
12411--- a/arch/x86/include/asm/atomic64_32.h
12412+++ b/arch/x86/include/asm/atomic64_32.h
12413@@ -12,6 +12,14 @@ typedef struct {
12414 u64 __aligned(8) counter;
12415 } atomic64_t;
12416
12417+#ifdef CONFIG_PAX_REFCOUNT
12418+typedef struct {
12419+ u64 __aligned(8) counter;
12420+} atomic64_unchecked_t;
12421+#else
12422+typedef atomic64_t atomic64_unchecked_t;
12423+#endif
12424+
12425 #define ATOMIC64_INIT(val) { (val) }
12426
12427 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
12428@@ -37,21 +45,31 @@ typedef struct {
12429 ATOMIC64_DECL_ONE(sym##_386)
12430
12431 ATOMIC64_DECL_ONE(add_386);
12432+ATOMIC64_DECL_ONE(add_unchecked_386);
12433 ATOMIC64_DECL_ONE(sub_386);
12434+ATOMIC64_DECL_ONE(sub_unchecked_386);
12435 ATOMIC64_DECL_ONE(inc_386);
12436+ATOMIC64_DECL_ONE(inc_unchecked_386);
12437 ATOMIC64_DECL_ONE(dec_386);
12438+ATOMIC64_DECL_ONE(dec_unchecked_386);
12439 #endif
12440
12441 #define alternative_atomic64(f, out, in...) \
12442 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
12443
12444 ATOMIC64_DECL(read);
12445+ATOMIC64_DECL(read_unchecked);
12446 ATOMIC64_DECL(set);
12447+ATOMIC64_DECL(set_unchecked);
12448 ATOMIC64_DECL(xchg);
12449 ATOMIC64_DECL(add_return);
12450+ATOMIC64_DECL(add_return_unchecked);
12451 ATOMIC64_DECL(sub_return);
12452+ATOMIC64_DECL(sub_return_unchecked);
12453 ATOMIC64_DECL(inc_return);
12454+ATOMIC64_DECL(inc_return_unchecked);
12455 ATOMIC64_DECL(dec_return);
12456+ATOMIC64_DECL(dec_return_unchecked);
12457 ATOMIC64_DECL(dec_if_positive);
12458 ATOMIC64_DECL(inc_not_zero);
12459 ATOMIC64_DECL(add_unless);
12460@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
12461 }
12462
12463 /**
12464+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
12465+ * @p: pointer to type atomic64_unchecked_t
12466+ * @o: expected value
12467+ * @n: new value
12468+ *
12469+ * Atomically sets @v to @n if it was equal to @o and returns
12470+ * the old value.
12471+ */
12472+
12473+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
12474+{
12475+ return cmpxchg64(&v->counter, o, n);
12476+}
12477+
12478+/**
12479 * atomic64_xchg - xchg atomic64 variable
12480 * @v: pointer to type atomic64_t
12481 * @n: value to assign
12482@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
12483 }
12484
12485 /**
12486+ * atomic64_set_unchecked - set atomic64 variable
12487+ * @v: pointer to type atomic64_unchecked_t
12488+ * @n: value to assign
12489+ *
12490+ * Atomically sets the value of @v to @n.
12491+ */
12492+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
12493+{
12494+ unsigned high = (unsigned)(i >> 32);
12495+ unsigned low = (unsigned)i;
12496+ alternative_atomic64(set, /* no output */,
12497+ "S" (v), "b" (low), "c" (high)
12498+ : "eax", "edx", "memory");
12499+}
12500+
12501+/**
12502 * atomic64_read - read atomic64 variable
12503 * @v: pointer to type atomic64_t
12504 *
12505@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
12506 }
12507
12508 /**
12509+ * atomic64_read_unchecked - read atomic64 variable
12510+ * @v: pointer to type atomic64_unchecked_t
12511+ *
12512+ * Atomically reads the value of @v and returns it.
12513+ */
12514+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
12515+{
12516+ long long r;
12517+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
12518+ return r;
12519+ }
12520+
12521+/**
12522 * atomic64_add_return - add and return
12523 * @i: integer value to add
12524 * @v: pointer to type atomic64_t
12525@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
12526 return i;
12527 }
12528
12529+/**
12530+ * atomic64_add_return_unchecked - add and return
12531+ * @i: integer value to add
12532+ * @v: pointer to type atomic64_unchecked_t
12533+ *
12534+ * Atomically adds @i to @v and returns @i + *@v
12535+ */
12536+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
12537+{
12538+ alternative_atomic64(add_return_unchecked,
12539+ ASM_OUTPUT2("+A" (i), "+c" (v)),
12540+ ASM_NO_INPUT_CLOBBER("memory"));
12541+ return i;
12542+}
12543+
12544 /*
12545 * Other variants with different arithmetic operators:
12546 */
12547@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
12548 return a;
12549 }
12550
12551+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
12552+{
12553+ long long a;
12554+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
12555+ "S" (v) : "memory", "ecx");
12556+ return a;
12557+}
12558+
12559 static inline long long atomic64_dec_return(atomic64_t *v)
12560 {
12561 long long a;
12562@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
12563 }
12564
12565 /**
12566+ * atomic64_add_unchecked - add integer to atomic64 variable
12567+ * @i: integer value to add
12568+ * @v: pointer to type atomic64_unchecked_t
12569+ *
12570+ * Atomically adds @i to @v.
12571+ */
12572+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
12573+{
12574+ __alternative_atomic64(add_unchecked, add_return_unchecked,
12575+ ASM_OUTPUT2("+A" (i), "+c" (v)),
12576+ ASM_NO_INPUT_CLOBBER("memory"));
12577+ return i;
12578+}
12579+
12580+/**
12581 * atomic64_sub - subtract the atomic64 variable
12582 * @i: integer value to subtract
12583 * @v: pointer to type atomic64_t
12584diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
12585index 0e1cbfc..5623683 100644
12586--- a/arch/x86/include/asm/atomic64_64.h
12587+++ b/arch/x86/include/asm/atomic64_64.h
12588@@ -18,7 +18,19 @@
12589 */
12590 static inline long atomic64_read(const atomic64_t *v)
12591 {
12592- return (*(volatile long *)&(v)->counter);
12593+ return (*(volatile const long *)&(v)->counter);
12594+}
12595+
12596+/**
12597+ * atomic64_read_unchecked - read atomic64 variable
12598+ * @v: pointer of type atomic64_unchecked_t
12599+ *
12600+ * Atomically reads the value of @v.
12601+ * Doesn't imply a read memory barrier.
12602+ */
12603+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
12604+{
12605+ return (*(volatile const long *)&(v)->counter);
12606 }
12607
12608 /**
12609@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
12610 }
12611
12612 /**
12613+ * atomic64_set_unchecked - set atomic64 variable
12614+ * @v: pointer to type atomic64_unchecked_t
12615+ * @i: required value
12616+ *
12617+ * Atomically sets the value of @v to @i.
12618+ */
12619+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
12620+{
12621+ v->counter = i;
12622+}
12623+
12624+/**
12625 * atomic64_add - add integer to atomic64 variable
12626 * @i: integer value to add
12627 * @v: pointer to type atomic64_t
12628@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
12629 */
12630 static inline void atomic64_add(long i, atomic64_t *v)
12631 {
12632+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
12633+
12634+#ifdef CONFIG_PAX_REFCOUNT
12635+ "jno 0f\n"
12636+ LOCK_PREFIX "subq %1,%0\n"
12637+ "int $4\n0:\n"
12638+ _ASM_EXTABLE(0b, 0b)
12639+#endif
12640+
12641+ : "=m" (v->counter)
12642+ : "er" (i), "m" (v->counter));
12643+}
12644+
12645+/**
12646+ * atomic64_add_unchecked - add integer to atomic64 variable
12647+ * @i: integer value to add
12648+ * @v: pointer to type atomic64_unchecked_t
12649+ *
12650+ * Atomically adds @i to @v.
12651+ */
12652+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
12653+{
12654 asm volatile(LOCK_PREFIX "addq %1,%0"
12655 : "=m" (v->counter)
12656 : "er" (i), "m" (v->counter));
12657@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
12658 */
12659 static inline void atomic64_sub(long i, atomic64_t *v)
12660 {
12661- asm volatile(LOCK_PREFIX "subq %1,%0"
12662+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
12663+
12664+#ifdef CONFIG_PAX_REFCOUNT
12665+ "jno 0f\n"
12666+ LOCK_PREFIX "addq %1,%0\n"
12667+ "int $4\n0:\n"
12668+ _ASM_EXTABLE(0b, 0b)
12669+#endif
12670+
12671+ : "=m" (v->counter)
12672+ : "er" (i), "m" (v->counter));
12673+}
12674+
12675+/**
12676+ * atomic64_sub_unchecked - subtract the atomic64 variable
12677+ * @i: integer value to subtract
12678+ * @v: pointer to type atomic64_unchecked_t
12679+ *
12680+ * Atomically subtracts @i from @v.
12681+ */
12682+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
12683+{
12684+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
12685 : "=m" (v->counter)
12686 : "er" (i), "m" (v->counter));
12687 }
12688@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
12689 {
12690 unsigned char c;
12691
12692- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
12693+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
12694+
12695+#ifdef CONFIG_PAX_REFCOUNT
12696+ "jno 0f\n"
12697+ LOCK_PREFIX "addq %2,%0\n"
12698+ "int $4\n0:\n"
12699+ _ASM_EXTABLE(0b, 0b)
12700+#endif
12701+
12702+ "sete %1\n"
12703 : "=m" (v->counter), "=qm" (c)
12704 : "er" (i), "m" (v->counter) : "memory");
12705 return c;
12706@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
12707 */
12708 static inline void atomic64_inc(atomic64_t *v)
12709 {
12710+ asm volatile(LOCK_PREFIX "incq %0\n"
12711+
12712+#ifdef CONFIG_PAX_REFCOUNT
12713+ "jno 0f\n"
12714+ LOCK_PREFIX "decq %0\n"
12715+ "int $4\n0:\n"
12716+ _ASM_EXTABLE(0b, 0b)
12717+#endif
12718+
12719+ : "=m" (v->counter)
12720+ : "m" (v->counter));
12721+}
12722+
12723+/**
12724+ * atomic64_inc_unchecked - increment atomic64 variable
12725+ * @v: pointer to type atomic64_unchecked_t
12726+ *
12727+ * Atomically increments @v by 1.
12728+ */
12729+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
12730+{
12731 asm volatile(LOCK_PREFIX "incq %0"
12732 : "=m" (v->counter)
12733 : "m" (v->counter));
12734@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
12735 */
12736 static inline void atomic64_dec(atomic64_t *v)
12737 {
12738- asm volatile(LOCK_PREFIX "decq %0"
12739+ asm volatile(LOCK_PREFIX "decq %0\n"
12740+
12741+#ifdef CONFIG_PAX_REFCOUNT
12742+ "jno 0f\n"
12743+ LOCK_PREFIX "incq %0\n"
12744+ "int $4\n0:\n"
12745+ _ASM_EXTABLE(0b, 0b)
12746+#endif
12747+
12748+ : "=m" (v->counter)
12749+ : "m" (v->counter));
12750+}
12751+
12752+/**
12753+ * atomic64_dec_unchecked - decrement atomic64 variable
12754+ * @v: pointer to type atomic64_t
12755+ *
12756+ * Atomically decrements @v by 1.
12757+ */
12758+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
12759+{
12760+ asm volatile(LOCK_PREFIX "decq %0\n"
12761 : "=m" (v->counter)
12762 : "m" (v->counter));
12763 }
12764@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
12765 {
12766 unsigned char c;
12767
12768- asm volatile(LOCK_PREFIX "decq %0; sete %1"
12769+ asm volatile(LOCK_PREFIX "decq %0\n"
12770+
12771+#ifdef CONFIG_PAX_REFCOUNT
12772+ "jno 0f\n"
12773+ LOCK_PREFIX "incq %0\n"
12774+ "int $4\n0:\n"
12775+ _ASM_EXTABLE(0b, 0b)
12776+#endif
12777+
12778+ "sete %1\n"
12779 : "=m" (v->counter), "=qm" (c)
12780 : "m" (v->counter) : "memory");
12781 return c != 0;
12782@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
12783 {
12784 unsigned char c;
12785
12786- asm volatile(LOCK_PREFIX "incq %0; sete %1"
12787+ asm volatile(LOCK_PREFIX "incq %0\n"
12788+
12789+#ifdef CONFIG_PAX_REFCOUNT
12790+ "jno 0f\n"
12791+ LOCK_PREFIX "decq %0\n"
12792+ "int $4\n0:\n"
12793+ _ASM_EXTABLE(0b, 0b)
12794+#endif
12795+
12796+ "sete %1\n"
12797 : "=m" (v->counter), "=qm" (c)
12798 : "m" (v->counter) : "memory");
12799 return c != 0;
12800@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
12801 {
12802 unsigned char c;
12803
12804- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
12805+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
12806+
12807+#ifdef CONFIG_PAX_REFCOUNT
12808+ "jno 0f\n"
12809+ LOCK_PREFIX "subq %2,%0\n"
12810+ "int $4\n0:\n"
12811+ _ASM_EXTABLE(0b, 0b)
12812+#endif
12813+
12814+ "sets %1\n"
12815 : "=m" (v->counter), "=qm" (c)
12816 : "er" (i), "m" (v->counter) : "memory");
12817 return c;
12818@@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
12819 */
12820 static inline long atomic64_add_return(long i, atomic64_t *v)
12821 {
12822+ return i + xadd_check_overflow(&v->counter, i);
12823+}
12824+
12825+/**
12826+ * atomic64_add_return_unchecked - add and return
12827+ * @i: integer value to add
12828+ * @v: pointer to type atomic64_unchecked_t
12829+ *
12830+ * Atomically adds @i to @v and returns @i + @v
12831+ */
12832+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
12833+{
12834 return i + xadd(&v->counter, i);
12835 }
12836
12837@@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
12838 }
12839
12840 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
12841+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
12842+{
12843+ return atomic64_add_return_unchecked(1, v);
12844+}
12845 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
12846
12847 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
12848@@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
12849 return cmpxchg(&v->counter, old, new);
12850 }
12851
12852+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
12853+{
12854+ return cmpxchg(&v->counter, old, new);
12855+}
12856+
12857 static inline long atomic64_xchg(atomic64_t *v, long new)
12858 {
12859 return xchg(&v->counter, new);
12860@@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
12861 */
12862 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
12863 {
12864- long c, old;
12865+ long c, old, new;
12866 c = atomic64_read(v);
12867 for (;;) {
12868- if (unlikely(c == (u)))
12869+ if (unlikely(c == u))
12870 break;
12871- old = atomic64_cmpxchg((v), c, c + (a));
12872+
12873+ asm volatile("add %2,%0\n"
12874+
12875+#ifdef CONFIG_PAX_REFCOUNT
12876+ "jno 0f\n"
12877+ "sub %2,%0\n"
12878+ "int $4\n0:\n"
12879+ _ASM_EXTABLE(0b, 0b)
12880+#endif
12881+
12882+ : "=r" (new)
12883+ : "0" (c), "ir" (a));
12884+
12885+ old = atomic64_cmpxchg(v, c, new);
12886 if (likely(old == c))
12887 break;
12888 c = old;
12889 }
12890- return c != (u);
12891+ return c != u;
12892 }
12893
12894 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
12895diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
12896index 6dfd019..28e188d 100644
12897--- a/arch/x86/include/asm/bitops.h
12898+++ b/arch/x86/include/asm/bitops.h
12899@@ -40,7 +40,7 @@
12900 * a mask operation on a byte.
12901 */
12902 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
12903-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
12904+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
12905 #define CONST_MASK(nr) (1 << ((nr) & 7))
12906
12907 /**
12908@@ -486,7 +486,7 @@ static inline int fls(int x)
12909 * at position 64.
12910 */
12911 #ifdef CONFIG_X86_64
12912-static __always_inline int fls64(__u64 x)
12913+static __always_inline long fls64(__u64 x)
12914 {
12915 int bitpos = -1;
12916 /*
12917diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
12918index 4fa687a..60f2d39 100644
12919--- a/arch/x86/include/asm/boot.h
12920+++ b/arch/x86/include/asm/boot.h
12921@@ -6,10 +6,15 @@
12922 #include <uapi/asm/boot.h>
12923
12924 /* Physical address where kernel should be loaded. */
12925-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
12926+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
12927 + (CONFIG_PHYSICAL_ALIGN - 1)) \
12928 & ~(CONFIG_PHYSICAL_ALIGN - 1))
12929
12930+#ifndef __ASSEMBLY__
12931+extern unsigned char __LOAD_PHYSICAL_ADDR[];
12932+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
12933+#endif
12934+
12935 /* Minimum kernel alignment, as a power of two */
12936 #ifdef CONFIG_X86_64
12937 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
12938diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
12939index 48f99f1..d78ebf9 100644
12940--- a/arch/x86/include/asm/cache.h
12941+++ b/arch/x86/include/asm/cache.h
12942@@ -5,12 +5,13 @@
12943
12944 /* L1 cache line size */
12945 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
12946-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12947+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12948
12949 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
12950+#define __read_only __attribute__((__section__(".data..read_only")))
12951
12952 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
12953-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
12954+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
12955
12956 #ifdef CONFIG_X86_VSMP
12957 #ifdef CONFIG_SMP
12958diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
12959index 9863ee3..4a1f8e1 100644
12960--- a/arch/x86/include/asm/cacheflush.h
12961+++ b/arch/x86/include/asm/cacheflush.h
12962@@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
12963 unsigned long pg_flags = pg->flags & _PGMT_MASK;
12964
12965 if (pg_flags == _PGMT_DEFAULT)
12966- return -1;
12967+ return ~0UL;
12968 else if (pg_flags == _PGMT_WC)
12969 return _PAGE_CACHE_WC;
12970 else if (pg_flags == _PGMT_UC_MINUS)
12971diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
12972index 46fc474..b02b0f9 100644
12973--- a/arch/x86/include/asm/checksum_32.h
12974+++ b/arch/x86/include/asm/checksum_32.h
12975@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
12976 int len, __wsum sum,
12977 int *src_err_ptr, int *dst_err_ptr);
12978
12979+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
12980+ int len, __wsum sum,
12981+ int *src_err_ptr, int *dst_err_ptr);
12982+
12983+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
12984+ int len, __wsum sum,
12985+ int *src_err_ptr, int *dst_err_ptr);
12986+
12987 /*
12988 * Note: when you get a NULL pointer exception here this means someone
12989 * passed in an incorrect kernel address to one of these functions.
12990@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
12991 int *err_ptr)
12992 {
12993 might_sleep();
12994- return csum_partial_copy_generic((__force void *)src, dst,
12995+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
12996 len, sum, err_ptr, NULL);
12997 }
12998
12999@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
13000 {
13001 might_sleep();
13002 if (access_ok(VERIFY_WRITE, dst, len))
13003- return csum_partial_copy_generic(src, (__force void *)dst,
13004+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
13005 len, sum, NULL, err_ptr);
13006
13007 if (len)
13008diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
13009index d47786a..ce1b05d 100644
13010--- a/arch/x86/include/asm/cmpxchg.h
13011+++ b/arch/x86/include/asm/cmpxchg.h
13012@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
13013 __compiletime_error("Bad argument size for cmpxchg");
13014 extern void __xadd_wrong_size(void)
13015 __compiletime_error("Bad argument size for xadd");
13016+extern void __xadd_check_overflow_wrong_size(void)
13017+ __compiletime_error("Bad argument size for xadd_check_overflow");
13018 extern void __add_wrong_size(void)
13019 __compiletime_error("Bad argument size for add");
13020+extern void __add_check_overflow_wrong_size(void)
13021+ __compiletime_error("Bad argument size for add_check_overflow");
13022
13023 /*
13024 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
13025@@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
13026 __ret; \
13027 })
13028
13029+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
13030+ ({ \
13031+ __typeof__ (*(ptr)) __ret = (arg); \
13032+ switch (sizeof(*(ptr))) { \
13033+ case __X86_CASE_L: \
13034+ asm volatile (lock #op "l %0, %1\n" \
13035+ "jno 0f\n" \
13036+ "mov %0,%1\n" \
13037+ "int $4\n0:\n" \
13038+ _ASM_EXTABLE(0b, 0b) \
13039+ : "+r" (__ret), "+m" (*(ptr)) \
13040+ : : "memory", "cc"); \
13041+ break; \
13042+ case __X86_CASE_Q: \
13043+ asm volatile (lock #op "q %q0, %1\n" \
13044+ "jno 0f\n" \
13045+ "mov %0,%1\n" \
13046+ "int $4\n0:\n" \
13047+ _ASM_EXTABLE(0b, 0b) \
13048+ : "+r" (__ret), "+m" (*(ptr)) \
13049+ : : "memory", "cc"); \
13050+ break; \
13051+ default: \
13052+ __ ## op ## _check_overflow_wrong_size(); \
13053+ } \
13054+ __ret; \
13055+ })
13056+
13057 /*
13058 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
13059 * Since this is generally used to protect other memory information, we
13060@@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
13061 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
13062 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
13063
13064+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
13065+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
13066+
13067 #define __add(ptr, inc, lock) \
13068 ({ \
13069 __typeof__ (*(ptr)) __ret = (inc); \
13070diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
13071index 59c6c40..5e0b22c 100644
13072--- a/arch/x86/include/asm/compat.h
13073+++ b/arch/x86/include/asm/compat.h
13074@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
13075 typedef u32 compat_uint_t;
13076 typedef u32 compat_ulong_t;
13077 typedef u64 __attribute__((aligned(4))) compat_u64;
13078-typedef u32 compat_uptr_t;
13079+typedef u32 __user compat_uptr_t;
13080
13081 struct compat_timespec {
13082 compat_time_t tv_sec;
13083diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
13084index e99ac27..e89e28c 100644
13085--- a/arch/x86/include/asm/cpufeature.h
13086+++ b/arch/x86/include/asm/cpufeature.h
13087@@ -211,7 +211,7 @@
13088 #define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
13089 #define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */
13090 #define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
13091-#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
13092+#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Prevention */
13093 #define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
13094 #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
13095 #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
13096@@ -394,7 +394,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
13097 ".section .discard,\"aw\",@progbits\n"
13098 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
13099 ".previous\n"
13100- ".section .altinstr_replacement,\"ax\"\n"
13101+ ".section .altinstr_replacement,\"a\"\n"
13102 "3: movb $1,%0\n"
13103 "4:\n"
13104 ".previous\n"
13105diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
13106index 8bf1c06..b6ae785 100644
13107--- a/arch/x86/include/asm/desc.h
13108+++ b/arch/x86/include/asm/desc.h
13109@@ -4,6 +4,7 @@
13110 #include <asm/desc_defs.h>
13111 #include <asm/ldt.h>
13112 #include <asm/mmu.h>
13113+#include <asm/pgtable.h>
13114
13115 #include <linux/smp.h>
13116 #include <linux/percpu.h>
13117@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
13118
13119 desc->type = (info->read_exec_only ^ 1) << 1;
13120 desc->type |= info->contents << 2;
13121+ desc->type |= info->seg_not_present ^ 1;
13122
13123 desc->s = 1;
13124 desc->dpl = 0x3;
13125@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
13126 }
13127
13128 extern struct desc_ptr idt_descr;
13129-extern gate_desc idt_table[];
13130 extern struct desc_ptr nmi_idt_descr;
13131-extern gate_desc nmi_idt_table[];
13132-
13133-struct gdt_page {
13134- struct desc_struct gdt[GDT_ENTRIES];
13135-} __attribute__((aligned(PAGE_SIZE)));
13136-
13137-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
13138+extern gate_desc idt_table[256];
13139+extern gate_desc nmi_idt_table[256];
13140
13141+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
13142 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
13143 {
13144- return per_cpu(gdt_page, cpu).gdt;
13145+ return cpu_gdt_table[cpu];
13146 }
13147
13148 #ifdef CONFIG_X86_64
13149@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
13150 unsigned long base, unsigned dpl, unsigned flags,
13151 unsigned short seg)
13152 {
13153- gate->a = (seg << 16) | (base & 0xffff);
13154- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
13155+ gate->gate.offset_low = base;
13156+ gate->gate.seg = seg;
13157+ gate->gate.reserved = 0;
13158+ gate->gate.type = type;
13159+ gate->gate.s = 0;
13160+ gate->gate.dpl = dpl;
13161+ gate->gate.p = 1;
13162+ gate->gate.offset_high = base >> 16;
13163 }
13164
13165 #endif
13166@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
13167
13168 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
13169 {
13170+ pax_open_kernel();
13171 memcpy(&idt[entry], gate, sizeof(*gate));
13172+ pax_close_kernel();
13173 }
13174
13175 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
13176 {
13177+ pax_open_kernel();
13178 memcpy(&ldt[entry], desc, 8);
13179+ pax_close_kernel();
13180 }
13181
13182 static inline void
13183@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
13184 default: size = sizeof(*gdt); break;
13185 }
13186
13187+ pax_open_kernel();
13188 memcpy(&gdt[entry], desc, size);
13189+ pax_close_kernel();
13190 }
13191
13192 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
13193@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
13194
13195 static inline void native_load_tr_desc(void)
13196 {
13197+ pax_open_kernel();
13198 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
13199+ pax_close_kernel();
13200 }
13201
13202 static inline void native_load_gdt(const struct desc_ptr *dtr)
13203@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
13204 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
13205 unsigned int i;
13206
13207+ pax_open_kernel();
13208 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
13209 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
13210+ pax_close_kernel();
13211 }
13212
13213 #define _LDT_empty(info) \
13214@@ -287,7 +300,7 @@ static inline void load_LDT(mm_context_t *pc)
13215 preempt_enable();
13216 }
13217
13218-static inline unsigned long get_desc_base(const struct desc_struct *desc)
13219+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
13220 {
13221 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
13222 }
13223@@ -311,7 +324,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
13224 }
13225
13226 #ifdef CONFIG_X86_64
13227-static inline void set_nmi_gate(int gate, void *addr)
13228+static inline void set_nmi_gate(int gate, const void *addr)
13229 {
13230 gate_desc s;
13231
13232@@ -320,7 +333,7 @@ static inline void set_nmi_gate(int gate, void *addr)
13233 }
13234 #endif
13235
13236-static inline void _set_gate(int gate, unsigned type, void *addr,
13237+static inline void _set_gate(int gate, unsigned type, const void *addr,
13238 unsigned dpl, unsigned ist, unsigned seg)
13239 {
13240 gate_desc s;
13241@@ -339,7 +352,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
13242 * Pentium F0 0F bugfix can have resulted in the mapped
13243 * IDT being write-protected.
13244 */
13245-static inline void set_intr_gate(unsigned int n, void *addr)
13246+static inline void set_intr_gate(unsigned int n, const void *addr)
13247 {
13248 BUG_ON((unsigned)n > 0xFF);
13249 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
13250@@ -369,19 +382,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
13251 /*
13252 * This routine sets up an interrupt gate at directory privilege level 3.
13253 */
13254-static inline void set_system_intr_gate(unsigned int n, void *addr)
13255+static inline void set_system_intr_gate(unsigned int n, const void *addr)
13256 {
13257 BUG_ON((unsigned)n > 0xFF);
13258 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
13259 }
13260
13261-static inline void set_system_trap_gate(unsigned int n, void *addr)
13262+static inline void set_system_trap_gate(unsigned int n, const void *addr)
13263 {
13264 BUG_ON((unsigned)n > 0xFF);
13265 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
13266 }
13267
13268-static inline void set_trap_gate(unsigned int n, void *addr)
13269+static inline void set_trap_gate(unsigned int n, const void *addr)
13270 {
13271 BUG_ON((unsigned)n > 0xFF);
13272 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
13273@@ -390,19 +403,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
13274 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
13275 {
13276 BUG_ON((unsigned)n > 0xFF);
13277- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
13278+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
13279 }
13280
13281-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
13282+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
13283 {
13284 BUG_ON((unsigned)n > 0xFF);
13285 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
13286 }
13287
13288-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
13289+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
13290 {
13291 BUG_ON((unsigned)n > 0xFF);
13292 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
13293 }
13294
13295+#ifdef CONFIG_X86_32
13296+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
13297+{
13298+ struct desc_struct d;
13299+
13300+ if (likely(limit))
13301+ limit = (limit - 1UL) >> PAGE_SHIFT;
13302+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
13303+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
13304+}
13305+#endif
13306+
13307 #endif /* _ASM_X86_DESC_H */
13308diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
13309index 278441f..b95a174 100644
13310--- a/arch/x86/include/asm/desc_defs.h
13311+++ b/arch/x86/include/asm/desc_defs.h
13312@@ -31,6 +31,12 @@ struct desc_struct {
13313 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
13314 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
13315 };
13316+ struct {
13317+ u16 offset_low;
13318+ u16 seg;
13319+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
13320+ unsigned offset_high: 16;
13321+ } gate;
13322 };
13323 } __attribute__((packed));
13324
13325diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
13326index ced283a..ffe04cc 100644
13327--- a/arch/x86/include/asm/div64.h
13328+++ b/arch/x86/include/asm/div64.h
13329@@ -39,7 +39,7 @@
13330 __mod; \
13331 })
13332
13333-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
13334+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
13335 {
13336 union {
13337 u64 v64;
13338diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
13339index 9c999c1..3860cb8 100644
13340--- a/arch/x86/include/asm/elf.h
13341+++ b/arch/x86/include/asm/elf.h
13342@@ -243,7 +243,25 @@ extern int force_personality32;
13343 the loader. We need to make sure that it is out of the way of the program
13344 that it will "exec", and that there is sufficient room for the brk. */
13345
13346+#ifdef CONFIG_PAX_SEGMEXEC
13347+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
13348+#else
13349 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
13350+#endif
13351+
13352+#ifdef CONFIG_PAX_ASLR
13353+#ifdef CONFIG_X86_32
13354+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
13355+
13356+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
13357+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
13358+#else
13359+#define PAX_ELF_ET_DYN_BASE 0x400000UL
13360+
13361+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
13362+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
13363+#endif
13364+#endif
13365
13366 /* This yields a mask that user programs can use to figure out what
13367 instruction set this CPU supports. This could be done in user space,
13368@@ -296,16 +314,12 @@ do { \
13369
13370 #define ARCH_DLINFO \
13371 do { \
13372- if (vdso_enabled) \
13373- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
13374- (unsigned long)current->mm->context.vdso); \
13375+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
13376 } while (0)
13377
13378 #define ARCH_DLINFO_X32 \
13379 do { \
13380- if (vdso_enabled) \
13381- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
13382- (unsigned long)current->mm->context.vdso); \
13383+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
13384 } while (0)
13385
13386 #define AT_SYSINFO 32
13387@@ -320,7 +334,7 @@ else \
13388
13389 #endif /* !CONFIG_X86_32 */
13390
13391-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
13392+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
13393
13394 #define VDSO_ENTRY \
13395 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
13396@@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm,
13397 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
13398 #define compat_arch_setup_additional_pages syscall32_setup_pages
13399
13400-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
13401-#define arch_randomize_brk arch_randomize_brk
13402-
13403 /*
13404 * True on X86_32 or when emulating IA32 on X86_64
13405 */
13406diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
13407index 75ce3f4..882e801 100644
13408--- a/arch/x86/include/asm/emergency-restart.h
13409+++ b/arch/x86/include/asm/emergency-restart.h
13410@@ -13,6 +13,6 @@ enum reboot_type {
13411
13412 extern enum reboot_type reboot_type;
13413
13414-extern void machine_emergency_restart(void);
13415+extern void machine_emergency_restart(void) __noreturn;
13416
13417 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
13418diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
13419index e25cc33..425d099 100644
13420--- a/arch/x86/include/asm/fpu-internal.h
13421+++ b/arch/x86/include/asm/fpu-internal.h
13422@@ -127,7 +127,9 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
13423 ({ \
13424 int err; \
13425 asm volatile(ASM_STAC "\n" \
13426- "1:" #insn "\n\t" \
13427+ "1:" \
13428+ __copyuser_seg \
13429+ #insn "\n\t" \
13430 "2: " ASM_CLAC "\n" \
13431 ".section .fixup,\"ax\"\n" \
13432 "3: movl $-1,%[err]\n" \
13433@@ -300,7 +302,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
13434 "emms\n\t" /* clear stack tags */
13435 "fildl %P[addr]", /* set F?P to defined value */
13436 X86_FEATURE_FXSAVE_LEAK,
13437- [addr] "m" (tsk->thread.fpu.has_fpu));
13438+ [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
13439
13440 return fpu_restore_checking(&tsk->thread.fpu);
13441 }
13442diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
13443index be27ba1..8f13ff9 100644
13444--- a/arch/x86/include/asm/futex.h
13445+++ b/arch/x86/include/asm/futex.h
13446@@ -12,6 +12,7 @@
13447 #include <asm/smap.h>
13448
13449 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
13450+ typecheck(u32 __user *, uaddr); \
13451 asm volatile("\t" ASM_STAC "\n" \
13452 "1:\t" insn "\n" \
13453 "2:\t" ASM_CLAC "\n" \
13454@@ -20,15 +21,16 @@
13455 "\tjmp\t2b\n" \
13456 "\t.previous\n" \
13457 _ASM_EXTABLE(1b, 3b) \
13458- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
13459+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
13460 : "i" (-EFAULT), "0" (oparg), "1" (0))
13461
13462 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
13463+ typecheck(u32 __user *, uaddr); \
13464 asm volatile("\t" ASM_STAC "\n" \
13465 "1:\tmovl %2, %0\n" \
13466 "\tmovl\t%0, %3\n" \
13467 "\t" insn "\n" \
13468- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
13469+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
13470 "\tjnz\t1b\n" \
13471 "3:\t" ASM_CLAC "\n" \
13472 "\t.section .fixup,\"ax\"\n" \
13473@@ -38,7 +40,7 @@
13474 _ASM_EXTABLE(1b, 4b) \
13475 _ASM_EXTABLE(2b, 4b) \
13476 : "=&a" (oldval), "=&r" (ret), \
13477- "+m" (*uaddr), "=&r" (tem) \
13478+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
13479 : "r" (oparg), "i" (-EFAULT), "1" (0))
13480
13481 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
13482@@ -59,10 +61,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
13483
13484 switch (op) {
13485 case FUTEX_OP_SET:
13486- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
13487+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
13488 break;
13489 case FUTEX_OP_ADD:
13490- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
13491+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
13492 uaddr, oparg);
13493 break;
13494 case FUTEX_OP_OR:
13495@@ -116,14 +118,14 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
13496 return -EFAULT;
13497
13498 asm volatile("\t" ASM_STAC "\n"
13499- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
13500+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
13501 "2:\t" ASM_CLAC "\n"
13502 "\t.section .fixup, \"ax\"\n"
13503 "3:\tmov %3, %0\n"
13504 "\tjmp 2b\n"
13505 "\t.previous\n"
13506 _ASM_EXTABLE(1b, 3b)
13507- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
13508+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
13509 : "i" (-EFAULT), "r" (newval), "1" (oldval)
13510 : "memory"
13511 );
13512diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
13513index 1da97ef..9c2ebff 100644
13514--- a/arch/x86/include/asm/hw_irq.h
13515+++ b/arch/x86/include/asm/hw_irq.h
13516@@ -148,8 +148,8 @@ extern void setup_ioapic_dest(void);
13517 extern void enable_IO_APIC(void);
13518
13519 /* Statistics */
13520-extern atomic_t irq_err_count;
13521-extern atomic_t irq_mis_count;
13522+extern atomic_unchecked_t irq_err_count;
13523+extern atomic_unchecked_t irq_mis_count;
13524
13525 /* EISA */
13526 extern void eisa_set_level_irq(unsigned int irq);
13527diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
13528index a203659..9889f1c 100644
13529--- a/arch/x86/include/asm/i8259.h
13530+++ b/arch/x86/include/asm/i8259.h
13531@@ -62,7 +62,7 @@ struct legacy_pic {
13532 void (*init)(int auto_eoi);
13533 int (*irq_pending)(unsigned int irq);
13534 void (*make_irq)(unsigned int irq);
13535-};
13536+} __do_const;
13537
13538 extern struct legacy_pic *legacy_pic;
13539 extern struct legacy_pic null_legacy_pic;
13540diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
13541index d8e8eef..1765f78 100644
13542--- a/arch/x86/include/asm/io.h
13543+++ b/arch/x86/include/asm/io.h
13544@@ -51,12 +51,12 @@ static inline void name(type val, volatile void __iomem *addr) \
13545 "m" (*(volatile type __force *)addr) barrier); }
13546
13547 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
13548-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
13549-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
13550+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
13551+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
13552
13553 build_mmio_read(__readb, "b", unsigned char, "=q", )
13554-build_mmio_read(__readw, "w", unsigned short, "=r", )
13555-build_mmio_read(__readl, "l", unsigned int, "=r", )
13556+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
13557+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
13558
13559 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
13560 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
13561@@ -184,7 +184,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
13562 return ioremap_nocache(offset, size);
13563 }
13564
13565-extern void iounmap(volatile void __iomem *addr);
13566+extern void iounmap(const volatile void __iomem *addr);
13567
13568 extern void set_iounmap_nonlazy(void);
13569
13570@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
13571
13572 #include <linux/vmalloc.h>
13573
13574+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
13575+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
13576+{
13577+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
13578+}
13579+
13580+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
13581+{
13582+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
13583+}
13584+
13585 /*
13586 * Convert a virtual cached pointer to an uncached pointer
13587 */
13588diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
13589index bba3cf8..06bc8da 100644
13590--- a/arch/x86/include/asm/irqflags.h
13591+++ b/arch/x86/include/asm/irqflags.h
13592@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
13593 sti; \
13594 sysexit
13595
13596+#define GET_CR0_INTO_RDI mov %cr0, %rdi
13597+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
13598+#define GET_CR3_INTO_RDI mov %cr3, %rdi
13599+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
13600+
13601 #else
13602 #define INTERRUPT_RETURN iret
13603 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
13604diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
13605index 5a6d287..f815789 100644
13606--- a/arch/x86/include/asm/kprobes.h
13607+++ b/arch/x86/include/asm/kprobes.h
13608@@ -38,13 +38,8 @@ typedef u8 kprobe_opcode_t;
13609 #define RELATIVEJUMP_SIZE 5
13610 #define RELATIVECALL_OPCODE 0xe8
13611 #define RELATIVE_ADDR_SIZE 4
13612-#define MAX_STACK_SIZE 64
13613-#define MIN_STACK_SIZE(ADDR) \
13614- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
13615- THREAD_SIZE - (unsigned long)(ADDR))) \
13616- ? (MAX_STACK_SIZE) \
13617- : (((unsigned long)current_thread_info()) + \
13618- THREAD_SIZE - (unsigned long)(ADDR)))
13619+#define MAX_STACK_SIZE 64UL
13620+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
13621
13622 #define flush_insn_slot(p) do { } while (0)
13623
13624diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
13625index 2d89e39..baee879 100644
13626--- a/arch/x86/include/asm/local.h
13627+++ b/arch/x86/include/asm/local.h
13628@@ -10,33 +10,97 @@ typedef struct {
13629 atomic_long_t a;
13630 } local_t;
13631
13632+typedef struct {
13633+ atomic_long_unchecked_t a;
13634+} local_unchecked_t;
13635+
13636 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
13637
13638 #define local_read(l) atomic_long_read(&(l)->a)
13639+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
13640 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
13641+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
13642
13643 static inline void local_inc(local_t *l)
13644 {
13645- asm volatile(_ASM_INC "%0"
13646+ asm volatile(_ASM_INC "%0\n"
13647+
13648+#ifdef CONFIG_PAX_REFCOUNT
13649+ "jno 0f\n"
13650+ _ASM_DEC "%0\n"
13651+ "int $4\n0:\n"
13652+ _ASM_EXTABLE(0b, 0b)
13653+#endif
13654+
13655+ : "+m" (l->a.counter));
13656+}
13657+
13658+static inline void local_inc_unchecked(local_unchecked_t *l)
13659+{
13660+ asm volatile(_ASM_INC "%0\n"
13661 : "+m" (l->a.counter));
13662 }
13663
13664 static inline void local_dec(local_t *l)
13665 {
13666- asm volatile(_ASM_DEC "%0"
13667+ asm volatile(_ASM_DEC "%0\n"
13668+
13669+#ifdef CONFIG_PAX_REFCOUNT
13670+ "jno 0f\n"
13671+ _ASM_INC "%0\n"
13672+ "int $4\n0:\n"
13673+ _ASM_EXTABLE(0b, 0b)
13674+#endif
13675+
13676+ : "+m" (l->a.counter));
13677+}
13678+
13679+static inline void local_dec_unchecked(local_unchecked_t *l)
13680+{
13681+ asm volatile(_ASM_DEC "%0\n"
13682 : "+m" (l->a.counter));
13683 }
13684
13685 static inline void local_add(long i, local_t *l)
13686 {
13687- asm volatile(_ASM_ADD "%1,%0"
13688+ asm volatile(_ASM_ADD "%1,%0\n"
13689+
13690+#ifdef CONFIG_PAX_REFCOUNT
13691+ "jno 0f\n"
13692+ _ASM_SUB "%1,%0\n"
13693+ "int $4\n0:\n"
13694+ _ASM_EXTABLE(0b, 0b)
13695+#endif
13696+
13697+ : "+m" (l->a.counter)
13698+ : "ir" (i));
13699+}
13700+
13701+static inline void local_add_unchecked(long i, local_unchecked_t *l)
13702+{
13703+ asm volatile(_ASM_ADD "%1,%0\n"
13704 : "+m" (l->a.counter)
13705 : "ir" (i));
13706 }
13707
13708 static inline void local_sub(long i, local_t *l)
13709 {
13710- asm volatile(_ASM_SUB "%1,%0"
13711+ asm volatile(_ASM_SUB "%1,%0\n"
13712+
13713+#ifdef CONFIG_PAX_REFCOUNT
13714+ "jno 0f\n"
13715+ _ASM_ADD "%1,%0\n"
13716+ "int $4\n0:\n"
13717+ _ASM_EXTABLE(0b, 0b)
13718+#endif
13719+
13720+ : "+m" (l->a.counter)
13721+ : "ir" (i));
13722+}
13723+
13724+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
13725+{
13726+ asm volatile(_ASM_SUB "%1,%0\n"
13727 : "+m" (l->a.counter)
13728 : "ir" (i));
13729 }
13730@@ -54,7 +118,16 @@ static inline int local_sub_and_test(long i, local_t *l)
13731 {
13732 unsigned char c;
13733
13734- asm volatile(_ASM_SUB "%2,%0; sete %1"
13735+ asm volatile(_ASM_SUB "%2,%0\n"
13736+
13737+#ifdef CONFIG_PAX_REFCOUNT
13738+ "jno 0f\n"
13739+ _ASM_ADD "%2,%0\n"
13740+ "int $4\n0:\n"
13741+ _ASM_EXTABLE(0b, 0b)
13742+#endif
13743+
13744+ "sete %1\n"
13745 : "+m" (l->a.counter), "=qm" (c)
13746 : "ir" (i) : "memory");
13747 return c;
13748@@ -72,7 +145,16 @@ static inline int local_dec_and_test(local_t *l)
13749 {
13750 unsigned char c;
13751
13752- asm volatile(_ASM_DEC "%0; sete %1"
13753+ asm volatile(_ASM_DEC "%0\n"
13754+
13755+#ifdef CONFIG_PAX_REFCOUNT
13756+ "jno 0f\n"
13757+ _ASM_INC "%0\n"
13758+ "int $4\n0:\n"
13759+ _ASM_EXTABLE(0b, 0b)
13760+#endif
13761+
13762+ "sete %1\n"
13763 : "+m" (l->a.counter), "=qm" (c)
13764 : : "memory");
13765 return c != 0;
13766@@ -90,7 +172,16 @@ static inline int local_inc_and_test(local_t *l)
13767 {
13768 unsigned char c;
13769
13770- asm volatile(_ASM_INC "%0; sete %1"
13771+ asm volatile(_ASM_INC "%0\n"
13772+
13773+#ifdef CONFIG_PAX_REFCOUNT
13774+ "jno 0f\n"
13775+ _ASM_DEC "%0\n"
13776+ "int $4\n0:\n"
13777+ _ASM_EXTABLE(0b, 0b)
13778+#endif
13779+
13780+ "sete %1\n"
13781 : "+m" (l->a.counter), "=qm" (c)
13782 : : "memory");
13783 return c != 0;
13784@@ -109,7 +200,16 @@ static inline int local_add_negative(long i, local_t *l)
13785 {
13786 unsigned char c;
13787
13788- asm volatile(_ASM_ADD "%2,%0; sets %1"
13789+ asm volatile(_ASM_ADD "%2,%0\n"
13790+
13791+#ifdef CONFIG_PAX_REFCOUNT
13792+ "jno 0f\n"
13793+ _ASM_SUB "%2,%0\n"
13794+ "int $4\n0:\n"
13795+ _ASM_EXTABLE(0b, 0b)
13796+#endif
13797+
13798+ "sets %1\n"
13799 : "+m" (l->a.counter), "=qm" (c)
13800 : "ir" (i) : "memory");
13801 return c;
13802@@ -125,6 +225,30 @@ static inline int local_add_negative(long i, local_t *l)
13803 static inline long local_add_return(long i, local_t *l)
13804 {
13805 long __i = i;
13806+ asm volatile(_ASM_XADD "%0, %1\n"
13807+
13808+#ifdef CONFIG_PAX_REFCOUNT
13809+ "jno 0f\n"
13810+ _ASM_MOV "%0,%1\n"
13811+ "int $4\n0:\n"
13812+ _ASM_EXTABLE(0b, 0b)
13813+#endif
13814+
13815+ : "+r" (i), "+m" (l->a.counter)
13816+ : : "memory");
13817+ return i + __i;
13818+}
13819+
13820+/**
13821+ * local_add_return_unchecked - add and return
13822+ * @i: integer value to add
13823+ * @l: pointer to type local_unchecked_t
13824+ *
13825+ * Atomically adds @i to @l and returns @i + @l
13826+ */
13827+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
13828+{
13829+ long __i = i;
13830 asm volatile(_ASM_XADD "%0, %1;"
13831 : "+r" (i), "+m" (l->a.counter)
13832 : : "memory");
13833@@ -141,6 +265,8 @@ static inline long local_sub_return(long i, local_t *l)
13834
13835 #define local_cmpxchg(l, o, n) \
13836 (cmpxchg_local(&((l)->a.counter), (o), (n)))
13837+#define local_cmpxchg_unchecked(l, o, n) \
13838+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
13839 /* Always has a lock prefix */
13840 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
13841
13842diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
13843new file mode 100644
13844index 0000000..2bfd3ba
13845--- /dev/null
13846+++ b/arch/x86/include/asm/mman.h
13847@@ -0,0 +1,15 @@
13848+#ifndef _X86_MMAN_H
13849+#define _X86_MMAN_H
13850+
13851+#include <uapi/asm/mman.h>
13852+
13853+#ifdef __KERNEL__
13854+#ifndef __ASSEMBLY__
13855+#ifdef CONFIG_X86_32
13856+#define arch_mmap_check i386_mmap_check
13857+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
13858+#endif
13859+#endif
13860+#endif
13861+
13862+#endif /* X86_MMAN_H */
13863diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
13864index 5f55e69..e20bfb1 100644
13865--- a/arch/x86/include/asm/mmu.h
13866+++ b/arch/x86/include/asm/mmu.h
13867@@ -9,7 +9,7 @@
13868 * we put the segment information here.
13869 */
13870 typedef struct {
13871- void *ldt;
13872+ struct desc_struct *ldt;
13873 int size;
13874
13875 #ifdef CONFIG_X86_64
13876@@ -18,7 +18,19 @@ typedef struct {
13877 #endif
13878
13879 struct mutex lock;
13880- void *vdso;
13881+ unsigned long vdso;
13882+
13883+#ifdef CONFIG_X86_32
13884+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
13885+ unsigned long user_cs_base;
13886+ unsigned long user_cs_limit;
13887+
13888+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
13889+ cpumask_t cpu_user_cs_mask;
13890+#endif
13891+
13892+#endif
13893+#endif
13894 } mm_context_t;
13895
13896 #ifdef CONFIG_SMP
13897diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
13898index cdbf367..adb37ac 100644
13899--- a/arch/x86/include/asm/mmu_context.h
13900+++ b/arch/x86/include/asm/mmu_context.h
13901@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
13902
13903 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
13904 {
13905+
13906+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13907+ unsigned int i;
13908+ pgd_t *pgd;
13909+
13910+ pax_open_kernel();
13911+ pgd = get_cpu_pgd(smp_processor_id());
13912+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
13913+ set_pgd_batched(pgd+i, native_make_pgd(0));
13914+ pax_close_kernel();
13915+#endif
13916+
13917 #ifdef CONFIG_SMP
13918 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
13919 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
13920@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
13921 struct task_struct *tsk)
13922 {
13923 unsigned cpu = smp_processor_id();
13924+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
13925+ int tlbstate = TLBSTATE_OK;
13926+#endif
13927
13928 if (likely(prev != next)) {
13929 #ifdef CONFIG_SMP
13930+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
13931+ tlbstate = this_cpu_read(cpu_tlbstate.state);
13932+#endif
13933 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
13934 this_cpu_write(cpu_tlbstate.active_mm, next);
13935 #endif
13936 cpumask_set_cpu(cpu, mm_cpumask(next));
13937
13938 /* Re-load page tables */
13939+#ifdef CONFIG_PAX_PER_CPU_PGD
13940+ pax_open_kernel();
13941+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
13942+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
13943+ pax_close_kernel();
13944+ load_cr3(get_cpu_pgd(cpu));
13945+#else
13946 load_cr3(next->pgd);
13947+#endif
13948
13949 /* stop flush ipis for the previous mm */
13950 cpumask_clear_cpu(cpu, mm_cpumask(prev));
13951@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
13952 */
13953 if (unlikely(prev->context.ldt != next->context.ldt))
13954 load_LDT_nolock(&next->context);
13955- }
13956+
13957+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
13958+ if (!(__supported_pte_mask & _PAGE_NX)) {
13959+ smp_mb__before_clear_bit();
13960+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
13961+ smp_mb__after_clear_bit();
13962+ cpu_set(cpu, next->context.cpu_user_cs_mask);
13963+ }
13964+#endif
13965+
13966+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
13967+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
13968+ prev->context.user_cs_limit != next->context.user_cs_limit))
13969+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
13970 #ifdef CONFIG_SMP
13971+ else if (unlikely(tlbstate != TLBSTATE_OK))
13972+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
13973+#endif
13974+#endif
13975+
13976+ }
13977 else {
13978+
13979+#ifdef CONFIG_PAX_PER_CPU_PGD
13980+ pax_open_kernel();
13981+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
13982+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
13983+ pax_close_kernel();
13984+ load_cr3(get_cpu_pgd(cpu));
13985+#endif
13986+
13987+#ifdef CONFIG_SMP
13988 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
13989 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
13990
13991@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
13992 * tlb flush IPI delivery. We must reload CR3
13993 * to make sure to use no freed page tables.
13994 */
13995+
13996+#ifndef CONFIG_PAX_PER_CPU_PGD
13997 load_cr3(next->pgd);
13998+#endif
13999+
14000 load_LDT_nolock(&next->context);
14001+
14002+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
14003+ if (!(__supported_pte_mask & _PAGE_NX))
14004+ cpu_set(cpu, next->context.cpu_user_cs_mask);
14005+#endif
14006+
14007+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
14008+#ifdef CONFIG_PAX_PAGEEXEC
14009+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
14010+#endif
14011+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
14012+#endif
14013+
14014 }
14015+#endif
14016 }
14017-#endif
14018 }
14019
14020 #define activate_mm(prev, next) \
14021diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
14022index e3b7819..b257c64 100644
14023--- a/arch/x86/include/asm/module.h
14024+++ b/arch/x86/include/asm/module.h
14025@@ -5,6 +5,7 @@
14026
14027 #ifdef CONFIG_X86_64
14028 /* X86_64 does not define MODULE_PROC_FAMILY */
14029+#define MODULE_PROC_FAMILY ""
14030 #elif defined CONFIG_M486
14031 #define MODULE_PROC_FAMILY "486 "
14032 #elif defined CONFIG_M586
14033@@ -57,8 +58,20 @@
14034 #error unknown processor family
14035 #endif
14036
14037-#ifdef CONFIG_X86_32
14038-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
14039+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
14040+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
14041+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
14042+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
14043+#else
14044+#define MODULE_PAX_KERNEXEC ""
14045 #endif
14046
14047+#ifdef CONFIG_PAX_MEMORY_UDEREF
14048+#define MODULE_PAX_UDEREF "UDEREF "
14049+#else
14050+#define MODULE_PAX_UDEREF ""
14051+#endif
14052+
14053+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
14054+
14055 #endif /* _ASM_X86_MODULE_H */
14056diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
14057index 86f9301..b365cda 100644
14058--- a/arch/x86/include/asm/nmi.h
14059+++ b/arch/x86/include/asm/nmi.h
14060@@ -40,11 +40,11 @@ struct nmiaction {
14061 nmi_handler_t handler;
14062 unsigned long flags;
14063 const char *name;
14064-};
14065+} __do_const;
14066
14067 #define register_nmi_handler(t, fn, fg, n, init...) \
14068 ({ \
14069- static struct nmiaction init fn##_na = { \
14070+ static const struct nmiaction init fn##_na = { \
14071 .handler = (fn), \
14072 .name = (n), \
14073 .flags = (fg), \
14074@@ -52,7 +52,7 @@ struct nmiaction {
14075 __register_nmi_handler((t), &fn##_na); \
14076 })
14077
14078-int __register_nmi_handler(unsigned int, struct nmiaction *);
14079+int __register_nmi_handler(unsigned int, const struct nmiaction *);
14080
14081 void unregister_nmi_handler(unsigned int, const char *);
14082
14083diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
14084index c878924..21f4889 100644
14085--- a/arch/x86/include/asm/page.h
14086+++ b/arch/x86/include/asm/page.h
14087@@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
14088 __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
14089
14090 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
14091+#define __early_va(x) ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base))
14092
14093 #define __boot_va(x) __va(x)
14094 #define __boot_pa(x) __pa(x)
14095diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
14096index 0f1ddee..e2fc3d1 100644
14097--- a/arch/x86/include/asm/page_64.h
14098+++ b/arch/x86/include/asm/page_64.h
14099@@ -7,9 +7,9 @@
14100
14101 /* duplicated to the one in bootmem.h */
14102 extern unsigned long max_pfn;
14103-extern unsigned long phys_base;
14104+extern const unsigned long phys_base;
14105
14106-static inline unsigned long __phys_addr_nodebug(unsigned long x)
14107+static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
14108 {
14109 unsigned long y = x - __START_KERNEL_map;
14110
14111diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
14112index cfdc9ee..3f7b5d6 100644
14113--- a/arch/x86/include/asm/paravirt.h
14114+++ b/arch/x86/include/asm/paravirt.h
14115@@ -560,7 +560,7 @@ static inline pmd_t __pmd(pmdval_t val)
14116 return (pmd_t) { ret };
14117 }
14118
14119-static inline pmdval_t pmd_val(pmd_t pmd)
14120+static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
14121 {
14122 pmdval_t ret;
14123
14124@@ -626,6 +626,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
14125 val);
14126 }
14127
14128+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
14129+{
14130+ pgdval_t val = native_pgd_val(pgd);
14131+
14132+ if (sizeof(pgdval_t) > sizeof(long))
14133+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
14134+ val, (u64)val >> 32);
14135+ else
14136+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
14137+ val);
14138+}
14139+
14140 static inline void pgd_clear(pgd_t *pgdp)
14141 {
14142 set_pgd(pgdp, __pgd(0));
14143@@ -710,6 +722,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
14144 pv_mmu_ops.set_fixmap(idx, phys, flags);
14145 }
14146
14147+#ifdef CONFIG_PAX_KERNEXEC
14148+static inline unsigned long pax_open_kernel(void)
14149+{
14150+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
14151+}
14152+
14153+static inline unsigned long pax_close_kernel(void)
14154+{
14155+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
14156+}
14157+#else
14158+static inline unsigned long pax_open_kernel(void) { return 0; }
14159+static inline unsigned long pax_close_kernel(void) { return 0; }
14160+#endif
14161+
14162 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
14163
14164 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
14165@@ -926,7 +953,7 @@ extern void default_banner(void);
14166
14167 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
14168 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
14169-#define PARA_INDIRECT(addr) *%cs:addr
14170+#define PARA_INDIRECT(addr) *%ss:addr
14171 #endif
14172
14173 #define INTERRUPT_RETURN \
14174@@ -1001,6 +1028,21 @@ extern void default_banner(void);
14175 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
14176 CLBR_NONE, \
14177 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
14178+
14179+#define GET_CR0_INTO_RDI \
14180+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
14181+ mov %rax,%rdi
14182+
14183+#define SET_RDI_INTO_CR0 \
14184+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
14185+
14186+#define GET_CR3_INTO_RDI \
14187+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
14188+ mov %rax,%rdi
14189+
14190+#define SET_RDI_INTO_CR3 \
14191+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
14192+
14193 #endif /* CONFIG_X86_32 */
14194
14195 #endif /* __ASSEMBLY__ */
14196diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
14197index 0db1fca..52310cc 100644
14198--- a/arch/x86/include/asm/paravirt_types.h
14199+++ b/arch/x86/include/asm/paravirt_types.h
14200@@ -84,7 +84,7 @@ struct pv_init_ops {
14201 */
14202 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
14203 unsigned long addr, unsigned len);
14204-};
14205+} __no_const;
14206
14207
14208 struct pv_lazy_ops {
14209@@ -98,7 +98,7 @@ struct pv_time_ops {
14210 unsigned long long (*sched_clock)(void);
14211 unsigned long long (*steal_clock)(int cpu);
14212 unsigned long (*get_tsc_khz)(void);
14213-};
14214+} __no_const;
14215
14216 struct pv_cpu_ops {
14217 /* hooks for various privileged instructions */
14218@@ -192,7 +192,7 @@ struct pv_cpu_ops {
14219
14220 void (*start_context_switch)(struct task_struct *prev);
14221 void (*end_context_switch)(struct task_struct *next);
14222-};
14223+} __no_const;
14224
14225 struct pv_irq_ops {
14226 /*
14227@@ -223,7 +223,7 @@ struct pv_apic_ops {
14228 unsigned long start_eip,
14229 unsigned long start_esp);
14230 #endif
14231-};
14232+} __no_const;
14233
14234 struct pv_mmu_ops {
14235 unsigned long (*read_cr2)(void);
14236@@ -313,6 +313,7 @@ struct pv_mmu_ops {
14237 struct paravirt_callee_save make_pud;
14238
14239 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
14240+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
14241 #endif /* PAGETABLE_LEVELS == 4 */
14242 #endif /* PAGETABLE_LEVELS >= 3 */
14243
14244@@ -324,6 +325,12 @@ struct pv_mmu_ops {
14245 an mfn. We can tell which is which from the index. */
14246 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
14247 phys_addr_t phys, pgprot_t flags);
14248+
14249+#ifdef CONFIG_PAX_KERNEXEC
14250+ unsigned long (*pax_open_kernel)(void);
14251+ unsigned long (*pax_close_kernel)(void);
14252+#endif
14253+
14254 };
14255
14256 struct arch_spinlock;
14257@@ -334,7 +341,7 @@ struct pv_lock_ops {
14258 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
14259 int (*spin_trylock)(struct arch_spinlock *lock);
14260 void (*spin_unlock)(struct arch_spinlock *lock);
14261-};
14262+} __no_const;
14263
14264 /* This contains all the paravirt structures: we get a convenient
14265 * number for each function using the offset which we use to indicate
14266diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
14267index b4389a4..7024269 100644
14268--- a/arch/x86/include/asm/pgalloc.h
14269+++ b/arch/x86/include/asm/pgalloc.h
14270@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
14271 pmd_t *pmd, pte_t *pte)
14272 {
14273 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
14274+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
14275+}
14276+
14277+static inline void pmd_populate_user(struct mm_struct *mm,
14278+ pmd_t *pmd, pte_t *pte)
14279+{
14280+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
14281 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
14282 }
14283
14284@@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
14285
14286 #ifdef CONFIG_X86_PAE
14287 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
14288+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
14289+{
14290+ pud_populate(mm, pudp, pmd);
14291+}
14292 #else /* !CONFIG_X86_PAE */
14293 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
14294 {
14295 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
14296 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
14297 }
14298+
14299+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
14300+{
14301+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
14302+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
14303+}
14304 #endif /* CONFIG_X86_PAE */
14305
14306 #if PAGETABLE_LEVELS > 3
14307@@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
14308 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
14309 }
14310
14311+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
14312+{
14313+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
14314+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
14315+}
14316+
14317 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
14318 {
14319 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
14320diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
14321index f2b489c..4f7e2e5 100644
14322--- a/arch/x86/include/asm/pgtable-2level.h
14323+++ b/arch/x86/include/asm/pgtable-2level.h
14324@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
14325
14326 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
14327 {
14328+ pax_open_kernel();
14329 *pmdp = pmd;
14330+ pax_close_kernel();
14331 }
14332
14333 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
14334diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
14335index 4cc9f2b..5fd9226 100644
14336--- a/arch/x86/include/asm/pgtable-3level.h
14337+++ b/arch/x86/include/asm/pgtable-3level.h
14338@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
14339
14340 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
14341 {
14342+ pax_open_kernel();
14343 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
14344+ pax_close_kernel();
14345 }
14346
14347 static inline void native_set_pud(pud_t *pudp, pud_t pud)
14348 {
14349+ pax_open_kernel();
14350 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
14351+ pax_close_kernel();
14352 }
14353
14354 /*
14355diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
14356index 1e67223..dd6e7ea 100644
14357--- a/arch/x86/include/asm/pgtable.h
14358+++ b/arch/x86/include/asm/pgtable.h
14359@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
14360
14361 #ifndef __PAGETABLE_PUD_FOLDED
14362 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
14363+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
14364 #define pgd_clear(pgd) native_pgd_clear(pgd)
14365 #endif
14366
14367@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
14368
14369 #define arch_end_context_switch(prev) do {} while(0)
14370
14371+#define pax_open_kernel() native_pax_open_kernel()
14372+#define pax_close_kernel() native_pax_close_kernel()
14373 #endif /* CONFIG_PARAVIRT */
14374
14375+#define __HAVE_ARCH_PAX_OPEN_KERNEL
14376+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
14377+
14378+#ifdef CONFIG_PAX_KERNEXEC
14379+static inline unsigned long native_pax_open_kernel(void)
14380+{
14381+ unsigned long cr0;
14382+
14383+ preempt_disable();
14384+ barrier();
14385+ cr0 = read_cr0() ^ X86_CR0_WP;
14386+ BUG_ON(cr0 & X86_CR0_WP);
14387+ write_cr0(cr0);
14388+ return cr0 ^ X86_CR0_WP;
14389+}
14390+
14391+static inline unsigned long native_pax_close_kernel(void)
14392+{
14393+ unsigned long cr0;
14394+
14395+ cr0 = read_cr0() ^ X86_CR0_WP;
14396+ BUG_ON(!(cr0 & X86_CR0_WP));
14397+ write_cr0(cr0);
14398+ barrier();
14399+ preempt_enable_no_resched();
14400+ return cr0 ^ X86_CR0_WP;
14401+}
14402+#else
14403+static inline unsigned long native_pax_open_kernel(void) { return 0; }
14404+static inline unsigned long native_pax_close_kernel(void) { return 0; }
14405+#endif
14406+
14407 /*
14408 * The following only work if pte_present() is true.
14409 * Undefined behaviour if not..
14410 */
14411+static inline int pte_user(pte_t pte)
14412+{
14413+ return pte_val(pte) & _PAGE_USER;
14414+}
14415+
14416 static inline int pte_dirty(pte_t pte)
14417 {
14418 return pte_flags(pte) & _PAGE_DIRTY;
14419@@ -147,6 +187,11 @@ static inline unsigned long pud_pfn(pud_t pud)
14420 return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
14421 }
14422
14423+static inline unsigned long pgd_pfn(pgd_t pgd)
14424+{
14425+ return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
14426+}
14427+
14428 #define pte_page(pte) pfn_to_page(pte_pfn(pte))
14429
14430 static inline int pmd_large(pmd_t pte)
14431@@ -200,9 +245,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
14432 return pte_clear_flags(pte, _PAGE_RW);
14433 }
14434
14435+static inline pte_t pte_mkread(pte_t pte)
14436+{
14437+ return __pte(pte_val(pte) | _PAGE_USER);
14438+}
14439+
14440 static inline pte_t pte_mkexec(pte_t pte)
14441 {
14442- return pte_clear_flags(pte, _PAGE_NX);
14443+#ifdef CONFIG_X86_PAE
14444+ if (__supported_pte_mask & _PAGE_NX)
14445+ return pte_clear_flags(pte, _PAGE_NX);
14446+ else
14447+#endif
14448+ return pte_set_flags(pte, _PAGE_USER);
14449+}
14450+
14451+static inline pte_t pte_exprotect(pte_t pte)
14452+{
14453+#ifdef CONFIG_X86_PAE
14454+ if (__supported_pte_mask & _PAGE_NX)
14455+ return pte_set_flags(pte, _PAGE_NX);
14456+ else
14457+#endif
14458+ return pte_clear_flags(pte, _PAGE_USER);
14459 }
14460
14461 static inline pte_t pte_mkdirty(pte_t pte)
14462@@ -394,6 +459,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
14463 #endif
14464
14465 #ifndef __ASSEMBLY__
14466+
14467+#ifdef CONFIG_PAX_PER_CPU_PGD
14468+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
14469+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
14470+{
14471+ return cpu_pgd[cpu];
14472+}
14473+#endif
14474+
14475 #include <linux/mm_types.h>
14476 #include <linux/log2.h>
14477
14478@@ -529,7 +603,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
14479 * Currently stuck as a macro due to indirect forward reference to
14480 * linux/mmzone.h's __section_mem_map_addr() definition:
14481 */
14482-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
14483+#define pud_page(pud) pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
14484
14485 /* Find an entry in the second-level page table.. */
14486 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
14487@@ -569,7 +643,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
14488 * Currently stuck as a macro due to indirect forward reference to
14489 * linux/mmzone.h's __section_mem_map_addr() definition:
14490 */
14491-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
14492+#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
14493
14494 /* to find an entry in a page-table-directory. */
14495 static inline unsigned long pud_index(unsigned long address)
14496@@ -584,7 +658,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
14497
14498 static inline int pgd_bad(pgd_t pgd)
14499 {
14500- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
14501+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
14502 }
14503
14504 static inline int pgd_none(pgd_t pgd)
14505@@ -607,7 +681,12 @@ static inline int pgd_none(pgd_t pgd)
14506 * pgd_offset() returns a (pgd_t *)
14507 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
14508 */
14509-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
14510+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
14511+
14512+#ifdef CONFIG_PAX_PER_CPU_PGD
14513+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
14514+#endif
14515+
14516 /*
14517 * a shortcut which implies the use of the kernel's pgd, instead
14518 * of a process's
14519@@ -618,6 +697,22 @@ static inline int pgd_none(pgd_t pgd)
14520 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
14521 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
14522
14523+#ifdef CONFIG_X86_32
14524+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
14525+#else
14526+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
14527+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
14528+
14529+#ifdef CONFIG_PAX_MEMORY_UDEREF
14530+#ifdef __ASSEMBLY__
14531+#define pax_user_shadow_base pax_user_shadow_base(%rip)
14532+#else
14533+extern unsigned long pax_user_shadow_base;
14534+#endif
14535+#endif
14536+
14537+#endif
14538+
14539 #ifndef __ASSEMBLY__
14540
14541 extern int direct_gbpages;
14542@@ -784,11 +879,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
14543 * dst and src can be on the same page, but the range must not overlap,
14544 * and must not cross a page boundary.
14545 */
14546-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
14547+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
14548 {
14549- memcpy(dst, src, count * sizeof(pgd_t));
14550+ pax_open_kernel();
14551+ while (count--)
14552+ *dst++ = *src++;
14553+ pax_close_kernel();
14554 }
14555
14556+#ifdef CONFIG_PAX_PER_CPU_PGD
14557+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
14558+#endif
14559+
14560+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
14561+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
14562+#else
14563+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
14564+#endif
14565+
14566 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
14567 static inline int page_level_shift(enum pg_level level)
14568 {
14569diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
14570index 9ee3221..b979c6b 100644
14571--- a/arch/x86/include/asm/pgtable_32.h
14572+++ b/arch/x86/include/asm/pgtable_32.h
14573@@ -25,9 +25,6 @@
14574 struct mm_struct;
14575 struct vm_area_struct;
14576
14577-extern pgd_t swapper_pg_dir[1024];
14578-extern pgd_t initial_page_table[1024];
14579-
14580 static inline void pgtable_cache_init(void) { }
14581 static inline void check_pgt_cache(void) { }
14582 void paging_init(void);
14583@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
14584 # include <asm/pgtable-2level.h>
14585 #endif
14586
14587+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
14588+extern pgd_t initial_page_table[PTRS_PER_PGD];
14589+#ifdef CONFIG_X86_PAE
14590+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
14591+#endif
14592+
14593 #if defined(CONFIG_HIGHPTE)
14594 #define pte_offset_map(dir, address) \
14595 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
14596@@ -62,12 +65,17 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
14597 /* Clear a kernel PTE and flush it from the TLB */
14598 #define kpte_clear_flush(ptep, vaddr) \
14599 do { \
14600+ pax_open_kernel(); \
14601 pte_clear(&init_mm, (vaddr), (ptep)); \
14602+ pax_close_kernel(); \
14603 __flush_tlb_one((vaddr)); \
14604 } while (0)
14605
14606 #endif /* !__ASSEMBLY__ */
14607
14608+#define HAVE_ARCH_UNMAPPED_AREA
14609+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
14610+
14611 /*
14612 * kern_addr_valid() is (1) for FLATMEM and (0) for
14613 * SPARSEMEM and DISCONTIGMEM
14614diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
14615index ed5903b..c7fe163 100644
14616--- a/arch/x86/include/asm/pgtable_32_types.h
14617+++ b/arch/x86/include/asm/pgtable_32_types.h
14618@@ -8,7 +8,7 @@
14619 */
14620 #ifdef CONFIG_X86_PAE
14621 # include <asm/pgtable-3level_types.h>
14622-# define PMD_SIZE (1UL << PMD_SHIFT)
14623+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
14624 # define PMD_MASK (~(PMD_SIZE - 1))
14625 #else
14626 # include <asm/pgtable-2level_types.h>
14627@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
14628 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
14629 #endif
14630
14631+#ifdef CONFIG_PAX_KERNEXEC
14632+#ifndef __ASSEMBLY__
14633+extern unsigned char MODULES_EXEC_VADDR[];
14634+extern unsigned char MODULES_EXEC_END[];
14635+#endif
14636+#include <asm/boot.h>
14637+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
14638+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
14639+#else
14640+#define ktla_ktva(addr) (addr)
14641+#define ktva_ktla(addr) (addr)
14642+#endif
14643+
14644 #define MODULES_VADDR VMALLOC_START
14645 #define MODULES_END VMALLOC_END
14646 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
14647diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
14648index e22c1db..23a625a 100644
14649--- a/arch/x86/include/asm/pgtable_64.h
14650+++ b/arch/x86/include/asm/pgtable_64.h
14651@@ -16,10 +16,14 @@
14652
14653 extern pud_t level3_kernel_pgt[512];
14654 extern pud_t level3_ident_pgt[512];
14655+extern pud_t level3_vmalloc_start_pgt[512];
14656+extern pud_t level3_vmalloc_end_pgt[512];
14657+extern pud_t level3_vmemmap_pgt[512];
14658+extern pud_t level2_vmemmap_pgt[512];
14659 extern pmd_t level2_kernel_pgt[512];
14660 extern pmd_t level2_fixmap_pgt[512];
14661-extern pmd_t level2_ident_pgt[512];
14662-extern pgd_t init_level4_pgt[];
14663+extern pmd_t level2_ident_pgt[512*2];
14664+extern pgd_t init_level4_pgt[512];
14665
14666 #define swapper_pg_dir init_level4_pgt
14667
14668@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
14669
14670 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
14671 {
14672+ pax_open_kernel();
14673 *pmdp = pmd;
14674+ pax_close_kernel();
14675 }
14676
14677 static inline void native_pmd_clear(pmd_t *pmd)
14678@@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
14679
14680 static inline void native_set_pud(pud_t *pudp, pud_t pud)
14681 {
14682+ pax_open_kernel();
14683 *pudp = pud;
14684+ pax_close_kernel();
14685 }
14686
14687 static inline void native_pud_clear(pud_t *pud)
14688@@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
14689
14690 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
14691 {
14692+ pax_open_kernel();
14693+ *pgdp = pgd;
14694+ pax_close_kernel();
14695+}
14696+
14697+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
14698+{
14699 *pgdp = pgd;
14700 }
14701
14702diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
14703index 2d88344..4679fc3 100644
14704--- a/arch/x86/include/asm/pgtable_64_types.h
14705+++ b/arch/x86/include/asm/pgtable_64_types.h
14706@@ -61,6 +61,11 @@ typedef struct { pteval_t pte; } pte_t;
14707 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
14708 #define MODULES_END _AC(0xffffffffff000000, UL)
14709 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
14710+#define MODULES_EXEC_VADDR MODULES_VADDR
14711+#define MODULES_EXEC_END MODULES_END
14712+
14713+#define ktla_ktva(addr) (addr)
14714+#define ktva_ktla(addr) (addr)
14715
14716 #define EARLY_DYNAMIC_PAGE_TABLES 64
14717
14718diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
14719index e642300..0ef8f31 100644
14720--- a/arch/x86/include/asm/pgtable_types.h
14721+++ b/arch/x86/include/asm/pgtable_types.h
14722@@ -16,13 +16,12 @@
14723 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
14724 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
14725 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
14726-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
14727+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
14728 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
14729 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
14730 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
14731-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
14732-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
14733-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
14734+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
14735+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
14736 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
14737
14738 /* If _PAGE_BIT_PRESENT is clear, we use these: */
14739@@ -40,7 +39,6 @@
14740 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
14741 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
14742 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
14743-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
14744 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
14745 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
14746 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
14747@@ -57,8 +55,10 @@
14748
14749 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
14750 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
14751-#else
14752+#elif defined(CONFIG_KMEMCHECK)
14753 #define _PAGE_NX (_AT(pteval_t, 0))
14754+#else
14755+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
14756 #endif
14757
14758 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
14759@@ -116,6 +116,9 @@
14760 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
14761 _PAGE_ACCESSED)
14762
14763+#define PAGE_READONLY_NOEXEC PAGE_READONLY
14764+#define PAGE_SHARED_NOEXEC PAGE_SHARED
14765+
14766 #define __PAGE_KERNEL_EXEC \
14767 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
14768 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
14769@@ -126,7 +129,7 @@
14770 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
14771 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
14772 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
14773-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
14774+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
14775 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
14776 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
14777 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
14778@@ -188,8 +191,8 @@
14779 * bits are combined, this will alow user to access the high address mapped
14780 * VDSO in the presence of CONFIG_COMPAT_VDSO
14781 */
14782-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
14783-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
14784+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
14785+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
14786 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
14787 #endif
14788
14789@@ -227,7 +230,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
14790 {
14791 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
14792 }
14793+#endif
14794
14795+#if PAGETABLE_LEVELS == 3
14796+#include <asm-generic/pgtable-nopud.h>
14797+#endif
14798+
14799+#if PAGETABLE_LEVELS == 2
14800+#include <asm-generic/pgtable-nopmd.h>
14801+#endif
14802+
14803+#ifndef __ASSEMBLY__
14804 #if PAGETABLE_LEVELS > 3
14805 typedef struct { pudval_t pud; } pud_t;
14806
14807@@ -241,8 +254,6 @@ static inline pudval_t native_pud_val(pud_t pud)
14808 return pud.pud;
14809 }
14810 #else
14811-#include <asm-generic/pgtable-nopud.h>
14812-
14813 static inline pudval_t native_pud_val(pud_t pud)
14814 {
14815 return native_pgd_val(pud.pgd);
14816@@ -262,8 +273,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
14817 return pmd.pmd;
14818 }
14819 #else
14820-#include <asm-generic/pgtable-nopmd.h>
14821-
14822 static inline pmdval_t native_pmd_val(pmd_t pmd)
14823 {
14824 return native_pgd_val(pmd.pud.pgd);
14825@@ -303,7 +312,6 @@ typedef struct page *pgtable_t;
14826
14827 extern pteval_t __supported_pte_mask;
14828 extern void set_nx(void);
14829-extern int nx_enabled;
14830
14831 #define pgprot_writecombine pgprot_writecombine
14832 extern pgprot_t pgprot_writecombine(pgprot_t prot);
14833diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
14834index 22224b3..4080dab 100644
14835--- a/arch/x86/include/asm/processor.h
14836+++ b/arch/x86/include/asm/processor.h
14837@@ -282,7 +282,7 @@ struct tss_struct {
14838
14839 } ____cacheline_aligned;
14840
14841-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
14842+extern struct tss_struct init_tss[NR_CPUS];
14843
14844 /*
14845 * Save the original ist values for checking stack pointers during debugging
14846@@ -823,11 +823,18 @@ static inline void spin_lock_prefetch(const void *x)
14847 */
14848 #define TASK_SIZE PAGE_OFFSET
14849 #define TASK_SIZE_MAX TASK_SIZE
14850+
14851+#ifdef CONFIG_PAX_SEGMEXEC
14852+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
14853+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
14854+#else
14855 #define STACK_TOP TASK_SIZE
14856-#define STACK_TOP_MAX STACK_TOP
14857+#endif
14858+
14859+#define STACK_TOP_MAX TASK_SIZE
14860
14861 #define INIT_THREAD { \
14862- .sp0 = sizeof(init_stack) + (long)&init_stack, \
14863+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
14864 .vm86_info = NULL, \
14865 .sysenter_cs = __KERNEL_CS, \
14866 .io_bitmap_ptr = NULL, \
14867@@ -841,7 +848,7 @@ static inline void spin_lock_prefetch(const void *x)
14868 */
14869 #define INIT_TSS { \
14870 .x86_tss = { \
14871- .sp0 = sizeof(init_stack) + (long)&init_stack, \
14872+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
14873 .ss0 = __KERNEL_DS, \
14874 .ss1 = __KERNEL_CS, \
14875 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
14876@@ -852,11 +859,7 @@ static inline void spin_lock_prefetch(const void *x)
14877 extern unsigned long thread_saved_pc(struct task_struct *tsk);
14878
14879 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
14880-#define KSTK_TOP(info) \
14881-({ \
14882- unsigned long *__ptr = (unsigned long *)(info); \
14883- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
14884-})
14885+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
14886
14887 /*
14888 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
14889@@ -871,7 +874,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
14890 #define task_pt_regs(task) \
14891 ({ \
14892 struct pt_regs *__regs__; \
14893- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
14894+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
14895 __regs__ - 1; \
14896 })
14897
14898@@ -881,13 +884,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
14899 /*
14900 * User space process size. 47bits minus one guard page.
14901 */
14902-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
14903+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
14904
14905 /* This decides where the kernel will search for a free chunk of vm
14906 * space during mmap's.
14907 */
14908 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
14909- 0xc0000000 : 0xFFFFe000)
14910+ 0xc0000000 : 0xFFFFf000)
14911
14912 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
14913 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
14914@@ -898,11 +901,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
14915 #define STACK_TOP_MAX TASK_SIZE_MAX
14916
14917 #define INIT_THREAD { \
14918- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
14919+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
14920 }
14921
14922 #define INIT_TSS { \
14923- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
14924+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
14925 }
14926
14927 /*
14928@@ -930,6 +933,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
14929 */
14930 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
14931
14932+#ifdef CONFIG_PAX_SEGMEXEC
14933+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
14934+#endif
14935+
14936 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
14937
14938 /* Get/set a process' ability to use the timestamp counter instruction */
14939@@ -970,7 +977,7 @@ unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
14940 return ratio;
14941 }
14942
14943-extern unsigned long arch_align_stack(unsigned long sp);
14944+#define arch_align_stack(x) ((x) & ~0xfUL)
14945 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
14946
14947 void default_idle(void);
14948@@ -980,6 +987,6 @@ bool xen_set_default_idle(void);
14949 #define xen_set_default_idle 0
14950 #endif
14951
14952-void stop_this_cpu(void *dummy);
14953+void stop_this_cpu(void *dummy) __noreturn;
14954
14955 #endif /* _ASM_X86_PROCESSOR_H */
14956diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
14957index 942a086..6c26446 100644
14958--- a/arch/x86/include/asm/ptrace.h
14959+++ b/arch/x86/include/asm/ptrace.h
14960@@ -85,28 +85,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
14961 }
14962
14963 /*
14964- * user_mode_vm(regs) determines whether a register set came from user mode.
14965+ * user_mode(regs) determines whether a register set came from user mode.
14966 * This is true if V8086 mode was enabled OR if the register set was from
14967 * protected mode with RPL-3 CS value. This tricky test checks that with
14968 * one comparison. Many places in the kernel can bypass this full check
14969- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
14970+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
14971+ * be used.
14972 */
14973-static inline int user_mode(struct pt_regs *regs)
14974+static inline int user_mode_novm(struct pt_regs *regs)
14975 {
14976 #ifdef CONFIG_X86_32
14977 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
14978 #else
14979- return !!(regs->cs & 3);
14980+ return !!(regs->cs & SEGMENT_RPL_MASK);
14981 #endif
14982 }
14983
14984-static inline int user_mode_vm(struct pt_regs *regs)
14985+static inline int user_mode(struct pt_regs *regs)
14986 {
14987 #ifdef CONFIG_X86_32
14988 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
14989 USER_RPL;
14990 #else
14991- return user_mode(regs);
14992+ return user_mode_novm(regs);
14993 #endif
14994 }
14995
14996@@ -122,15 +123,16 @@ static inline int v8086_mode(struct pt_regs *regs)
14997 #ifdef CONFIG_X86_64
14998 static inline bool user_64bit_mode(struct pt_regs *regs)
14999 {
15000+ unsigned long cs = regs->cs & 0xffff;
15001 #ifndef CONFIG_PARAVIRT
15002 /*
15003 * On non-paravirt systems, this is the only long mode CPL 3
15004 * selector. We do not allow long mode selectors in the LDT.
15005 */
15006- return regs->cs == __USER_CS;
15007+ return cs == __USER_CS;
15008 #else
15009 /* Headers are too twisted for this to go in paravirt.h. */
15010- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
15011+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
15012 #endif
15013 }
15014
15015@@ -181,9 +183,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
15016 * Traps from the kernel do not save sp and ss.
15017 * Use the helper function to retrieve sp.
15018 */
15019- if (offset == offsetof(struct pt_regs, sp) &&
15020- regs->cs == __KERNEL_CS)
15021- return kernel_stack_pointer(regs);
15022+ if (offset == offsetof(struct pt_regs, sp)) {
15023+ unsigned long cs = regs->cs & 0xffff;
15024+ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
15025+ return kernel_stack_pointer(regs);
15026+ }
15027 #endif
15028 return *(unsigned long *)((unsigned long)regs + offset);
15029 }
15030diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
15031index 9c6b890..5305f53 100644
15032--- a/arch/x86/include/asm/realmode.h
15033+++ b/arch/x86/include/asm/realmode.h
15034@@ -22,16 +22,14 @@ struct real_mode_header {
15035 #endif
15036 /* APM/BIOS reboot */
15037 u32 machine_real_restart_asm;
15038-#ifdef CONFIG_X86_64
15039 u32 machine_real_restart_seg;
15040-#endif
15041 };
15042
15043 /* This must match data at trampoline_32/64.S */
15044 struct trampoline_header {
15045 #ifdef CONFIG_X86_32
15046 u32 start;
15047- u16 gdt_pad;
15048+ u16 boot_cs;
15049 u16 gdt_limit;
15050 u32 gdt_base;
15051 #else
15052diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
15053index a82c4f1..ac45053 100644
15054--- a/arch/x86/include/asm/reboot.h
15055+++ b/arch/x86/include/asm/reboot.h
15056@@ -6,13 +6,13 @@
15057 struct pt_regs;
15058
15059 struct machine_ops {
15060- void (*restart)(char *cmd);
15061- void (*halt)(void);
15062- void (*power_off)(void);
15063+ void (* __noreturn restart)(char *cmd);
15064+ void (* __noreturn halt)(void);
15065+ void (* __noreturn power_off)(void);
15066 void (*shutdown)(void);
15067 void (*crash_shutdown)(struct pt_regs *);
15068- void (*emergency_restart)(void);
15069-};
15070+ void (* __noreturn emergency_restart)(void);
15071+} __no_const;
15072
15073 extern struct machine_ops machine_ops;
15074
15075diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
15076index cad82c9..2e5c5c1 100644
15077--- a/arch/x86/include/asm/rwsem.h
15078+++ b/arch/x86/include/asm/rwsem.h
15079@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
15080 {
15081 asm volatile("# beginning down_read\n\t"
15082 LOCK_PREFIX _ASM_INC "(%1)\n\t"
15083+
15084+#ifdef CONFIG_PAX_REFCOUNT
15085+ "jno 0f\n"
15086+ LOCK_PREFIX _ASM_DEC "(%1)\n"
15087+ "int $4\n0:\n"
15088+ _ASM_EXTABLE(0b, 0b)
15089+#endif
15090+
15091 /* adds 0x00000001 */
15092 " jns 1f\n"
15093 " call call_rwsem_down_read_failed\n"
15094@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
15095 "1:\n\t"
15096 " mov %1,%2\n\t"
15097 " add %3,%2\n\t"
15098+
15099+#ifdef CONFIG_PAX_REFCOUNT
15100+ "jno 0f\n"
15101+ "sub %3,%2\n"
15102+ "int $4\n0:\n"
15103+ _ASM_EXTABLE(0b, 0b)
15104+#endif
15105+
15106 " jle 2f\n\t"
15107 LOCK_PREFIX " cmpxchg %2,%0\n\t"
15108 " jnz 1b\n\t"
15109@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
15110 long tmp;
15111 asm volatile("# beginning down_write\n\t"
15112 LOCK_PREFIX " xadd %1,(%2)\n\t"
15113+
15114+#ifdef CONFIG_PAX_REFCOUNT
15115+ "jno 0f\n"
15116+ "mov %1,(%2)\n"
15117+ "int $4\n0:\n"
15118+ _ASM_EXTABLE(0b, 0b)
15119+#endif
15120+
15121 /* adds 0xffff0001, returns the old value */
15122 " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
15123 /* was the active mask 0 before? */
15124@@ -155,6 +179,14 @@ static inline void __up_read(struct rw_semaphore *sem)
15125 long tmp;
15126 asm volatile("# beginning __up_read\n\t"
15127 LOCK_PREFIX " xadd %1,(%2)\n\t"
15128+
15129+#ifdef CONFIG_PAX_REFCOUNT
15130+ "jno 0f\n"
15131+ "mov %1,(%2)\n"
15132+ "int $4\n0:\n"
15133+ _ASM_EXTABLE(0b, 0b)
15134+#endif
15135+
15136 /* subtracts 1, returns the old value */
15137 " jns 1f\n\t"
15138 " call call_rwsem_wake\n" /* expects old value in %edx */
15139@@ -173,6 +205,14 @@ static inline void __up_write(struct rw_semaphore *sem)
15140 long tmp;
15141 asm volatile("# beginning __up_write\n\t"
15142 LOCK_PREFIX " xadd %1,(%2)\n\t"
15143+
15144+#ifdef CONFIG_PAX_REFCOUNT
15145+ "jno 0f\n"
15146+ "mov %1,(%2)\n"
15147+ "int $4\n0:\n"
15148+ _ASM_EXTABLE(0b, 0b)
15149+#endif
15150+
15151 /* subtracts 0xffff0001, returns the old value */
15152 " jns 1f\n\t"
15153 " call call_rwsem_wake\n" /* expects old value in %edx */
15154@@ -190,6 +230,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
15155 {
15156 asm volatile("# beginning __downgrade_write\n\t"
15157 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
15158+
15159+#ifdef CONFIG_PAX_REFCOUNT
15160+ "jno 0f\n"
15161+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
15162+ "int $4\n0:\n"
15163+ _ASM_EXTABLE(0b, 0b)
15164+#endif
15165+
15166 /*
15167 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
15168 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
15169@@ -208,7 +256,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
15170 */
15171 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
15172 {
15173- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
15174+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
15175+
15176+#ifdef CONFIG_PAX_REFCOUNT
15177+ "jno 0f\n"
15178+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
15179+ "int $4\n0:\n"
15180+ _ASM_EXTABLE(0b, 0b)
15181+#endif
15182+
15183 : "+m" (sem->count)
15184 : "er" (delta));
15185 }
15186@@ -218,7 +274,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
15187 */
15188 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
15189 {
15190- return delta + xadd(&sem->count, delta);
15191+ return delta + xadd_check_overflow(&sem->count, delta);
15192 }
15193
15194 #endif /* __KERNEL__ */
15195diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
15196index c48a950..c6d7468 100644
15197--- a/arch/x86/include/asm/segment.h
15198+++ b/arch/x86/include/asm/segment.h
15199@@ -64,10 +64,15 @@
15200 * 26 - ESPFIX small SS
15201 * 27 - per-cpu [ offset to per-cpu data area ]
15202 * 28 - stack_canary-20 [ for stack protector ]
15203- * 29 - unused
15204- * 30 - unused
15205+ * 29 - PCI BIOS CS
15206+ * 30 - PCI BIOS DS
15207 * 31 - TSS for double fault handler
15208 */
15209+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
15210+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
15211+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
15212+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
15213+
15214 #define GDT_ENTRY_TLS_MIN 6
15215 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
15216
15217@@ -79,6 +84,8 @@
15218
15219 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
15220
15221+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
15222+
15223 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
15224
15225 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
15226@@ -104,6 +111,12 @@
15227 #define __KERNEL_STACK_CANARY 0
15228 #endif
15229
15230+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
15231+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
15232+
15233+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
15234+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
15235+
15236 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
15237
15238 /*
15239@@ -141,7 +154,7 @@
15240 */
15241
15242 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
15243-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
15244+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
15245
15246
15247 #else
15248@@ -165,6 +178,8 @@
15249 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
15250 #define __USER32_DS __USER_DS
15251
15252+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
15253+
15254 #define GDT_ENTRY_TSS 8 /* needs two entries */
15255 #define GDT_ENTRY_LDT 10 /* needs two entries */
15256 #define GDT_ENTRY_TLS_MIN 12
15257@@ -185,6 +200,7 @@
15258 #endif
15259
15260 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
15261+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
15262 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
15263 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
15264 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
15265@@ -265,7 +281,7 @@ static inline unsigned long get_limit(unsigned long segment)
15266 {
15267 unsigned long __limit;
15268 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
15269- return __limit + 1;
15270+ return __limit;
15271 }
15272
15273 #endif /* !__ASSEMBLY__ */
15274diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
15275index b073aae..39f9bdd 100644
15276--- a/arch/x86/include/asm/smp.h
15277+++ b/arch/x86/include/asm/smp.h
15278@@ -36,7 +36,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
15279 /* cpus sharing the last level cache: */
15280 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
15281 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
15282-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
15283+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
15284
15285 static inline struct cpumask *cpu_sibling_mask(int cpu)
15286 {
15287@@ -79,7 +79,7 @@ struct smp_ops {
15288
15289 void (*send_call_func_ipi)(const struct cpumask *mask);
15290 void (*send_call_func_single_ipi)(int cpu);
15291-};
15292+} __no_const;
15293
15294 /* Globals due to paravirt */
15295 extern void set_cpu_sibling_map(int cpu);
15296@@ -191,14 +191,8 @@ extern unsigned disabled_cpus __cpuinitdata;
15297 extern int safe_smp_processor_id(void);
15298
15299 #elif defined(CONFIG_X86_64_SMP)
15300-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
15301-
15302-#define stack_smp_processor_id() \
15303-({ \
15304- struct thread_info *ti; \
15305- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
15306- ti->cpu; \
15307-})
15308+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
15309+#define stack_smp_processor_id() raw_smp_processor_id()
15310 #define safe_smp_processor_id() smp_processor_id()
15311
15312 #endif
15313diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
15314index 33692ea..350a534 100644
15315--- a/arch/x86/include/asm/spinlock.h
15316+++ b/arch/x86/include/asm/spinlock.h
15317@@ -172,6 +172,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
15318 static inline void arch_read_lock(arch_rwlock_t *rw)
15319 {
15320 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
15321+
15322+#ifdef CONFIG_PAX_REFCOUNT
15323+ "jno 0f\n"
15324+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
15325+ "int $4\n0:\n"
15326+ _ASM_EXTABLE(0b, 0b)
15327+#endif
15328+
15329 "jns 1f\n"
15330 "call __read_lock_failed\n\t"
15331 "1:\n"
15332@@ -181,6 +189,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
15333 static inline void arch_write_lock(arch_rwlock_t *rw)
15334 {
15335 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
15336+
15337+#ifdef CONFIG_PAX_REFCOUNT
15338+ "jno 0f\n"
15339+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
15340+ "int $4\n0:\n"
15341+ _ASM_EXTABLE(0b, 0b)
15342+#endif
15343+
15344 "jz 1f\n"
15345 "call __write_lock_failed\n\t"
15346 "1:\n"
15347@@ -210,13 +226,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
15348
15349 static inline void arch_read_unlock(arch_rwlock_t *rw)
15350 {
15351- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
15352+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
15353+
15354+#ifdef CONFIG_PAX_REFCOUNT
15355+ "jno 0f\n"
15356+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
15357+ "int $4\n0:\n"
15358+ _ASM_EXTABLE(0b, 0b)
15359+#endif
15360+
15361 :"+m" (rw->lock) : : "memory");
15362 }
15363
15364 static inline void arch_write_unlock(arch_rwlock_t *rw)
15365 {
15366- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
15367+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
15368+
15369+#ifdef CONFIG_PAX_REFCOUNT
15370+ "jno 0f\n"
15371+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
15372+ "int $4\n0:\n"
15373+ _ASM_EXTABLE(0b, 0b)
15374+#endif
15375+
15376 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
15377 }
15378
15379diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
15380index 6a99859..03cb807 100644
15381--- a/arch/x86/include/asm/stackprotector.h
15382+++ b/arch/x86/include/asm/stackprotector.h
15383@@ -47,7 +47,7 @@
15384 * head_32 for boot CPU and setup_per_cpu_areas() for others.
15385 */
15386 #define GDT_STACK_CANARY_INIT \
15387- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
15388+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
15389
15390 /*
15391 * Initialize the stackprotector canary value.
15392@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
15393
15394 static inline void load_stack_canary_segment(void)
15395 {
15396-#ifdef CONFIG_X86_32
15397+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
15398 asm volatile ("mov %0, %%gs" : : "r" (0));
15399 #endif
15400 }
15401diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
15402index 70bbe39..4ae2bd4 100644
15403--- a/arch/x86/include/asm/stacktrace.h
15404+++ b/arch/x86/include/asm/stacktrace.h
15405@@ -11,28 +11,20 @@
15406
15407 extern int kstack_depth_to_print;
15408
15409-struct thread_info;
15410+struct task_struct;
15411 struct stacktrace_ops;
15412
15413-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
15414- unsigned long *stack,
15415- unsigned long bp,
15416- const struct stacktrace_ops *ops,
15417- void *data,
15418- unsigned long *end,
15419- int *graph);
15420+typedef unsigned long walk_stack_t(struct task_struct *task,
15421+ void *stack_start,
15422+ unsigned long *stack,
15423+ unsigned long bp,
15424+ const struct stacktrace_ops *ops,
15425+ void *data,
15426+ unsigned long *end,
15427+ int *graph);
15428
15429-extern unsigned long
15430-print_context_stack(struct thread_info *tinfo,
15431- unsigned long *stack, unsigned long bp,
15432- const struct stacktrace_ops *ops, void *data,
15433- unsigned long *end, int *graph);
15434-
15435-extern unsigned long
15436-print_context_stack_bp(struct thread_info *tinfo,
15437- unsigned long *stack, unsigned long bp,
15438- const struct stacktrace_ops *ops, void *data,
15439- unsigned long *end, int *graph);
15440+extern walk_stack_t print_context_stack;
15441+extern walk_stack_t print_context_stack_bp;
15442
15443 /* Generic stack tracer with callbacks */
15444
15445@@ -40,7 +32,7 @@ struct stacktrace_ops {
15446 void (*address)(void *data, unsigned long address, int reliable);
15447 /* On negative return stop dumping */
15448 int (*stack)(void *data, char *name);
15449- walk_stack_t walk_stack;
15450+ walk_stack_t *walk_stack;
15451 };
15452
15453 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
15454diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
15455index 4ec45b3..a4f0a8a 100644
15456--- a/arch/x86/include/asm/switch_to.h
15457+++ b/arch/x86/include/asm/switch_to.h
15458@@ -108,7 +108,7 @@ do { \
15459 "call __switch_to\n\t" \
15460 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
15461 __switch_canary \
15462- "movq %P[thread_info](%%rsi),%%r8\n\t" \
15463+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
15464 "movq %%rax,%%rdi\n\t" \
15465 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
15466 "jnz ret_from_fork\n\t" \
15467@@ -119,7 +119,7 @@ do { \
15468 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
15469 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
15470 [_tif_fork] "i" (_TIF_FORK), \
15471- [thread_info] "i" (offsetof(struct task_struct, stack)), \
15472+ [thread_info] "m" (current_tinfo), \
15473 [current_task] "m" (current_task) \
15474 __switch_canary_iparam \
15475 : "memory", "cc" __EXTRA_CLOBBER)
15476diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
15477index a1df6e8..e002940 100644
15478--- a/arch/x86/include/asm/thread_info.h
15479+++ b/arch/x86/include/asm/thread_info.h
15480@@ -10,6 +10,7 @@
15481 #include <linux/compiler.h>
15482 #include <asm/page.h>
15483 #include <asm/types.h>
15484+#include <asm/percpu.h>
15485
15486 /*
15487 * low level task data that entry.S needs immediate access to
15488@@ -23,7 +24,6 @@ struct exec_domain;
15489 #include <linux/atomic.h>
15490
15491 struct thread_info {
15492- struct task_struct *task; /* main task structure */
15493 struct exec_domain *exec_domain; /* execution domain */
15494 __u32 flags; /* low level flags */
15495 __u32 status; /* thread synchronous flags */
15496@@ -33,19 +33,13 @@ struct thread_info {
15497 mm_segment_t addr_limit;
15498 struct restart_block restart_block;
15499 void __user *sysenter_return;
15500-#ifdef CONFIG_X86_32
15501- unsigned long previous_esp; /* ESP of the previous stack in
15502- case of nested (IRQ) stacks
15503- */
15504- __u8 supervisor_stack[0];
15505-#endif
15506+ unsigned long lowest_stack;
15507 unsigned int sig_on_uaccess_error:1;
15508 unsigned int uaccess_err:1; /* uaccess failed */
15509 };
15510
15511-#define INIT_THREAD_INFO(tsk) \
15512+#define INIT_THREAD_INFO \
15513 { \
15514- .task = &tsk, \
15515 .exec_domain = &default_exec_domain, \
15516 .flags = 0, \
15517 .cpu = 0, \
15518@@ -56,7 +50,7 @@ struct thread_info {
15519 }, \
15520 }
15521
15522-#define init_thread_info (init_thread_union.thread_info)
15523+#define init_thread_info (init_thread_union.stack)
15524 #define init_stack (init_thread_union.stack)
15525
15526 #else /* !__ASSEMBLY__ */
15527@@ -97,6 +91,7 @@ struct thread_info {
15528 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
15529 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
15530 #define TIF_X32 30 /* 32-bit native x86-64 binary */
15531+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
15532
15533 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
15534 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
15535@@ -121,17 +116,18 @@ struct thread_info {
15536 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
15537 #define _TIF_ADDR32 (1 << TIF_ADDR32)
15538 #define _TIF_X32 (1 << TIF_X32)
15539+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
15540
15541 /* work to do in syscall_trace_enter() */
15542 #define _TIF_WORK_SYSCALL_ENTRY \
15543 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
15544 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
15545- _TIF_NOHZ)
15546+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
15547
15548 /* work to do in syscall_trace_leave() */
15549 #define _TIF_WORK_SYSCALL_EXIT \
15550 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
15551- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
15552+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
15553
15554 /* work to do on interrupt/exception return */
15555 #define _TIF_WORK_MASK \
15556@@ -142,7 +138,7 @@ struct thread_info {
15557 /* work to do on any return to user space */
15558 #define _TIF_ALLWORK_MASK \
15559 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
15560- _TIF_NOHZ)
15561+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
15562
15563 /* Only used for 64 bit */
15564 #define _TIF_DO_NOTIFY_MASK \
15565@@ -158,45 +154,40 @@ struct thread_info {
15566
15567 #define PREEMPT_ACTIVE 0x10000000
15568
15569-#ifdef CONFIG_X86_32
15570-
15571-#define STACK_WARN (THREAD_SIZE/8)
15572-/*
15573- * macros/functions for gaining access to the thread information structure
15574- *
15575- * preempt_count needs to be 1 initially, until the scheduler is functional.
15576- */
15577-#ifndef __ASSEMBLY__
15578-
15579-
15580-/* how to get the current stack pointer from C */
15581-register unsigned long current_stack_pointer asm("esp") __used;
15582-
15583-/* how to get the thread information struct from C */
15584-static inline struct thread_info *current_thread_info(void)
15585-{
15586- return (struct thread_info *)
15587- (current_stack_pointer & ~(THREAD_SIZE - 1));
15588-}
15589-
15590-#else /* !__ASSEMBLY__ */
15591-
15592+#ifdef __ASSEMBLY__
15593 /* how to get the thread information struct from ASM */
15594 #define GET_THREAD_INFO(reg) \
15595- movl $-THREAD_SIZE, reg; \
15596- andl %esp, reg
15597+ mov PER_CPU_VAR(current_tinfo), reg
15598
15599 /* use this one if reg already contains %esp */
15600-#define GET_THREAD_INFO_WITH_ESP(reg) \
15601- andl $-THREAD_SIZE, reg
15602+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
15603+#else
15604+/* how to get the thread information struct from C */
15605+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
15606+
15607+static __always_inline struct thread_info *current_thread_info(void)
15608+{
15609+ return this_cpu_read_stable(current_tinfo);
15610+}
15611+#endif
15612+
15613+#ifdef CONFIG_X86_32
15614+
15615+#define STACK_WARN (THREAD_SIZE/8)
15616+/*
15617+ * macros/functions for gaining access to the thread information structure
15618+ *
15619+ * preempt_count needs to be 1 initially, until the scheduler is functional.
15620+ */
15621+#ifndef __ASSEMBLY__
15622+
15623+/* how to get the current stack pointer from C */
15624+register unsigned long current_stack_pointer asm("esp") __used;
15625
15626 #endif
15627
15628 #else /* X86_32 */
15629
15630-#include <asm/percpu.h>
15631-#define KERNEL_STACK_OFFSET (5*8)
15632-
15633 /*
15634 * macros/functions for gaining access to the thread information structure
15635 * preempt_count needs to be 1 initially, until the scheduler is functional.
15636@@ -204,27 +195,8 @@ static inline struct thread_info *current_thread_info(void)
15637 #ifndef __ASSEMBLY__
15638 DECLARE_PER_CPU(unsigned long, kernel_stack);
15639
15640-static inline struct thread_info *current_thread_info(void)
15641-{
15642- struct thread_info *ti;
15643- ti = (void *)(this_cpu_read_stable(kernel_stack) +
15644- KERNEL_STACK_OFFSET - THREAD_SIZE);
15645- return ti;
15646-}
15647-
15648-#else /* !__ASSEMBLY__ */
15649-
15650-/* how to get the thread information struct from ASM */
15651-#define GET_THREAD_INFO(reg) \
15652- movq PER_CPU_VAR(kernel_stack),reg ; \
15653- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
15654-
15655-/*
15656- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
15657- * a certain register (to be used in assembler memory operands).
15658- */
15659-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
15660-
15661+/* how to get the current stack pointer from C */
15662+register unsigned long current_stack_pointer asm("rsp") __used;
15663 #endif
15664
15665 #endif /* !X86_32 */
15666@@ -283,5 +255,12 @@ static inline bool is_ia32_task(void)
15667 extern void arch_task_cache_init(void);
15668 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
15669 extern void arch_release_task_struct(struct task_struct *tsk);
15670+
15671+#define __HAVE_THREAD_FUNCTIONS
15672+#define task_thread_info(task) (&(task)->tinfo)
15673+#define task_stack_page(task) ((task)->stack)
15674+#define setup_thread_stack(p, org) do {} while (0)
15675+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
15676+
15677 #endif
15678 #endif /* _ASM_X86_THREAD_INFO_H */
15679diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
15680index 5ee2687..70d5895 100644
15681--- a/arch/x86/include/asm/uaccess.h
15682+++ b/arch/x86/include/asm/uaccess.h
15683@@ -7,6 +7,7 @@
15684 #include <linux/compiler.h>
15685 #include <linux/thread_info.h>
15686 #include <linux/string.h>
15687+#include <linux/sched.h>
15688 #include <asm/asm.h>
15689 #include <asm/page.h>
15690 #include <asm/smap.h>
15691@@ -29,7 +30,12 @@
15692
15693 #define get_ds() (KERNEL_DS)
15694 #define get_fs() (current_thread_info()->addr_limit)
15695+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15696+void __set_fs(mm_segment_t x);
15697+void set_fs(mm_segment_t x);
15698+#else
15699 #define set_fs(x) (current_thread_info()->addr_limit = (x))
15700+#endif
15701
15702 #define segment_eq(a, b) ((a).seg == (b).seg)
15703
15704@@ -77,8 +83,33 @@
15705 * checks that the pointer is in the user space range - after calling
15706 * this function, memory access functions may still return -EFAULT.
15707 */
15708-#define access_ok(type, addr, size) \
15709- (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
15710+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
15711+#define access_ok(type, addr, size) \
15712+({ \
15713+ long __size = size; \
15714+ unsigned long __addr = (unsigned long)addr; \
15715+ unsigned long __addr_ao = __addr & PAGE_MASK; \
15716+ unsigned long __end_ao = __addr + __size - 1; \
15717+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
15718+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
15719+ while(__addr_ao <= __end_ao) { \
15720+ char __c_ao; \
15721+ __addr_ao += PAGE_SIZE; \
15722+ if (__size > PAGE_SIZE) \
15723+ cond_resched(); \
15724+ if (__get_user(__c_ao, (char __user *)__addr)) \
15725+ break; \
15726+ if (type != VERIFY_WRITE) { \
15727+ __addr = __addr_ao; \
15728+ continue; \
15729+ } \
15730+ if (__put_user(__c_ao, (char __user *)__addr)) \
15731+ break; \
15732+ __addr = __addr_ao; \
15733+ } \
15734+ } \
15735+ __ret_ao; \
15736+})
15737
15738 /*
15739 * The exception table consists of pairs of addresses relative to the
15740@@ -176,13 +207,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
15741 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
15742 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
15743
15744-
15745+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15746+#define __copyuser_seg "gs;"
15747+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
15748+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
15749+#else
15750+#define __copyuser_seg
15751+#define __COPYUSER_SET_ES
15752+#define __COPYUSER_RESTORE_ES
15753+#endif
15754
15755 #ifdef CONFIG_X86_32
15756 #define __put_user_asm_u64(x, addr, err, errret) \
15757 asm volatile(ASM_STAC "\n" \
15758- "1: movl %%eax,0(%2)\n" \
15759- "2: movl %%edx,4(%2)\n" \
15760+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
15761+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
15762 "3: " ASM_CLAC "\n" \
15763 ".section .fixup,\"ax\"\n" \
15764 "4: movl %3,%0\n" \
15765@@ -195,8 +234,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
15766
15767 #define __put_user_asm_ex_u64(x, addr) \
15768 asm volatile(ASM_STAC "\n" \
15769- "1: movl %%eax,0(%1)\n" \
15770- "2: movl %%edx,4(%1)\n" \
15771+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
15772+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
15773 "3: " ASM_CLAC "\n" \
15774 _ASM_EXTABLE_EX(1b, 2b) \
15775 _ASM_EXTABLE_EX(2b, 3b) \
15776@@ -246,7 +285,7 @@ extern void __put_user_8(void);
15777 __typeof__(*(ptr)) __pu_val; \
15778 __chk_user_ptr(ptr); \
15779 might_fault(); \
15780- __pu_val = x; \
15781+ __pu_val = (x); \
15782 switch (sizeof(*(ptr))) { \
15783 case 1: \
15784 __put_user_x(1, __pu_val, ptr, __ret_pu); \
15785@@ -345,7 +384,7 @@ do { \
15786
15787 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
15788 asm volatile(ASM_STAC "\n" \
15789- "1: mov"itype" %2,%"rtype"1\n" \
15790+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
15791 "2: " ASM_CLAC "\n" \
15792 ".section .fixup,\"ax\"\n" \
15793 "3: mov %3,%0\n" \
15794@@ -353,7 +392,7 @@ do { \
15795 " jmp 2b\n" \
15796 ".previous\n" \
15797 _ASM_EXTABLE(1b, 3b) \
15798- : "=r" (err), ltype(x) \
15799+ : "=r" (err), ltype (x) \
15800 : "m" (__m(addr)), "i" (errret), "0" (err))
15801
15802 #define __get_user_size_ex(x, ptr, size) \
15803@@ -378,7 +417,7 @@ do { \
15804 } while (0)
15805
15806 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
15807- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
15808+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
15809 "2:\n" \
15810 _ASM_EXTABLE_EX(1b, 2b) \
15811 : ltype(x) : "m" (__m(addr)))
15812@@ -395,13 +434,24 @@ do { \
15813 int __gu_err; \
15814 unsigned long __gu_val; \
15815 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
15816- (x) = (__force __typeof__(*(ptr)))__gu_val; \
15817+ (x) = (__typeof__(*(ptr)))__gu_val; \
15818 __gu_err; \
15819 })
15820
15821 /* FIXME: this hack is definitely wrong -AK */
15822 struct __large_struct { unsigned long buf[100]; };
15823-#define __m(x) (*(struct __large_struct __user *)(x))
15824+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15825+#define ____m(x) \
15826+({ \
15827+ unsigned long ____x = (unsigned long)(x); \
15828+ if (____x < pax_user_shadow_base) \
15829+ ____x += pax_user_shadow_base; \
15830+ (typeof(x))____x; \
15831+})
15832+#else
15833+#define ____m(x) (x)
15834+#endif
15835+#define __m(x) (*(struct __large_struct __user *)____m(x))
15836
15837 /*
15838 * Tell gcc we read from memory instead of writing: this is because
15839@@ -410,7 +460,7 @@ struct __large_struct { unsigned long buf[100]; };
15840 */
15841 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
15842 asm volatile(ASM_STAC "\n" \
15843- "1: mov"itype" %"rtype"1,%2\n" \
15844+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
15845 "2: " ASM_CLAC "\n" \
15846 ".section .fixup,\"ax\"\n" \
15847 "3: mov %3,%0\n" \
15848@@ -418,10 +468,10 @@ struct __large_struct { unsigned long buf[100]; };
15849 ".previous\n" \
15850 _ASM_EXTABLE(1b, 3b) \
15851 : "=r"(err) \
15852- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
15853+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
15854
15855 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
15856- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
15857+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
15858 "2:\n" \
15859 _ASM_EXTABLE_EX(1b, 2b) \
15860 : : ltype(x), "m" (__m(addr)))
15861@@ -460,8 +510,12 @@ struct __large_struct { unsigned long buf[100]; };
15862 * On error, the variable @x is set to zero.
15863 */
15864
15865+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15866+#define __get_user(x, ptr) get_user((x), (ptr))
15867+#else
15868 #define __get_user(x, ptr) \
15869 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
15870+#endif
15871
15872 /**
15873 * __put_user: - Write a simple value into user space, with less checking.
15874@@ -483,8 +537,12 @@ struct __large_struct { unsigned long buf[100]; };
15875 * Returns zero on success, or -EFAULT on error.
15876 */
15877
15878+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15879+#define __put_user(x, ptr) put_user((x), (ptr))
15880+#else
15881 #define __put_user(x, ptr) \
15882 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
15883+#endif
15884
15885 #define __get_user_unaligned __get_user
15886 #define __put_user_unaligned __put_user
15887@@ -502,7 +560,7 @@ struct __large_struct { unsigned long buf[100]; };
15888 #define get_user_ex(x, ptr) do { \
15889 unsigned long __gue_val; \
15890 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
15891- (x) = (__force __typeof__(*(ptr)))__gue_val; \
15892+ (x) = (__typeof__(*(ptr)))__gue_val; \
15893 } while (0)
15894
15895 #define put_user_try uaccess_try
15896@@ -519,8 +577,8 @@ strncpy_from_user(char *dst, const char __user *src, long count);
15897 extern __must_check long strlen_user(const char __user *str);
15898 extern __must_check long strnlen_user(const char __user *str, long n);
15899
15900-unsigned long __must_check clear_user(void __user *mem, unsigned long len);
15901-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
15902+unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
15903+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
15904
15905 /*
15906 * movsl can be slow when source and dest are not both 8-byte aligned
15907diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
15908index 7f760a9..04b1c65 100644
15909--- a/arch/x86/include/asm/uaccess_32.h
15910+++ b/arch/x86/include/asm/uaccess_32.h
15911@@ -11,15 +11,15 @@
15912 #include <asm/page.h>
15913
15914 unsigned long __must_check __copy_to_user_ll
15915- (void __user *to, const void *from, unsigned long n);
15916+ (void __user *to, const void *from, unsigned long n) __size_overflow(3);
15917 unsigned long __must_check __copy_from_user_ll
15918- (void *to, const void __user *from, unsigned long n);
15919+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
15920 unsigned long __must_check __copy_from_user_ll_nozero
15921- (void *to, const void __user *from, unsigned long n);
15922+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
15923 unsigned long __must_check __copy_from_user_ll_nocache
15924- (void *to, const void __user *from, unsigned long n);
15925+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
15926 unsigned long __must_check __copy_from_user_ll_nocache_nozero
15927- (void *to, const void __user *from, unsigned long n);
15928+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
15929
15930 /**
15931 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
15932@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
15933 static __always_inline unsigned long __must_check
15934 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
15935 {
15936+ if ((long)n < 0)
15937+ return n;
15938+
15939+ check_object_size(from, n, true);
15940+
15941 if (__builtin_constant_p(n)) {
15942 unsigned long ret;
15943
15944@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
15945 __copy_to_user(void __user *to, const void *from, unsigned long n)
15946 {
15947 might_fault();
15948+
15949 return __copy_to_user_inatomic(to, from, n);
15950 }
15951
15952 static __always_inline unsigned long
15953 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
15954 {
15955+ if ((long)n < 0)
15956+ return n;
15957+
15958 /* Avoid zeroing the tail if the copy fails..
15959 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
15960 * but as the zeroing behaviour is only significant when n is not
15961@@ -137,6 +146,12 @@ static __always_inline unsigned long
15962 __copy_from_user(void *to, const void __user *from, unsigned long n)
15963 {
15964 might_fault();
15965+
15966+ if ((long)n < 0)
15967+ return n;
15968+
15969+ check_object_size(to, n, false);
15970+
15971 if (__builtin_constant_p(n)) {
15972 unsigned long ret;
15973
15974@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
15975 const void __user *from, unsigned long n)
15976 {
15977 might_fault();
15978+
15979+ if ((long)n < 0)
15980+ return n;
15981+
15982 if (__builtin_constant_p(n)) {
15983 unsigned long ret;
15984
15985@@ -181,15 +200,19 @@ static __always_inline unsigned long
15986 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
15987 unsigned long n)
15988 {
15989- return __copy_from_user_ll_nocache_nozero(to, from, n);
15990+ if ((long)n < 0)
15991+ return n;
15992+
15993+ return __copy_from_user_ll_nocache_nozero(to, from, n);
15994 }
15995
15996-unsigned long __must_check copy_to_user(void __user *to,
15997- const void *from, unsigned long n);
15998-unsigned long __must_check _copy_from_user(void *to,
15999- const void __user *from,
16000- unsigned long n);
16001-
16002+extern void copy_to_user_overflow(void)
16003+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
16004+ __compiletime_error("copy_to_user() buffer size is not provably correct")
16005+#else
16006+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
16007+#endif
16008+;
16009
16010 extern void copy_from_user_overflow(void)
16011 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
16012@@ -199,17 +222,60 @@ extern void copy_from_user_overflow(void)
16013 #endif
16014 ;
16015
16016-static inline unsigned long __must_check copy_from_user(void *to,
16017- const void __user *from,
16018- unsigned long n)
16019+/**
16020+ * copy_to_user: - Copy a block of data into user space.
16021+ * @to: Destination address, in user space.
16022+ * @from: Source address, in kernel space.
16023+ * @n: Number of bytes to copy.
16024+ *
16025+ * Context: User context only. This function may sleep.
16026+ *
16027+ * Copy data from kernel space to user space.
16028+ *
16029+ * Returns number of bytes that could not be copied.
16030+ * On success, this will be zero.
16031+ */
16032+static inline unsigned long __must_check
16033+copy_to_user(void __user *to, const void *from, unsigned long n)
16034 {
16035- int sz = __compiletime_object_size(to);
16036+ size_t sz = __compiletime_object_size(from);
16037
16038- if (likely(sz == -1 || sz >= n))
16039- n = _copy_from_user(to, from, n);
16040- else
16041+ if (unlikely(sz != (size_t)-1 && sz < n))
16042+ copy_to_user_overflow();
16043+ else if (access_ok(VERIFY_WRITE, to, n))
16044+ n = __copy_to_user(to, from, n);
16045+ return n;
16046+}
16047+
16048+/**
16049+ * copy_from_user: - Copy a block of data from user space.
16050+ * @to: Destination address, in kernel space.
16051+ * @from: Source address, in user space.
16052+ * @n: Number of bytes to copy.
16053+ *
16054+ * Context: User context only. This function may sleep.
16055+ *
16056+ * Copy data from user space to kernel space.
16057+ *
16058+ * Returns number of bytes that could not be copied.
16059+ * On success, this will be zero.
16060+ *
16061+ * If some data could not be copied, this function will pad the copied
16062+ * data to the requested size using zero bytes.
16063+ */
16064+static inline unsigned long __must_check
16065+copy_from_user(void *to, const void __user *from, unsigned long n)
16066+{
16067+ size_t sz = __compiletime_object_size(to);
16068+
16069+ check_object_size(to, n, false);
16070+
16071+ if (unlikely(sz != (size_t)-1 && sz < n))
16072 copy_from_user_overflow();
16073-
16074+ else if (access_ok(VERIFY_READ, from, n))
16075+ n = __copy_from_user(to, from, n);
16076+ else if ((long)n > 0)
16077+ memset(to, 0, n);
16078 return n;
16079 }
16080
16081diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
16082index 142810c..1f2a0a7 100644
16083--- a/arch/x86/include/asm/uaccess_64.h
16084+++ b/arch/x86/include/asm/uaccess_64.h
16085@@ -10,6 +10,9 @@
16086 #include <asm/alternative.h>
16087 #include <asm/cpufeature.h>
16088 #include <asm/page.h>
16089+#include <asm/pgtable.h>
16090+
16091+#define set_fs(x) (current_thread_info()->addr_limit = (x))
16092
16093 /*
16094 * Copy To/From Userspace
16095@@ -17,13 +20,13 @@
16096
16097 /* Handles exceptions in both to and from, but doesn't do access_ok */
16098 __must_check unsigned long
16099-copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
16100+copy_user_enhanced_fast_string(void *to, const void *from, unsigned len) __size_overflow(3);
16101 __must_check unsigned long
16102-copy_user_generic_string(void *to, const void *from, unsigned len);
16103+copy_user_generic_string(void *to, const void *from, unsigned len) __size_overflow(3);
16104 __must_check unsigned long
16105-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
16106+copy_user_generic_unrolled(void *to, const void *from, unsigned len) __size_overflow(3);
16107
16108-static __always_inline __must_check unsigned long
16109+static __always_inline __must_check __size_overflow(3) unsigned long
16110 copy_user_generic(void *to, const void *from, unsigned len)
16111 {
16112 unsigned ret;
16113@@ -41,142 +44,204 @@ copy_user_generic(void *to, const void *from, unsigned len)
16114 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
16115 "=d" (len)),
16116 "1" (to), "2" (from), "3" (len)
16117- : "memory", "rcx", "r8", "r9", "r10", "r11");
16118+ : "memory", "rcx", "r8", "r9", "r11");
16119 return ret;
16120 }
16121
16122+static __always_inline __must_check unsigned long
16123+__copy_to_user(void __user *to, const void *from, unsigned long len);
16124+static __always_inline __must_check unsigned long
16125+__copy_from_user(void *to, const void __user *from, unsigned long len);
16126 __must_check unsigned long
16127-_copy_to_user(void __user *to, const void *from, unsigned len);
16128-__must_check unsigned long
16129-_copy_from_user(void *to, const void __user *from, unsigned len);
16130-__must_check unsigned long
16131-copy_in_user(void __user *to, const void __user *from, unsigned len);
16132+copy_in_user(void __user *to, const void __user *from, unsigned long len);
16133+
16134+extern void copy_to_user_overflow(void)
16135+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
16136+ __compiletime_error("copy_to_user() buffer size is not provably correct")
16137+#else
16138+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
16139+#endif
16140+;
16141+
16142+extern void copy_from_user_overflow(void)
16143+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
16144+ __compiletime_error("copy_from_user() buffer size is not provably correct")
16145+#else
16146+ __compiletime_warning("copy_from_user() buffer size is not provably correct")
16147+#endif
16148+;
16149
16150 static inline unsigned long __must_check copy_from_user(void *to,
16151 const void __user *from,
16152 unsigned long n)
16153 {
16154- int sz = __compiletime_object_size(to);
16155-
16156 might_fault();
16157- if (likely(sz == -1 || sz >= n))
16158- n = _copy_from_user(to, from, n);
16159-#ifdef CONFIG_DEBUG_VM
16160- else
16161- WARN(1, "Buffer overflow detected!\n");
16162-#endif
16163+
16164+ check_object_size(to, n, false);
16165+
16166+ if (access_ok(VERIFY_READ, from, n))
16167+ n = __copy_from_user(to, from, n);
16168+ else if (n < INT_MAX)
16169+ memset(to, 0, n);
16170 return n;
16171 }
16172
16173 static __always_inline __must_check
16174-int copy_to_user(void __user *dst, const void *src, unsigned size)
16175+int copy_to_user(void __user *dst, const void *src, unsigned long size)
16176 {
16177 might_fault();
16178
16179- return _copy_to_user(dst, src, size);
16180+ if (access_ok(VERIFY_WRITE, dst, size))
16181+ size = __copy_to_user(dst, src, size);
16182+ return size;
16183 }
16184
16185 static __always_inline __must_check
16186-int __copy_from_user(void *dst, const void __user *src, unsigned size)
16187+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
16188 {
16189- int ret = 0;
16190+ size_t sz = __compiletime_object_size(dst);
16191+ unsigned ret = 0;
16192
16193 might_fault();
16194+
16195+ if (size > INT_MAX)
16196+ return size;
16197+
16198+ check_object_size(dst, size, false);
16199+
16200+#ifdef CONFIG_PAX_MEMORY_UDEREF
16201+ if (!__access_ok(VERIFY_READ, src, size))
16202+ return size;
16203+#endif
16204+
16205+ if (unlikely(sz != (size_t)-1 && sz < size)) {
16206+ copy_from_user_overflow();
16207+ return size;
16208+ }
16209+
16210 if (!__builtin_constant_p(size))
16211- return copy_user_generic(dst, (__force void *)src, size);
16212+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
16213 switch (size) {
16214- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
16215+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
16216 ret, "b", "b", "=q", 1);
16217 return ret;
16218- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
16219+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
16220 ret, "w", "w", "=r", 2);
16221 return ret;
16222- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
16223+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
16224 ret, "l", "k", "=r", 4);
16225 return ret;
16226- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
16227+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
16228 ret, "q", "", "=r", 8);
16229 return ret;
16230 case 10:
16231- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
16232+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
16233 ret, "q", "", "=r", 10);
16234 if (unlikely(ret))
16235 return ret;
16236 __get_user_asm(*(u16 *)(8 + (char *)dst),
16237- (u16 __user *)(8 + (char __user *)src),
16238+ (const u16 __user *)(8 + (const char __user *)src),
16239 ret, "w", "w", "=r", 2);
16240 return ret;
16241 case 16:
16242- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
16243+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
16244 ret, "q", "", "=r", 16);
16245 if (unlikely(ret))
16246 return ret;
16247 __get_user_asm(*(u64 *)(8 + (char *)dst),
16248- (u64 __user *)(8 + (char __user *)src),
16249+ (const u64 __user *)(8 + (const char __user *)src),
16250 ret, "q", "", "=r", 8);
16251 return ret;
16252 default:
16253- return copy_user_generic(dst, (__force void *)src, size);
16254+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
16255 }
16256 }
16257
16258 static __always_inline __must_check
16259-int __copy_to_user(void __user *dst, const void *src, unsigned size)
16260+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
16261 {
16262- int ret = 0;
16263+ size_t sz = __compiletime_object_size(src);
16264+ unsigned ret = 0;
16265
16266 might_fault();
16267+
16268+ if (size > INT_MAX)
16269+ return size;
16270+
16271+ check_object_size(src, size, true);
16272+
16273+#ifdef CONFIG_PAX_MEMORY_UDEREF
16274+ if (!__access_ok(VERIFY_WRITE, dst, size))
16275+ return size;
16276+#endif
16277+
16278+ if (unlikely(sz != (size_t)-1 && sz < size)) {
16279+ copy_to_user_overflow();
16280+ return size;
16281+ }
16282+
16283 if (!__builtin_constant_p(size))
16284- return copy_user_generic((__force void *)dst, src, size);
16285+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
16286 switch (size) {
16287- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
16288+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
16289 ret, "b", "b", "iq", 1);
16290 return ret;
16291- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
16292+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
16293 ret, "w", "w", "ir", 2);
16294 return ret;
16295- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
16296+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
16297 ret, "l", "k", "ir", 4);
16298 return ret;
16299- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
16300+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
16301 ret, "q", "", "er", 8);
16302 return ret;
16303 case 10:
16304- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
16305+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
16306 ret, "q", "", "er", 10);
16307 if (unlikely(ret))
16308 return ret;
16309 asm("":::"memory");
16310- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
16311+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
16312 ret, "w", "w", "ir", 2);
16313 return ret;
16314 case 16:
16315- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
16316+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
16317 ret, "q", "", "er", 16);
16318 if (unlikely(ret))
16319 return ret;
16320 asm("":::"memory");
16321- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
16322+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
16323 ret, "q", "", "er", 8);
16324 return ret;
16325 default:
16326- return copy_user_generic((__force void *)dst, src, size);
16327+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
16328 }
16329 }
16330
16331 static __always_inline __must_check
16332-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
16333+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
16334 {
16335- int ret = 0;
16336+ unsigned ret = 0;
16337
16338 might_fault();
16339+
16340+ if (size > INT_MAX)
16341+ return size;
16342+
16343+#ifdef CONFIG_PAX_MEMORY_UDEREF
16344+ if (!__access_ok(VERIFY_READ, src, size))
16345+ return size;
16346+ if (!__access_ok(VERIFY_WRITE, dst, size))
16347+ return size;
16348+#endif
16349+
16350 if (!__builtin_constant_p(size))
16351- return copy_user_generic((__force void *)dst,
16352- (__force void *)src, size);
16353+ return copy_user_generic((__force_kernel void *)____m(dst),
16354+ (__force_kernel const void *)____m(src), size);
16355 switch (size) {
16356 case 1: {
16357 u8 tmp;
16358- __get_user_asm(tmp, (u8 __user *)src,
16359+ __get_user_asm(tmp, (const u8 __user *)src,
16360 ret, "b", "b", "=q", 1);
16361 if (likely(!ret))
16362 __put_user_asm(tmp, (u8 __user *)dst,
16363@@ -185,7 +250,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
16364 }
16365 case 2: {
16366 u16 tmp;
16367- __get_user_asm(tmp, (u16 __user *)src,
16368+ __get_user_asm(tmp, (const u16 __user *)src,
16369 ret, "w", "w", "=r", 2);
16370 if (likely(!ret))
16371 __put_user_asm(tmp, (u16 __user *)dst,
16372@@ -195,7 +260,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
16373
16374 case 4: {
16375 u32 tmp;
16376- __get_user_asm(tmp, (u32 __user *)src,
16377+ __get_user_asm(tmp, (const u32 __user *)src,
16378 ret, "l", "k", "=r", 4);
16379 if (likely(!ret))
16380 __put_user_asm(tmp, (u32 __user *)dst,
16381@@ -204,7 +269,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
16382 }
16383 case 8: {
16384 u64 tmp;
16385- __get_user_asm(tmp, (u64 __user *)src,
16386+ __get_user_asm(tmp, (const u64 __user *)src,
16387 ret, "q", "", "=r", 8);
16388 if (likely(!ret))
16389 __put_user_asm(tmp, (u64 __user *)dst,
16390@@ -212,41 +277,72 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
16391 return ret;
16392 }
16393 default:
16394- return copy_user_generic((__force void *)dst,
16395- (__force void *)src, size);
16396+ return copy_user_generic((__force_kernel void *)____m(dst),
16397+ (__force_kernel const void *)____m(src), size);
16398 }
16399 }
16400
16401 static __must_check __always_inline int
16402-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
16403+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
16404 {
16405- return copy_user_generic(dst, (__force const void *)src, size);
16406+ if (size > INT_MAX)
16407+ return size;
16408+
16409+#ifdef CONFIG_PAX_MEMORY_UDEREF
16410+ if (!__access_ok(VERIFY_READ, src, size))
16411+ return size;
16412+#endif
16413+
16414+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
16415 }
16416
16417-static __must_check __always_inline int
16418-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
16419+static __must_check __always_inline unsigned long
16420+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
16421 {
16422- return copy_user_generic((__force void *)dst, src, size);
16423+ if (size > INT_MAX)
16424+ return size;
16425+
16426+#ifdef CONFIG_PAX_MEMORY_UDEREF
16427+ if (!__access_ok(VERIFY_WRITE, dst, size))
16428+ return size;
16429+#endif
16430+
16431+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
16432 }
16433
16434-extern long __copy_user_nocache(void *dst, const void __user *src,
16435- unsigned size, int zerorest);
16436+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
16437+ unsigned long size, int zerorest) __size_overflow(3);
16438
16439-static inline int
16440-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
16441+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
16442 {
16443 might_sleep();
16444+
16445+ if (size > INT_MAX)
16446+ return size;
16447+
16448+#ifdef CONFIG_PAX_MEMORY_UDEREF
16449+ if (!__access_ok(VERIFY_READ, src, size))
16450+ return size;
16451+#endif
16452+
16453 return __copy_user_nocache(dst, src, size, 1);
16454 }
16455
16456-static inline int
16457-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
16458- unsigned size)
16459+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
16460+ unsigned long size)
16461 {
16462+ if (size > INT_MAX)
16463+ return size;
16464+
16465+#ifdef CONFIG_PAX_MEMORY_UDEREF
16466+ if (!__access_ok(VERIFY_READ, src, size))
16467+ return size;
16468+#endif
16469+
16470 return __copy_user_nocache(dst, src, size, 0);
16471 }
16472
16473-unsigned long
16474-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
16475+extern unsigned long
16476+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
16477
16478 #endif /* _ASM_X86_UACCESS_64_H */
16479diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
16480index 5b238981..77fdd78 100644
16481--- a/arch/x86/include/asm/word-at-a-time.h
16482+++ b/arch/x86/include/asm/word-at-a-time.h
16483@@ -11,7 +11,7 @@
16484 * and shift, for example.
16485 */
16486 struct word_at_a_time {
16487- const unsigned long one_bits, high_bits;
16488+ unsigned long one_bits, high_bits;
16489 };
16490
16491 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
16492diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
16493index d8d9922..bf6cecb 100644
16494--- a/arch/x86/include/asm/x86_init.h
16495+++ b/arch/x86/include/asm/x86_init.h
16496@@ -129,7 +129,7 @@ struct x86_init_ops {
16497 struct x86_init_timers timers;
16498 struct x86_init_iommu iommu;
16499 struct x86_init_pci pci;
16500-};
16501+} __no_const;
16502
16503 /**
16504 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
16505@@ -140,7 +140,7 @@ struct x86_cpuinit_ops {
16506 void (*setup_percpu_clockev)(void);
16507 void (*early_percpu_clock_init)(void);
16508 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
16509-};
16510+} __no_const;
16511
16512 /**
16513 * struct x86_platform_ops - platform specific runtime functions
16514@@ -166,7 +166,7 @@ struct x86_platform_ops {
16515 void (*save_sched_clock_state)(void);
16516 void (*restore_sched_clock_state)(void);
16517 void (*apic_post_init)(void);
16518-};
16519+} __no_const;
16520
16521 struct pci_dev;
16522 struct msi_msg;
16523@@ -180,7 +180,7 @@ struct x86_msi_ops {
16524 void (*teardown_msi_irqs)(struct pci_dev *dev);
16525 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
16526 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
16527-};
16528+} __no_const;
16529
16530 struct IO_APIC_route_entry;
16531 struct io_apic_irq_attr;
16532@@ -201,7 +201,7 @@ struct x86_io_apic_ops {
16533 unsigned int destination, int vector,
16534 struct io_apic_irq_attr *attr);
16535 void (*eoi_ioapic_pin)(int apic, int pin, int vector);
16536-};
16537+} __no_const;
16538
16539 extern struct x86_init_ops x86_init;
16540 extern struct x86_cpuinit_ops x86_cpuinit;
16541diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
16542index 0415cda..b43d877 100644
16543--- a/arch/x86/include/asm/xsave.h
16544+++ b/arch/x86/include/asm/xsave.h
16545@@ -71,7 +71,9 @@ static inline int xsave_user(struct xsave_struct __user *buf)
16546 return -EFAULT;
16547
16548 __asm__ __volatile__(ASM_STAC "\n"
16549- "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
16550+ "1:"
16551+ __copyuser_seg
16552+ ".byte " REX_PREFIX "0x0f,0xae,0x27\n"
16553 "2: " ASM_CLAC "\n"
16554 ".section .fixup,\"ax\"\n"
16555 "3: movl $-1,%[err]\n"
16556@@ -87,12 +89,14 @@ static inline int xsave_user(struct xsave_struct __user *buf)
16557 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
16558 {
16559 int err;
16560- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
16561+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
16562 u32 lmask = mask;
16563 u32 hmask = mask >> 32;
16564
16565 __asm__ __volatile__(ASM_STAC "\n"
16566- "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
16567+ "1:"
16568+ __copyuser_seg
16569+ ".byte " REX_PREFIX "0x0f,0xae,0x2f\n"
16570 "2: " ASM_CLAC "\n"
16571 ".section .fixup,\"ax\"\n"
16572 "3: movl $-1,%[err]\n"
16573diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
16574index bbae024..e1528f9 100644
16575--- a/arch/x86/include/uapi/asm/e820.h
16576+++ b/arch/x86/include/uapi/asm/e820.h
16577@@ -63,7 +63,7 @@ struct e820map {
16578 #define ISA_START_ADDRESS 0xa0000
16579 #define ISA_END_ADDRESS 0x100000
16580
16581-#define BIOS_BEGIN 0x000a0000
16582+#define BIOS_BEGIN 0x000c0000
16583 #define BIOS_END 0x00100000
16584
16585 #define BIOS_ROM_BASE 0xffe00000
16586diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
16587index 7bd3bd3..5dac791 100644
16588--- a/arch/x86/kernel/Makefile
16589+++ b/arch/x86/kernel/Makefile
16590@@ -22,7 +22,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
16591 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
16592 obj-$(CONFIG_IRQ_WORK) += irq_work.o
16593 obj-y += probe_roms.o
16594-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
16595+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
16596 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
16597 obj-y += syscall_$(BITS).o
16598 obj-$(CONFIG_X86_64) += vsyscall_64.o
16599diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
16600index 230c8ea..f915130 100644
16601--- a/arch/x86/kernel/acpi/boot.c
16602+++ b/arch/x86/kernel/acpi/boot.c
16603@@ -1361,7 +1361,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
16604 * If your system is blacklisted here, but you find that acpi=force
16605 * works for you, please contact linux-acpi@vger.kernel.org
16606 */
16607-static struct dmi_system_id __initdata acpi_dmi_table[] = {
16608+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
16609 /*
16610 * Boxes that need ACPI disabled
16611 */
16612@@ -1436,7 +1436,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
16613 };
16614
16615 /* second table for DMI checks that should run after early-quirks */
16616-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
16617+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
16618 /*
16619 * HP laptops which use a DSDT reporting as HP/SB400/10000,
16620 * which includes some code which overrides all temperature
16621diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
16622index b44577b..27d8443 100644
16623--- a/arch/x86/kernel/acpi/sleep.c
16624+++ b/arch/x86/kernel/acpi/sleep.c
16625@@ -74,8 +74,12 @@ int acpi_suspend_lowlevel(void)
16626 #else /* CONFIG_64BIT */
16627 #ifdef CONFIG_SMP
16628 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
16629+
16630+ pax_open_kernel();
16631 early_gdt_descr.address =
16632 (unsigned long)get_cpu_gdt_table(smp_processor_id());
16633+ pax_close_kernel();
16634+
16635 initial_gs = per_cpu_offset(smp_processor_id());
16636 #endif
16637 initial_code = (unsigned long)wakeup_long64;
16638diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
16639index d1daa66..59fecba 100644
16640--- a/arch/x86/kernel/acpi/wakeup_32.S
16641+++ b/arch/x86/kernel/acpi/wakeup_32.S
16642@@ -29,13 +29,11 @@ wakeup_pmode_return:
16643 # and restore the stack ... but you need gdt for this to work
16644 movl saved_context_esp, %esp
16645
16646- movl %cs:saved_magic, %eax
16647- cmpl $0x12345678, %eax
16648+ cmpl $0x12345678, saved_magic
16649 jne bogus_magic
16650
16651 # jump to place where we left off
16652- movl saved_eip, %eax
16653- jmp *%eax
16654+ jmp *(saved_eip)
16655
16656 bogus_magic:
16657 jmp bogus_magic
16658diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
16659index c15cf9a..0e63558 100644
16660--- a/arch/x86/kernel/alternative.c
16661+++ b/arch/x86/kernel/alternative.c
16662@@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
16663 */
16664 for (a = start; a < end; a++) {
16665 instr = (u8 *)&a->instr_offset + a->instr_offset;
16666+
16667+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16668+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16669+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
16670+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16671+#endif
16672+
16673 replacement = (u8 *)&a->repl_offset + a->repl_offset;
16674 BUG_ON(a->replacementlen > a->instrlen);
16675 BUG_ON(a->instrlen > sizeof(insnbuf));
16676@@ -299,10 +306,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
16677 for (poff = start; poff < end; poff++) {
16678 u8 *ptr = (u8 *)poff + *poff;
16679
16680+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16681+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16682+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
16683+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16684+#endif
16685+
16686 if (!*poff || ptr < text || ptr >= text_end)
16687 continue;
16688 /* turn DS segment override prefix into lock prefix */
16689- if (*ptr == 0x3e)
16690+ if (*ktla_ktva(ptr) == 0x3e)
16691 text_poke(ptr, ((unsigned char []){0xf0}), 1);
16692 }
16693 mutex_unlock(&text_mutex);
16694@@ -317,10 +330,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
16695 for (poff = start; poff < end; poff++) {
16696 u8 *ptr = (u8 *)poff + *poff;
16697
16698+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16699+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16700+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
16701+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16702+#endif
16703+
16704 if (!*poff || ptr < text || ptr >= text_end)
16705 continue;
16706 /* turn lock prefix into DS segment override prefix */
16707- if (*ptr == 0xf0)
16708+ if (*ktla_ktva(ptr) == 0xf0)
16709 text_poke(ptr, ((unsigned char []){0x3E}), 1);
16710 }
16711 mutex_unlock(&text_mutex);
16712@@ -468,7 +487,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
16713
16714 BUG_ON(p->len > MAX_PATCH_LEN);
16715 /* prep the buffer with the original instructions */
16716- memcpy(insnbuf, p->instr, p->len);
16717+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
16718 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
16719 (unsigned long)p->instr, p->len);
16720
16721@@ -515,7 +534,7 @@ void __init alternative_instructions(void)
16722 if (!uniproc_patched || num_possible_cpus() == 1)
16723 free_init_pages("SMP alternatives",
16724 (unsigned long)__smp_locks,
16725- (unsigned long)__smp_locks_end);
16726+ PAGE_ALIGN((unsigned long)__smp_locks_end));
16727 #endif
16728
16729 apply_paravirt(__parainstructions, __parainstructions_end);
16730@@ -535,13 +554,17 @@ void __init alternative_instructions(void)
16731 * instructions. And on the local CPU you need to be protected again NMI or MCE
16732 * handlers seeing an inconsistent instruction while you patch.
16733 */
16734-void *__init_or_module text_poke_early(void *addr, const void *opcode,
16735+void *__kprobes text_poke_early(void *addr, const void *opcode,
16736 size_t len)
16737 {
16738 unsigned long flags;
16739 local_irq_save(flags);
16740- memcpy(addr, opcode, len);
16741+
16742+ pax_open_kernel();
16743+ memcpy(ktla_ktva(addr), opcode, len);
16744 sync_core();
16745+ pax_close_kernel();
16746+
16747 local_irq_restore(flags);
16748 /* Could also do a CLFLUSH here to speed up CPU recovery; but
16749 that causes hangs on some VIA CPUs. */
16750@@ -563,36 +586,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
16751 */
16752 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
16753 {
16754- unsigned long flags;
16755- char *vaddr;
16756+ unsigned char *vaddr = ktla_ktva(addr);
16757 struct page *pages[2];
16758- int i;
16759+ size_t i;
16760
16761 if (!core_kernel_text((unsigned long)addr)) {
16762- pages[0] = vmalloc_to_page(addr);
16763- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
16764+ pages[0] = vmalloc_to_page(vaddr);
16765+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
16766 } else {
16767- pages[0] = virt_to_page(addr);
16768+ pages[0] = virt_to_page(vaddr);
16769 WARN_ON(!PageReserved(pages[0]));
16770- pages[1] = virt_to_page(addr + PAGE_SIZE);
16771+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
16772 }
16773 BUG_ON(!pages[0]);
16774- local_irq_save(flags);
16775- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
16776- if (pages[1])
16777- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
16778- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
16779- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
16780- clear_fixmap(FIX_TEXT_POKE0);
16781- if (pages[1])
16782- clear_fixmap(FIX_TEXT_POKE1);
16783- local_flush_tlb();
16784- sync_core();
16785- /* Could also do a CLFLUSH here to speed up CPU recovery; but
16786- that causes hangs on some VIA CPUs. */
16787+ text_poke_early(addr, opcode, len);
16788 for (i = 0; i < len; i++)
16789- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
16790- local_irq_restore(flags);
16791+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
16792 return addr;
16793 }
16794
16795diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
16796index 904611b..004dde6 100644
16797--- a/arch/x86/kernel/apic/apic.c
16798+++ b/arch/x86/kernel/apic/apic.c
16799@@ -189,7 +189,7 @@ int first_system_vector = 0xfe;
16800 /*
16801 * Debug level, exported for io_apic.c
16802 */
16803-unsigned int apic_verbosity;
16804+int apic_verbosity;
16805
16806 int pic_mode;
16807
16808@@ -1955,7 +1955,7 @@ void smp_error_interrupt(struct pt_regs *regs)
16809 apic_write(APIC_ESR, 0);
16810 v1 = apic_read(APIC_ESR);
16811 ack_APIC_irq();
16812- atomic_inc(&irq_err_count);
16813+ atomic_inc_unchecked(&irq_err_count);
16814
16815 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
16816 smp_processor_id(), v0 , v1);
16817diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
16818index 00c77cf..2dc6a2d 100644
16819--- a/arch/x86/kernel/apic/apic_flat_64.c
16820+++ b/arch/x86/kernel/apic/apic_flat_64.c
16821@@ -157,7 +157,7 @@ static int flat_probe(void)
16822 return 1;
16823 }
16824
16825-static struct apic apic_flat = {
16826+static struct apic apic_flat __read_only = {
16827 .name = "flat",
16828 .probe = flat_probe,
16829 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
16830@@ -271,7 +271,7 @@ static int physflat_probe(void)
16831 return 0;
16832 }
16833
16834-static struct apic apic_physflat = {
16835+static struct apic apic_physflat __read_only = {
16836
16837 .name = "physical flat",
16838 .probe = physflat_probe,
16839diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
16840index e145f28..2752888 100644
16841--- a/arch/x86/kernel/apic/apic_noop.c
16842+++ b/arch/x86/kernel/apic/apic_noop.c
16843@@ -119,7 +119,7 @@ static void noop_apic_write(u32 reg, u32 v)
16844 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
16845 }
16846
16847-struct apic apic_noop = {
16848+struct apic apic_noop __read_only = {
16849 .name = "noop",
16850 .probe = noop_probe,
16851 .acpi_madt_oem_check = NULL,
16852diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
16853index d50e364..543bee3 100644
16854--- a/arch/x86/kernel/apic/bigsmp_32.c
16855+++ b/arch/x86/kernel/apic/bigsmp_32.c
16856@@ -152,7 +152,7 @@ static int probe_bigsmp(void)
16857 return dmi_bigsmp;
16858 }
16859
16860-static struct apic apic_bigsmp = {
16861+static struct apic apic_bigsmp __read_only = {
16862
16863 .name = "bigsmp",
16864 .probe = probe_bigsmp,
16865diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
16866index 0874799..a7a7892 100644
16867--- a/arch/x86/kernel/apic/es7000_32.c
16868+++ b/arch/x86/kernel/apic/es7000_32.c
16869@@ -608,8 +608,7 @@ static int es7000_mps_oem_check_cluster(struct mpc_table *mpc, char *oem,
16870 return ret && es7000_apic_is_cluster();
16871 }
16872
16873-/* We've been warned by a false positive warning.Use __refdata to keep calm. */
16874-static struct apic __refdata apic_es7000_cluster = {
16875+static struct apic apic_es7000_cluster __read_only = {
16876
16877 .name = "es7000",
16878 .probe = probe_es7000,
16879@@ -675,7 +674,7 @@ static struct apic __refdata apic_es7000_cluster = {
16880 .x86_32_early_logical_apicid = es7000_early_logical_apicid,
16881 };
16882
16883-static struct apic __refdata apic_es7000 = {
16884+static struct apic apic_es7000 __read_only = {
16885
16886 .name = "es7000",
16887 .probe = probe_es7000,
16888diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
16889index 9ed796c..e930fe4 100644
16890--- a/arch/x86/kernel/apic/io_apic.c
16891+++ b/arch/x86/kernel/apic/io_apic.c
16892@@ -1060,7 +1060,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
16893 }
16894 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
16895
16896-void lock_vector_lock(void)
16897+void lock_vector_lock(void) __acquires(vector_lock)
16898 {
16899 /* Used to the online set of cpus does not change
16900 * during assign_irq_vector.
16901@@ -1068,7 +1068,7 @@ void lock_vector_lock(void)
16902 raw_spin_lock(&vector_lock);
16903 }
16904
16905-void unlock_vector_lock(void)
16906+void unlock_vector_lock(void) __releases(vector_lock)
16907 {
16908 raw_spin_unlock(&vector_lock);
16909 }
16910@@ -2362,7 +2362,7 @@ static void ack_apic_edge(struct irq_data *data)
16911 ack_APIC_irq();
16912 }
16913
16914-atomic_t irq_mis_count;
16915+atomic_unchecked_t irq_mis_count;
16916
16917 #ifdef CONFIG_GENERIC_PENDING_IRQ
16918 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
16919@@ -2503,7 +2503,7 @@ static void ack_apic_level(struct irq_data *data)
16920 * at the cpu.
16921 */
16922 if (!(v & (1 << (i & 0x1f)))) {
16923- atomic_inc(&irq_mis_count);
16924+ atomic_inc_unchecked(&irq_mis_count);
16925
16926 eoi_ioapic_irq(irq, cfg);
16927 }
16928diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
16929index d661ee9..791fd33 100644
16930--- a/arch/x86/kernel/apic/numaq_32.c
16931+++ b/arch/x86/kernel/apic/numaq_32.c
16932@@ -455,8 +455,7 @@ static void numaq_setup_portio_remap(void)
16933 (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
16934 }
16935
16936-/* Use __refdata to keep false positive warning calm. */
16937-static struct apic __refdata apic_numaq = {
16938+static struct apic apic_numaq __read_only = {
16939
16940 .name = "NUMAQ",
16941 .probe = probe_numaq,
16942diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
16943index eb35ef9..f184a21 100644
16944--- a/arch/x86/kernel/apic/probe_32.c
16945+++ b/arch/x86/kernel/apic/probe_32.c
16946@@ -72,7 +72,7 @@ static int probe_default(void)
16947 return 1;
16948 }
16949
16950-static struct apic apic_default = {
16951+static struct apic apic_default __read_only = {
16952
16953 .name = "default",
16954 .probe = probe_default,
16955diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
16956index 77c95c0..434f8a4 100644
16957--- a/arch/x86/kernel/apic/summit_32.c
16958+++ b/arch/x86/kernel/apic/summit_32.c
16959@@ -486,7 +486,7 @@ void setup_summit(void)
16960 }
16961 #endif
16962
16963-static struct apic apic_summit = {
16964+static struct apic apic_summit __read_only = {
16965
16966 .name = "summit",
16967 .probe = probe_summit,
16968diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
16969index c88baa4..757aee1 100644
16970--- a/arch/x86/kernel/apic/x2apic_cluster.c
16971+++ b/arch/x86/kernel/apic/x2apic_cluster.c
16972@@ -183,7 +183,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
16973 return notifier_from_errno(err);
16974 }
16975
16976-static struct notifier_block __refdata x2apic_cpu_notifier = {
16977+static struct notifier_block x2apic_cpu_notifier = {
16978 .notifier_call = update_clusterinfo,
16979 };
16980
16981@@ -235,7 +235,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
16982 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
16983 }
16984
16985-static struct apic apic_x2apic_cluster = {
16986+static struct apic apic_x2apic_cluster __read_only = {
16987
16988 .name = "cluster x2apic",
16989 .probe = x2apic_cluster_probe,
16990diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
16991index 562a76d..a003c0f 100644
16992--- a/arch/x86/kernel/apic/x2apic_phys.c
16993+++ b/arch/x86/kernel/apic/x2apic_phys.c
16994@@ -89,7 +89,7 @@ static int x2apic_phys_probe(void)
16995 return apic == &apic_x2apic_phys;
16996 }
16997
16998-static struct apic apic_x2apic_phys = {
16999+static struct apic apic_x2apic_phys __read_only = {
17000
17001 .name = "physical x2apic",
17002 .probe = x2apic_phys_probe,
17003diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
17004index 794f6eb..67e1db2 100644
17005--- a/arch/x86/kernel/apic/x2apic_uv_x.c
17006+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
17007@@ -342,7 +342,7 @@ static int uv_probe(void)
17008 return apic == &apic_x2apic_uv_x;
17009 }
17010
17011-static struct apic __refdata apic_x2apic_uv_x = {
17012+static struct apic apic_x2apic_uv_x __read_only = {
17013
17014 .name = "UV large system",
17015 .probe = uv_probe,
17016diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
17017index 53a4e27..038760a 100644
17018--- a/arch/x86/kernel/apm_32.c
17019+++ b/arch/x86/kernel/apm_32.c
17020@@ -433,7 +433,7 @@ static DEFINE_MUTEX(apm_mutex);
17021 * This is for buggy BIOS's that refer to (real mode) segment 0x40
17022 * even though they are called in protected mode.
17023 */
17024-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
17025+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
17026 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
17027
17028 static const char driver_version[] = "1.16ac"; /* no spaces */
17029@@ -611,7 +611,10 @@ static long __apm_bios_call(void *_call)
17030 BUG_ON(cpu != 0);
17031 gdt = get_cpu_gdt_table(cpu);
17032 save_desc_40 = gdt[0x40 / 8];
17033+
17034+ pax_open_kernel();
17035 gdt[0x40 / 8] = bad_bios_desc;
17036+ pax_close_kernel();
17037
17038 apm_irq_save(flags);
17039 APM_DO_SAVE_SEGS;
17040@@ -620,7 +623,11 @@ static long __apm_bios_call(void *_call)
17041 &call->esi);
17042 APM_DO_RESTORE_SEGS;
17043 apm_irq_restore(flags);
17044+
17045+ pax_open_kernel();
17046 gdt[0x40 / 8] = save_desc_40;
17047+ pax_close_kernel();
17048+
17049 put_cpu();
17050
17051 return call->eax & 0xff;
17052@@ -687,7 +694,10 @@ static long __apm_bios_call_simple(void *_call)
17053 BUG_ON(cpu != 0);
17054 gdt = get_cpu_gdt_table(cpu);
17055 save_desc_40 = gdt[0x40 / 8];
17056+
17057+ pax_open_kernel();
17058 gdt[0x40 / 8] = bad_bios_desc;
17059+ pax_close_kernel();
17060
17061 apm_irq_save(flags);
17062 APM_DO_SAVE_SEGS;
17063@@ -695,7 +705,11 @@ static long __apm_bios_call_simple(void *_call)
17064 &call->eax);
17065 APM_DO_RESTORE_SEGS;
17066 apm_irq_restore(flags);
17067+
17068+ pax_open_kernel();
17069 gdt[0x40 / 8] = save_desc_40;
17070+ pax_close_kernel();
17071+
17072 put_cpu();
17073 return error;
17074 }
17075@@ -2362,12 +2376,15 @@ static int __init apm_init(void)
17076 * code to that CPU.
17077 */
17078 gdt = get_cpu_gdt_table(0);
17079+
17080+ pax_open_kernel();
17081 set_desc_base(&gdt[APM_CS >> 3],
17082 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
17083 set_desc_base(&gdt[APM_CS_16 >> 3],
17084 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
17085 set_desc_base(&gdt[APM_DS >> 3],
17086 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
17087+ pax_close_kernel();
17088
17089 proc_create("apm", 0, NULL, &apm_file_ops);
17090
17091diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
17092index 2861082..6d4718e 100644
17093--- a/arch/x86/kernel/asm-offsets.c
17094+++ b/arch/x86/kernel/asm-offsets.c
17095@@ -33,6 +33,8 @@ void common(void) {
17096 OFFSET(TI_status, thread_info, status);
17097 OFFSET(TI_addr_limit, thread_info, addr_limit);
17098 OFFSET(TI_preempt_count, thread_info, preempt_count);
17099+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
17100+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
17101
17102 BLANK();
17103 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
17104@@ -53,8 +55,26 @@ void common(void) {
17105 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
17106 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
17107 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
17108+
17109+#ifdef CONFIG_PAX_KERNEXEC
17110+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
17111 #endif
17112
17113+#ifdef CONFIG_PAX_MEMORY_UDEREF
17114+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
17115+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
17116+#ifdef CONFIG_X86_64
17117+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
17118+#endif
17119+#endif
17120+
17121+#endif
17122+
17123+ BLANK();
17124+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
17125+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
17126+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
17127+
17128 #ifdef CONFIG_XEN
17129 BLANK();
17130 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
17131diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
17132index e7c798b..2b2019b 100644
17133--- a/arch/x86/kernel/asm-offsets_64.c
17134+++ b/arch/x86/kernel/asm-offsets_64.c
17135@@ -77,6 +77,7 @@ int main(void)
17136 BLANK();
17137 #undef ENTRY
17138
17139+ DEFINE(TSS_size, sizeof(struct tss_struct));
17140 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
17141 BLANK();
17142
17143diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
17144index b0684e4..22ccfd7 100644
17145--- a/arch/x86/kernel/cpu/Makefile
17146+++ b/arch/x86/kernel/cpu/Makefile
17147@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
17148 CFLAGS_REMOVE_perf_event.o = -pg
17149 endif
17150
17151-# Make sure load_percpu_segment has no stackprotector
17152-nostackp := $(call cc-option, -fno-stack-protector)
17153-CFLAGS_common.o := $(nostackp)
17154-
17155 obj-y := intel_cacheinfo.o scattered.o topology.o
17156 obj-y += proc.o capflags.o powerflags.o common.o
17157 obj-y += rdrand.o
17158diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
17159index 5013a48..0782c53 100644
17160--- a/arch/x86/kernel/cpu/amd.c
17161+++ b/arch/x86/kernel/cpu/amd.c
17162@@ -744,7 +744,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
17163 unsigned int size)
17164 {
17165 /* AMD errata T13 (order #21922) */
17166- if ((c->x86 == 6)) {
17167+ if (c->x86 == 6) {
17168 /* Duron Rev A0 */
17169 if (c->x86_model == 3 && c->x86_mask == 0)
17170 size = 64;
17171diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
17172index 22018f7..bc6f5e3 100644
17173--- a/arch/x86/kernel/cpu/common.c
17174+++ b/arch/x86/kernel/cpu/common.c
17175@@ -88,60 +88,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
17176
17177 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
17178
17179-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
17180-#ifdef CONFIG_X86_64
17181- /*
17182- * We need valid kernel segments for data and code in long mode too
17183- * IRET will check the segment types kkeil 2000/10/28
17184- * Also sysret mandates a special GDT layout
17185- *
17186- * TLS descriptors are currently at a different place compared to i386.
17187- * Hopefully nobody expects them at a fixed place (Wine?)
17188- */
17189- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
17190- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
17191- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
17192- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
17193- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
17194- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
17195-#else
17196- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
17197- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
17198- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
17199- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
17200- /*
17201- * Segments used for calling PnP BIOS have byte granularity.
17202- * They code segments and data segments have fixed 64k limits,
17203- * the transfer segment sizes are set at run time.
17204- */
17205- /* 32-bit code */
17206- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
17207- /* 16-bit code */
17208- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
17209- /* 16-bit data */
17210- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
17211- /* 16-bit data */
17212- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
17213- /* 16-bit data */
17214- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
17215- /*
17216- * The APM segments have byte granularity and their bases
17217- * are set at run time. All have 64k limits.
17218- */
17219- /* 32-bit code */
17220- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
17221- /* 16-bit code */
17222- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
17223- /* data */
17224- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
17225-
17226- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
17227- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
17228- GDT_STACK_CANARY_INIT
17229-#endif
17230-} };
17231-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
17232-
17233 static int __init x86_xsave_setup(char *s)
17234 {
17235 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
17236@@ -386,7 +332,7 @@ void switch_to_new_gdt(int cpu)
17237 {
17238 struct desc_ptr gdt_descr;
17239
17240- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
17241+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
17242 gdt_descr.size = GDT_SIZE - 1;
17243 load_gdt(&gdt_descr);
17244 /* Reload the per-cpu base */
17245@@ -882,6 +828,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
17246 /* Filter out anything that depends on CPUID levels we don't have */
17247 filter_cpuid_features(c, true);
17248
17249+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
17250+ setup_clear_cpu_cap(X86_FEATURE_SEP);
17251+#endif
17252+
17253 /* If the model name is still unset, do table lookup. */
17254 if (!c->x86_model_id[0]) {
17255 const char *p;
17256@@ -1069,10 +1019,12 @@ static __init int setup_disablecpuid(char *arg)
17257 }
17258 __setup("clearcpuid=", setup_disablecpuid);
17259
17260+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
17261+EXPORT_PER_CPU_SYMBOL(current_tinfo);
17262+
17263 #ifdef CONFIG_X86_64
17264 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
17265-struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
17266- (unsigned long) nmi_idt_table };
17267+struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
17268
17269 DEFINE_PER_CPU_FIRST(union irq_stack_union,
17270 irq_stack_union) __aligned(PAGE_SIZE);
17271@@ -1086,7 +1038,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
17272 EXPORT_PER_CPU_SYMBOL(current_task);
17273
17274 DEFINE_PER_CPU(unsigned long, kernel_stack) =
17275- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
17276+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
17277 EXPORT_PER_CPU_SYMBOL(kernel_stack);
17278
17279 DEFINE_PER_CPU(char *, irq_stack_ptr) =
17280@@ -1231,7 +1183,7 @@ void __cpuinit cpu_init(void)
17281 load_ucode_ap();
17282
17283 cpu = stack_smp_processor_id();
17284- t = &per_cpu(init_tss, cpu);
17285+ t = init_tss + cpu;
17286 oist = &per_cpu(orig_ist, cpu);
17287
17288 #ifdef CONFIG_NUMA
17289@@ -1257,7 +1209,7 @@ void __cpuinit cpu_init(void)
17290 switch_to_new_gdt(cpu);
17291 loadsegment(fs, 0);
17292
17293- load_idt((const struct desc_ptr *)&idt_descr);
17294+ load_idt(&idt_descr);
17295
17296 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
17297 syscall_init();
17298@@ -1266,7 +1218,6 @@ void __cpuinit cpu_init(void)
17299 wrmsrl(MSR_KERNEL_GS_BASE, 0);
17300 barrier();
17301
17302- x86_configure_nx();
17303 enable_x2apic();
17304
17305 /*
17306@@ -1318,7 +1269,7 @@ void __cpuinit cpu_init(void)
17307 {
17308 int cpu = smp_processor_id();
17309 struct task_struct *curr = current;
17310- struct tss_struct *t = &per_cpu(init_tss, cpu);
17311+ struct tss_struct *t = init_tss + cpu;
17312 struct thread_struct *thread = &curr->thread;
17313
17314 show_ucode_info_early();
17315diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
17316index 7c6f7d5..8cac382 100644
17317--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
17318+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
17319@@ -1017,6 +1017,22 @@ static struct attribute *default_attrs[] = {
17320 };
17321
17322 #ifdef CONFIG_AMD_NB
17323+static struct attribute *default_attrs_amd_nb[] = {
17324+ &type.attr,
17325+ &level.attr,
17326+ &coherency_line_size.attr,
17327+ &physical_line_partition.attr,
17328+ &ways_of_associativity.attr,
17329+ &number_of_sets.attr,
17330+ &size.attr,
17331+ &shared_cpu_map.attr,
17332+ &shared_cpu_list.attr,
17333+ NULL,
17334+ NULL,
17335+ NULL,
17336+ NULL
17337+};
17338+
17339 static struct attribute ** __cpuinit amd_l3_attrs(void)
17340 {
17341 static struct attribute **attrs;
17342@@ -1027,18 +1043,7 @@ static struct attribute ** __cpuinit amd_l3_attrs(void)
17343
17344 n = ARRAY_SIZE(default_attrs);
17345
17346- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
17347- n += 2;
17348-
17349- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
17350- n += 1;
17351-
17352- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
17353- if (attrs == NULL)
17354- return attrs = default_attrs;
17355-
17356- for (n = 0; default_attrs[n]; n++)
17357- attrs[n] = default_attrs[n];
17358+ attrs = default_attrs_amd_nb;
17359
17360 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
17361 attrs[n++] = &cache_disable_0.attr;
17362@@ -1089,6 +1094,13 @@ static struct kobj_type ktype_cache = {
17363 .default_attrs = default_attrs,
17364 };
17365
17366+#ifdef CONFIG_AMD_NB
17367+static struct kobj_type ktype_cache_amd_nb = {
17368+ .sysfs_ops = &sysfs_ops,
17369+ .default_attrs = default_attrs_amd_nb,
17370+};
17371+#endif
17372+
17373 static struct kobj_type ktype_percpu_entry = {
17374 .sysfs_ops = &sysfs_ops,
17375 };
17376@@ -1154,20 +1166,26 @@ static int __cpuinit cache_add_dev(struct device *dev)
17377 return retval;
17378 }
17379
17380+#ifdef CONFIG_AMD_NB
17381+ amd_l3_attrs();
17382+#endif
17383+
17384 for (i = 0; i < num_cache_leaves; i++) {
17385+ struct kobj_type *ktype;
17386+
17387 this_object = INDEX_KOBJECT_PTR(cpu, i);
17388 this_object->cpu = cpu;
17389 this_object->index = i;
17390
17391 this_leaf = CPUID4_INFO_IDX(cpu, i);
17392
17393- ktype_cache.default_attrs = default_attrs;
17394+ ktype = &ktype_cache;
17395 #ifdef CONFIG_AMD_NB
17396 if (this_leaf->base.nb)
17397- ktype_cache.default_attrs = amd_l3_attrs();
17398+ ktype = &ktype_cache_amd_nb;
17399 #endif
17400 retval = kobject_init_and_add(&(this_object->kobj),
17401- &ktype_cache,
17402+ ktype,
17403 per_cpu(ici_cache_kobject, cpu),
17404 "index%1lu", i);
17405 if (unlikely(retval)) {
17406@@ -1222,7 +1240,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
17407 return NOTIFY_OK;
17408 }
17409
17410-static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
17411+static struct notifier_block cacheinfo_cpu_notifier = {
17412 .notifier_call = cacheinfo_cpu_callback,
17413 };
17414
17415diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
17416index 9239504..b2471ce 100644
17417--- a/arch/x86/kernel/cpu/mcheck/mce.c
17418+++ b/arch/x86/kernel/cpu/mcheck/mce.c
17419@@ -45,6 +45,7 @@
17420 #include <asm/processor.h>
17421 #include <asm/mce.h>
17422 #include <asm/msr.h>
17423+#include <asm/local.h>
17424
17425 #include "mce-internal.h"
17426
17427@@ -246,7 +247,7 @@ static void print_mce(struct mce *m)
17428 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
17429 m->cs, m->ip);
17430
17431- if (m->cs == __KERNEL_CS)
17432+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
17433 print_symbol("{%s}", m->ip);
17434 pr_cont("\n");
17435 }
17436@@ -279,10 +280,10 @@ static void print_mce(struct mce *m)
17437
17438 #define PANIC_TIMEOUT 5 /* 5 seconds */
17439
17440-static atomic_t mce_paniced;
17441+static atomic_unchecked_t mce_paniced;
17442
17443 static int fake_panic;
17444-static atomic_t mce_fake_paniced;
17445+static atomic_unchecked_t mce_fake_paniced;
17446
17447 /* Panic in progress. Enable interrupts and wait for final IPI */
17448 static void wait_for_panic(void)
17449@@ -306,7 +307,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
17450 /*
17451 * Make sure only one CPU runs in machine check panic
17452 */
17453- if (atomic_inc_return(&mce_paniced) > 1)
17454+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
17455 wait_for_panic();
17456 barrier();
17457
17458@@ -314,7 +315,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
17459 console_verbose();
17460 } else {
17461 /* Don't log too much for fake panic */
17462- if (atomic_inc_return(&mce_fake_paniced) > 1)
17463+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
17464 return;
17465 }
17466 /* First print corrected ones that are still unlogged */
17467@@ -353,7 +354,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
17468 if (!fake_panic) {
17469 if (panic_timeout == 0)
17470 panic_timeout = mca_cfg.panic_timeout;
17471- panic(msg);
17472+ panic("%s", msg);
17473 } else
17474 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
17475 }
17476@@ -683,7 +684,7 @@ static int mce_timed_out(u64 *t)
17477 * might have been modified by someone else.
17478 */
17479 rmb();
17480- if (atomic_read(&mce_paniced))
17481+ if (atomic_read_unchecked(&mce_paniced))
17482 wait_for_panic();
17483 if (!mca_cfg.monarch_timeout)
17484 goto out;
17485@@ -1654,7 +1655,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
17486 }
17487
17488 /* Call the installed machine check handler for this CPU setup. */
17489-void (*machine_check_vector)(struct pt_regs *, long error_code) =
17490+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
17491 unexpected_machine_check;
17492
17493 /*
17494@@ -1677,7 +1678,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
17495 return;
17496 }
17497
17498+ pax_open_kernel();
17499 machine_check_vector = do_machine_check;
17500+ pax_close_kernel();
17501
17502 __mcheck_cpu_init_generic();
17503 __mcheck_cpu_init_vendor(c);
17504@@ -1691,7 +1694,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
17505 */
17506
17507 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
17508-static int mce_chrdev_open_count; /* #times opened */
17509+static local_t mce_chrdev_open_count; /* #times opened */
17510 static int mce_chrdev_open_exclu; /* already open exclusive? */
17511
17512 static int mce_chrdev_open(struct inode *inode, struct file *file)
17513@@ -1699,7 +1702,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
17514 spin_lock(&mce_chrdev_state_lock);
17515
17516 if (mce_chrdev_open_exclu ||
17517- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
17518+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
17519 spin_unlock(&mce_chrdev_state_lock);
17520
17521 return -EBUSY;
17522@@ -1707,7 +1710,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
17523
17524 if (file->f_flags & O_EXCL)
17525 mce_chrdev_open_exclu = 1;
17526- mce_chrdev_open_count++;
17527+ local_inc(&mce_chrdev_open_count);
17528
17529 spin_unlock(&mce_chrdev_state_lock);
17530
17531@@ -1718,7 +1721,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
17532 {
17533 spin_lock(&mce_chrdev_state_lock);
17534
17535- mce_chrdev_open_count--;
17536+ local_dec(&mce_chrdev_open_count);
17537 mce_chrdev_open_exclu = 0;
17538
17539 spin_unlock(&mce_chrdev_state_lock);
17540@@ -2364,7 +2367,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
17541 return NOTIFY_OK;
17542 }
17543
17544-static struct notifier_block mce_cpu_notifier __cpuinitdata = {
17545+static struct notifier_block mce_cpu_notifier = {
17546 .notifier_call = mce_cpu_callback,
17547 };
17548
17549@@ -2374,7 +2377,7 @@ static __init void mce_init_banks(void)
17550
17551 for (i = 0; i < mca_cfg.banks; i++) {
17552 struct mce_bank *b = &mce_banks[i];
17553- struct device_attribute *a = &b->attr;
17554+ device_attribute_no_const *a = &b->attr;
17555
17556 sysfs_attr_init(&a->attr);
17557 a->attr.name = b->attrname;
17558@@ -2442,7 +2445,7 @@ struct dentry *mce_get_debugfs_dir(void)
17559 static void mce_reset(void)
17560 {
17561 cpu_missing = 0;
17562- atomic_set(&mce_fake_paniced, 0);
17563+ atomic_set_unchecked(&mce_fake_paniced, 0);
17564 atomic_set(&mce_executing, 0);
17565 atomic_set(&mce_callin, 0);
17566 atomic_set(&global_nwo, 0);
17567diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
17568index 1c044b1..37a2a43 100644
17569--- a/arch/x86/kernel/cpu/mcheck/p5.c
17570+++ b/arch/x86/kernel/cpu/mcheck/p5.c
17571@@ -11,6 +11,7 @@
17572 #include <asm/processor.h>
17573 #include <asm/mce.h>
17574 #include <asm/msr.h>
17575+#include <asm/pgtable.h>
17576
17577 /* By default disabled */
17578 int mce_p5_enabled __read_mostly;
17579@@ -49,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
17580 if (!cpu_has(c, X86_FEATURE_MCE))
17581 return;
17582
17583+ pax_open_kernel();
17584 machine_check_vector = pentium_machine_check;
17585+ pax_close_kernel();
17586 /* Make sure the vector pointer is visible before we enable MCEs: */
17587 wmb();
17588
17589diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
17590index 47a1870..8c019a7 100644
17591--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
17592+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
17593@@ -288,7 +288,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
17594 return notifier_from_errno(err);
17595 }
17596
17597-static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
17598+static struct notifier_block thermal_throttle_cpu_notifier =
17599 {
17600 .notifier_call = thermal_throttle_cpu_callback,
17601 };
17602diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
17603index e9a701a..35317d6 100644
17604--- a/arch/x86/kernel/cpu/mcheck/winchip.c
17605+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
17606@@ -10,6 +10,7 @@
17607 #include <asm/processor.h>
17608 #include <asm/mce.h>
17609 #include <asm/msr.h>
17610+#include <asm/pgtable.h>
17611
17612 /* Machine check handler for WinChip C6: */
17613 static void winchip_machine_check(struct pt_regs *regs, long error_code)
17614@@ -23,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
17615 {
17616 u32 lo, hi;
17617
17618+ pax_open_kernel();
17619 machine_check_vector = winchip_machine_check;
17620+ pax_close_kernel();
17621 /* Make sure the vector pointer is visible before we enable MCEs: */
17622 wmb();
17623
17624diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
17625index 726bf96..81f0526 100644
17626--- a/arch/x86/kernel/cpu/mtrr/main.c
17627+++ b/arch/x86/kernel/cpu/mtrr/main.c
17628@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
17629 u64 size_or_mask, size_and_mask;
17630 static bool mtrr_aps_delayed_init;
17631
17632-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
17633+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
17634
17635 const struct mtrr_ops *mtrr_if;
17636
17637diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
17638index df5e41f..816c719 100644
17639--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
17640+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
17641@@ -25,7 +25,7 @@ struct mtrr_ops {
17642 int (*validate_add_page)(unsigned long base, unsigned long size,
17643 unsigned int type);
17644 int (*have_wrcomb)(void);
17645-};
17646+} __do_const;
17647
17648 extern int generic_get_free_region(unsigned long base, unsigned long size,
17649 int replace_reg);
17650diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
17651index 1025f3c..824f677 100644
17652--- a/arch/x86/kernel/cpu/perf_event.c
17653+++ b/arch/x86/kernel/cpu/perf_event.c
17654@@ -1311,7 +1311,7 @@ static void __init pmu_check_apic(void)
17655 pr_info("no hardware sampling interrupt available.\n");
17656 }
17657
17658-static struct attribute_group x86_pmu_format_group = {
17659+static attribute_group_no_const x86_pmu_format_group = {
17660 .name = "format",
17661 .attrs = NULL,
17662 };
17663@@ -1410,7 +1410,7 @@ static struct attribute *events_attr[] = {
17664 NULL,
17665 };
17666
17667-static struct attribute_group x86_pmu_events_group = {
17668+static attribute_group_no_const x86_pmu_events_group = {
17669 .name = "events",
17670 .attrs = events_attr,
17671 };
17672@@ -1920,7 +1920,7 @@ static unsigned long get_segment_base(unsigned int segment)
17673 if (idx > GDT_ENTRIES)
17674 return 0;
17675
17676- desc = __this_cpu_ptr(&gdt_page.gdt[0]);
17677+ desc = get_cpu_gdt_table(smp_processor_id());
17678 }
17679
17680 return get_desc_base(desc + idx);
17681@@ -2010,7 +2010,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
17682 break;
17683
17684 perf_callchain_store(entry, frame.return_address);
17685- fp = frame.next_frame;
17686+ fp = (const void __force_user *)frame.next_frame;
17687 }
17688 }
17689
17690diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
17691index a9e2207..d70c83a 100644
17692--- a/arch/x86/kernel/cpu/perf_event_intel.c
17693+++ b/arch/x86/kernel/cpu/perf_event_intel.c
17694@@ -2022,10 +2022,10 @@ __init int intel_pmu_init(void)
17695 * v2 and above have a perf capabilities MSR
17696 */
17697 if (version > 1) {
17698- u64 capabilities;
17699+ u64 capabilities = x86_pmu.intel_cap.capabilities;
17700
17701- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
17702- x86_pmu.intel_cap.capabilities = capabilities;
17703+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
17704+ x86_pmu.intel_cap.capabilities = capabilities;
17705 }
17706
17707 intel_ds_init();
17708diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
17709index 52441a2..f94fae8 100644
17710--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
17711+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
17712@@ -3093,7 +3093,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
17713 static int __init uncore_type_init(struct intel_uncore_type *type)
17714 {
17715 struct intel_uncore_pmu *pmus;
17716- struct attribute_group *attr_group;
17717+ attribute_group_no_const *attr_group;
17718 struct attribute **attrs;
17719 int i, j;
17720
17721@@ -3518,7 +3518,7 @@ static int
17722 return NOTIFY_OK;
17723 }
17724
17725-static struct notifier_block uncore_cpu_nb __cpuinitdata = {
17726+static struct notifier_block uncore_cpu_nb = {
17727 .notifier_call = uncore_cpu_notifier,
17728 /*
17729 * to migrate uncore events, our notifier should be executed
17730diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
17731index f952891..4722ad4 100644
17732--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
17733+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
17734@@ -488,7 +488,7 @@ struct intel_uncore_box {
17735 struct uncore_event_desc {
17736 struct kobj_attribute attr;
17737 const char *config;
17738-};
17739+} __do_const;
17740
17741 #define INTEL_UNCORE_EVENT_DESC(_name, _config) \
17742 { \
17743diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
17744index 1e4dbcf..b9a34c2 100644
17745--- a/arch/x86/kernel/cpuid.c
17746+++ b/arch/x86/kernel/cpuid.c
17747@@ -171,7 +171,7 @@ static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
17748 return notifier_from_errno(err);
17749 }
17750
17751-static struct notifier_block __refdata cpuid_class_cpu_notifier =
17752+static struct notifier_block cpuid_class_cpu_notifier =
17753 {
17754 .notifier_call = cpuid_class_cpu_callback,
17755 };
17756diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
17757index 74467fe..18793d5 100644
17758--- a/arch/x86/kernel/crash.c
17759+++ b/arch/x86/kernel/crash.c
17760@@ -58,10 +58,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
17761 {
17762 #ifdef CONFIG_X86_32
17763 struct pt_regs fixed_regs;
17764-#endif
17765
17766-#ifdef CONFIG_X86_32
17767- if (!user_mode_vm(regs)) {
17768+ if (!user_mode(regs)) {
17769 crash_fixup_ss_esp(&fixed_regs, regs);
17770 regs = &fixed_regs;
17771 }
17772diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
17773index afa64ad..dce67dd 100644
17774--- a/arch/x86/kernel/crash_dump_64.c
17775+++ b/arch/x86/kernel/crash_dump_64.c
17776@@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
17777 return -ENOMEM;
17778
17779 if (userbuf) {
17780- if (copy_to_user(buf, vaddr + offset, csize)) {
17781+ if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
17782 iounmap(vaddr);
17783 return -EFAULT;
17784 }
17785diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
17786index 155a13f..1672b9b 100644
17787--- a/arch/x86/kernel/doublefault_32.c
17788+++ b/arch/x86/kernel/doublefault_32.c
17789@@ -11,7 +11,7 @@
17790
17791 #define DOUBLEFAULT_STACKSIZE (1024)
17792 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
17793-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
17794+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
17795
17796 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
17797
17798@@ -21,7 +21,7 @@ static void doublefault_fn(void)
17799 unsigned long gdt, tss;
17800
17801 native_store_gdt(&gdt_desc);
17802- gdt = gdt_desc.address;
17803+ gdt = (unsigned long)gdt_desc.address;
17804
17805 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
17806
17807@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
17808 /* 0x2 bit is always set */
17809 .flags = X86_EFLAGS_SF | 0x2,
17810 .sp = STACK_START,
17811- .es = __USER_DS,
17812+ .es = __KERNEL_DS,
17813 .cs = __KERNEL_CS,
17814 .ss = __KERNEL_DS,
17815- .ds = __USER_DS,
17816+ .ds = __KERNEL_DS,
17817 .fs = __KERNEL_PERCPU,
17818
17819 .__cr3 = __pa_nodebug(swapper_pg_dir),
17820diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
17821index deb6421..622e0ed 100644
17822--- a/arch/x86/kernel/dumpstack.c
17823+++ b/arch/x86/kernel/dumpstack.c
17824@@ -2,6 +2,9 @@
17825 * Copyright (C) 1991, 1992 Linus Torvalds
17826 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
17827 */
17828+#ifdef CONFIG_GRKERNSEC_HIDESYM
17829+#define __INCLUDED_BY_HIDESYM 1
17830+#endif
17831 #include <linux/kallsyms.h>
17832 #include <linux/kprobes.h>
17833 #include <linux/uaccess.h>
17834@@ -35,16 +38,14 @@ void printk_address(unsigned long address, int reliable)
17835 static void
17836 print_ftrace_graph_addr(unsigned long addr, void *data,
17837 const struct stacktrace_ops *ops,
17838- struct thread_info *tinfo, int *graph)
17839+ struct task_struct *task, int *graph)
17840 {
17841- struct task_struct *task;
17842 unsigned long ret_addr;
17843 int index;
17844
17845 if (addr != (unsigned long)return_to_handler)
17846 return;
17847
17848- task = tinfo->task;
17849 index = task->curr_ret_stack;
17850
17851 if (!task->ret_stack || index < *graph)
17852@@ -61,7 +62,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
17853 static inline void
17854 print_ftrace_graph_addr(unsigned long addr, void *data,
17855 const struct stacktrace_ops *ops,
17856- struct thread_info *tinfo, int *graph)
17857+ struct task_struct *task, int *graph)
17858 { }
17859 #endif
17860
17861@@ -72,10 +73,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
17862 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
17863 */
17864
17865-static inline int valid_stack_ptr(struct thread_info *tinfo,
17866- void *p, unsigned int size, void *end)
17867+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
17868 {
17869- void *t = tinfo;
17870 if (end) {
17871 if (p < end && p >= (end-THREAD_SIZE))
17872 return 1;
17873@@ -86,14 +85,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
17874 }
17875
17876 unsigned long
17877-print_context_stack(struct thread_info *tinfo,
17878+print_context_stack(struct task_struct *task, void *stack_start,
17879 unsigned long *stack, unsigned long bp,
17880 const struct stacktrace_ops *ops, void *data,
17881 unsigned long *end, int *graph)
17882 {
17883 struct stack_frame *frame = (struct stack_frame *)bp;
17884
17885- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
17886+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
17887 unsigned long addr;
17888
17889 addr = *stack;
17890@@ -105,7 +104,7 @@ print_context_stack(struct thread_info *tinfo,
17891 } else {
17892 ops->address(data, addr, 0);
17893 }
17894- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
17895+ print_ftrace_graph_addr(addr, data, ops, task, graph);
17896 }
17897 stack++;
17898 }
17899@@ -114,7 +113,7 @@ print_context_stack(struct thread_info *tinfo,
17900 EXPORT_SYMBOL_GPL(print_context_stack);
17901
17902 unsigned long
17903-print_context_stack_bp(struct thread_info *tinfo,
17904+print_context_stack_bp(struct task_struct *task, void *stack_start,
17905 unsigned long *stack, unsigned long bp,
17906 const struct stacktrace_ops *ops, void *data,
17907 unsigned long *end, int *graph)
17908@@ -122,7 +121,7 @@ print_context_stack_bp(struct thread_info *tinfo,
17909 struct stack_frame *frame = (struct stack_frame *)bp;
17910 unsigned long *ret_addr = &frame->return_address;
17911
17912- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
17913+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
17914 unsigned long addr = *ret_addr;
17915
17916 if (!__kernel_text_address(addr))
17917@@ -131,7 +130,7 @@ print_context_stack_bp(struct thread_info *tinfo,
17918 ops->address(data, addr, 1);
17919 frame = frame->next_frame;
17920 ret_addr = &frame->return_address;
17921- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
17922+ print_ftrace_graph_addr(addr, data, ops, task, graph);
17923 }
17924
17925 return (unsigned long)frame;
17926@@ -219,6 +218,8 @@ unsigned __kprobes long oops_begin(void)
17927 }
17928 EXPORT_SYMBOL_GPL(oops_begin);
17929
17930+extern void gr_handle_kernel_exploit(void);
17931+
17932 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
17933 {
17934 if (regs && kexec_should_crash(current))
17935@@ -240,7 +241,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
17936 panic("Fatal exception in interrupt");
17937 if (panic_on_oops)
17938 panic("Fatal exception");
17939- do_exit(signr);
17940+
17941+ gr_handle_kernel_exploit();
17942+
17943+ do_group_exit(signr);
17944 }
17945
17946 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
17947@@ -268,7 +272,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
17948 print_modules();
17949 show_regs(regs);
17950 #ifdef CONFIG_X86_32
17951- if (user_mode_vm(regs)) {
17952+ if (user_mode(regs)) {
17953 sp = regs->sp;
17954 ss = regs->ss & 0xffff;
17955 } else {
17956@@ -296,7 +300,7 @@ void die(const char *str, struct pt_regs *regs, long err)
17957 unsigned long flags = oops_begin();
17958 int sig = SIGSEGV;
17959
17960- if (!user_mode_vm(regs))
17961+ if (!user_mode(regs))
17962 report_bug(regs->ip, regs);
17963
17964 if (__die(str, regs, err))
17965diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
17966index f2a1770..540657f 100644
17967--- a/arch/x86/kernel/dumpstack_32.c
17968+++ b/arch/x86/kernel/dumpstack_32.c
17969@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
17970 bp = stack_frame(task, regs);
17971
17972 for (;;) {
17973- struct thread_info *context;
17974+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
17975
17976- context = (struct thread_info *)
17977- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
17978- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
17979+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
17980
17981- stack = (unsigned long *)context->previous_esp;
17982- if (!stack)
17983+ if (stack_start == task_stack_page(task))
17984 break;
17985+ stack = *(unsigned long **)stack_start;
17986 if (ops->stack(data, "IRQ") < 0)
17987 break;
17988 touch_nmi_watchdog();
17989@@ -87,27 +85,28 @@ void show_regs(struct pt_regs *regs)
17990 int i;
17991
17992 show_regs_print_info(KERN_EMERG);
17993- __show_regs(regs, !user_mode_vm(regs));
17994+ __show_regs(regs, !user_mode(regs));
17995
17996 /*
17997 * When in-kernel, we also print out the stack and code at the
17998 * time of the fault..
17999 */
18000- if (!user_mode_vm(regs)) {
18001+ if (!user_mode(regs)) {
18002 unsigned int code_prologue = code_bytes * 43 / 64;
18003 unsigned int code_len = code_bytes;
18004 unsigned char c;
18005 u8 *ip;
18006+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
18007
18008 pr_emerg("Stack:\n");
18009 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
18010
18011 pr_emerg("Code:");
18012
18013- ip = (u8 *)regs->ip - code_prologue;
18014+ ip = (u8 *)regs->ip - code_prologue + cs_base;
18015 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
18016 /* try starting at IP */
18017- ip = (u8 *)regs->ip;
18018+ ip = (u8 *)regs->ip + cs_base;
18019 code_len = code_len - code_prologue + 1;
18020 }
18021 for (i = 0; i < code_len; i++, ip++) {
18022@@ -116,7 +115,7 @@ void show_regs(struct pt_regs *regs)
18023 pr_cont(" Bad EIP value.");
18024 break;
18025 }
18026- if (ip == (u8 *)regs->ip)
18027+ if (ip == (u8 *)regs->ip + cs_base)
18028 pr_cont(" <%02x>", c);
18029 else
18030 pr_cont(" %02x", c);
18031@@ -129,6 +128,7 @@ int is_valid_bugaddr(unsigned long ip)
18032 {
18033 unsigned short ud2;
18034
18035+ ip = ktla_ktva(ip);
18036 if (ip < PAGE_OFFSET)
18037 return 0;
18038 if (probe_kernel_address((unsigned short *)ip, ud2))
18039@@ -136,3 +136,15 @@ int is_valid_bugaddr(unsigned long ip)
18040
18041 return ud2 == 0x0b0f;
18042 }
18043+
18044+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
18045+void pax_check_alloca(unsigned long size)
18046+{
18047+ unsigned long sp = (unsigned long)&sp, stack_left;
18048+
18049+ /* all kernel stacks are of the same size */
18050+ stack_left = sp & (THREAD_SIZE - 1);
18051+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
18052+}
18053+EXPORT_SYMBOL(pax_check_alloca);
18054+#endif
18055diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
18056index addb207..99635fa 100644
18057--- a/arch/x86/kernel/dumpstack_64.c
18058+++ b/arch/x86/kernel/dumpstack_64.c
18059@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18060 unsigned long *irq_stack_end =
18061 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
18062 unsigned used = 0;
18063- struct thread_info *tinfo;
18064 int graph = 0;
18065 unsigned long dummy;
18066+ void *stack_start;
18067
18068 if (!task)
18069 task = current;
18070@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18071 * current stack address. If the stacks consist of nested
18072 * exceptions
18073 */
18074- tinfo = task_thread_info(task);
18075 for (;;) {
18076 char *id;
18077 unsigned long *estack_end;
18078+
18079 estack_end = in_exception_stack(cpu, (unsigned long)stack,
18080 &used, &id);
18081
18082@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18083 if (ops->stack(data, id) < 0)
18084 break;
18085
18086- bp = ops->walk_stack(tinfo, stack, bp, ops,
18087+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
18088 data, estack_end, &graph);
18089 ops->stack(data, "<EOE>");
18090 /*
18091@@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18092 * second-to-last pointer (index -2 to end) in the
18093 * exception stack:
18094 */
18095+ if ((u16)estack_end[-1] != __KERNEL_DS)
18096+ goto out;
18097 stack = (unsigned long *) estack_end[-2];
18098 continue;
18099 }
18100@@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18101 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
18102 if (ops->stack(data, "IRQ") < 0)
18103 break;
18104- bp = ops->walk_stack(tinfo, stack, bp,
18105+ bp = ops->walk_stack(task, irq_stack, stack, bp,
18106 ops, data, irq_stack_end, &graph);
18107 /*
18108 * We link to the next stack (which would be
18109@@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18110 /*
18111 * This handles the process stack:
18112 */
18113- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
18114+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
18115+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
18116+out:
18117 put_cpu();
18118 }
18119 EXPORT_SYMBOL(dump_trace);
18120@@ -300,3 +304,50 @@ int is_valid_bugaddr(unsigned long ip)
18121
18122 return ud2 == 0x0b0f;
18123 }
18124+
18125+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
18126+void pax_check_alloca(unsigned long size)
18127+{
18128+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
18129+ unsigned cpu, used;
18130+ char *id;
18131+
18132+ /* check the process stack first */
18133+ stack_start = (unsigned long)task_stack_page(current);
18134+ stack_end = stack_start + THREAD_SIZE;
18135+ if (likely(stack_start <= sp && sp < stack_end)) {
18136+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
18137+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
18138+ return;
18139+ }
18140+
18141+ cpu = get_cpu();
18142+
18143+ /* check the irq stacks */
18144+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
18145+ stack_start = stack_end - IRQ_STACK_SIZE;
18146+ if (stack_start <= sp && sp < stack_end) {
18147+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
18148+ put_cpu();
18149+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
18150+ return;
18151+ }
18152+
18153+ /* check the exception stacks */
18154+ used = 0;
18155+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
18156+ stack_start = stack_end - EXCEPTION_STKSZ;
18157+ if (stack_end && stack_start <= sp && sp < stack_end) {
18158+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
18159+ put_cpu();
18160+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
18161+ return;
18162+ }
18163+
18164+ put_cpu();
18165+
18166+ /* unknown stack */
18167+ BUG();
18168+}
18169+EXPORT_SYMBOL(pax_check_alloca);
18170+#endif
18171diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
18172index d32abea..74daf4f 100644
18173--- a/arch/x86/kernel/e820.c
18174+++ b/arch/x86/kernel/e820.c
18175@@ -800,8 +800,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
18176
18177 static void early_panic(char *msg)
18178 {
18179- early_printk(msg);
18180- panic(msg);
18181+ early_printk("%s", msg);
18182+ panic("%s", msg);
18183 }
18184
18185 static int userdef __initdata;
18186diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
18187index d15f575..d692043 100644
18188--- a/arch/x86/kernel/early_printk.c
18189+++ b/arch/x86/kernel/early_printk.c
18190@@ -7,6 +7,7 @@
18191 #include <linux/pci_regs.h>
18192 #include <linux/pci_ids.h>
18193 #include <linux/errno.h>
18194+#include <linux/sched.h>
18195 #include <asm/io.h>
18196 #include <asm/processor.h>
18197 #include <asm/fcntl.h>
18198diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
18199index 8f3e2de..caecc4e 100644
18200--- a/arch/x86/kernel/entry_32.S
18201+++ b/arch/x86/kernel/entry_32.S
18202@@ -177,13 +177,153 @@
18203 /*CFI_REL_OFFSET gs, PT_GS*/
18204 .endm
18205 .macro SET_KERNEL_GS reg
18206+
18207+#ifdef CONFIG_CC_STACKPROTECTOR
18208 movl $(__KERNEL_STACK_CANARY), \reg
18209+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
18210+ movl $(__USER_DS), \reg
18211+#else
18212+ xorl \reg, \reg
18213+#endif
18214+
18215 movl \reg, %gs
18216 .endm
18217
18218 #endif /* CONFIG_X86_32_LAZY_GS */
18219
18220-.macro SAVE_ALL
18221+.macro pax_enter_kernel
18222+#ifdef CONFIG_PAX_KERNEXEC
18223+ call pax_enter_kernel
18224+#endif
18225+.endm
18226+
18227+.macro pax_exit_kernel
18228+#ifdef CONFIG_PAX_KERNEXEC
18229+ call pax_exit_kernel
18230+#endif
18231+.endm
18232+
18233+#ifdef CONFIG_PAX_KERNEXEC
18234+ENTRY(pax_enter_kernel)
18235+#ifdef CONFIG_PARAVIRT
18236+ pushl %eax
18237+ pushl %ecx
18238+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
18239+ mov %eax, %esi
18240+#else
18241+ mov %cr0, %esi
18242+#endif
18243+ bts $16, %esi
18244+ jnc 1f
18245+ mov %cs, %esi
18246+ cmp $__KERNEL_CS, %esi
18247+ jz 3f
18248+ ljmp $__KERNEL_CS, $3f
18249+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
18250+2:
18251+#ifdef CONFIG_PARAVIRT
18252+ mov %esi, %eax
18253+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
18254+#else
18255+ mov %esi, %cr0
18256+#endif
18257+3:
18258+#ifdef CONFIG_PARAVIRT
18259+ popl %ecx
18260+ popl %eax
18261+#endif
18262+ ret
18263+ENDPROC(pax_enter_kernel)
18264+
18265+ENTRY(pax_exit_kernel)
18266+#ifdef CONFIG_PARAVIRT
18267+ pushl %eax
18268+ pushl %ecx
18269+#endif
18270+ mov %cs, %esi
18271+ cmp $__KERNEXEC_KERNEL_CS, %esi
18272+ jnz 2f
18273+#ifdef CONFIG_PARAVIRT
18274+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
18275+ mov %eax, %esi
18276+#else
18277+ mov %cr0, %esi
18278+#endif
18279+ btr $16, %esi
18280+ ljmp $__KERNEL_CS, $1f
18281+1:
18282+#ifdef CONFIG_PARAVIRT
18283+ mov %esi, %eax
18284+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
18285+#else
18286+ mov %esi, %cr0
18287+#endif
18288+2:
18289+#ifdef CONFIG_PARAVIRT
18290+ popl %ecx
18291+ popl %eax
18292+#endif
18293+ ret
18294+ENDPROC(pax_exit_kernel)
18295+#endif
18296+
18297+ .macro pax_erase_kstack
18298+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
18299+ call pax_erase_kstack
18300+#endif
18301+ .endm
18302+
18303+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
18304+/*
18305+ * ebp: thread_info
18306+ */
18307+ENTRY(pax_erase_kstack)
18308+ pushl %edi
18309+ pushl %ecx
18310+ pushl %eax
18311+
18312+ mov TI_lowest_stack(%ebp), %edi
18313+ mov $-0xBEEF, %eax
18314+ std
18315+
18316+1: mov %edi, %ecx
18317+ and $THREAD_SIZE_asm - 1, %ecx
18318+ shr $2, %ecx
18319+ repne scasl
18320+ jecxz 2f
18321+
18322+ cmp $2*16, %ecx
18323+ jc 2f
18324+
18325+ mov $2*16, %ecx
18326+ repe scasl
18327+ jecxz 2f
18328+ jne 1b
18329+
18330+2: cld
18331+ mov %esp, %ecx
18332+ sub %edi, %ecx
18333+
18334+ cmp $THREAD_SIZE_asm, %ecx
18335+ jb 3f
18336+ ud2
18337+3:
18338+
18339+ shr $2, %ecx
18340+ rep stosl
18341+
18342+ mov TI_task_thread_sp0(%ebp), %edi
18343+ sub $128, %edi
18344+ mov %edi, TI_lowest_stack(%ebp)
18345+
18346+ popl %eax
18347+ popl %ecx
18348+ popl %edi
18349+ ret
18350+ENDPROC(pax_erase_kstack)
18351+#endif
18352+
18353+.macro __SAVE_ALL _DS
18354 cld
18355 PUSH_GS
18356 pushl_cfi %fs
18357@@ -206,7 +346,7 @@
18358 CFI_REL_OFFSET ecx, 0
18359 pushl_cfi %ebx
18360 CFI_REL_OFFSET ebx, 0
18361- movl $(__USER_DS), %edx
18362+ movl $\_DS, %edx
18363 movl %edx, %ds
18364 movl %edx, %es
18365 movl $(__KERNEL_PERCPU), %edx
18366@@ -214,6 +354,15 @@
18367 SET_KERNEL_GS %edx
18368 .endm
18369
18370+.macro SAVE_ALL
18371+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
18372+ __SAVE_ALL __KERNEL_DS
18373+ pax_enter_kernel
18374+#else
18375+ __SAVE_ALL __USER_DS
18376+#endif
18377+.endm
18378+
18379 .macro RESTORE_INT_REGS
18380 popl_cfi %ebx
18381 CFI_RESTORE ebx
18382@@ -297,7 +446,7 @@ ENTRY(ret_from_fork)
18383 popfl_cfi
18384 jmp syscall_exit
18385 CFI_ENDPROC
18386-END(ret_from_fork)
18387+ENDPROC(ret_from_fork)
18388
18389 ENTRY(ret_from_kernel_thread)
18390 CFI_STARTPROC
18391@@ -344,7 +493,15 @@ ret_from_intr:
18392 andl $SEGMENT_RPL_MASK, %eax
18393 #endif
18394 cmpl $USER_RPL, %eax
18395+
18396+#ifdef CONFIG_PAX_KERNEXEC
18397+ jae resume_userspace
18398+
18399+ pax_exit_kernel
18400+ jmp resume_kernel
18401+#else
18402 jb resume_kernel # not returning to v8086 or userspace
18403+#endif
18404
18405 ENTRY(resume_userspace)
18406 LOCKDEP_SYS_EXIT
18407@@ -356,8 +513,8 @@ ENTRY(resume_userspace)
18408 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
18409 # int/exception return?
18410 jne work_pending
18411- jmp restore_all
18412-END(ret_from_exception)
18413+ jmp restore_all_pax
18414+ENDPROC(ret_from_exception)
18415
18416 #ifdef CONFIG_PREEMPT
18417 ENTRY(resume_kernel)
18418@@ -372,7 +529,7 @@ need_resched:
18419 jz restore_all
18420 call preempt_schedule_irq
18421 jmp need_resched
18422-END(resume_kernel)
18423+ENDPROC(resume_kernel)
18424 #endif
18425 CFI_ENDPROC
18426 /*
18427@@ -406,30 +563,45 @@ sysenter_past_esp:
18428 /*CFI_REL_OFFSET cs, 0*/
18429 /*
18430 * Push current_thread_info()->sysenter_return to the stack.
18431- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
18432- * pushed above; +8 corresponds to copy_thread's esp0 setting.
18433 */
18434- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
18435+ pushl_cfi $0
18436 CFI_REL_OFFSET eip, 0
18437
18438 pushl_cfi %eax
18439 SAVE_ALL
18440+ GET_THREAD_INFO(%ebp)
18441+ movl TI_sysenter_return(%ebp),%ebp
18442+ movl %ebp,PT_EIP(%esp)
18443 ENABLE_INTERRUPTS(CLBR_NONE)
18444
18445 /*
18446 * Load the potential sixth argument from user stack.
18447 * Careful about security.
18448 */
18449+ movl PT_OLDESP(%esp),%ebp
18450+
18451+#ifdef CONFIG_PAX_MEMORY_UDEREF
18452+ mov PT_OLDSS(%esp),%ds
18453+1: movl %ds:(%ebp),%ebp
18454+ push %ss
18455+ pop %ds
18456+#else
18457 cmpl $__PAGE_OFFSET-3,%ebp
18458 jae syscall_fault
18459 ASM_STAC
18460 1: movl (%ebp),%ebp
18461 ASM_CLAC
18462+#endif
18463+
18464 movl %ebp,PT_EBP(%esp)
18465 _ASM_EXTABLE(1b,syscall_fault)
18466
18467 GET_THREAD_INFO(%ebp)
18468
18469+#ifdef CONFIG_PAX_RANDKSTACK
18470+ pax_erase_kstack
18471+#endif
18472+
18473 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
18474 jnz sysenter_audit
18475 sysenter_do_call:
18476@@ -444,12 +616,24 @@ sysenter_do_call:
18477 testl $_TIF_ALLWORK_MASK, %ecx
18478 jne sysexit_audit
18479 sysenter_exit:
18480+
18481+#ifdef CONFIG_PAX_RANDKSTACK
18482+ pushl_cfi %eax
18483+ movl %esp, %eax
18484+ call pax_randomize_kstack
18485+ popl_cfi %eax
18486+#endif
18487+
18488+ pax_erase_kstack
18489+
18490 /* if something modifies registers it must also disable sysexit */
18491 movl PT_EIP(%esp), %edx
18492 movl PT_OLDESP(%esp), %ecx
18493 xorl %ebp,%ebp
18494 TRACE_IRQS_ON
18495 1: mov PT_FS(%esp), %fs
18496+2: mov PT_DS(%esp), %ds
18497+3: mov PT_ES(%esp), %es
18498 PTGS_TO_GS
18499 ENABLE_INTERRUPTS_SYSEXIT
18500
18501@@ -466,6 +650,9 @@ sysenter_audit:
18502 movl %eax,%edx /* 2nd arg: syscall number */
18503 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
18504 call __audit_syscall_entry
18505+
18506+ pax_erase_kstack
18507+
18508 pushl_cfi %ebx
18509 movl PT_EAX(%esp),%eax /* reload syscall number */
18510 jmp sysenter_do_call
18511@@ -491,10 +678,16 @@ sysexit_audit:
18512
18513 CFI_ENDPROC
18514 .pushsection .fixup,"ax"
18515-2: movl $0,PT_FS(%esp)
18516+4: movl $0,PT_FS(%esp)
18517+ jmp 1b
18518+5: movl $0,PT_DS(%esp)
18519+ jmp 1b
18520+6: movl $0,PT_ES(%esp)
18521 jmp 1b
18522 .popsection
18523- _ASM_EXTABLE(1b,2b)
18524+ _ASM_EXTABLE(1b,4b)
18525+ _ASM_EXTABLE(2b,5b)
18526+ _ASM_EXTABLE(3b,6b)
18527 PTGS_TO_GS_EX
18528 ENDPROC(ia32_sysenter_target)
18529
18530@@ -509,6 +702,11 @@ ENTRY(system_call)
18531 pushl_cfi %eax # save orig_eax
18532 SAVE_ALL
18533 GET_THREAD_INFO(%ebp)
18534+
18535+#ifdef CONFIG_PAX_RANDKSTACK
18536+ pax_erase_kstack
18537+#endif
18538+
18539 # system call tracing in operation / emulation
18540 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
18541 jnz syscall_trace_entry
18542@@ -527,6 +725,15 @@ syscall_exit:
18543 testl $_TIF_ALLWORK_MASK, %ecx # current->work
18544 jne syscall_exit_work
18545
18546+restore_all_pax:
18547+
18548+#ifdef CONFIG_PAX_RANDKSTACK
18549+ movl %esp, %eax
18550+ call pax_randomize_kstack
18551+#endif
18552+
18553+ pax_erase_kstack
18554+
18555 restore_all:
18556 TRACE_IRQS_IRET
18557 restore_all_notrace:
18558@@ -583,14 +790,34 @@ ldt_ss:
18559 * compensating for the offset by changing to the ESPFIX segment with
18560 * a base address that matches for the difference.
18561 */
18562-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
18563+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
18564 mov %esp, %edx /* load kernel esp */
18565 mov PT_OLDESP(%esp), %eax /* load userspace esp */
18566 mov %dx, %ax /* eax: new kernel esp */
18567 sub %eax, %edx /* offset (low word is 0) */
18568+#ifdef CONFIG_SMP
18569+ movl PER_CPU_VAR(cpu_number), %ebx
18570+ shll $PAGE_SHIFT_asm, %ebx
18571+ addl $cpu_gdt_table, %ebx
18572+#else
18573+ movl $cpu_gdt_table, %ebx
18574+#endif
18575 shr $16, %edx
18576- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
18577- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
18578+
18579+#ifdef CONFIG_PAX_KERNEXEC
18580+ mov %cr0, %esi
18581+ btr $16, %esi
18582+ mov %esi, %cr0
18583+#endif
18584+
18585+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
18586+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
18587+
18588+#ifdef CONFIG_PAX_KERNEXEC
18589+ bts $16, %esi
18590+ mov %esi, %cr0
18591+#endif
18592+
18593 pushl_cfi $__ESPFIX_SS
18594 pushl_cfi %eax /* new kernel esp */
18595 /* Disable interrupts, but do not irqtrace this section: we
18596@@ -619,20 +846,18 @@ work_resched:
18597 movl TI_flags(%ebp), %ecx
18598 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
18599 # than syscall tracing?
18600- jz restore_all
18601+ jz restore_all_pax
18602 testb $_TIF_NEED_RESCHED, %cl
18603 jnz work_resched
18604
18605 work_notifysig: # deal with pending signals and
18606 # notify-resume requests
18607+ movl %esp, %eax
18608 #ifdef CONFIG_VM86
18609 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
18610- movl %esp, %eax
18611 jne work_notifysig_v86 # returning to kernel-space or
18612 # vm86-space
18613 1:
18614-#else
18615- movl %esp, %eax
18616 #endif
18617 TRACE_IRQS_ON
18618 ENABLE_INTERRUPTS(CLBR_NONE)
18619@@ -653,7 +878,7 @@ work_notifysig_v86:
18620 movl %eax, %esp
18621 jmp 1b
18622 #endif
18623-END(work_pending)
18624+ENDPROC(work_pending)
18625
18626 # perform syscall exit tracing
18627 ALIGN
18628@@ -661,11 +886,14 @@ syscall_trace_entry:
18629 movl $-ENOSYS,PT_EAX(%esp)
18630 movl %esp, %eax
18631 call syscall_trace_enter
18632+
18633+ pax_erase_kstack
18634+
18635 /* What it returned is what we'll actually use. */
18636 cmpl $(NR_syscalls), %eax
18637 jnae syscall_call
18638 jmp syscall_exit
18639-END(syscall_trace_entry)
18640+ENDPROC(syscall_trace_entry)
18641
18642 # perform syscall exit tracing
18643 ALIGN
18644@@ -678,21 +906,25 @@ syscall_exit_work:
18645 movl %esp, %eax
18646 call syscall_trace_leave
18647 jmp resume_userspace
18648-END(syscall_exit_work)
18649+ENDPROC(syscall_exit_work)
18650 CFI_ENDPROC
18651
18652 RING0_INT_FRAME # can't unwind into user space anyway
18653 syscall_fault:
18654+#ifdef CONFIG_PAX_MEMORY_UDEREF
18655+ push %ss
18656+ pop %ds
18657+#endif
18658 ASM_CLAC
18659 GET_THREAD_INFO(%ebp)
18660 movl $-EFAULT,PT_EAX(%esp)
18661 jmp resume_userspace
18662-END(syscall_fault)
18663+ENDPROC(syscall_fault)
18664
18665 syscall_badsys:
18666 movl $-ENOSYS,PT_EAX(%esp)
18667 jmp resume_userspace
18668-END(syscall_badsys)
18669+ENDPROC(syscall_badsys)
18670 CFI_ENDPROC
18671 /*
18672 * End of kprobes section
18673@@ -708,8 +940,15 @@ END(syscall_badsys)
18674 * normal stack and adjusts ESP with the matching offset.
18675 */
18676 /* fixup the stack */
18677- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
18678- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
18679+#ifdef CONFIG_SMP
18680+ movl PER_CPU_VAR(cpu_number), %ebx
18681+ shll $PAGE_SHIFT_asm, %ebx
18682+ addl $cpu_gdt_table, %ebx
18683+#else
18684+ movl $cpu_gdt_table, %ebx
18685+#endif
18686+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
18687+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
18688 shl $16, %eax
18689 addl %esp, %eax /* the adjusted stack pointer */
18690 pushl_cfi $__KERNEL_DS
18691@@ -762,7 +1001,7 @@ vector=vector+1
18692 .endr
18693 2: jmp common_interrupt
18694 .endr
18695-END(irq_entries_start)
18696+ENDPROC(irq_entries_start)
18697
18698 .previous
18699 END(interrupt)
18700@@ -813,7 +1052,7 @@ ENTRY(coprocessor_error)
18701 pushl_cfi $do_coprocessor_error
18702 jmp error_code
18703 CFI_ENDPROC
18704-END(coprocessor_error)
18705+ENDPROC(coprocessor_error)
18706
18707 ENTRY(simd_coprocessor_error)
18708 RING0_INT_FRAME
18709@@ -835,7 +1074,7 @@ ENTRY(simd_coprocessor_error)
18710 #endif
18711 jmp error_code
18712 CFI_ENDPROC
18713-END(simd_coprocessor_error)
18714+ENDPROC(simd_coprocessor_error)
18715
18716 ENTRY(device_not_available)
18717 RING0_INT_FRAME
18718@@ -844,18 +1083,18 @@ ENTRY(device_not_available)
18719 pushl_cfi $do_device_not_available
18720 jmp error_code
18721 CFI_ENDPROC
18722-END(device_not_available)
18723+ENDPROC(device_not_available)
18724
18725 #ifdef CONFIG_PARAVIRT
18726 ENTRY(native_iret)
18727 iret
18728 _ASM_EXTABLE(native_iret, iret_exc)
18729-END(native_iret)
18730+ENDPROC(native_iret)
18731
18732 ENTRY(native_irq_enable_sysexit)
18733 sti
18734 sysexit
18735-END(native_irq_enable_sysexit)
18736+ENDPROC(native_irq_enable_sysexit)
18737 #endif
18738
18739 ENTRY(overflow)
18740@@ -865,7 +1104,7 @@ ENTRY(overflow)
18741 pushl_cfi $do_overflow
18742 jmp error_code
18743 CFI_ENDPROC
18744-END(overflow)
18745+ENDPROC(overflow)
18746
18747 ENTRY(bounds)
18748 RING0_INT_FRAME
18749@@ -874,7 +1113,7 @@ ENTRY(bounds)
18750 pushl_cfi $do_bounds
18751 jmp error_code
18752 CFI_ENDPROC
18753-END(bounds)
18754+ENDPROC(bounds)
18755
18756 ENTRY(invalid_op)
18757 RING0_INT_FRAME
18758@@ -883,7 +1122,7 @@ ENTRY(invalid_op)
18759 pushl_cfi $do_invalid_op
18760 jmp error_code
18761 CFI_ENDPROC
18762-END(invalid_op)
18763+ENDPROC(invalid_op)
18764
18765 ENTRY(coprocessor_segment_overrun)
18766 RING0_INT_FRAME
18767@@ -892,7 +1131,7 @@ ENTRY(coprocessor_segment_overrun)
18768 pushl_cfi $do_coprocessor_segment_overrun
18769 jmp error_code
18770 CFI_ENDPROC
18771-END(coprocessor_segment_overrun)
18772+ENDPROC(coprocessor_segment_overrun)
18773
18774 ENTRY(invalid_TSS)
18775 RING0_EC_FRAME
18776@@ -900,7 +1139,7 @@ ENTRY(invalid_TSS)
18777 pushl_cfi $do_invalid_TSS
18778 jmp error_code
18779 CFI_ENDPROC
18780-END(invalid_TSS)
18781+ENDPROC(invalid_TSS)
18782
18783 ENTRY(segment_not_present)
18784 RING0_EC_FRAME
18785@@ -908,7 +1147,7 @@ ENTRY(segment_not_present)
18786 pushl_cfi $do_segment_not_present
18787 jmp error_code
18788 CFI_ENDPROC
18789-END(segment_not_present)
18790+ENDPROC(segment_not_present)
18791
18792 ENTRY(stack_segment)
18793 RING0_EC_FRAME
18794@@ -916,7 +1155,7 @@ ENTRY(stack_segment)
18795 pushl_cfi $do_stack_segment
18796 jmp error_code
18797 CFI_ENDPROC
18798-END(stack_segment)
18799+ENDPROC(stack_segment)
18800
18801 ENTRY(alignment_check)
18802 RING0_EC_FRAME
18803@@ -924,7 +1163,7 @@ ENTRY(alignment_check)
18804 pushl_cfi $do_alignment_check
18805 jmp error_code
18806 CFI_ENDPROC
18807-END(alignment_check)
18808+ENDPROC(alignment_check)
18809
18810 ENTRY(divide_error)
18811 RING0_INT_FRAME
18812@@ -933,7 +1172,7 @@ ENTRY(divide_error)
18813 pushl_cfi $do_divide_error
18814 jmp error_code
18815 CFI_ENDPROC
18816-END(divide_error)
18817+ENDPROC(divide_error)
18818
18819 #ifdef CONFIG_X86_MCE
18820 ENTRY(machine_check)
18821@@ -943,7 +1182,7 @@ ENTRY(machine_check)
18822 pushl_cfi machine_check_vector
18823 jmp error_code
18824 CFI_ENDPROC
18825-END(machine_check)
18826+ENDPROC(machine_check)
18827 #endif
18828
18829 ENTRY(spurious_interrupt_bug)
18830@@ -953,7 +1192,7 @@ ENTRY(spurious_interrupt_bug)
18831 pushl_cfi $do_spurious_interrupt_bug
18832 jmp error_code
18833 CFI_ENDPROC
18834-END(spurious_interrupt_bug)
18835+ENDPROC(spurious_interrupt_bug)
18836 /*
18837 * End of kprobes section
18838 */
18839@@ -1063,7 +1302,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
18840
18841 ENTRY(mcount)
18842 ret
18843-END(mcount)
18844+ENDPROC(mcount)
18845
18846 ENTRY(ftrace_caller)
18847 cmpl $0, function_trace_stop
18848@@ -1096,7 +1335,7 @@ ftrace_graph_call:
18849 .globl ftrace_stub
18850 ftrace_stub:
18851 ret
18852-END(ftrace_caller)
18853+ENDPROC(ftrace_caller)
18854
18855 ENTRY(ftrace_regs_caller)
18856 pushf /* push flags before compare (in cs location) */
18857@@ -1197,7 +1436,7 @@ trace:
18858 popl %ecx
18859 popl %eax
18860 jmp ftrace_stub
18861-END(mcount)
18862+ENDPROC(mcount)
18863 #endif /* CONFIG_DYNAMIC_FTRACE */
18864 #endif /* CONFIG_FUNCTION_TRACER */
18865
18866@@ -1215,7 +1454,7 @@ ENTRY(ftrace_graph_caller)
18867 popl %ecx
18868 popl %eax
18869 ret
18870-END(ftrace_graph_caller)
18871+ENDPROC(ftrace_graph_caller)
18872
18873 .globl return_to_handler
18874 return_to_handler:
18875@@ -1271,15 +1510,18 @@ error_code:
18876 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
18877 REG_TO_PTGS %ecx
18878 SET_KERNEL_GS %ecx
18879- movl $(__USER_DS), %ecx
18880+ movl $(__KERNEL_DS), %ecx
18881 movl %ecx, %ds
18882 movl %ecx, %es
18883+
18884+ pax_enter_kernel
18885+
18886 TRACE_IRQS_OFF
18887 movl %esp,%eax # pt_regs pointer
18888 call *%edi
18889 jmp ret_from_exception
18890 CFI_ENDPROC
18891-END(page_fault)
18892+ENDPROC(page_fault)
18893
18894 /*
18895 * Debug traps and NMI can happen at the one SYSENTER instruction
18896@@ -1322,7 +1564,7 @@ debug_stack_correct:
18897 call do_debug
18898 jmp ret_from_exception
18899 CFI_ENDPROC
18900-END(debug)
18901+ENDPROC(debug)
18902
18903 /*
18904 * NMI is doubly nasty. It can happen _while_ we're handling
18905@@ -1360,6 +1602,9 @@ nmi_stack_correct:
18906 xorl %edx,%edx # zero error code
18907 movl %esp,%eax # pt_regs pointer
18908 call do_nmi
18909+
18910+ pax_exit_kernel
18911+
18912 jmp restore_all_notrace
18913 CFI_ENDPROC
18914
18915@@ -1396,12 +1641,15 @@ nmi_espfix_stack:
18916 FIXUP_ESPFIX_STACK # %eax == %esp
18917 xorl %edx,%edx # zero error code
18918 call do_nmi
18919+
18920+ pax_exit_kernel
18921+
18922 RESTORE_REGS
18923 lss 12+4(%esp), %esp # back to espfix stack
18924 CFI_ADJUST_CFA_OFFSET -24
18925 jmp irq_return
18926 CFI_ENDPROC
18927-END(nmi)
18928+ENDPROC(nmi)
18929
18930 ENTRY(int3)
18931 RING0_INT_FRAME
18932@@ -1414,14 +1662,14 @@ ENTRY(int3)
18933 call do_int3
18934 jmp ret_from_exception
18935 CFI_ENDPROC
18936-END(int3)
18937+ENDPROC(int3)
18938
18939 ENTRY(general_protection)
18940 RING0_EC_FRAME
18941 pushl_cfi $do_general_protection
18942 jmp error_code
18943 CFI_ENDPROC
18944-END(general_protection)
18945+ENDPROC(general_protection)
18946
18947 #ifdef CONFIG_KVM_GUEST
18948 ENTRY(async_page_fault)
18949@@ -1430,7 +1678,7 @@ ENTRY(async_page_fault)
18950 pushl_cfi $do_async_page_fault
18951 jmp error_code
18952 CFI_ENDPROC
18953-END(async_page_fault)
18954+ENDPROC(async_page_fault)
18955 #endif
18956
18957 /*
18958diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
18959index 7272089..6204f9c5 100644
18960--- a/arch/x86/kernel/entry_64.S
18961+++ b/arch/x86/kernel/entry_64.S
18962@@ -59,6 +59,8 @@
18963 #include <asm/context_tracking.h>
18964 #include <asm/smap.h>
18965 #include <linux/err.h>
18966+#include <asm/pgtable.h>
18967+#include <asm/alternative-asm.h>
18968
18969 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
18970 #include <linux/elf-em.h>
18971@@ -80,8 +82,9 @@
18972 #ifdef CONFIG_DYNAMIC_FTRACE
18973
18974 ENTRY(function_hook)
18975+ pax_force_retaddr
18976 retq
18977-END(function_hook)
18978+ENDPROC(function_hook)
18979
18980 /* skip is set if stack has been adjusted */
18981 .macro ftrace_caller_setup skip=0
18982@@ -122,8 +125,9 @@ GLOBAL(ftrace_graph_call)
18983 #endif
18984
18985 GLOBAL(ftrace_stub)
18986+ pax_force_retaddr
18987 retq
18988-END(ftrace_caller)
18989+ENDPROC(ftrace_caller)
18990
18991 ENTRY(ftrace_regs_caller)
18992 /* Save the current flags before compare (in SS location)*/
18993@@ -191,7 +195,7 @@ ftrace_restore_flags:
18994 popfq
18995 jmp ftrace_stub
18996
18997-END(ftrace_regs_caller)
18998+ENDPROC(ftrace_regs_caller)
18999
19000
19001 #else /* ! CONFIG_DYNAMIC_FTRACE */
19002@@ -212,6 +216,7 @@ ENTRY(function_hook)
19003 #endif
19004
19005 GLOBAL(ftrace_stub)
19006+ pax_force_retaddr
19007 retq
19008
19009 trace:
19010@@ -225,12 +230,13 @@ trace:
19011 #endif
19012 subq $MCOUNT_INSN_SIZE, %rdi
19013
19014+ pax_force_fptr ftrace_trace_function
19015 call *ftrace_trace_function
19016
19017 MCOUNT_RESTORE_FRAME
19018
19019 jmp ftrace_stub
19020-END(function_hook)
19021+ENDPROC(function_hook)
19022 #endif /* CONFIG_DYNAMIC_FTRACE */
19023 #endif /* CONFIG_FUNCTION_TRACER */
19024
19025@@ -252,8 +258,9 @@ ENTRY(ftrace_graph_caller)
19026
19027 MCOUNT_RESTORE_FRAME
19028
19029+ pax_force_retaddr
19030 retq
19031-END(ftrace_graph_caller)
19032+ENDPROC(ftrace_graph_caller)
19033
19034 GLOBAL(return_to_handler)
19035 subq $24, %rsp
19036@@ -269,7 +276,9 @@ GLOBAL(return_to_handler)
19037 movq 8(%rsp), %rdx
19038 movq (%rsp), %rax
19039 addq $24, %rsp
19040+ pax_force_fptr %rdi
19041 jmp *%rdi
19042+ENDPROC(return_to_handler)
19043 #endif
19044
19045
19046@@ -284,6 +293,309 @@ ENTRY(native_usergs_sysret64)
19047 ENDPROC(native_usergs_sysret64)
19048 #endif /* CONFIG_PARAVIRT */
19049
19050+ .macro ljmpq sel, off
19051+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
19052+ .byte 0x48; ljmp *1234f(%rip)
19053+ .pushsection .rodata
19054+ .align 16
19055+ 1234: .quad \off; .word \sel
19056+ .popsection
19057+#else
19058+ pushq $\sel
19059+ pushq $\off
19060+ lretq
19061+#endif
19062+ .endm
19063+
19064+ .macro pax_enter_kernel
19065+ pax_set_fptr_mask
19066+#ifdef CONFIG_PAX_KERNEXEC
19067+ call pax_enter_kernel
19068+#endif
19069+ .endm
19070+
19071+ .macro pax_exit_kernel
19072+#ifdef CONFIG_PAX_KERNEXEC
19073+ call pax_exit_kernel
19074+#endif
19075+ .endm
19076+
19077+#ifdef CONFIG_PAX_KERNEXEC
19078+ENTRY(pax_enter_kernel)
19079+ pushq %rdi
19080+
19081+#ifdef CONFIG_PARAVIRT
19082+ PV_SAVE_REGS(CLBR_RDI)
19083+#endif
19084+
19085+ GET_CR0_INTO_RDI
19086+ bts $16,%rdi
19087+ jnc 3f
19088+ mov %cs,%edi
19089+ cmp $__KERNEL_CS,%edi
19090+ jnz 2f
19091+1:
19092+
19093+#ifdef CONFIG_PARAVIRT
19094+ PV_RESTORE_REGS(CLBR_RDI)
19095+#endif
19096+
19097+ popq %rdi
19098+ pax_force_retaddr
19099+ retq
19100+
19101+2: ljmpq __KERNEL_CS,1b
19102+3: ljmpq __KERNEXEC_KERNEL_CS,4f
19103+4: SET_RDI_INTO_CR0
19104+ jmp 1b
19105+ENDPROC(pax_enter_kernel)
19106+
19107+ENTRY(pax_exit_kernel)
19108+ pushq %rdi
19109+
19110+#ifdef CONFIG_PARAVIRT
19111+ PV_SAVE_REGS(CLBR_RDI)
19112+#endif
19113+
19114+ mov %cs,%rdi
19115+ cmp $__KERNEXEC_KERNEL_CS,%edi
19116+ jz 2f
19117+ GET_CR0_INTO_RDI
19118+ bts $16,%rdi
19119+ jnc 4f
19120+1:
19121+
19122+#ifdef CONFIG_PARAVIRT
19123+ PV_RESTORE_REGS(CLBR_RDI);
19124+#endif
19125+
19126+ popq %rdi
19127+ pax_force_retaddr
19128+ retq
19129+
19130+2: GET_CR0_INTO_RDI
19131+ btr $16,%rdi
19132+ jnc 4f
19133+ ljmpq __KERNEL_CS,3f
19134+3: SET_RDI_INTO_CR0
19135+ jmp 1b
19136+4: ud2
19137+ jmp 4b
19138+ENDPROC(pax_exit_kernel)
19139+#endif
19140+
19141+ .macro pax_enter_kernel_user
19142+ pax_set_fptr_mask
19143+#ifdef CONFIG_PAX_MEMORY_UDEREF
19144+ call pax_enter_kernel_user
19145+#endif
19146+ .endm
19147+
19148+ .macro pax_exit_kernel_user
19149+#ifdef CONFIG_PAX_MEMORY_UDEREF
19150+ call pax_exit_kernel_user
19151+#endif
19152+#ifdef CONFIG_PAX_RANDKSTACK
19153+ pushq %rax
19154+ pushq %r11
19155+ call pax_randomize_kstack
19156+ popq %r11
19157+ popq %rax
19158+#endif
19159+ .endm
19160+
19161+#ifdef CONFIG_PAX_MEMORY_UDEREF
19162+ENTRY(pax_enter_kernel_user)
19163+ pushq %rdi
19164+ pushq %rbx
19165+
19166+#ifdef CONFIG_PARAVIRT
19167+ PV_SAVE_REGS(CLBR_RDI)
19168+#endif
19169+
19170+ GET_CR3_INTO_RDI
19171+ mov %rdi,%rbx
19172+ add $__START_KERNEL_map,%rbx
19173+ sub phys_base(%rip),%rbx
19174+
19175+#ifdef CONFIG_PARAVIRT
19176+ cmpl $0, pv_info+PARAVIRT_enabled
19177+ jz 1f
19178+ pushq %rdi
19179+ i = 0
19180+ .rept USER_PGD_PTRS
19181+ mov i*8(%rbx),%rsi
19182+ mov $0,%sil
19183+ lea i*8(%rbx),%rdi
19184+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
19185+ i = i + 1
19186+ .endr
19187+ popq %rdi
19188+ jmp 2f
19189+1:
19190+#endif
19191+
19192+ i = 0
19193+ .rept USER_PGD_PTRS
19194+ movb $0,i*8(%rbx)
19195+ i = i + 1
19196+ .endr
19197+
19198+#ifdef CONFIG_PARAVIRT
19199+2:
19200+#endif
19201+ SET_RDI_INTO_CR3
19202+
19203+#ifdef CONFIG_PAX_KERNEXEC
19204+ GET_CR0_INTO_RDI
19205+ bts $16,%rdi
19206+ SET_RDI_INTO_CR0
19207+#endif
19208+
19209+#ifdef CONFIG_PARAVIRT
19210+ PV_RESTORE_REGS(CLBR_RDI)
19211+#endif
19212+
19213+ popq %rbx
19214+ popq %rdi
19215+ pax_force_retaddr
19216+ retq
19217+ENDPROC(pax_enter_kernel_user)
19218+
19219+ENTRY(pax_exit_kernel_user)
19220+ pushq %rdi
19221+ pushq %rbx
19222+
19223+#ifdef CONFIG_PARAVIRT
19224+ PV_SAVE_REGS(CLBR_RDI)
19225+#endif
19226+
19227+#ifdef CONFIG_PAX_KERNEXEC
19228+ GET_CR0_INTO_RDI
19229+ btr $16,%rdi
19230+ jnc 3f
19231+ SET_RDI_INTO_CR0
19232+#endif
19233+
19234+ GET_CR3_INTO_RDI
19235+ mov %rdi,%rbx
19236+ add $__START_KERNEL_map,%rbx
19237+ sub phys_base(%rip),%rbx
19238+
19239+#ifdef CONFIG_PARAVIRT
19240+ cmpl $0, pv_info+PARAVIRT_enabled
19241+ jz 1f
19242+ i = 0
19243+ .rept USER_PGD_PTRS
19244+ mov i*8(%rbx),%rsi
19245+ mov $0x67,%sil
19246+ lea i*8(%rbx),%rdi
19247+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
19248+ i = i + 1
19249+ .endr
19250+ jmp 2f
19251+1:
19252+#endif
19253+
19254+ i = 0
19255+ .rept USER_PGD_PTRS
19256+ movb $0x67,i*8(%rbx)
19257+ i = i + 1
19258+ .endr
19259+
19260+#ifdef CONFIG_PARAVIRT
19261+2: PV_RESTORE_REGS(CLBR_RDI)
19262+#endif
19263+
19264+ popq %rbx
19265+ popq %rdi
19266+ pax_force_retaddr
19267+ retq
19268+3: ud2
19269+ jmp 3b
19270+ENDPROC(pax_exit_kernel_user)
19271+#endif
19272+
19273+ .macro pax_enter_kernel_nmi
19274+ pax_set_fptr_mask
19275+
19276+#ifdef CONFIG_PAX_KERNEXEC
19277+ GET_CR0_INTO_RDI
19278+ bts $16,%rdi
19279+ jc 110f
19280+ SET_RDI_INTO_CR0
19281+ or $2,%ebx
19282+110:
19283+#endif
19284+ .endm
19285+
19286+ .macro pax_exit_kernel_nmi
19287+#ifdef CONFIG_PAX_KERNEXEC
19288+ btr $1,%ebx
19289+ jnc 110f
19290+ GET_CR0_INTO_RDI
19291+ btr $16,%rdi
19292+ SET_RDI_INTO_CR0
19293+110:
19294+#endif
19295+ .endm
19296+
19297+ .macro pax_erase_kstack
19298+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
19299+ call pax_erase_kstack
19300+#endif
19301+ .endm
19302+
19303+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
19304+ENTRY(pax_erase_kstack)
19305+ pushq %rdi
19306+ pushq %rcx
19307+ pushq %rax
19308+ pushq %r11
19309+
19310+ GET_THREAD_INFO(%r11)
19311+ mov TI_lowest_stack(%r11), %rdi
19312+ mov $-0xBEEF, %rax
19313+ std
19314+
19315+1: mov %edi, %ecx
19316+ and $THREAD_SIZE_asm - 1, %ecx
19317+ shr $3, %ecx
19318+ repne scasq
19319+ jecxz 2f
19320+
19321+ cmp $2*8, %ecx
19322+ jc 2f
19323+
19324+ mov $2*8, %ecx
19325+ repe scasq
19326+ jecxz 2f
19327+ jne 1b
19328+
19329+2: cld
19330+ mov %esp, %ecx
19331+ sub %edi, %ecx
19332+
19333+ cmp $THREAD_SIZE_asm, %rcx
19334+ jb 3f
19335+ ud2
19336+3:
19337+
19338+ shr $3, %ecx
19339+ rep stosq
19340+
19341+ mov TI_task_thread_sp0(%r11), %rdi
19342+ sub $256, %rdi
19343+ mov %rdi, TI_lowest_stack(%r11)
19344+
19345+ popq %r11
19346+ popq %rax
19347+ popq %rcx
19348+ popq %rdi
19349+ pax_force_retaddr
19350+ ret
19351+ENDPROC(pax_erase_kstack)
19352+#endif
19353
19354 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
19355 #ifdef CONFIG_TRACE_IRQFLAGS
19356@@ -375,8 +687,8 @@ ENDPROC(native_usergs_sysret64)
19357 .endm
19358
19359 .macro UNFAKE_STACK_FRAME
19360- addq $8*6, %rsp
19361- CFI_ADJUST_CFA_OFFSET -(6*8)
19362+ addq $8*6 + ARG_SKIP, %rsp
19363+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
19364 .endm
19365
19366 /*
19367@@ -463,7 +775,7 @@ ENDPROC(native_usergs_sysret64)
19368 movq %rsp, %rsi
19369
19370 leaq -RBP(%rsp),%rdi /* arg1 for handler */
19371- testl $3, CS-RBP(%rsi)
19372+ testb $3, CS-RBP(%rsi)
19373 je 1f
19374 SWAPGS
19375 /*
19376@@ -498,9 +810,10 @@ ENTRY(save_rest)
19377 movq_cfi r15, R15+16
19378 movq %r11, 8(%rsp) /* return address */
19379 FIXUP_TOP_OF_STACK %r11, 16
19380+ pax_force_retaddr
19381 ret
19382 CFI_ENDPROC
19383-END(save_rest)
19384+ENDPROC(save_rest)
19385
19386 /* save complete stack frame */
19387 .pushsection .kprobes.text, "ax"
19388@@ -529,9 +842,10 @@ ENTRY(save_paranoid)
19389 js 1f /* negative -> in kernel */
19390 SWAPGS
19391 xorl %ebx,%ebx
19392-1: ret
19393+1: pax_force_retaddr_bts
19394+ ret
19395 CFI_ENDPROC
19396-END(save_paranoid)
19397+ENDPROC(save_paranoid)
19398 .popsection
19399
19400 /*
19401@@ -553,7 +867,7 @@ ENTRY(ret_from_fork)
19402
19403 RESTORE_REST
19404
19405- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
19406+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
19407 jz 1f
19408
19409 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
19410@@ -571,7 +885,7 @@ ENTRY(ret_from_fork)
19411 RESTORE_REST
19412 jmp int_ret_from_sys_call
19413 CFI_ENDPROC
19414-END(ret_from_fork)
19415+ENDPROC(ret_from_fork)
19416
19417 /*
19418 * System call entry. Up to 6 arguments in registers are supported.
19419@@ -608,7 +922,7 @@ END(ret_from_fork)
19420 ENTRY(system_call)
19421 CFI_STARTPROC simple
19422 CFI_SIGNAL_FRAME
19423- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
19424+ CFI_DEF_CFA rsp,0
19425 CFI_REGISTER rip,rcx
19426 /*CFI_REGISTER rflags,r11*/
19427 SWAPGS_UNSAFE_STACK
19428@@ -621,16 +935,23 @@ GLOBAL(system_call_after_swapgs)
19429
19430 movq %rsp,PER_CPU_VAR(old_rsp)
19431 movq PER_CPU_VAR(kernel_stack),%rsp
19432+ SAVE_ARGS 8*6,0
19433+ pax_enter_kernel_user
19434+
19435+#ifdef CONFIG_PAX_RANDKSTACK
19436+ pax_erase_kstack
19437+#endif
19438+
19439 /*
19440 * No need to follow this irqs off/on section - it's straight
19441 * and short:
19442 */
19443 ENABLE_INTERRUPTS(CLBR_NONE)
19444- SAVE_ARGS 8,0
19445 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
19446 movq %rcx,RIP-ARGOFFSET(%rsp)
19447 CFI_REL_OFFSET rip,RIP-ARGOFFSET
19448- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
19449+ GET_THREAD_INFO(%rcx)
19450+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
19451 jnz tracesys
19452 system_call_fastpath:
19453 #if __SYSCALL_MASK == ~0
19454@@ -640,7 +961,7 @@ system_call_fastpath:
19455 cmpl $__NR_syscall_max,%eax
19456 #endif
19457 ja badsys
19458- movq %r10,%rcx
19459+ movq R10-ARGOFFSET(%rsp),%rcx
19460 call *sys_call_table(,%rax,8) # XXX: rip relative
19461 movq %rax,RAX-ARGOFFSET(%rsp)
19462 /*
19463@@ -654,10 +975,13 @@ sysret_check:
19464 LOCKDEP_SYS_EXIT
19465 DISABLE_INTERRUPTS(CLBR_NONE)
19466 TRACE_IRQS_OFF
19467- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
19468+ GET_THREAD_INFO(%rcx)
19469+ movl TI_flags(%rcx),%edx
19470 andl %edi,%edx
19471 jnz sysret_careful
19472 CFI_REMEMBER_STATE
19473+ pax_exit_kernel_user
19474+ pax_erase_kstack
19475 /*
19476 * sysretq will re-enable interrupts:
19477 */
19478@@ -709,14 +1033,18 @@ badsys:
19479 * jump back to the normal fast path.
19480 */
19481 auditsys:
19482- movq %r10,%r9 /* 6th arg: 4th syscall arg */
19483+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
19484 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
19485 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
19486 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
19487 movq %rax,%rsi /* 2nd arg: syscall number */
19488 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
19489 call __audit_syscall_entry
19490+
19491+ pax_erase_kstack
19492+
19493 LOAD_ARGS 0 /* reload call-clobbered registers */
19494+ pax_set_fptr_mask
19495 jmp system_call_fastpath
19496
19497 /*
19498@@ -737,7 +1065,7 @@ sysret_audit:
19499 /* Do syscall tracing */
19500 tracesys:
19501 #ifdef CONFIG_AUDITSYSCALL
19502- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
19503+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
19504 jz auditsys
19505 #endif
19506 SAVE_REST
19507@@ -745,12 +1073,16 @@ tracesys:
19508 FIXUP_TOP_OF_STACK %rdi
19509 movq %rsp,%rdi
19510 call syscall_trace_enter
19511+
19512+ pax_erase_kstack
19513+
19514 /*
19515 * Reload arg registers from stack in case ptrace changed them.
19516 * We don't reload %rax because syscall_trace_enter() returned
19517 * the value it wants us to use in the table lookup.
19518 */
19519 LOAD_ARGS ARGOFFSET, 1
19520+ pax_set_fptr_mask
19521 RESTORE_REST
19522 #if __SYSCALL_MASK == ~0
19523 cmpq $__NR_syscall_max,%rax
19524@@ -759,7 +1091,7 @@ tracesys:
19525 cmpl $__NR_syscall_max,%eax
19526 #endif
19527 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
19528- movq %r10,%rcx /* fixup for C */
19529+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
19530 call *sys_call_table(,%rax,8)
19531 movq %rax,RAX-ARGOFFSET(%rsp)
19532 /* Use IRET because user could have changed frame */
19533@@ -780,7 +1112,9 @@ GLOBAL(int_with_check)
19534 andl %edi,%edx
19535 jnz int_careful
19536 andl $~TS_COMPAT,TI_status(%rcx)
19537- jmp retint_swapgs
19538+ pax_exit_kernel_user
19539+ pax_erase_kstack
19540+ jmp retint_swapgs_pax
19541
19542 /* Either reschedule or signal or syscall exit tracking needed. */
19543 /* First do a reschedule test. */
19544@@ -826,7 +1160,7 @@ int_restore_rest:
19545 TRACE_IRQS_OFF
19546 jmp int_with_check
19547 CFI_ENDPROC
19548-END(system_call)
19549+ENDPROC(system_call)
19550
19551 .macro FORK_LIKE func
19552 ENTRY(stub_\func)
19553@@ -839,9 +1173,10 @@ ENTRY(stub_\func)
19554 DEFAULT_FRAME 0 8 /* offset 8: return address */
19555 call sys_\func
19556 RESTORE_TOP_OF_STACK %r11, 8
19557+ pax_force_retaddr
19558 ret $REST_SKIP /* pop extended registers */
19559 CFI_ENDPROC
19560-END(stub_\func)
19561+ENDPROC(stub_\func)
19562 .endm
19563
19564 .macro FIXED_FRAME label,func
19565@@ -851,9 +1186,10 @@ ENTRY(\label)
19566 FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
19567 call \func
19568 RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
19569+ pax_force_retaddr
19570 ret
19571 CFI_ENDPROC
19572-END(\label)
19573+ENDPROC(\label)
19574 .endm
19575
19576 FORK_LIKE clone
19577@@ -870,9 +1206,10 @@ ENTRY(ptregscall_common)
19578 movq_cfi_restore R12+8, r12
19579 movq_cfi_restore RBP+8, rbp
19580 movq_cfi_restore RBX+8, rbx
19581+ pax_force_retaddr
19582 ret $REST_SKIP /* pop extended registers */
19583 CFI_ENDPROC
19584-END(ptregscall_common)
19585+ENDPROC(ptregscall_common)
19586
19587 ENTRY(stub_execve)
19588 CFI_STARTPROC
19589@@ -885,7 +1222,7 @@ ENTRY(stub_execve)
19590 RESTORE_REST
19591 jmp int_ret_from_sys_call
19592 CFI_ENDPROC
19593-END(stub_execve)
19594+ENDPROC(stub_execve)
19595
19596 /*
19597 * sigreturn is special because it needs to restore all registers on return.
19598@@ -902,7 +1239,7 @@ ENTRY(stub_rt_sigreturn)
19599 RESTORE_REST
19600 jmp int_ret_from_sys_call
19601 CFI_ENDPROC
19602-END(stub_rt_sigreturn)
19603+ENDPROC(stub_rt_sigreturn)
19604
19605 #ifdef CONFIG_X86_X32_ABI
19606 ENTRY(stub_x32_rt_sigreturn)
19607@@ -916,7 +1253,7 @@ ENTRY(stub_x32_rt_sigreturn)
19608 RESTORE_REST
19609 jmp int_ret_from_sys_call
19610 CFI_ENDPROC
19611-END(stub_x32_rt_sigreturn)
19612+ENDPROC(stub_x32_rt_sigreturn)
19613
19614 ENTRY(stub_x32_execve)
19615 CFI_STARTPROC
19616@@ -930,7 +1267,7 @@ ENTRY(stub_x32_execve)
19617 RESTORE_REST
19618 jmp int_ret_from_sys_call
19619 CFI_ENDPROC
19620-END(stub_x32_execve)
19621+ENDPROC(stub_x32_execve)
19622
19623 #endif
19624
19625@@ -967,7 +1304,7 @@ vector=vector+1
19626 2: jmp common_interrupt
19627 .endr
19628 CFI_ENDPROC
19629-END(irq_entries_start)
19630+ENDPROC(irq_entries_start)
19631
19632 .previous
19633 END(interrupt)
19634@@ -987,6 +1324,16 @@ END(interrupt)
19635 subq $ORIG_RAX-RBP, %rsp
19636 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
19637 SAVE_ARGS_IRQ
19638+#ifdef CONFIG_PAX_MEMORY_UDEREF
19639+ testb $3, CS(%rdi)
19640+ jnz 1f
19641+ pax_enter_kernel
19642+ jmp 2f
19643+1: pax_enter_kernel_user
19644+2:
19645+#else
19646+ pax_enter_kernel
19647+#endif
19648 call \func
19649 .endm
19650
19651@@ -1019,7 +1366,7 @@ ret_from_intr:
19652
19653 exit_intr:
19654 GET_THREAD_INFO(%rcx)
19655- testl $3,CS-ARGOFFSET(%rsp)
19656+ testb $3,CS-ARGOFFSET(%rsp)
19657 je retint_kernel
19658
19659 /* Interrupt came from user space */
19660@@ -1041,12 +1388,16 @@ retint_swapgs: /* return to user-space */
19661 * The iretq could re-enable interrupts:
19662 */
19663 DISABLE_INTERRUPTS(CLBR_ANY)
19664+ pax_exit_kernel_user
19665+retint_swapgs_pax:
19666 TRACE_IRQS_IRETQ
19667 SWAPGS
19668 jmp restore_args
19669
19670 retint_restore_args: /* return to kernel space */
19671 DISABLE_INTERRUPTS(CLBR_ANY)
19672+ pax_exit_kernel
19673+ pax_force_retaddr (RIP-ARGOFFSET)
19674 /*
19675 * The iretq could re-enable interrupts:
19676 */
19677@@ -1129,7 +1480,7 @@ ENTRY(retint_kernel)
19678 #endif
19679
19680 CFI_ENDPROC
19681-END(common_interrupt)
19682+ENDPROC(common_interrupt)
19683 /*
19684 * End of kprobes section
19685 */
19686@@ -1147,7 +1498,7 @@ ENTRY(\sym)
19687 interrupt \do_sym
19688 jmp ret_from_intr
19689 CFI_ENDPROC
19690-END(\sym)
19691+ENDPROC(\sym)
19692 .endm
19693
19694 #ifdef CONFIG_SMP
19695@@ -1208,12 +1559,22 @@ ENTRY(\sym)
19696 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
19697 call error_entry
19698 DEFAULT_FRAME 0
19699+#ifdef CONFIG_PAX_MEMORY_UDEREF
19700+ testb $3, CS(%rsp)
19701+ jnz 1f
19702+ pax_enter_kernel
19703+ jmp 2f
19704+1: pax_enter_kernel_user
19705+2:
19706+#else
19707+ pax_enter_kernel
19708+#endif
19709 movq %rsp,%rdi /* pt_regs pointer */
19710 xorl %esi,%esi /* no error code */
19711 call \do_sym
19712 jmp error_exit /* %ebx: no swapgs flag */
19713 CFI_ENDPROC
19714-END(\sym)
19715+ENDPROC(\sym)
19716 .endm
19717
19718 .macro paranoidzeroentry sym do_sym
19719@@ -1226,15 +1587,25 @@ ENTRY(\sym)
19720 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
19721 call save_paranoid
19722 TRACE_IRQS_OFF
19723+#ifdef CONFIG_PAX_MEMORY_UDEREF
19724+ testb $3, CS(%rsp)
19725+ jnz 1f
19726+ pax_enter_kernel
19727+ jmp 2f
19728+1: pax_enter_kernel_user
19729+2:
19730+#else
19731+ pax_enter_kernel
19732+#endif
19733 movq %rsp,%rdi /* pt_regs pointer */
19734 xorl %esi,%esi /* no error code */
19735 call \do_sym
19736 jmp paranoid_exit /* %ebx: no swapgs flag */
19737 CFI_ENDPROC
19738-END(\sym)
19739+ENDPROC(\sym)
19740 .endm
19741
19742-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
19743+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
19744 .macro paranoidzeroentry_ist sym do_sym ist
19745 ENTRY(\sym)
19746 INTR_FRAME
19747@@ -1245,14 +1616,30 @@ ENTRY(\sym)
19748 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
19749 call save_paranoid
19750 TRACE_IRQS_OFF_DEBUG
19751+#ifdef CONFIG_PAX_MEMORY_UDEREF
19752+ testb $3, CS(%rsp)
19753+ jnz 1f
19754+ pax_enter_kernel
19755+ jmp 2f
19756+1: pax_enter_kernel_user
19757+2:
19758+#else
19759+ pax_enter_kernel
19760+#endif
19761 movq %rsp,%rdi /* pt_regs pointer */
19762 xorl %esi,%esi /* no error code */
19763+#ifdef CONFIG_SMP
19764+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
19765+ lea init_tss(%r12), %r12
19766+#else
19767+ lea init_tss(%rip), %r12
19768+#endif
19769 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
19770 call \do_sym
19771 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
19772 jmp paranoid_exit /* %ebx: no swapgs flag */
19773 CFI_ENDPROC
19774-END(\sym)
19775+ENDPROC(\sym)
19776 .endm
19777
19778 .macro errorentry sym do_sym
19779@@ -1264,13 +1651,23 @@ ENTRY(\sym)
19780 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
19781 call error_entry
19782 DEFAULT_FRAME 0
19783+#ifdef CONFIG_PAX_MEMORY_UDEREF
19784+ testb $3, CS(%rsp)
19785+ jnz 1f
19786+ pax_enter_kernel
19787+ jmp 2f
19788+1: pax_enter_kernel_user
19789+2:
19790+#else
19791+ pax_enter_kernel
19792+#endif
19793 movq %rsp,%rdi /* pt_regs pointer */
19794 movq ORIG_RAX(%rsp),%rsi /* get error code */
19795 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
19796 call \do_sym
19797 jmp error_exit /* %ebx: no swapgs flag */
19798 CFI_ENDPROC
19799-END(\sym)
19800+ENDPROC(\sym)
19801 .endm
19802
19803 /* error code is on the stack already */
19804@@ -1284,13 +1681,23 @@ ENTRY(\sym)
19805 call save_paranoid
19806 DEFAULT_FRAME 0
19807 TRACE_IRQS_OFF
19808+#ifdef CONFIG_PAX_MEMORY_UDEREF
19809+ testb $3, CS(%rsp)
19810+ jnz 1f
19811+ pax_enter_kernel
19812+ jmp 2f
19813+1: pax_enter_kernel_user
19814+2:
19815+#else
19816+ pax_enter_kernel
19817+#endif
19818 movq %rsp,%rdi /* pt_regs pointer */
19819 movq ORIG_RAX(%rsp),%rsi /* get error code */
19820 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
19821 call \do_sym
19822 jmp paranoid_exit /* %ebx: no swapgs flag */
19823 CFI_ENDPROC
19824-END(\sym)
19825+ENDPROC(\sym)
19826 .endm
19827
19828 zeroentry divide_error do_divide_error
19829@@ -1320,9 +1727,10 @@ gs_change:
19830 2: mfence /* workaround */
19831 SWAPGS
19832 popfq_cfi
19833+ pax_force_retaddr
19834 ret
19835 CFI_ENDPROC
19836-END(native_load_gs_index)
19837+ENDPROC(native_load_gs_index)
19838
19839 _ASM_EXTABLE(gs_change,bad_gs)
19840 .section .fixup,"ax"
19841@@ -1350,9 +1758,10 @@ ENTRY(call_softirq)
19842 CFI_DEF_CFA_REGISTER rsp
19843 CFI_ADJUST_CFA_OFFSET -8
19844 decl PER_CPU_VAR(irq_count)
19845+ pax_force_retaddr
19846 ret
19847 CFI_ENDPROC
19848-END(call_softirq)
19849+ENDPROC(call_softirq)
19850
19851 #ifdef CONFIG_XEN
19852 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
19853@@ -1390,7 +1799,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
19854 decl PER_CPU_VAR(irq_count)
19855 jmp error_exit
19856 CFI_ENDPROC
19857-END(xen_do_hypervisor_callback)
19858+ENDPROC(xen_do_hypervisor_callback)
19859
19860 /*
19861 * Hypervisor uses this for application faults while it executes.
19862@@ -1449,7 +1858,7 @@ ENTRY(xen_failsafe_callback)
19863 SAVE_ALL
19864 jmp error_exit
19865 CFI_ENDPROC
19866-END(xen_failsafe_callback)
19867+ENDPROC(xen_failsafe_callback)
19868
19869 apicinterrupt HYPERVISOR_CALLBACK_VECTOR \
19870 xen_hvm_callback_vector xen_evtchn_do_upcall
19871@@ -1501,18 +1910,33 @@ ENTRY(paranoid_exit)
19872 DEFAULT_FRAME
19873 DISABLE_INTERRUPTS(CLBR_NONE)
19874 TRACE_IRQS_OFF_DEBUG
19875- testl %ebx,%ebx /* swapgs needed? */
19876+ testl $1,%ebx /* swapgs needed? */
19877 jnz paranoid_restore
19878- testl $3,CS(%rsp)
19879+ testb $3,CS(%rsp)
19880 jnz paranoid_userspace
19881+#ifdef CONFIG_PAX_MEMORY_UDEREF
19882+ pax_exit_kernel
19883+ TRACE_IRQS_IRETQ 0
19884+ SWAPGS_UNSAFE_STACK
19885+ RESTORE_ALL 8
19886+ pax_force_retaddr_bts
19887+ jmp irq_return
19888+#endif
19889 paranoid_swapgs:
19890+#ifdef CONFIG_PAX_MEMORY_UDEREF
19891+ pax_exit_kernel_user
19892+#else
19893+ pax_exit_kernel
19894+#endif
19895 TRACE_IRQS_IRETQ 0
19896 SWAPGS_UNSAFE_STACK
19897 RESTORE_ALL 8
19898 jmp irq_return
19899 paranoid_restore:
19900+ pax_exit_kernel
19901 TRACE_IRQS_IRETQ_DEBUG 0
19902 RESTORE_ALL 8
19903+ pax_force_retaddr_bts
19904 jmp irq_return
19905 paranoid_userspace:
19906 GET_THREAD_INFO(%rcx)
19907@@ -1541,7 +1965,7 @@ paranoid_schedule:
19908 TRACE_IRQS_OFF
19909 jmp paranoid_userspace
19910 CFI_ENDPROC
19911-END(paranoid_exit)
19912+ENDPROC(paranoid_exit)
19913
19914 /*
19915 * Exception entry point. This expects an error code/orig_rax on the stack.
19916@@ -1568,12 +1992,13 @@ ENTRY(error_entry)
19917 movq_cfi r14, R14+8
19918 movq_cfi r15, R15+8
19919 xorl %ebx,%ebx
19920- testl $3,CS+8(%rsp)
19921+ testb $3,CS+8(%rsp)
19922 je error_kernelspace
19923 error_swapgs:
19924 SWAPGS
19925 error_sti:
19926 TRACE_IRQS_OFF
19927+ pax_force_retaddr_bts
19928 ret
19929
19930 /*
19931@@ -1600,7 +2025,7 @@ bstep_iret:
19932 movq %rcx,RIP+8(%rsp)
19933 jmp error_swapgs
19934 CFI_ENDPROC
19935-END(error_entry)
19936+ENDPROC(error_entry)
19937
19938
19939 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
19940@@ -1611,7 +2036,7 @@ ENTRY(error_exit)
19941 DISABLE_INTERRUPTS(CLBR_NONE)
19942 TRACE_IRQS_OFF
19943 GET_THREAD_INFO(%rcx)
19944- testl %eax,%eax
19945+ testl $1,%eax
19946 jne retint_kernel
19947 LOCKDEP_SYS_EXIT_IRQ
19948 movl TI_flags(%rcx),%edx
19949@@ -1620,7 +2045,7 @@ ENTRY(error_exit)
19950 jnz retint_careful
19951 jmp retint_swapgs
19952 CFI_ENDPROC
19953-END(error_exit)
19954+ENDPROC(error_exit)
19955
19956 /*
19957 * Test if a given stack is an NMI stack or not.
19958@@ -1678,9 +2103,11 @@ ENTRY(nmi)
19959 * If %cs was not the kernel segment, then the NMI triggered in user
19960 * space, which means it is definitely not nested.
19961 */
19962+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
19963+ je 1f
19964 cmpl $__KERNEL_CS, 16(%rsp)
19965 jne first_nmi
19966-
19967+1:
19968 /*
19969 * Check the special variable on the stack to see if NMIs are
19970 * executing.
19971@@ -1714,8 +2141,7 @@ nested_nmi:
19972
19973 1:
19974 /* Set up the interrupted NMIs stack to jump to repeat_nmi */
19975- leaq -1*8(%rsp), %rdx
19976- movq %rdx, %rsp
19977+ subq $8, %rsp
19978 CFI_ADJUST_CFA_OFFSET 1*8
19979 leaq -10*8(%rsp), %rdx
19980 pushq_cfi $__KERNEL_DS
19981@@ -1733,6 +2159,7 @@ nested_nmi_out:
19982 CFI_RESTORE rdx
19983
19984 /* No need to check faults here */
19985+# pax_force_retaddr_bts
19986 INTERRUPT_RETURN
19987
19988 CFI_RESTORE_STATE
19989@@ -1849,6 +2276,8 @@ end_repeat_nmi:
19990 */
19991 movq %cr2, %r12
19992
19993+ pax_enter_kernel_nmi
19994+
19995 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
19996 movq %rsp,%rdi
19997 movq $-1,%rsi
19998@@ -1861,26 +2290,31 @@ end_repeat_nmi:
19999 movq %r12, %cr2
20000 1:
20001
20002- testl %ebx,%ebx /* swapgs needed? */
20003+ testl $1,%ebx /* swapgs needed? */
20004 jnz nmi_restore
20005 nmi_swapgs:
20006 SWAPGS_UNSAFE_STACK
20007 nmi_restore:
20008+ pax_exit_kernel_nmi
20009 /* Pop the extra iret frame at once */
20010 RESTORE_ALL 6*8
20011+ testb $3, 8(%rsp)
20012+ jnz 1f
20013+ pax_force_retaddr_bts
20014+1:
20015
20016 /* Clear the NMI executing stack variable */
20017 movq $0, 5*8(%rsp)
20018 jmp irq_return
20019 CFI_ENDPROC
20020-END(nmi)
20021+ENDPROC(nmi)
20022
20023 ENTRY(ignore_sysret)
20024 CFI_STARTPROC
20025 mov $-ENOSYS,%eax
20026 sysret
20027 CFI_ENDPROC
20028-END(ignore_sysret)
20029+ENDPROC(ignore_sysret)
20030
20031 /*
20032 * End of kprobes section
20033diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
20034index 42a392a..fbbd930 100644
20035--- a/arch/x86/kernel/ftrace.c
20036+++ b/arch/x86/kernel/ftrace.c
20037@@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
20038 {
20039 unsigned char replaced[MCOUNT_INSN_SIZE];
20040
20041+ ip = ktla_ktva(ip);
20042+
20043 /*
20044 * Note: Due to modules and __init, code can
20045 * disappear and change, we need to protect against faulting
20046@@ -227,7 +229,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
20047 unsigned char old[MCOUNT_INSN_SIZE], *new;
20048 int ret;
20049
20050- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
20051+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
20052 new = ftrace_call_replace(ip, (unsigned long)func);
20053
20054 /* See comment above by declaration of modifying_ftrace_code */
20055@@ -238,7 +240,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
20056 /* Also update the regs callback function */
20057 if (!ret) {
20058 ip = (unsigned long)(&ftrace_regs_call);
20059- memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE);
20060+ memcpy(old, ktla_ktva((void *)&ftrace_regs_call), MCOUNT_INSN_SIZE);
20061 new = ftrace_call_replace(ip, (unsigned long)func);
20062 ret = ftrace_modify_code(ip, old, new);
20063 }
20064@@ -279,7 +281,7 @@ static int ftrace_write(unsigned long ip, const char *val, int size)
20065 * kernel identity mapping to modify code.
20066 */
20067 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
20068- ip = (unsigned long)__va(__pa_symbol(ip));
20069+ ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip)));
20070
20071 return probe_kernel_write((void *)ip, val, size);
20072 }
20073@@ -289,7 +291,7 @@ static int add_break(unsigned long ip, const char *old)
20074 unsigned char replaced[MCOUNT_INSN_SIZE];
20075 unsigned char brk = BREAKPOINT_INSTRUCTION;
20076
20077- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
20078+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
20079 return -EFAULT;
20080
20081 /* Make sure it is what we expect it to be */
20082@@ -637,7 +639,7 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
20083 return ret;
20084
20085 fail_update:
20086- probe_kernel_write((void *)ip, &old_code[0], 1);
20087+ probe_kernel_write((void *)ktla_ktva(ip), &old_code[0], 1);
20088 goto out;
20089 }
20090
20091@@ -670,6 +672,8 @@ static int ftrace_mod_jmp(unsigned long ip,
20092 {
20093 unsigned char code[MCOUNT_INSN_SIZE];
20094
20095+ ip = ktla_ktva(ip);
20096+
20097 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
20098 return -EFAULT;
20099
20100diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
20101index 55b6761..a6456fc 100644
20102--- a/arch/x86/kernel/head64.c
20103+++ b/arch/x86/kernel/head64.c
20104@@ -67,12 +67,12 @@ again:
20105 pgd = *pgd_p;
20106
20107 /*
20108- * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
20109- * critical -- __PAGE_OFFSET would point us back into the dynamic
20110+ * The use of __early_va rather than __va here is critical:
20111+ * __va would point us back into the dynamic
20112 * range and we might end up looping forever...
20113 */
20114 if (pgd)
20115- pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
20116+ pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK));
20117 else {
20118 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
20119 reset_early_page_tables();
20120@@ -82,13 +82,13 @@ again:
20121 pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
20122 for (i = 0; i < PTRS_PER_PUD; i++)
20123 pud_p[i] = 0;
20124- *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
20125+ *pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE;
20126 }
20127 pud_p += pud_index(address);
20128 pud = *pud_p;
20129
20130 if (pud)
20131- pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
20132+ pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK));
20133 else {
20134 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
20135 reset_early_page_tables();
20136@@ -98,7 +98,7 @@ again:
20137 pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
20138 for (i = 0; i < PTRS_PER_PMD; i++)
20139 pmd_p[i] = 0;
20140- *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
20141+ *pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE;
20142 }
20143 pmd = (physaddr & PMD_MASK) + early_pmd_flags;
20144 pmd_p[pmd_index(address)] = pmd;
20145@@ -175,7 +175,6 @@ void __init x86_64_start_kernel(char * real_mode_data)
20146 if (console_loglevel == 10)
20147 early_printk("Kernel alive\n");
20148
20149- clear_page(init_level4_pgt);
20150 /* set init_level4_pgt kernel high mapping*/
20151 init_level4_pgt[511] = early_level4_pgt[511];
20152
20153diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
20154index 73afd11..d1670f5 100644
20155--- a/arch/x86/kernel/head_32.S
20156+++ b/arch/x86/kernel/head_32.S
20157@@ -26,6 +26,12 @@
20158 /* Physical address */
20159 #define pa(X) ((X) - __PAGE_OFFSET)
20160
20161+#ifdef CONFIG_PAX_KERNEXEC
20162+#define ta(X) (X)
20163+#else
20164+#define ta(X) ((X) - __PAGE_OFFSET)
20165+#endif
20166+
20167 /*
20168 * References to members of the new_cpu_data structure.
20169 */
20170@@ -55,11 +61,7 @@
20171 * and small than max_low_pfn, otherwise will waste some page table entries
20172 */
20173
20174-#if PTRS_PER_PMD > 1
20175-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
20176-#else
20177-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
20178-#endif
20179+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
20180
20181 /* Number of possible pages in the lowmem region */
20182 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
20183@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
20184 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
20185
20186 /*
20187+ * Real beginning of normal "text" segment
20188+ */
20189+ENTRY(stext)
20190+ENTRY(_stext)
20191+
20192+/*
20193 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
20194 * %esi points to the real-mode code as a 32-bit pointer.
20195 * CS and DS must be 4 GB flat segments, but we don't depend on
20196@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
20197 * can.
20198 */
20199 __HEAD
20200+
20201+#ifdef CONFIG_PAX_KERNEXEC
20202+ jmp startup_32
20203+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
20204+.fill PAGE_SIZE-5,1,0xcc
20205+#endif
20206+
20207 ENTRY(startup_32)
20208 movl pa(stack_start),%ecx
20209
20210@@ -106,6 +121,59 @@ ENTRY(startup_32)
20211 2:
20212 leal -__PAGE_OFFSET(%ecx),%esp
20213
20214+#ifdef CONFIG_SMP
20215+ movl $pa(cpu_gdt_table),%edi
20216+ movl $__per_cpu_load,%eax
20217+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
20218+ rorl $16,%eax
20219+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
20220+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
20221+ movl $__per_cpu_end - 1,%eax
20222+ subl $__per_cpu_start,%eax
20223+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
20224+#endif
20225+
20226+#ifdef CONFIG_PAX_MEMORY_UDEREF
20227+ movl $NR_CPUS,%ecx
20228+ movl $pa(cpu_gdt_table),%edi
20229+1:
20230+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
20231+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
20232+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
20233+ addl $PAGE_SIZE_asm,%edi
20234+ loop 1b
20235+#endif
20236+
20237+#ifdef CONFIG_PAX_KERNEXEC
20238+ movl $pa(boot_gdt),%edi
20239+ movl $__LOAD_PHYSICAL_ADDR,%eax
20240+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
20241+ rorl $16,%eax
20242+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
20243+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
20244+ rorl $16,%eax
20245+
20246+ ljmp $(__BOOT_CS),$1f
20247+1:
20248+
20249+ movl $NR_CPUS,%ecx
20250+ movl $pa(cpu_gdt_table),%edi
20251+ addl $__PAGE_OFFSET,%eax
20252+1:
20253+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
20254+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
20255+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
20256+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
20257+ rorl $16,%eax
20258+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
20259+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
20260+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
20261+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
20262+ rorl $16,%eax
20263+ addl $PAGE_SIZE_asm,%edi
20264+ loop 1b
20265+#endif
20266+
20267 /*
20268 * Clear BSS first so that there are no surprises...
20269 */
20270@@ -201,8 +269,11 @@ ENTRY(startup_32)
20271 movl %eax, pa(max_pfn_mapped)
20272
20273 /* Do early initialization of the fixmap area */
20274- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
20275- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
20276+#ifdef CONFIG_COMPAT_VDSO
20277+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
20278+#else
20279+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
20280+#endif
20281 #else /* Not PAE */
20282
20283 page_pde_offset = (__PAGE_OFFSET >> 20);
20284@@ -232,8 +303,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
20285 movl %eax, pa(max_pfn_mapped)
20286
20287 /* Do early initialization of the fixmap area */
20288- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
20289- movl %eax,pa(initial_page_table+0xffc)
20290+#ifdef CONFIG_COMPAT_VDSO
20291+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
20292+#else
20293+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
20294+#endif
20295 #endif
20296
20297 #ifdef CONFIG_PARAVIRT
20298@@ -247,9 +321,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
20299 cmpl $num_subarch_entries, %eax
20300 jae bad_subarch
20301
20302- movl pa(subarch_entries)(,%eax,4), %eax
20303- subl $__PAGE_OFFSET, %eax
20304- jmp *%eax
20305+ jmp *pa(subarch_entries)(,%eax,4)
20306
20307 bad_subarch:
20308 WEAK(lguest_entry)
20309@@ -261,10 +333,10 @@ WEAK(xen_entry)
20310 __INITDATA
20311
20312 subarch_entries:
20313- .long default_entry /* normal x86/PC */
20314- .long lguest_entry /* lguest hypervisor */
20315- .long xen_entry /* Xen hypervisor */
20316- .long default_entry /* Moorestown MID */
20317+ .long ta(default_entry) /* normal x86/PC */
20318+ .long ta(lguest_entry) /* lguest hypervisor */
20319+ .long ta(xen_entry) /* Xen hypervisor */
20320+ .long ta(default_entry) /* Moorestown MID */
20321 num_subarch_entries = (. - subarch_entries) / 4
20322 .previous
20323 #else
20324@@ -355,6 +427,7 @@ default_entry:
20325 movl pa(mmu_cr4_features),%eax
20326 movl %eax,%cr4
20327
20328+#ifdef CONFIG_X86_PAE
20329 testb $X86_CR4_PAE, %al # check if PAE is enabled
20330 jz enable_paging
20331
20332@@ -383,6 +456,9 @@ default_entry:
20333 /* Make changes effective */
20334 wrmsr
20335
20336+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
20337+#endif
20338+
20339 enable_paging:
20340
20341 /*
20342@@ -451,14 +527,20 @@ is486:
20343 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
20344 movl %eax,%ss # after changing gdt.
20345
20346- movl $(__USER_DS),%eax # DS/ES contains default USER segment
20347+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
20348 movl %eax,%ds
20349 movl %eax,%es
20350
20351 movl $(__KERNEL_PERCPU), %eax
20352 movl %eax,%fs # set this cpu's percpu
20353
20354+#ifdef CONFIG_CC_STACKPROTECTOR
20355 movl $(__KERNEL_STACK_CANARY),%eax
20356+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
20357+ movl $(__USER_DS),%eax
20358+#else
20359+ xorl %eax,%eax
20360+#endif
20361 movl %eax,%gs
20362
20363 xorl %eax,%eax # Clear LDT
20364@@ -534,8 +616,11 @@ setup_once:
20365 * relocation. Manually set base address in stack canary
20366 * segment descriptor.
20367 */
20368- movl $gdt_page,%eax
20369+ movl $cpu_gdt_table,%eax
20370 movl $stack_canary,%ecx
20371+#ifdef CONFIG_SMP
20372+ addl $__per_cpu_load,%ecx
20373+#endif
20374 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
20375 shrl $16, %ecx
20376 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
20377@@ -566,7 +651,7 @@ ENDPROC(early_idt_handlers)
20378 /* This is global to keep gas from relaxing the jumps */
20379 ENTRY(early_idt_handler)
20380 cld
20381- cmpl $2,%ss:early_recursion_flag
20382+ cmpl $1,%ss:early_recursion_flag
20383 je hlt_loop
20384 incl %ss:early_recursion_flag
20385
20386@@ -604,8 +689,8 @@ ENTRY(early_idt_handler)
20387 pushl (20+6*4)(%esp) /* trapno */
20388 pushl $fault_msg
20389 call printk
20390-#endif
20391 call dump_stack
20392+#endif
20393 hlt_loop:
20394 hlt
20395 jmp hlt_loop
20396@@ -624,8 +709,11 @@ ENDPROC(early_idt_handler)
20397 /* This is the default interrupt "handler" :-) */
20398 ALIGN
20399 ignore_int:
20400- cld
20401 #ifdef CONFIG_PRINTK
20402+ cmpl $2,%ss:early_recursion_flag
20403+ je hlt_loop
20404+ incl %ss:early_recursion_flag
20405+ cld
20406 pushl %eax
20407 pushl %ecx
20408 pushl %edx
20409@@ -634,9 +722,6 @@ ignore_int:
20410 movl $(__KERNEL_DS),%eax
20411 movl %eax,%ds
20412 movl %eax,%es
20413- cmpl $2,early_recursion_flag
20414- je hlt_loop
20415- incl early_recursion_flag
20416 pushl 16(%esp)
20417 pushl 24(%esp)
20418 pushl 32(%esp)
20419@@ -670,29 +755,43 @@ ENTRY(setup_once_ref)
20420 /*
20421 * BSS section
20422 */
20423-__PAGE_ALIGNED_BSS
20424- .align PAGE_SIZE
20425 #ifdef CONFIG_X86_PAE
20426+.section .initial_pg_pmd,"a",@progbits
20427 initial_pg_pmd:
20428 .fill 1024*KPMDS,4,0
20429 #else
20430+.section .initial_page_table,"a",@progbits
20431 ENTRY(initial_page_table)
20432 .fill 1024,4,0
20433 #endif
20434+.section .initial_pg_fixmap,"a",@progbits
20435 initial_pg_fixmap:
20436 .fill 1024,4,0
20437+.section .empty_zero_page,"a",@progbits
20438 ENTRY(empty_zero_page)
20439 .fill 4096,1,0
20440+.section .swapper_pg_dir,"a",@progbits
20441 ENTRY(swapper_pg_dir)
20442+#ifdef CONFIG_X86_PAE
20443+ .fill 4,8,0
20444+#else
20445 .fill 1024,4,0
20446+#endif
20447+
20448+/*
20449+ * The IDT has to be page-aligned to simplify the Pentium
20450+ * F0 0F bug workaround.. We have a special link segment
20451+ * for this.
20452+ */
20453+.section .idt,"a",@progbits
20454+ENTRY(idt_table)
20455+ .fill 256,8,0
20456
20457 /*
20458 * This starts the data section.
20459 */
20460 #ifdef CONFIG_X86_PAE
20461-__PAGE_ALIGNED_DATA
20462- /* Page-aligned for the benefit of paravirt? */
20463- .align PAGE_SIZE
20464+.section .initial_page_table,"a",@progbits
20465 ENTRY(initial_page_table)
20466 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
20467 # if KPMDS == 3
20468@@ -711,12 +810,20 @@ ENTRY(initial_page_table)
20469 # error "Kernel PMDs should be 1, 2 or 3"
20470 # endif
20471 .align PAGE_SIZE /* needs to be page-sized too */
20472+
20473+#ifdef CONFIG_PAX_PER_CPU_PGD
20474+ENTRY(cpu_pgd)
20475+ .rept NR_CPUS
20476+ .fill 4,8,0
20477+ .endr
20478+#endif
20479+
20480 #endif
20481
20482 .data
20483 .balign 4
20484 ENTRY(stack_start)
20485- .long init_thread_union+THREAD_SIZE
20486+ .long init_thread_union+THREAD_SIZE-8
20487
20488 __INITRODATA
20489 int_msg:
20490@@ -744,7 +851,7 @@ fault_msg:
20491 * segment size, and 32-bit linear address value:
20492 */
20493
20494- .data
20495+.section .rodata,"a",@progbits
20496 .globl boot_gdt_descr
20497 .globl idt_descr
20498
20499@@ -753,7 +860,7 @@ fault_msg:
20500 .word 0 # 32 bit align gdt_desc.address
20501 boot_gdt_descr:
20502 .word __BOOT_DS+7
20503- .long boot_gdt - __PAGE_OFFSET
20504+ .long pa(boot_gdt)
20505
20506 .word 0 # 32-bit align idt_desc.address
20507 idt_descr:
20508@@ -764,7 +871,7 @@ idt_descr:
20509 .word 0 # 32 bit align gdt_desc.address
20510 ENTRY(early_gdt_descr)
20511 .word GDT_ENTRIES*8-1
20512- .long gdt_page /* Overwritten for secondary CPUs */
20513+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
20514
20515 /*
20516 * The boot_gdt must mirror the equivalent in setup.S and is
20517@@ -773,5 +880,65 @@ ENTRY(early_gdt_descr)
20518 .align L1_CACHE_BYTES
20519 ENTRY(boot_gdt)
20520 .fill GDT_ENTRY_BOOT_CS,8,0
20521- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
20522- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
20523+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
20524+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
20525+
20526+ .align PAGE_SIZE_asm
20527+ENTRY(cpu_gdt_table)
20528+ .rept NR_CPUS
20529+ .quad 0x0000000000000000 /* NULL descriptor */
20530+ .quad 0x0000000000000000 /* 0x0b reserved */
20531+ .quad 0x0000000000000000 /* 0x13 reserved */
20532+ .quad 0x0000000000000000 /* 0x1b reserved */
20533+
20534+#ifdef CONFIG_PAX_KERNEXEC
20535+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
20536+#else
20537+ .quad 0x0000000000000000 /* 0x20 unused */
20538+#endif
20539+
20540+ .quad 0x0000000000000000 /* 0x28 unused */
20541+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
20542+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
20543+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
20544+ .quad 0x0000000000000000 /* 0x4b reserved */
20545+ .quad 0x0000000000000000 /* 0x53 reserved */
20546+ .quad 0x0000000000000000 /* 0x5b reserved */
20547+
20548+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
20549+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
20550+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
20551+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
20552+
20553+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
20554+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
20555+
20556+ /*
20557+ * Segments used for calling PnP BIOS have byte granularity.
20558+ * The code segments and data segments have fixed 64k limits,
20559+ * the transfer segment sizes are set at run time.
20560+ */
20561+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
20562+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
20563+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
20564+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
20565+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
20566+
20567+ /*
20568+ * The APM segments have byte granularity and their bases
20569+ * are set at run time. All have 64k limits.
20570+ */
20571+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
20572+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
20573+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
20574+
20575+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
20576+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
20577+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
20578+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
20579+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
20580+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
20581+
20582+ /* Be sure this is zeroed to avoid false validations in Xen */
20583+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
20584+ .endr
20585diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
20586index 321d65e..863089b 100644
20587--- a/arch/x86/kernel/head_64.S
20588+++ b/arch/x86/kernel/head_64.S
20589@@ -20,6 +20,8 @@
20590 #include <asm/processor-flags.h>
20591 #include <asm/percpu.h>
20592 #include <asm/nops.h>
20593+#include <asm/cpufeature.h>
20594+#include <asm/alternative-asm.h>
20595
20596 #ifdef CONFIG_PARAVIRT
20597 #include <asm/asm-offsets.h>
20598@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
20599 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
20600 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
20601 L3_START_KERNEL = pud_index(__START_KERNEL_map)
20602+L4_VMALLOC_START = pgd_index(VMALLOC_START)
20603+L3_VMALLOC_START = pud_index(VMALLOC_START)
20604+L4_VMALLOC_END = pgd_index(VMALLOC_END)
20605+L3_VMALLOC_END = pud_index(VMALLOC_END)
20606+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
20607+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
20608
20609 .text
20610 __HEAD
20611@@ -89,11 +97,23 @@ startup_64:
20612 * Fixup the physical addresses in the page table
20613 */
20614 addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
20615+ addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
20616+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
20617+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
20618+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
20619+ addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
20620
20621- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
20622- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
20623+ addq %rbp, level3_ident_pgt + (0*8)(%rip)
20624+#ifndef CONFIG_XEN
20625+ addq %rbp, level3_ident_pgt + (1*8)(%rip)
20626+#endif
20627
20628- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
20629+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
20630+
20631+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
20632+ addq %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip)
20633+
20634+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
20635
20636 /*
20637 * Set up the identity mapping for the switchover. These
20638@@ -177,8 +197,8 @@ ENTRY(secondary_startup_64)
20639 movq $(init_level4_pgt - __START_KERNEL_map), %rax
20640 1:
20641
20642- /* Enable PAE mode and PGE */
20643- movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx
20644+ /* Enable PAE mode and PSE/PGE */
20645+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx
20646 movq %rcx, %cr4
20647
20648 /* Setup early boot stage 4 level pagetables. */
20649@@ -199,10 +219,18 @@ ENTRY(secondary_startup_64)
20650 movl $MSR_EFER, %ecx
20651 rdmsr
20652 btsl $_EFER_SCE, %eax /* Enable System Call */
20653- btl $20,%edi /* No Execute supported? */
20654+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
20655 jnc 1f
20656 btsl $_EFER_NX, %eax
20657 btsq $_PAGE_BIT_NX,early_pmd_flags(%rip)
20658+ leaq init_level4_pgt(%rip), %rdi
20659+#ifndef CONFIG_EFI
20660+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
20661+#endif
20662+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
20663+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
20664+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
20665+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
20666 1: wrmsr /* Make changes effective */
20667
20668 /* Setup cr0 */
20669@@ -282,6 +310,7 @@ ENTRY(secondary_startup_64)
20670 * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
20671 * address given in m16:64.
20672 */
20673+ pax_set_fptr_mask
20674 movq initial_code(%rip),%rax
20675 pushq $0 # fake return address to stop unwinder
20676 pushq $__KERNEL_CS # set correct cs
20677@@ -388,7 +417,7 @@ ENTRY(early_idt_handler)
20678 call dump_stack
20679 #ifdef CONFIG_KALLSYMS
20680 leaq early_idt_ripmsg(%rip),%rdi
20681- movq 40(%rsp),%rsi # %rip again
20682+ movq 88(%rsp),%rsi # %rip again
20683 call __print_symbol
20684 #endif
20685 #endif /* EARLY_PRINTK */
20686@@ -416,6 +445,7 @@ ENDPROC(early_idt_handler)
20687 early_recursion_flag:
20688 .long 0
20689
20690+ .section .rodata,"a",@progbits
20691 #ifdef CONFIG_EARLY_PRINTK
20692 early_idt_msg:
20693 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
20694@@ -443,29 +473,52 @@ NEXT_PAGE(early_level4_pgt)
20695 NEXT_PAGE(early_dynamic_pgts)
20696 .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
20697
20698- .data
20699+ .section .rodata,"a",@progbits
20700
20701-#ifndef CONFIG_XEN
20702 NEXT_PAGE(init_level4_pgt)
20703- .fill 512,8,0
20704-#else
20705-NEXT_PAGE(init_level4_pgt)
20706- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
20707 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
20708 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
20709+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
20710+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
20711+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
20712+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
20713+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
20714+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
20715 .org init_level4_pgt + L4_START_KERNEL*8, 0
20716 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
20717 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
20718
20719+#ifdef CONFIG_PAX_PER_CPU_PGD
20720+NEXT_PAGE(cpu_pgd)
20721+ .rept NR_CPUS
20722+ .fill 512,8,0
20723+ .endr
20724+#endif
20725+
20726 NEXT_PAGE(level3_ident_pgt)
20727 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
20728+#ifdef CONFIG_XEN
20729 .fill 511, 8, 0
20730+#else
20731+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
20732+ .fill 510,8,0
20733+#endif
20734+
20735+NEXT_PAGE(level3_vmalloc_start_pgt)
20736+ .fill 512,8,0
20737+
20738+NEXT_PAGE(level3_vmalloc_end_pgt)
20739+ .fill 512,8,0
20740+
20741+NEXT_PAGE(level3_vmemmap_pgt)
20742+ .fill L3_VMEMMAP_START,8,0
20743+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
20744+
20745 NEXT_PAGE(level2_ident_pgt)
20746- /* Since I easily can, map the first 1G.
20747+ /* Since I easily can, map the first 2G.
20748 * Don't set NX because code runs from these pages.
20749 */
20750- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
20751-#endif
20752+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
20753
20754 NEXT_PAGE(level3_kernel_pgt)
20755 .fill L3_START_KERNEL,8,0
20756@@ -473,6 +526,9 @@ NEXT_PAGE(level3_kernel_pgt)
20757 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
20758 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
20759
20760+NEXT_PAGE(level2_vmemmap_pgt)
20761+ .fill 512,8,0
20762+
20763 NEXT_PAGE(level2_kernel_pgt)
20764 /*
20765 * 512 MB kernel mapping. We spend a full page on this pagetable
20766@@ -488,39 +544,64 @@ NEXT_PAGE(level2_kernel_pgt)
20767 KERNEL_IMAGE_SIZE/PMD_SIZE)
20768
20769 NEXT_PAGE(level2_fixmap_pgt)
20770- .fill 506,8,0
20771- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
20772- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
20773- .fill 5,8,0
20774+ .fill 507,8,0
20775+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
20776+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
20777+ .fill 4,8,0
20778
20779-NEXT_PAGE(level1_fixmap_pgt)
20780+NEXT_PAGE(level1_vsyscall_pgt)
20781 .fill 512,8,0
20782
20783 #undef PMDS
20784
20785- .data
20786+ .align PAGE_SIZE
20787+ENTRY(cpu_gdt_table)
20788+ .rept NR_CPUS
20789+ .quad 0x0000000000000000 /* NULL descriptor */
20790+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
20791+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
20792+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
20793+ .quad 0x00cffb000000ffff /* __USER32_CS */
20794+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
20795+ .quad 0x00affb000000ffff /* __USER_CS */
20796+
20797+#ifdef CONFIG_PAX_KERNEXEC
20798+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
20799+#else
20800+ .quad 0x0 /* unused */
20801+#endif
20802+
20803+ .quad 0,0 /* TSS */
20804+ .quad 0,0 /* LDT */
20805+ .quad 0,0,0 /* three TLS descriptors */
20806+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
20807+ /* asm/segment.h:GDT_ENTRIES must match this */
20808+
20809+ /* zero the remaining page */
20810+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
20811+ .endr
20812+
20813 .align 16
20814 .globl early_gdt_descr
20815 early_gdt_descr:
20816 .word GDT_ENTRIES*8-1
20817 early_gdt_descr_base:
20818- .quad INIT_PER_CPU_VAR(gdt_page)
20819+ .quad cpu_gdt_table
20820
20821 ENTRY(phys_base)
20822 /* This must match the first entry in level2_kernel_pgt */
20823 .quad 0x0000000000000000
20824
20825 #include "../../x86/xen/xen-head.S"
20826-
20827- .section .bss, "aw", @nobits
20828- .align L1_CACHE_BYTES
20829-ENTRY(idt_table)
20830- .skip IDT_ENTRIES * 16
20831
20832- .align L1_CACHE_BYTES
20833-ENTRY(nmi_idt_table)
20834- .skip IDT_ENTRIES * 16
20835-
20836- __PAGE_ALIGNED_BSS
20837+ .section .rodata,"a",@progbits
20838 NEXT_PAGE(empty_zero_page)
20839 .skip PAGE_SIZE
20840+
20841+ .align L1_CACHE_BYTES
20842+ENTRY(idt_table)
20843+ .fill 512,8,0
20844+
20845+ .align L1_CACHE_BYTES
20846+ENTRY(nmi_idt_table)
20847+ .fill 512,8,0
20848diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
20849index 0fa6912..37fce70 100644
20850--- a/arch/x86/kernel/i386_ksyms_32.c
20851+++ b/arch/x86/kernel/i386_ksyms_32.c
20852@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
20853 EXPORT_SYMBOL(cmpxchg8b_emu);
20854 #endif
20855
20856+EXPORT_SYMBOL_GPL(cpu_gdt_table);
20857+
20858 /* Networking helper routines. */
20859 EXPORT_SYMBOL(csum_partial_copy_generic);
20860+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
20861+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
20862
20863 EXPORT_SYMBOL(__get_user_1);
20864 EXPORT_SYMBOL(__get_user_2);
20865@@ -37,3 +41,7 @@ EXPORT_SYMBOL(strstr);
20866
20867 EXPORT_SYMBOL(csum_partial);
20868 EXPORT_SYMBOL(empty_zero_page);
20869+
20870+#ifdef CONFIG_PAX_KERNEXEC
20871+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
20872+#endif
20873diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
20874index cb33909..1163b40 100644
20875--- a/arch/x86/kernel/i387.c
20876+++ b/arch/x86/kernel/i387.c
20877@@ -51,7 +51,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
20878 static inline bool interrupted_user_mode(void)
20879 {
20880 struct pt_regs *regs = get_irq_regs();
20881- return regs && user_mode_vm(regs);
20882+ return regs && user_mode(regs);
20883 }
20884
20885 /*
20886diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
20887index 9a5c460..84868423 100644
20888--- a/arch/x86/kernel/i8259.c
20889+++ b/arch/x86/kernel/i8259.c
20890@@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
20891 static void make_8259A_irq(unsigned int irq)
20892 {
20893 disable_irq_nosync(irq);
20894- io_apic_irqs &= ~(1<<irq);
20895+ io_apic_irqs &= ~(1UL<<irq);
20896 irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
20897 i8259A_chip.name);
20898 enable_irq(irq);
20899@@ -209,7 +209,7 @@ spurious_8259A_irq:
20900 "spurious 8259A interrupt: IRQ%d.\n", irq);
20901 spurious_irq_mask |= irqmask;
20902 }
20903- atomic_inc(&irq_err_count);
20904+ atomic_inc_unchecked(&irq_err_count);
20905 /*
20906 * Theoretically we do not have to handle this IRQ,
20907 * but in Linux this does not cause problems and is
20908@@ -333,14 +333,16 @@ static void init_8259A(int auto_eoi)
20909 /* (slave's support for AEOI in flat mode is to be investigated) */
20910 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
20911
20912+ pax_open_kernel();
20913 if (auto_eoi)
20914 /*
20915 * In AEOI mode we just have to mask the interrupt
20916 * when acking.
20917 */
20918- i8259A_chip.irq_mask_ack = disable_8259A_irq;
20919+ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
20920 else
20921- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
20922+ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
20923+ pax_close_kernel();
20924
20925 udelay(100); /* wait for 8259A to initialize */
20926
20927diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
20928index a979b5b..1d6db75 100644
20929--- a/arch/x86/kernel/io_delay.c
20930+++ b/arch/x86/kernel/io_delay.c
20931@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
20932 * Quirk table for systems that misbehave (lock up, etc.) if port
20933 * 0x80 is used:
20934 */
20935-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
20936+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
20937 {
20938 .callback = dmi_io_delay_0xed_port,
20939 .ident = "Compaq Presario V6000",
20940diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
20941index 4ddaf66..6292f4e 100644
20942--- a/arch/x86/kernel/ioport.c
20943+++ b/arch/x86/kernel/ioport.c
20944@@ -6,6 +6,7 @@
20945 #include <linux/sched.h>
20946 #include <linux/kernel.h>
20947 #include <linux/capability.h>
20948+#include <linux/security.h>
20949 #include <linux/errno.h>
20950 #include <linux/types.h>
20951 #include <linux/ioport.h>
20952@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
20953
20954 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
20955 return -EINVAL;
20956+#ifdef CONFIG_GRKERNSEC_IO
20957+ if (turn_on && grsec_disable_privio) {
20958+ gr_handle_ioperm();
20959+ return -EPERM;
20960+ }
20961+#endif
20962 if (turn_on && !capable(CAP_SYS_RAWIO))
20963 return -EPERM;
20964
20965@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
20966 * because the ->io_bitmap_max value must match the bitmap
20967 * contents:
20968 */
20969- tss = &per_cpu(init_tss, get_cpu());
20970+ tss = init_tss + get_cpu();
20971
20972 if (turn_on)
20973 bitmap_clear(t->io_bitmap_ptr, from, num);
20974@@ -103,6 +110,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
20975 return -EINVAL;
20976 /* Trying to gain more privileges? */
20977 if (level > old) {
20978+#ifdef CONFIG_GRKERNSEC_IO
20979+ if (grsec_disable_privio) {
20980+ gr_handle_iopl();
20981+ return -EPERM;
20982+ }
20983+#endif
20984 if (!capable(CAP_SYS_RAWIO))
20985 return -EPERM;
20986 }
20987diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
20988index ac0631d..ff7cb62 100644
20989--- a/arch/x86/kernel/irq.c
20990+++ b/arch/x86/kernel/irq.c
20991@@ -18,7 +18,7 @@
20992 #include <asm/mce.h>
20993 #include <asm/hw_irq.h>
20994
20995-atomic_t irq_err_count;
20996+atomic_unchecked_t irq_err_count;
20997
20998 /* Function pointer for generic interrupt vector handling */
20999 void (*x86_platform_ipi_callback)(void) = NULL;
21000@@ -122,9 +122,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
21001 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
21002 seq_printf(p, " Machine check polls\n");
21003 #endif
21004- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
21005+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
21006 #if defined(CONFIG_X86_IO_APIC)
21007- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
21008+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
21009 #endif
21010 return 0;
21011 }
21012@@ -164,7 +164,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
21013
21014 u64 arch_irq_stat(void)
21015 {
21016- u64 sum = atomic_read(&irq_err_count);
21017+ u64 sum = atomic_read_unchecked(&irq_err_count);
21018 return sum;
21019 }
21020
21021diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
21022index 344faf8..355f60d 100644
21023--- a/arch/x86/kernel/irq_32.c
21024+++ b/arch/x86/kernel/irq_32.c
21025@@ -39,7 +39,7 @@ static int check_stack_overflow(void)
21026 __asm__ __volatile__("andl %%esp,%0" :
21027 "=r" (sp) : "0" (THREAD_SIZE - 1));
21028
21029- return sp < (sizeof(struct thread_info) + STACK_WARN);
21030+ return sp < STACK_WARN;
21031 }
21032
21033 static void print_stack_overflow(void)
21034@@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
21035 * per-CPU IRQ handling contexts (thread information and stack)
21036 */
21037 union irq_ctx {
21038- struct thread_info tinfo;
21039- u32 stack[THREAD_SIZE/sizeof(u32)];
21040+ unsigned long previous_esp;
21041+ u32 stack[THREAD_SIZE/sizeof(u32)];
21042 } __attribute__((aligned(THREAD_SIZE)));
21043
21044 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
21045@@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
21046 static inline int
21047 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
21048 {
21049- union irq_ctx *curctx, *irqctx;
21050+ union irq_ctx *irqctx;
21051 u32 *isp, arg1, arg2;
21052
21053- curctx = (union irq_ctx *) current_thread_info();
21054 irqctx = __this_cpu_read(hardirq_ctx);
21055
21056 /*
21057@@ -92,16 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
21058 * handler) we can't do that and just have to keep using the
21059 * current stack (which is the irq stack already after all)
21060 */
21061- if (unlikely(curctx == irqctx))
21062+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
21063 return 0;
21064
21065 /* build the stack frame on the IRQ stack */
21066- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
21067- irqctx->tinfo.task = curctx->tinfo.task;
21068- irqctx->tinfo.previous_esp = current_stack_pointer;
21069+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
21070+ irqctx->previous_esp = current_stack_pointer;
21071
21072- /* Copy the preempt_count so that the [soft]irq checks work. */
21073- irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
21074+#ifdef CONFIG_PAX_MEMORY_UDEREF
21075+ __set_fs(MAKE_MM_SEG(0));
21076+#endif
21077
21078 if (unlikely(overflow))
21079 call_on_stack(print_stack_overflow, isp);
21080@@ -113,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
21081 : "0" (irq), "1" (desc), "2" (isp),
21082 "D" (desc->handle_irq)
21083 : "memory", "cc", "ecx");
21084+
21085+#ifdef CONFIG_PAX_MEMORY_UDEREF
21086+ __set_fs(current_thread_info()->addr_limit);
21087+#endif
21088+
21089 return 1;
21090 }
21091
21092@@ -121,29 +125,14 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
21093 */
21094 void __cpuinit irq_ctx_init(int cpu)
21095 {
21096- union irq_ctx *irqctx;
21097-
21098 if (per_cpu(hardirq_ctx, cpu))
21099 return;
21100
21101- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
21102- THREADINFO_GFP,
21103- THREAD_SIZE_ORDER));
21104- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
21105- irqctx->tinfo.cpu = cpu;
21106- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
21107- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
21108-
21109- per_cpu(hardirq_ctx, cpu) = irqctx;
21110-
21111- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
21112- THREADINFO_GFP,
21113- THREAD_SIZE_ORDER));
21114- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
21115- irqctx->tinfo.cpu = cpu;
21116- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
21117-
21118- per_cpu(softirq_ctx, cpu) = irqctx;
21119+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
21120+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
21121+
21122+ printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
21123+ cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
21124
21125 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
21126 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
21127@@ -152,7 +141,6 @@ void __cpuinit irq_ctx_init(int cpu)
21128 asmlinkage void do_softirq(void)
21129 {
21130 unsigned long flags;
21131- struct thread_info *curctx;
21132 union irq_ctx *irqctx;
21133 u32 *isp;
21134
21135@@ -162,15 +150,22 @@ asmlinkage void do_softirq(void)
21136 local_irq_save(flags);
21137
21138 if (local_softirq_pending()) {
21139- curctx = current_thread_info();
21140 irqctx = __this_cpu_read(softirq_ctx);
21141- irqctx->tinfo.task = curctx->task;
21142- irqctx->tinfo.previous_esp = current_stack_pointer;
21143+ irqctx->previous_esp = current_stack_pointer;
21144
21145 /* build the stack frame on the softirq stack */
21146- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
21147+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
21148+
21149+#ifdef CONFIG_PAX_MEMORY_UDEREF
21150+ __set_fs(MAKE_MM_SEG(0));
21151+#endif
21152
21153 call_on_stack(__do_softirq, isp);
21154+
21155+#ifdef CONFIG_PAX_MEMORY_UDEREF
21156+ __set_fs(current_thread_info()->addr_limit);
21157+#endif
21158+
21159 /*
21160 * Shouldn't happen, we returned above if in_interrupt():
21161 */
21162@@ -191,7 +186,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
21163 if (unlikely(!desc))
21164 return false;
21165
21166- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
21167+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
21168 if (unlikely(overflow))
21169 print_stack_overflow();
21170 desc->handle_irq(irq, desc);
21171diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
21172index d04d3ec..ea4b374 100644
21173--- a/arch/x86/kernel/irq_64.c
21174+++ b/arch/x86/kernel/irq_64.c
21175@@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
21176 u64 estack_top, estack_bottom;
21177 u64 curbase = (u64)task_stack_page(current);
21178
21179- if (user_mode_vm(regs))
21180+ if (user_mode(regs))
21181 return;
21182
21183 if (regs->sp >= curbase + sizeof(struct thread_info) +
21184diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
21185index dc1404b..bbc43e7 100644
21186--- a/arch/x86/kernel/kdebugfs.c
21187+++ b/arch/x86/kernel/kdebugfs.c
21188@@ -27,7 +27,7 @@ struct setup_data_node {
21189 u32 len;
21190 };
21191
21192-static ssize_t setup_data_read(struct file *file, char __user *user_buf,
21193+static ssize_t __size_overflow(3) setup_data_read(struct file *file, char __user *user_buf,
21194 size_t count, loff_t *ppos)
21195 {
21196 struct setup_data_node *node = file->private_data;
21197diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
21198index 836f832..a8bda67 100644
21199--- a/arch/x86/kernel/kgdb.c
21200+++ b/arch/x86/kernel/kgdb.c
21201@@ -127,11 +127,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
21202 #ifdef CONFIG_X86_32
21203 switch (regno) {
21204 case GDB_SS:
21205- if (!user_mode_vm(regs))
21206+ if (!user_mode(regs))
21207 *(unsigned long *)mem = __KERNEL_DS;
21208 break;
21209 case GDB_SP:
21210- if (!user_mode_vm(regs))
21211+ if (!user_mode(regs))
21212 *(unsigned long *)mem = kernel_stack_pointer(regs);
21213 break;
21214 case GDB_GS:
21215@@ -229,7 +229,10 @@ static void kgdb_correct_hw_break(void)
21216 bp->attr.bp_addr = breakinfo[breakno].addr;
21217 bp->attr.bp_len = breakinfo[breakno].len;
21218 bp->attr.bp_type = breakinfo[breakno].type;
21219- info->address = breakinfo[breakno].addr;
21220+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
21221+ info->address = ktla_ktva(breakinfo[breakno].addr);
21222+ else
21223+ info->address = breakinfo[breakno].addr;
21224 info->len = breakinfo[breakno].len;
21225 info->type = breakinfo[breakno].type;
21226 val = arch_install_hw_breakpoint(bp);
21227@@ -476,12 +479,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
21228 case 'k':
21229 /* clear the trace bit */
21230 linux_regs->flags &= ~X86_EFLAGS_TF;
21231- atomic_set(&kgdb_cpu_doing_single_step, -1);
21232+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
21233
21234 /* set the trace bit if we're stepping */
21235 if (remcomInBuffer[0] == 's') {
21236 linux_regs->flags |= X86_EFLAGS_TF;
21237- atomic_set(&kgdb_cpu_doing_single_step,
21238+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
21239 raw_smp_processor_id());
21240 }
21241
21242@@ -546,7 +549,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
21243
21244 switch (cmd) {
21245 case DIE_DEBUG:
21246- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
21247+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
21248 if (user_mode(regs))
21249 return single_step_cont(regs, args);
21250 break;
21251@@ -751,11 +754,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
21252 #endif /* CONFIG_DEBUG_RODATA */
21253
21254 bpt->type = BP_BREAKPOINT;
21255- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
21256+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
21257 BREAK_INSTR_SIZE);
21258 if (err)
21259 return err;
21260- err = probe_kernel_write((char *)bpt->bpt_addr,
21261+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
21262 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
21263 #ifdef CONFIG_DEBUG_RODATA
21264 if (!err)
21265@@ -768,7 +771,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
21266 return -EBUSY;
21267 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
21268 BREAK_INSTR_SIZE);
21269- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
21270+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
21271 if (err)
21272 return err;
21273 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
21274@@ -793,13 +796,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
21275 if (mutex_is_locked(&text_mutex))
21276 goto knl_write;
21277 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
21278- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
21279+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
21280 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
21281 goto knl_write;
21282 return err;
21283 knl_write:
21284 #endif /* CONFIG_DEBUG_RODATA */
21285- return probe_kernel_write((char *)bpt->bpt_addr,
21286+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
21287 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
21288 }
21289
21290diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
21291index 211bce4..6e2580a 100644
21292--- a/arch/x86/kernel/kprobes/core.c
21293+++ b/arch/x86/kernel/kprobes/core.c
21294@@ -119,9 +119,12 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
21295 s32 raddr;
21296 } __packed *insn;
21297
21298- insn = (struct __arch_relative_insn *)from;
21299+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
21300+
21301+ pax_open_kernel();
21302 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
21303 insn->op = op;
21304+ pax_close_kernel();
21305 }
21306
21307 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
21308@@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t *opcodes)
21309 kprobe_opcode_t opcode;
21310 kprobe_opcode_t *orig_opcodes = opcodes;
21311
21312- if (search_exception_tables((unsigned long)opcodes))
21313+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
21314 return 0; /* Page fault may occur on this address. */
21315
21316 retry:
21317@@ -238,9 +241,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
21318 * for the first byte, we can recover the original instruction
21319 * from it and kp->opcode.
21320 */
21321- memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
21322+ memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
21323 buf[0] = kp->opcode;
21324- return (unsigned long)buf;
21325+ return ktva_ktla((unsigned long)buf);
21326 }
21327
21328 /*
21329@@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
21330 /* Another subsystem puts a breakpoint, failed to recover */
21331 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
21332 return 0;
21333+ pax_open_kernel();
21334 memcpy(dest, insn.kaddr, insn.length);
21335+ pax_close_kernel();
21336
21337 #ifdef CONFIG_X86_64
21338 if (insn_rip_relative(&insn)) {
21339@@ -359,7 +364,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
21340 return 0;
21341 }
21342 disp = (u8 *) dest + insn_offset_displacement(&insn);
21343+ pax_open_kernel();
21344 *(s32 *) disp = (s32) newdisp;
21345+ pax_close_kernel();
21346 }
21347 #endif
21348 return insn.length;
21349@@ -498,7 +505,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
21350 * nor set current_kprobe, because it doesn't use single
21351 * stepping.
21352 */
21353- regs->ip = (unsigned long)p->ainsn.insn;
21354+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
21355 preempt_enable_no_resched();
21356 return;
21357 }
21358@@ -515,9 +522,9 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
21359 regs->flags &= ~X86_EFLAGS_IF;
21360 /* single step inline if the instruction is an int3 */
21361 if (p->opcode == BREAKPOINT_INSTRUCTION)
21362- regs->ip = (unsigned long)p->addr;
21363+ regs->ip = ktla_ktva((unsigned long)p->addr);
21364 else
21365- regs->ip = (unsigned long)p->ainsn.insn;
21366+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
21367 }
21368
21369 /*
21370@@ -596,7 +603,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
21371 setup_singlestep(p, regs, kcb, 0);
21372 return 1;
21373 }
21374- } else if (*addr != BREAKPOINT_INSTRUCTION) {
21375+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
21376 /*
21377 * The breakpoint instruction was removed right
21378 * after we hit it. Another cpu has removed
21379@@ -642,6 +649,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
21380 " movq %rax, 152(%rsp)\n"
21381 RESTORE_REGS_STRING
21382 " popfq\n"
21383+#ifdef KERNEXEC_PLUGIN
21384+ " btsq $63,(%rsp)\n"
21385+#endif
21386 #else
21387 " pushf\n"
21388 SAVE_REGS_STRING
21389@@ -779,7 +789,7 @@ static void __kprobes
21390 resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
21391 {
21392 unsigned long *tos = stack_addr(regs);
21393- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
21394+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
21395 unsigned long orig_ip = (unsigned long)p->addr;
21396 kprobe_opcode_t *insn = p->ainsn.insn;
21397
21398@@ -961,7 +971,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
21399 struct die_args *args = data;
21400 int ret = NOTIFY_DONE;
21401
21402- if (args->regs && user_mode_vm(args->regs))
21403+ if (args->regs && user_mode(args->regs))
21404 return ret;
21405
21406 switch (val) {
21407diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
21408index 76dc6f0..66bdfc3 100644
21409--- a/arch/x86/kernel/kprobes/opt.c
21410+++ b/arch/x86/kernel/kprobes/opt.c
21411@@ -79,6 +79,7 @@ found:
21412 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
21413 static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
21414 {
21415+ pax_open_kernel();
21416 #ifdef CONFIG_X86_64
21417 *addr++ = 0x48;
21418 *addr++ = 0xbf;
21419@@ -86,6 +87,7 @@ static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long v
21420 *addr++ = 0xb8;
21421 #endif
21422 *(unsigned long *)addr = val;
21423+ pax_close_kernel();
21424 }
21425
21426 static void __used __kprobes kprobes_optinsn_template_holder(void)
21427@@ -338,7 +340,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
21428 * Verify if the address gap is in 2GB range, because this uses
21429 * a relative jump.
21430 */
21431- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
21432+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
21433 if (abs(rel) > 0x7fffffff)
21434 return -ERANGE;
21435
21436@@ -353,16 +355,18 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
21437 op->optinsn.size = ret;
21438
21439 /* Copy arch-dep-instance from template */
21440- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
21441+ pax_open_kernel();
21442+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
21443+ pax_close_kernel();
21444
21445 /* Set probe information */
21446 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
21447
21448 /* Set probe function call */
21449- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
21450+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
21451
21452 /* Set returning jmp instruction at the tail of out-of-line buffer */
21453- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
21454+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
21455 (u8 *)op->kp.addr + op->optinsn.size);
21456
21457 flush_icache_range((unsigned long) buf,
21458@@ -385,7 +389,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
21459 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
21460
21461 /* Backup instructions which will be replaced by jump address */
21462- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
21463+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
21464 RELATIVE_ADDR_SIZE);
21465
21466 insn_buf[0] = RELATIVEJUMP_OPCODE;
21467@@ -483,7 +487,7 @@ setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
21468 /* This kprobe is really able to run optimized path. */
21469 op = container_of(p, struct optimized_kprobe, kp);
21470 /* Detour through copied instructions */
21471- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
21472+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
21473 if (!reenter)
21474 reset_current_kprobe();
21475 preempt_enable_no_resched();
21476diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
21477index cd6d9a5..16245a4 100644
21478--- a/arch/x86/kernel/kvm.c
21479+++ b/arch/x86/kernel/kvm.c
21480@@ -455,7 +455,7 @@ static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
21481 return NOTIFY_OK;
21482 }
21483
21484-static struct notifier_block __cpuinitdata kvm_cpu_notifier = {
21485+static struct notifier_block kvm_cpu_notifier = {
21486 .notifier_call = kvm_cpu_notify,
21487 };
21488 #endif
21489diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
21490index ebc9873..1b9724b 100644
21491--- a/arch/x86/kernel/ldt.c
21492+++ b/arch/x86/kernel/ldt.c
21493@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
21494 if (reload) {
21495 #ifdef CONFIG_SMP
21496 preempt_disable();
21497- load_LDT(pc);
21498+ load_LDT_nolock(pc);
21499 if (!cpumask_equal(mm_cpumask(current->mm),
21500 cpumask_of(smp_processor_id())))
21501 smp_call_function(flush_ldt, current->mm, 1);
21502 preempt_enable();
21503 #else
21504- load_LDT(pc);
21505+ load_LDT_nolock(pc);
21506 #endif
21507 }
21508 if (oldsize) {
21509@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
21510 return err;
21511
21512 for (i = 0; i < old->size; i++)
21513- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
21514+ write_ldt_entry(new->ldt, i, old->ldt + i);
21515 return 0;
21516 }
21517
21518@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
21519 retval = copy_ldt(&mm->context, &old_mm->context);
21520 mutex_unlock(&old_mm->context.lock);
21521 }
21522+
21523+ if (tsk == current) {
21524+ mm->context.vdso = 0;
21525+
21526+#ifdef CONFIG_X86_32
21527+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21528+ mm->context.user_cs_base = 0UL;
21529+ mm->context.user_cs_limit = ~0UL;
21530+
21531+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
21532+ cpus_clear(mm->context.cpu_user_cs_mask);
21533+#endif
21534+
21535+#endif
21536+#endif
21537+
21538+ }
21539+
21540 return retval;
21541 }
21542
21543@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
21544 }
21545 }
21546
21547+#ifdef CONFIG_PAX_SEGMEXEC
21548+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
21549+ error = -EINVAL;
21550+ goto out_unlock;
21551+ }
21552+#endif
21553+
21554 fill_ldt(&ldt, &ldt_info);
21555 if (oldmode)
21556 ldt.avl = 0;
21557diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
21558index 5b19e4d..6476a76 100644
21559--- a/arch/x86/kernel/machine_kexec_32.c
21560+++ b/arch/x86/kernel/machine_kexec_32.c
21561@@ -26,7 +26,7 @@
21562 #include <asm/cacheflush.h>
21563 #include <asm/debugreg.h>
21564
21565-static void set_idt(void *newidt, __u16 limit)
21566+static void set_idt(struct desc_struct *newidt, __u16 limit)
21567 {
21568 struct desc_ptr curidt;
21569
21570@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
21571 }
21572
21573
21574-static void set_gdt(void *newgdt, __u16 limit)
21575+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
21576 {
21577 struct desc_ptr curgdt;
21578
21579@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
21580 }
21581
21582 control_page = page_address(image->control_code_page);
21583- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
21584+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
21585
21586 relocate_kernel_ptr = control_page;
21587 page_list[PA_CONTROL_PAGE] = __pa(control_page);
21588diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
21589index 22db92b..d546bec 100644
21590--- a/arch/x86/kernel/microcode_core.c
21591+++ b/arch/x86/kernel/microcode_core.c
21592@@ -513,7 +513,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
21593 return NOTIFY_OK;
21594 }
21595
21596-static struct notifier_block __refdata mc_cpu_notifier = {
21597+static struct notifier_block mc_cpu_notifier = {
21598 .notifier_call = mc_cpu_callback,
21599 };
21600
21601diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
21602index 5fb2ceb..3ae90bb 100644
21603--- a/arch/x86/kernel/microcode_intel.c
21604+++ b/arch/x86/kernel/microcode_intel.c
21605@@ -293,13 +293,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
21606
21607 static int get_ucode_user(void *to, const void *from, size_t n)
21608 {
21609- return copy_from_user(to, from, n);
21610+ return copy_from_user(to, (const void __force_user *)from, n);
21611 }
21612
21613 static enum ucode_state
21614 request_microcode_user(int cpu, const void __user *buf, size_t size)
21615 {
21616- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
21617+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
21618 }
21619
21620 static void microcode_fini_cpu(int cpu)
21621diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
21622index 216a4d7..228255a 100644
21623--- a/arch/x86/kernel/module.c
21624+++ b/arch/x86/kernel/module.c
21625@@ -43,15 +43,60 @@ do { \
21626 } while (0)
21627 #endif
21628
21629-void *module_alloc(unsigned long size)
21630+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
21631 {
21632- if (PAGE_ALIGN(size) > MODULES_LEN)
21633+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
21634 return NULL;
21635 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
21636- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
21637+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
21638 -1, __builtin_return_address(0));
21639 }
21640
21641+void *module_alloc(unsigned long size)
21642+{
21643+
21644+#ifdef CONFIG_PAX_KERNEXEC
21645+ return __module_alloc(size, PAGE_KERNEL);
21646+#else
21647+ return __module_alloc(size, PAGE_KERNEL_EXEC);
21648+#endif
21649+
21650+}
21651+
21652+#ifdef CONFIG_PAX_KERNEXEC
21653+#ifdef CONFIG_X86_32
21654+void *module_alloc_exec(unsigned long size)
21655+{
21656+ struct vm_struct *area;
21657+
21658+ if (size == 0)
21659+ return NULL;
21660+
21661+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
21662+ return area ? area->addr : NULL;
21663+}
21664+EXPORT_SYMBOL(module_alloc_exec);
21665+
21666+void module_free_exec(struct module *mod, void *module_region)
21667+{
21668+ vunmap(module_region);
21669+}
21670+EXPORT_SYMBOL(module_free_exec);
21671+#else
21672+void module_free_exec(struct module *mod, void *module_region)
21673+{
21674+ module_free(mod, module_region);
21675+}
21676+EXPORT_SYMBOL(module_free_exec);
21677+
21678+void *module_alloc_exec(unsigned long size)
21679+{
21680+ return __module_alloc(size, PAGE_KERNEL_RX);
21681+}
21682+EXPORT_SYMBOL(module_alloc_exec);
21683+#endif
21684+#endif
21685+
21686 #ifdef CONFIG_X86_32
21687 int apply_relocate(Elf32_Shdr *sechdrs,
21688 const char *strtab,
21689@@ -62,14 +107,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
21690 unsigned int i;
21691 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
21692 Elf32_Sym *sym;
21693- uint32_t *location;
21694+ uint32_t *plocation, location;
21695
21696 DEBUGP("Applying relocate section %u to %u\n",
21697 relsec, sechdrs[relsec].sh_info);
21698 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
21699 /* This is where to make the change */
21700- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
21701- + rel[i].r_offset;
21702+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
21703+ location = (uint32_t)plocation;
21704+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
21705+ plocation = ktla_ktva((void *)plocation);
21706 /* This is the symbol it is referring to. Note that all
21707 undefined symbols have been resolved. */
21708 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
21709@@ -78,11 +125,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
21710 switch (ELF32_R_TYPE(rel[i].r_info)) {
21711 case R_386_32:
21712 /* We add the value into the location given */
21713- *location += sym->st_value;
21714+ pax_open_kernel();
21715+ *plocation += sym->st_value;
21716+ pax_close_kernel();
21717 break;
21718 case R_386_PC32:
21719 /* Add the value, subtract its position */
21720- *location += sym->st_value - (uint32_t)location;
21721+ pax_open_kernel();
21722+ *plocation += sym->st_value - location;
21723+ pax_close_kernel();
21724 break;
21725 default:
21726 pr_err("%s: Unknown relocation: %u\n",
21727@@ -127,21 +178,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
21728 case R_X86_64_NONE:
21729 break;
21730 case R_X86_64_64:
21731+ pax_open_kernel();
21732 *(u64 *)loc = val;
21733+ pax_close_kernel();
21734 break;
21735 case R_X86_64_32:
21736+ pax_open_kernel();
21737 *(u32 *)loc = val;
21738+ pax_close_kernel();
21739 if (val != *(u32 *)loc)
21740 goto overflow;
21741 break;
21742 case R_X86_64_32S:
21743+ pax_open_kernel();
21744 *(s32 *)loc = val;
21745+ pax_close_kernel();
21746 if ((s64)val != *(s32 *)loc)
21747 goto overflow;
21748 break;
21749 case R_X86_64_PC32:
21750 val -= (u64)loc;
21751+ pax_open_kernel();
21752 *(u32 *)loc = val;
21753+ pax_close_kernel();
21754+
21755 #if 0
21756 if ((s64)val != *(s32 *)loc)
21757 goto overflow;
21758diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
21759index ce13049..e2e9c3c 100644
21760--- a/arch/x86/kernel/msr.c
21761+++ b/arch/x86/kernel/msr.c
21762@@ -233,7 +233,7 @@ static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb,
21763 return notifier_from_errno(err);
21764 }
21765
21766-static struct notifier_block __refdata msr_class_cpu_notifier = {
21767+static struct notifier_block msr_class_cpu_notifier = {
21768 .notifier_call = msr_class_cpu_callback,
21769 };
21770
21771diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
21772index 6030805..2d33f21 100644
21773--- a/arch/x86/kernel/nmi.c
21774+++ b/arch/x86/kernel/nmi.c
21775@@ -105,7 +105,7 @@ static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2
21776 return handled;
21777 }
21778
21779-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
21780+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
21781 {
21782 struct nmi_desc *desc = nmi_to_desc(type);
21783 unsigned long flags;
21784@@ -129,9 +129,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
21785 * event confuses some handlers (kdump uses this flag)
21786 */
21787 if (action->flags & NMI_FLAG_FIRST)
21788- list_add_rcu(&action->list, &desc->head);
21789+ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
21790 else
21791- list_add_tail_rcu(&action->list, &desc->head);
21792+ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
21793
21794 spin_unlock_irqrestore(&desc->lock, flags);
21795 return 0;
21796@@ -154,7 +154,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
21797 if (!strcmp(n->name, name)) {
21798 WARN(in_nmi(),
21799 "Trying to free NMI (%s) from NMI context!\n", n->name);
21800- list_del_rcu(&n->list);
21801+ pax_list_del_rcu((struct list_head *)&n->list);
21802 break;
21803 }
21804 }
21805@@ -479,6 +479,17 @@ static inline void nmi_nesting_postprocess(void)
21806 dotraplinkage notrace __kprobes void
21807 do_nmi(struct pt_regs *regs, long error_code)
21808 {
21809+
21810+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21811+ if (!user_mode(regs)) {
21812+ unsigned long cs = regs->cs & 0xFFFF;
21813+ unsigned long ip = ktva_ktla(regs->ip);
21814+
21815+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
21816+ regs->ip = ip;
21817+ }
21818+#endif
21819+
21820 nmi_nesting_preprocess(regs);
21821
21822 nmi_enter();
21823diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
21824index 6d9582e..f746287 100644
21825--- a/arch/x86/kernel/nmi_selftest.c
21826+++ b/arch/x86/kernel/nmi_selftest.c
21827@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
21828 {
21829 /* trap all the unknown NMIs we may generate */
21830 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
21831- __initdata);
21832+ __initconst);
21833 }
21834
21835 static void __init cleanup_nmi_testsuite(void)
21836@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
21837 unsigned long timeout;
21838
21839 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
21840- NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
21841+ NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
21842 nmi_fail = FAILURE;
21843 return;
21844 }
21845diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
21846index 676b8c7..870ba04 100644
21847--- a/arch/x86/kernel/paravirt-spinlocks.c
21848+++ b/arch/x86/kernel/paravirt-spinlocks.c
21849@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
21850 arch_spin_lock(lock);
21851 }
21852
21853-struct pv_lock_ops pv_lock_ops = {
21854+struct pv_lock_ops pv_lock_ops __read_only = {
21855 #ifdef CONFIG_SMP
21856 .spin_is_locked = __ticket_spin_is_locked,
21857 .spin_is_contended = __ticket_spin_is_contended,
21858diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
21859index cd6de64..27c6af0 100644
21860--- a/arch/x86/kernel/paravirt.c
21861+++ b/arch/x86/kernel/paravirt.c
21862@@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
21863 {
21864 return x;
21865 }
21866+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
21867+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
21868+#endif
21869
21870 void __init default_banner(void)
21871 {
21872@@ -147,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
21873 if (opfunc == NULL)
21874 /* If there's no function, patch it with a ud2a (BUG) */
21875 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
21876- else if (opfunc == _paravirt_nop)
21877+ else if (opfunc == (void *)_paravirt_nop)
21878 /* If the operation is a nop, then nop the callsite */
21879 ret = paravirt_patch_nop();
21880
21881 /* identity functions just return their single argument */
21882- else if (opfunc == _paravirt_ident_32)
21883+ else if (opfunc == (void *)_paravirt_ident_32)
21884 ret = paravirt_patch_ident_32(insnbuf, len);
21885- else if (opfunc == _paravirt_ident_64)
21886+ else if (opfunc == (void *)_paravirt_ident_64)
21887 ret = paravirt_patch_ident_64(insnbuf, len);
21888+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
21889+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
21890+ ret = paravirt_patch_ident_64(insnbuf, len);
21891+#endif
21892
21893 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
21894 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
21895@@ -180,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
21896 if (insn_len > len || start == NULL)
21897 insn_len = len;
21898 else
21899- memcpy(insnbuf, start, insn_len);
21900+ memcpy(insnbuf, ktla_ktva(start), insn_len);
21901
21902 return insn_len;
21903 }
21904@@ -304,7 +311,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
21905 return this_cpu_read(paravirt_lazy_mode);
21906 }
21907
21908-struct pv_info pv_info = {
21909+struct pv_info pv_info __read_only = {
21910 .name = "bare hardware",
21911 .paravirt_enabled = 0,
21912 .kernel_rpl = 0,
21913@@ -315,16 +322,16 @@ struct pv_info pv_info = {
21914 #endif
21915 };
21916
21917-struct pv_init_ops pv_init_ops = {
21918+struct pv_init_ops pv_init_ops __read_only = {
21919 .patch = native_patch,
21920 };
21921
21922-struct pv_time_ops pv_time_ops = {
21923+struct pv_time_ops pv_time_ops __read_only = {
21924 .sched_clock = native_sched_clock,
21925 .steal_clock = native_steal_clock,
21926 };
21927
21928-struct pv_irq_ops pv_irq_ops = {
21929+struct pv_irq_ops pv_irq_ops __read_only = {
21930 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
21931 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
21932 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
21933@@ -336,7 +343,7 @@ struct pv_irq_ops pv_irq_ops = {
21934 #endif
21935 };
21936
21937-struct pv_cpu_ops pv_cpu_ops = {
21938+struct pv_cpu_ops pv_cpu_ops __read_only = {
21939 .cpuid = native_cpuid,
21940 .get_debugreg = native_get_debugreg,
21941 .set_debugreg = native_set_debugreg,
21942@@ -394,21 +401,26 @@ struct pv_cpu_ops pv_cpu_ops = {
21943 .end_context_switch = paravirt_nop,
21944 };
21945
21946-struct pv_apic_ops pv_apic_ops = {
21947+struct pv_apic_ops pv_apic_ops __read_only= {
21948 #ifdef CONFIG_X86_LOCAL_APIC
21949 .startup_ipi_hook = paravirt_nop,
21950 #endif
21951 };
21952
21953-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
21954+#ifdef CONFIG_X86_32
21955+#ifdef CONFIG_X86_PAE
21956+/* 64-bit pagetable entries */
21957+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
21958+#else
21959 /* 32-bit pagetable entries */
21960 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
21961+#endif
21962 #else
21963 /* 64-bit pagetable entries */
21964 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
21965 #endif
21966
21967-struct pv_mmu_ops pv_mmu_ops = {
21968+struct pv_mmu_ops pv_mmu_ops __read_only = {
21969
21970 .read_cr2 = native_read_cr2,
21971 .write_cr2 = native_write_cr2,
21972@@ -458,6 +470,7 @@ struct pv_mmu_ops pv_mmu_ops = {
21973 .make_pud = PTE_IDENT,
21974
21975 .set_pgd = native_set_pgd,
21976+ .set_pgd_batched = native_set_pgd_batched,
21977 #endif
21978 #endif /* PAGETABLE_LEVELS >= 3 */
21979
21980@@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
21981 },
21982
21983 .set_fixmap = native_set_fixmap,
21984+
21985+#ifdef CONFIG_PAX_KERNEXEC
21986+ .pax_open_kernel = native_pax_open_kernel,
21987+ .pax_close_kernel = native_pax_close_kernel,
21988+#endif
21989+
21990 };
21991
21992 EXPORT_SYMBOL_GPL(pv_time_ops);
21993diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
21994index 299d493..2ccb0ee 100644
21995--- a/arch/x86/kernel/pci-calgary_64.c
21996+++ b/arch/x86/kernel/pci-calgary_64.c
21997@@ -1339,7 +1339,7 @@ static void __init get_tce_space_from_tar(void)
21998 tce_space = be64_to_cpu(readq(target));
21999 tce_space = tce_space & TAR_SW_BITS;
22000
22001- tce_space = tce_space & (~specified_table_size);
22002+ tce_space = tce_space & (~(unsigned long)specified_table_size);
22003 info->tce_space = (u64 *)__va(tce_space);
22004 }
22005 }
22006diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
22007index 35ccf75..7a15747 100644
22008--- a/arch/x86/kernel/pci-iommu_table.c
22009+++ b/arch/x86/kernel/pci-iommu_table.c
22010@@ -2,7 +2,7 @@
22011 #include <asm/iommu_table.h>
22012 #include <linux/string.h>
22013 #include <linux/kallsyms.h>
22014-
22015+#include <linux/sched.h>
22016
22017 #define DEBUG 1
22018
22019diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
22020index 6c483ba..d10ce2f 100644
22021--- a/arch/x86/kernel/pci-swiotlb.c
22022+++ b/arch/x86/kernel/pci-swiotlb.c
22023@@ -32,7 +32,7 @@ static void x86_swiotlb_free_coherent(struct device *dev, size_t size,
22024 void *vaddr, dma_addr_t dma_addr,
22025 struct dma_attrs *attrs)
22026 {
22027- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
22028+ swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
22029 }
22030
22031 static struct dma_map_ops swiotlb_dma_ops = {
22032diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
22033index 81a5f5e..20f8b58 100644
22034--- a/arch/x86/kernel/process.c
22035+++ b/arch/x86/kernel/process.c
22036@@ -36,7 +36,8 @@
22037 * section. Since TSS's are completely CPU-local, we want them
22038 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
22039 */
22040-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
22041+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
22042+EXPORT_SYMBOL(init_tss);
22043
22044 #ifdef CONFIG_X86_64
22045 static DEFINE_PER_CPU(unsigned char, is_idle);
22046@@ -92,7 +93,7 @@ void arch_task_cache_init(void)
22047 task_xstate_cachep =
22048 kmem_cache_create("task_xstate", xstate_size,
22049 __alignof__(union thread_xstate),
22050- SLAB_PANIC | SLAB_NOTRACK, NULL);
22051+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
22052 }
22053
22054 /*
22055@@ -105,7 +106,7 @@ void exit_thread(void)
22056 unsigned long *bp = t->io_bitmap_ptr;
22057
22058 if (bp) {
22059- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
22060+ struct tss_struct *tss = init_tss + get_cpu();
22061
22062 t->io_bitmap_ptr = NULL;
22063 clear_thread_flag(TIF_IO_BITMAP);
22064@@ -125,6 +126,9 @@ void flush_thread(void)
22065 {
22066 struct task_struct *tsk = current;
22067
22068+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
22069+ loadsegment(gs, 0);
22070+#endif
22071 flush_ptrace_hw_breakpoint(tsk);
22072 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
22073 drop_init_fpu(tsk);
22074@@ -271,7 +275,7 @@ static void __exit_idle(void)
22075 void exit_idle(void)
22076 {
22077 /* idle loop has pid 0 */
22078- if (current->pid)
22079+ if (task_pid_nr(current))
22080 return;
22081 __exit_idle();
22082 }
22083@@ -327,7 +331,7 @@ bool xen_set_default_idle(void)
22084 return ret;
22085 }
22086 #endif
22087-void stop_this_cpu(void *dummy)
22088+__noreturn void stop_this_cpu(void *dummy)
22089 {
22090 local_irq_disable();
22091 /*
22092@@ -456,16 +460,37 @@ static int __init idle_setup(char *str)
22093 }
22094 early_param("idle", idle_setup);
22095
22096-unsigned long arch_align_stack(unsigned long sp)
22097+#ifdef CONFIG_PAX_RANDKSTACK
22098+void pax_randomize_kstack(struct pt_regs *regs)
22099 {
22100- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
22101- sp -= get_random_int() % 8192;
22102- return sp & ~0xf;
22103-}
22104+ struct thread_struct *thread = &current->thread;
22105+ unsigned long time;
22106
22107-unsigned long arch_randomize_brk(struct mm_struct *mm)
22108-{
22109- unsigned long range_end = mm->brk + 0x02000000;
22110- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
22111-}
22112+ if (!randomize_va_space)
22113+ return;
22114+
22115+ if (v8086_mode(regs))
22116+ return;
22117
22118+ rdtscl(time);
22119+
22120+ /* P4 seems to return a 0 LSB, ignore it */
22121+#ifdef CONFIG_MPENTIUM4
22122+ time &= 0x3EUL;
22123+ time <<= 2;
22124+#elif defined(CONFIG_X86_64)
22125+ time &= 0xFUL;
22126+ time <<= 4;
22127+#else
22128+ time &= 0x1FUL;
22129+ time <<= 3;
22130+#endif
22131+
22132+ thread->sp0 ^= time;
22133+ load_sp0(init_tss + smp_processor_id(), thread);
22134+
22135+#ifdef CONFIG_X86_64
22136+ this_cpu_write(kernel_stack, thread->sp0);
22137+#endif
22138+}
22139+#endif
22140diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
22141index 7305f7d..22f73d6 100644
22142--- a/arch/x86/kernel/process_32.c
22143+++ b/arch/x86/kernel/process_32.c
22144@@ -65,6 +65,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
22145 unsigned long thread_saved_pc(struct task_struct *tsk)
22146 {
22147 return ((unsigned long *)tsk->thread.sp)[3];
22148+//XXX return tsk->thread.eip;
22149 }
22150
22151 void __show_regs(struct pt_regs *regs, int all)
22152@@ -74,19 +75,18 @@ void __show_regs(struct pt_regs *regs, int all)
22153 unsigned long sp;
22154 unsigned short ss, gs;
22155
22156- if (user_mode_vm(regs)) {
22157+ if (user_mode(regs)) {
22158 sp = regs->sp;
22159 ss = regs->ss & 0xffff;
22160- gs = get_user_gs(regs);
22161 } else {
22162 sp = kernel_stack_pointer(regs);
22163 savesegment(ss, ss);
22164- savesegment(gs, gs);
22165 }
22166+ gs = get_user_gs(regs);
22167
22168 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
22169 (u16)regs->cs, regs->ip, regs->flags,
22170- smp_processor_id());
22171+ raw_smp_processor_id());
22172 print_symbol("EIP is at %s\n", regs->ip);
22173
22174 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
22175@@ -128,20 +128,21 @@ void release_thread(struct task_struct *dead_task)
22176 int copy_thread(unsigned long clone_flags, unsigned long sp,
22177 unsigned long arg, struct task_struct *p)
22178 {
22179- struct pt_regs *childregs = task_pt_regs(p);
22180+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
22181 struct task_struct *tsk;
22182 int err;
22183
22184 p->thread.sp = (unsigned long) childregs;
22185 p->thread.sp0 = (unsigned long) (childregs+1);
22186+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
22187
22188 if (unlikely(p->flags & PF_KTHREAD)) {
22189 /* kernel thread */
22190 memset(childregs, 0, sizeof(struct pt_regs));
22191 p->thread.ip = (unsigned long) ret_from_kernel_thread;
22192- task_user_gs(p) = __KERNEL_STACK_CANARY;
22193- childregs->ds = __USER_DS;
22194- childregs->es = __USER_DS;
22195+ savesegment(gs, childregs->gs);
22196+ childregs->ds = __KERNEL_DS;
22197+ childregs->es = __KERNEL_DS;
22198 childregs->fs = __KERNEL_PERCPU;
22199 childregs->bx = sp; /* function */
22200 childregs->bp = arg;
22201@@ -248,7 +249,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22202 struct thread_struct *prev = &prev_p->thread,
22203 *next = &next_p->thread;
22204 int cpu = smp_processor_id();
22205- struct tss_struct *tss = &per_cpu(init_tss, cpu);
22206+ struct tss_struct *tss = init_tss + cpu;
22207 fpu_switch_t fpu;
22208
22209 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
22210@@ -272,6 +273,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22211 */
22212 lazy_save_gs(prev->gs);
22213
22214+#ifdef CONFIG_PAX_MEMORY_UDEREF
22215+ __set_fs(task_thread_info(next_p)->addr_limit);
22216+#endif
22217+
22218 /*
22219 * Load the per-thread Thread-Local Storage descriptor.
22220 */
22221@@ -302,6 +307,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22222 */
22223 arch_end_context_switch(next_p);
22224
22225+ this_cpu_write(current_task, next_p);
22226+ this_cpu_write(current_tinfo, &next_p->tinfo);
22227+
22228 /*
22229 * Restore %gs if needed (which is common)
22230 */
22231@@ -310,8 +318,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22232
22233 switch_fpu_finish(next_p, fpu);
22234
22235- this_cpu_write(current_task, next_p);
22236-
22237 return prev_p;
22238 }
22239
22240@@ -341,4 +347,3 @@ unsigned long get_wchan(struct task_struct *p)
22241 } while (count++ < 16);
22242 return 0;
22243 }
22244-
22245diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
22246index 355ae06..4530766 100644
22247--- a/arch/x86/kernel/process_64.c
22248+++ b/arch/x86/kernel/process_64.c
22249@@ -151,10 +151,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
22250 struct pt_regs *childregs;
22251 struct task_struct *me = current;
22252
22253- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
22254+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
22255 childregs = task_pt_regs(p);
22256 p->thread.sp = (unsigned long) childregs;
22257 p->thread.usersp = me->thread.usersp;
22258+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
22259 set_tsk_thread_flag(p, TIF_FORK);
22260 p->fpu_counter = 0;
22261 p->thread.io_bitmap_ptr = NULL;
22262@@ -273,7 +274,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22263 struct thread_struct *prev = &prev_p->thread;
22264 struct thread_struct *next = &next_p->thread;
22265 int cpu = smp_processor_id();
22266- struct tss_struct *tss = &per_cpu(init_tss, cpu);
22267+ struct tss_struct *tss = init_tss + cpu;
22268 unsigned fsindex, gsindex;
22269 fpu_switch_t fpu;
22270
22271@@ -355,10 +356,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22272 prev->usersp = this_cpu_read(old_rsp);
22273 this_cpu_write(old_rsp, next->usersp);
22274 this_cpu_write(current_task, next_p);
22275+ this_cpu_write(current_tinfo, &next_p->tinfo);
22276
22277- this_cpu_write(kernel_stack,
22278- (unsigned long)task_stack_page(next_p) +
22279- THREAD_SIZE - KERNEL_STACK_OFFSET);
22280+ this_cpu_write(kernel_stack, next->sp0);
22281
22282 /*
22283 * Now maybe reload the debug registers and handle I/O bitmaps
22284@@ -427,12 +427,11 @@ unsigned long get_wchan(struct task_struct *p)
22285 if (!p || p == current || p->state == TASK_RUNNING)
22286 return 0;
22287 stack = (unsigned long)task_stack_page(p);
22288- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
22289+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
22290 return 0;
22291 fp = *(u64 *)(p->thread.sp);
22292 do {
22293- if (fp < (unsigned long)stack ||
22294- fp >= (unsigned long)stack+THREAD_SIZE)
22295+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
22296 return 0;
22297 ip = *(u64 *)(fp+8);
22298 if (!in_sched_functions(ip))
22299diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
22300index 29a8120..a50b5ee 100644
22301--- a/arch/x86/kernel/ptrace.c
22302+++ b/arch/x86/kernel/ptrace.c
22303@@ -184,14 +184,13 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
22304 {
22305 unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
22306 unsigned long sp = (unsigned long)&regs->sp;
22307- struct thread_info *tinfo;
22308
22309- if (context == (sp & ~(THREAD_SIZE - 1)))
22310+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
22311 return sp;
22312
22313- tinfo = (struct thread_info *)context;
22314- if (tinfo->previous_esp)
22315- return tinfo->previous_esp;
22316+ sp = *(unsigned long *)context;
22317+ if (sp)
22318+ return sp;
22319
22320 return (unsigned long)regs;
22321 }
22322@@ -588,7 +587,7 @@ static void ptrace_triggered(struct perf_event *bp,
22323 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
22324 {
22325 int i;
22326- int dr7 = 0;
22327+ unsigned long dr7 = 0;
22328 struct arch_hw_breakpoint *info;
22329
22330 for (i = 0; i < HBP_NUM; i++) {
22331@@ -856,7 +855,7 @@ long arch_ptrace(struct task_struct *child, long request,
22332 unsigned long addr, unsigned long data)
22333 {
22334 int ret;
22335- unsigned long __user *datap = (unsigned long __user *)data;
22336+ unsigned long __user *datap = (__force unsigned long __user *)data;
22337
22338 switch (request) {
22339 /* read the word at location addr in the USER area. */
22340@@ -941,14 +940,14 @@ long arch_ptrace(struct task_struct *child, long request,
22341 if ((int) addr < 0)
22342 return -EIO;
22343 ret = do_get_thread_area(child, addr,
22344- (struct user_desc __user *)data);
22345+ (__force struct user_desc __user *) data);
22346 break;
22347
22348 case PTRACE_SET_THREAD_AREA:
22349 if ((int) addr < 0)
22350 return -EIO;
22351 ret = do_set_thread_area(child, addr,
22352- (struct user_desc __user *)data, 0);
22353+ (__force struct user_desc __user *) data, 0);
22354 break;
22355 #endif
22356
22357@@ -1326,7 +1325,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
22358
22359 #ifdef CONFIG_X86_64
22360
22361-static struct user_regset x86_64_regsets[] __read_mostly = {
22362+static user_regset_no_const x86_64_regsets[] __read_only = {
22363 [REGSET_GENERAL] = {
22364 .core_note_type = NT_PRSTATUS,
22365 .n = sizeof(struct user_regs_struct) / sizeof(long),
22366@@ -1367,7 +1366,7 @@ static const struct user_regset_view user_x86_64_view = {
22367 #endif /* CONFIG_X86_64 */
22368
22369 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
22370-static struct user_regset x86_32_regsets[] __read_mostly = {
22371+static user_regset_no_const x86_32_regsets[] __read_only = {
22372 [REGSET_GENERAL] = {
22373 .core_note_type = NT_PRSTATUS,
22374 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
22375@@ -1420,7 +1419,7 @@ static const struct user_regset_view user_x86_32_view = {
22376 */
22377 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
22378
22379-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
22380+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
22381 {
22382 #ifdef CONFIG_X86_64
22383 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
22384@@ -1455,7 +1454,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
22385 memset(info, 0, sizeof(*info));
22386 info->si_signo = SIGTRAP;
22387 info->si_code = si_code;
22388- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
22389+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
22390 }
22391
22392 void user_single_step_siginfo(struct task_struct *tsk,
22393@@ -1484,6 +1483,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
22394 # define IS_IA32 0
22395 #endif
22396
22397+#ifdef CONFIG_GRKERNSEC_SETXID
22398+extern void gr_delayed_cred_worker(void);
22399+#endif
22400+
22401 /*
22402 * We must return the syscall number to actually look up in the table.
22403 * This can be -1L to skip running any syscall at all.
22404@@ -1494,6 +1497,11 @@ long syscall_trace_enter(struct pt_regs *regs)
22405
22406 user_exit();
22407
22408+#ifdef CONFIG_GRKERNSEC_SETXID
22409+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
22410+ gr_delayed_cred_worker();
22411+#endif
22412+
22413 /*
22414 * If we stepped into a sysenter/syscall insn, it trapped in
22415 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
22416@@ -1549,6 +1557,11 @@ void syscall_trace_leave(struct pt_regs *regs)
22417 */
22418 user_exit();
22419
22420+#ifdef CONFIG_GRKERNSEC_SETXID
22421+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
22422+ gr_delayed_cred_worker();
22423+#endif
22424+
22425 audit_syscall_exit(regs);
22426
22427 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
22428diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
22429index 2cb9470..ff1fd80 100644
22430--- a/arch/x86/kernel/pvclock.c
22431+++ b/arch/x86/kernel/pvclock.c
22432@@ -43,11 +43,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
22433 return pv_tsc_khz;
22434 }
22435
22436-static atomic64_t last_value = ATOMIC64_INIT(0);
22437+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
22438
22439 void pvclock_resume(void)
22440 {
22441- atomic64_set(&last_value, 0);
22442+ atomic64_set_unchecked(&last_value, 0);
22443 }
22444
22445 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
22446@@ -92,11 +92,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
22447 * updating at the same time, and one of them could be slightly behind,
22448 * making the assumption that last_value always go forward fail to hold.
22449 */
22450- last = atomic64_read(&last_value);
22451+ last = atomic64_read_unchecked(&last_value);
22452 do {
22453 if (ret < last)
22454 return last;
22455- last = atomic64_cmpxchg(&last_value, last, ret);
22456+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
22457 } while (unlikely(last != ret));
22458
22459 return ret;
22460diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
22461index 76fa1e9..abf09ea 100644
22462--- a/arch/x86/kernel/reboot.c
22463+++ b/arch/x86/kernel/reboot.c
22464@@ -36,7 +36,7 @@ void (*pm_power_off)(void);
22465 EXPORT_SYMBOL(pm_power_off);
22466
22467 static const struct desc_ptr no_idt = {};
22468-static int reboot_mode;
22469+static unsigned short reboot_mode;
22470 enum reboot_type reboot_type = BOOT_ACPI;
22471 int reboot_force;
22472
22473@@ -157,6 +157,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
22474
22475 void __noreturn machine_real_restart(unsigned int type)
22476 {
22477+
22478+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
22479+ struct desc_struct *gdt;
22480+#endif
22481+
22482 local_irq_disable();
22483
22484 /*
22485@@ -184,7 +189,29 @@ void __noreturn machine_real_restart(unsigned int type)
22486
22487 /* Jump to the identity-mapped low memory code */
22488 #ifdef CONFIG_X86_32
22489- asm volatile("jmpl *%0" : :
22490+
22491+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22492+ gdt = get_cpu_gdt_table(smp_processor_id());
22493+ pax_open_kernel();
22494+#ifdef CONFIG_PAX_MEMORY_UDEREF
22495+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
22496+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
22497+ loadsegment(ds, __KERNEL_DS);
22498+ loadsegment(es, __KERNEL_DS);
22499+ loadsegment(ss, __KERNEL_DS);
22500+#endif
22501+#ifdef CONFIG_PAX_KERNEXEC
22502+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
22503+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
22504+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
22505+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
22506+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
22507+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
22508+#endif
22509+ pax_close_kernel();
22510+#endif
22511+
22512+ asm volatile("ljmpl *%0" : :
22513 "rm" (real_mode_header->machine_real_restart_asm),
22514 "a" (type));
22515 #else
22516@@ -531,7 +558,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
22517 * try to force a triple fault and then cycle between hitting the keyboard
22518 * controller and doing that
22519 */
22520-static void native_machine_emergency_restart(void)
22521+static void __noreturn native_machine_emergency_restart(void)
22522 {
22523 int i;
22524 int attempt = 0;
22525@@ -654,13 +681,13 @@ void native_machine_shutdown(void)
22526 #endif
22527 }
22528
22529-static void __machine_emergency_restart(int emergency)
22530+static void __noreturn __machine_emergency_restart(int emergency)
22531 {
22532 reboot_emergency = emergency;
22533 machine_ops.emergency_restart();
22534 }
22535
22536-static void native_machine_restart(char *__unused)
22537+static void __noreturn native_machine_restart(char *__unused)
22538 {
22539 pr_notice("machine restart\n");
22540
22541@@ -669,7 +696,7 @@ static void native_machine_restart(char *__unused)
22542 __machine_emergency_restart(0);
22543 }
22544
22545-static void native_machine_halt(void)
22546+static void __noreturn native_machine_halt(void)
22547 {
22548 /* Stop other cpus and apics */
22549 machine_shutdown();
22550@@ -679,7 +706,7 @@ static void native_machine_halt(void)
22551 stop_this_cpu(NULL);
22552 }
22553
22554-static void native_machine_power_off(void)
22555+static void __noreturn native_machine_power_off(void)
22556 {
22557 if (pm_power_off) {
22558 if (!reboot_force)
22559@@ -688,9 +715,10 @@ static void native_machine_power_off(void)
22560 }
22561 /* A fallback in case there is no PM info available */
22562 tboot_shutdown(TB_SHUTDOWN_HALT);
22563+ unreachable();
22564 }
22565
22566-struct machine_ops machine_ops = {
22567+struct machine_ops machine_ops __read_only = {
22568 .power_off = native_machine_power_off,
22569 .shutdown = native_machine_shutdown,
22570 .emergency_restart = native_machine_emergency_restart,
22571diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
22572index f2bb9c9..bed145d7 100644
22573--- a/arch/x86/kernel/relocate_kernel_64.S
22574+++ b/arch/x86/kernel/relocate_kernel_64.S
22575@@ -11,6 +11,7 @@
22576 #include <asm/kexec.h>
22577 #include <asm/processor-flags.h>
22578 #include <asm/pgtable_types.h>
22579+#include <asm/alternative-asm.h>
22580
22581 /*
22582 * Must be relocatable PIC code callable as a C function
22583@@ -167,6 +168,7 @@ identity_mapped:
22584 xorq %r14, %r14
22585 xorq %r15, %r15
22586
22587+ pax_force_retaddr 0, 1
22588 ret
22589
22590 1:
22591diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
22592index 56f7fcf..fa229f4 100644
22593--- a/arch/x86/kernel/setup.c
22594+++ b/arch/x86/kernel/setup.c
22595@@ -110,6 +110,7 @@
22596 #include <asm/mce.h>
22597 #include <asm/alternative.h>
22598 #include <asm/prom.h>
22599+#include <asm/boot.h>
22600
22601 /*
22602 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
22603@@ -444,7 +445,7 @@ static void __init parse_setup_data(void)
22604
22605 switch (data->type) {
22606 case SETUP_E820_EXT:
22607- parse_e820_ext(data);
22608+ parse_e820_ext((struct setup_data __force_kernel *)data);
22609 break;
22610 case SETUP_DTB:
22611 add_dtb(pa_data);
22612@@ -771,7 +772,7 @@ static void __init trim_bios_range(void)
22613 * area (640->1Mb) as ram even though it is not.
22614 * take them out.
22615 */
22616- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
22617+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
22618
22619 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
22620 }
22621@@ -779,7 +780,7 @@ static void __init trim_bios_range(void)
22622 /* called before trim_bios_range() to spare extra sanitize */
22623 static void __init e820_add_kernel_range(void)
22624 {
22625- u64 start = __pa_symbol(_text);
22626+ u64 start = __pa_symbol(ktla_ktva(_text));
22627 u64 size = __pa_symbol(_end) - start;
22628
22629 /*
22630@@ -841,8 +842,12 @@ static void __init trim_low_memory_range(void)
22631
22632 void __init setup_arch(char **cmdline_p)
22633 {
22634+#ifdef CONFIG_X86_32
22635+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - LOAD_PHYSICAL_ADDR);
22636+#else
22637 memblock_reserve(__pa_symbol(_text),
22638 (unsigned long)__bss_stop - (unsigned long)_text);
22639+#endif
22640
22641 early_reserve_initrd();
22642
22643@@ -934,14 +939,14 @@ void __init setup_arch(char **cmdline_p)
22644
22645 if (!boot_params.hdr.root_flags)
22646 root_mountflags &= ~MS_RDONLY;
22647- init_mm.start_code = (unsigned long) _text;
22648- init_mm.end_code = (unsigned long) _etext;
22649+ init_mm.start_code = ktla_ktva((unsigned long) _text);
22650+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
22651 init_mm.end_data = (unsigned long) _edata;
22652 init_mm.brk = _brk_end;
22653
22654- code_resource.start = __pa_symbol(_text);
22655- code_resource.end = __pa_symbol(_etext)-1;
22656- data_resource.start = __pa_symbol(_etext);
22657+ code_resource.start = __pa_symbol(ktla_ktva(_text));
22658+ code_resource.end = __pa_symbol(ktla_ktva(_etext))-1;
22659+ data_resource.start = __pa_symbol(_sdata);
22660 data_resource.end = __pa_symbol(_edata)-1;
22661 bss_resource.start = __pa_symbol(__bss_start);
22662 bss_resource.end = __pa_symbol(__bss_stop)-1;
22663diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
22664index 5cdff03..80fa283 100644
22665--- a/arch/x86/kernel/setup_percpu.c
22666+++ b/arch/x86/kernel/setup_percpu.c
22667@@ -21,19 +21,17 @@
22668 #include <asm/cpu.h>
22669 #include <asm/stackprotector.h>
22670
22671-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
22672+#ifdef CONFIG_SMP
22673+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
22674 EXPORT_PER_CPU_SYMBOL(cpu_number);
22675+#endif
22676
22677-#ifdef CONFIG_X86_64
22678 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
22679-#else
22680-#define BOOT_PERCPU_OFFSET 0
22681-#endif
22682
22683 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
22684 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
22685
22686-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
22687+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
22688 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
22689 };
22690 EXPORT_SYMBOL(__per_cpu_offset);
22691@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
22692 {
22693 #ifdef CONFIG_NEED_MULTIPLE_NODES
22694 pg_data_t *last = NULL;
22695- unsigned int cpu;
22696+ int cpu;
22697
22698 for_each_possible_cpu(cpu) {
22699 int node = early_cpu_to_node(cpu);
22700@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
22701 {
22702 #ifdef CONFIG_X86_32
22703 struct desc_struct gdt;
22704+ unsigned long base = per_cpu_offset(cpu);
22705
22706- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
22707- 0x2 | DESCTYPE_S, 0x8);
22708- gdt.s = 1;
22709+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
22710+ 0x83 | DESCTYPE_S, 0xC);
22711 write_gdt_entry(get_cpu_gdt_table(cpu),
22712 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
22713 #endif
22714@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
22715 /* alrighty, percpu areas up and running */
22716 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
22717 for_each_possible_cpu(cpu) {
22718+#ifdef CONFIG_CC_STACKPROTECTOR
22719+#ifdef CONFIG_X86_32
22720+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
22721+#endif
22722+#endif
22723 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
22724 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
22725 per_cpu(cpu_number, cpu) = cpu;
22726@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
22727 */
22728 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
22729 #endif
22730+#ifdef CONFIG_CC_STACKPROTECTOR
22731+#ifdef CONFIG_X86_32
22732+ if (!cpu)
22733+ per_cpu(stack_canary.canary, cpu) = canary;
22734+#endif
22735+#endif
22736 /*
22737 * Up to this point, the boot CPU has been using .init.data
22738 * area. Reload any changed state for the boot CPU.
22739diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
22740index 6956299..f20beae 100644
22741--- a/arch/x86/kernel/signal.c
22742+++ b/arch/x86/kernel/signal.c
22743@@ -196,7 +196,7 @@ static unsigned long align_sigframe(unsigned long sp)
22744 * Align the stack pointer according to the i386 ABI,
22745 * i.e. so that on function entry ((sp + 4) & 15) == 0.
22746 */
22747- sp = ((sp + 4) & -16ul) - 4;
22748+ sp = ((sp - 12) & -16ul) - 4;
22749 #else /* !CONFIG_X86_32 */
22750 sp = round_down(sp, 16) - 8;
22751 #endif
22752@@ -304,9 +304,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
22753 }
22754
22755 if (current->mm->context.vdso)
22756- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
22757+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
22758 else
22759- restorer = &frame->retcode;
22760+ restorer = (void __user *)&frame->retcode;
22761 if (ksig->ka.sa.sa_flags & SA_RESTORER)
22762 restorer = ksig->ka.sa.sa_restorer;
22763
22764@@ -320,7 +320,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
22765 * reasons and because gdb uses it as a signature to notice
22766 * signal handler stack frames.
22767 */
22768- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
22769+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
22770
22771 if (err)
22772 return -EFAULT;
22773@@ -367,7 +367,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
22774 err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
22775
22776 /* Set up to return from userspace. */
22777- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
22778+ if (current->mm->context.vdso)
22779+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
22780+ else
22781+ restorer = (void __user *)&frame->retcode;
22782 if (ksig->ka.sa.sa_flags & SA_RESTORER)
22783 restorer = ksig->ka.sa.sa_restorer;
22784 put_user_ex(restorer, &frame->pretcode);
22785@@ -379,7 +382,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
22786 * reasons and because gdb uses it as a signature to notice
22787 * signal handler stack frames.
22788 */
22789- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
22790+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
22791 } put_user_catch(err);
22792
22793 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
22794@@ -615,7 +618,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
22795 {
22796 int usig = signr_convert(ksig->sig);
22797 sigset_t *set = sigmask_to_save();
22798- compat_sigset_t *cset = (compat_sigset_t *) set;
22799+ sigset_t sigcopy;
22800+ compat_sigset_t *cset;
22801+
22802+ sigcopy = *set;
22803+
22804+ cset = (compat_sigset_t *) &sigcopy;
22805
22806 /* Set up the stack frame */
22807 if (is_ia32_frame()) {
22808@@ -626,7 +634,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
22809 } else if (is_x32_frame()) {
22810 return x32_setup_rt_frame(ksig, cset, regs);
22811 } else {
22812- return __setup_rt_frame(ksig->sig, ksig, set, regs);
22813+ return __setup_rt_frame(ksig->sig, ksig, &sigcopy, regs);
22814 }
22815 }
22816
22817diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
22818index 48d2b7d..90d328a 100644
22819--- a/arch/x86/kernel/smp.c
22820+++ b/arch/x86/kernel/smp.c
22821@@ -285,7 +285,7 @@ static int __init nonmi_ipi_setup(char *str)
22822
22823 __setup("nonmi_ipi", nonmi_ipi_setup);
22824
22825-struct smp_ops smp_ops = {
22826+struct smp_ops smp_ops __read_only = {
22827 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
22828 .smp_prepare_cpus = native_smp_prepare_cpus,
22829 .smp_cpus_done = native_smp_cpus_done,
22830diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
22831index bfd348e..4816ad8 100644
22832--- a/arch/x86/kernel/smpboot.c
22833+++ b/arch/x86/kernel/smpboot.c
22834@@ -748,6 +748,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
22835 idle->thread.sp = (unsigned long) (((struct pt_regs *)
22836 (THREAD_SIZE + task_stack_page(idle))) - 1);
22837 per_cpu(current_task, cpu) = idle;
22838+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
22839
22840 #ifdef CONFIG_X86_32
22841 /* Stack for startup_32 can be just as for start_secondary onwards */
22842@@ -755,11 +756,13 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
22843 #else
22844 clear_tsk_thread_flag(idle, TIF_FORK);
22845 initial_gs = per_cpu_offset(cpu);
22846- per_cpu(kernel_stack, cpu) =
22847- (unsigned long)task_stack_page(idle) -
22848- KERNEL_STACK_OFFSET + THREAD_SIZE;
22849+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
22850 #endif
22851+
22852+ pax_open_kernel();
22853 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
22854+ pax_close_kernel();
22855+
22856 initial_code = (unsigned long)start_secondary;
22857 stack_start = idle->thread.sp;
22858
22859@@ -908,6 +911,15 @@ int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
22860 /* the FPU context is blank, nobody can own it */
22861 __cpu_disable_lazy_restore(cpu);
22862
22863+#ifdef CONFIG_PAX_PER_CPU_PGD
22864+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
22865+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22866+ KERNEL_PGD_PTRS);
22867+#endif
22868+
22869+ /* the FPU context is blank, nobody can own it */
22870+ __cpu_disable_lazy_restore(cpu);
22871+
22872 err = do_boot_cpu(apicid, cpu, tidle);
22873 if (err) {
22874 pr_debug("do_boot_cpu failed %d\n", err);
22875diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
22876index 9b4d51d..5d28b58 100644
22877--- a/arch/x86/kernel/step.c
22878+++ b/arch/x86/kernel/step.c
22879@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
22880 struct desc_struct *desc;
22881 unsigned long base;
22882
22883- seg &= ~7UL;
22884+ seg >>= 3;
22885
22886 mutex_lock(&child->mm->context.lock);
22887- if (unlikely((seg >> 3) >= child->mm->context.size))
22888+ if (unlikely(seg >= child->mm->context.size))
22889 addr = -1L; /* bogus selector, access would fault */
22890 else {
22891 desc = child->mm->context.ldt + seg;
22892@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
22893 addr += base;
22894 }
22895 mutex_unlock(&child->mm->context.lock);
22896- }
22897+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
22898+ addr = ktla_ktva(addr);
22899
22900 return addr;
22901 }
22902@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
22903 unsigned char opcode[15];
22904 unsigned long addr = convert_ip_to_linear(child, regs);
22905
22906+ if (addr == -EINVAL)
22907+ return 0;
22908+
22909 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
22910 for (i = 0; i < copied; i++) {
22911 switch (opcode[i]) {
22912diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
22913new file mode 100644
22914index 0000000..5877189
22915--- /dev/null
22916+++ b/arch/x86/kernel/sys_i386_32.c
22917@@ -0,0 +1,189 @@
22918+/*
22919+ * This file contains various random system calls that
22920+ * have a non-standard calling sequence on the Linux/i386
22921+ * platform.
22922+ */
22923+
22924+#include <linux/errno.h>
22925+#include <linux/sched.h>
22926+#include <linux/mm.h>
22927+#include <linux/fs.h>
22928+#include <linux/smp.h>
22929+#include <linux/sem.h>
22930+#include <linux/msg.h>
22931+#include <linux/shm.h>
22932+#include <linux/stat.h>
22933+#include <linux/syscalls.h>
22934+#include <linux/mman.h>
22935+#include <linux/file.h>
22936+#include <linux/utsname.h>
22937+#include <linux/ipc.h>
22938+#include <linux/elf.h>
22939+
22940+#include <linux/uaccess.h>
22941+#include <linux/unistd.h>
22942+
22943+#include <asm/syscalls.h>
22944+
22945+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
22946+{
22947+ unsigned long pax_task_size = TASK_SIZE;
22948+
22949+#ifdef CONFIG_PAX_SEGMEXEC
22950+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
22951+ pax_task_size = SEGMEXEC_TASK_SIZE;
22952+#endif
22953+
22954+ if (flags & MAP_FIXED)
22955+ if (len > pax_task_size || addr > pax_task_size - len)
22956+ return -EINVAL;
22957+
22958+ return 0;
22959+}
22960+
22961+/*
22962+ * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
22963+ */
22964+static unsigned long get_align_mask(void)
22965+{
22966+ if (va_align.flags < 0 || !(va_align.flags & ALIGN_VA_32))
22967+ return 0;
22968+
22969+ if (!(current->flags & PF_RANDOMIZE))
22970+ return 0;
22971+
22972+ return va_align.mask;
22973+}
22974+
22975+unsigned long
22976+arch_get_unmapped_area(struct file *filp, unsigned long addr,
22977+ unsigned long len, unsigned long pgoff, unsigned long flags)
22978+{
22979+ struct mm_struct *mm = current->mm;
22980+ struct vm_area_struct *vma;
22981+ unsigned long pax_task_size = TASK_SIZE;
22982+ struct vm_unmapped_area_info info;
22983+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
22984+
22985+#ifdef CONFIG_PAX_SEGMEXEC
22986+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
22987+ pax_task_size = SEGMEXEC_TASK_SIZE;
22988+#endif
22989+
22990+ pax_task_size -= PAGE_SIZE;
22991+
22992+ if (len > pax_task_size)
22993+ return -ENOMEM;
22994+
22995+ if (flags & MAP_FIXED)
22996+ return addr;
22997+
22998+#ifdef CONFIG_PAX_RANDMMAP
22999+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
23000+#endif
23001+
23002+ if (addr) {
23003+ addr = PAGE_ALIGN(addr);
23004+ if (pax_task_size - len >= addr) {
23005+ vma = find_vma(mm, addr);
23006+ if (check_heap_stack_gap(vma, addr, len, offset))
23007+ return addr;
23008+ }
23009+ }
23010+
23011+ info.flags = 0;
23012+ info.length = len;
23013+ info.align_mask = filp ? get_align_mask() : 0;
23014+ info.align_offset = pgoff << PAGE_SHIFT;
23015+ info.threadstack_offset = offset;
23016+
23017+#ifdef CONFIG_PAX_PAGEEXEC
23018+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) {
23019+ info.low_limit = 0x00110000UL;
23020+ info.high_limit = mm->start_code;
23021+
23022+#ifdef CONFIG_PAX_RANDMMAP
23023+ if (mm->pax_flags & MF_PAX_RANDMMAP)
23024+ info.low_limit += mm->delta_mmap & 0x03FFF000UL;
23025+#endif
23026+
23027+ if (info.low_limit < info.high_limit) {
23028+ addr = vm_unmapped_area(&info);
23029+ if (!IS_ERR_VALUE(addr))
23030+ return addr;
23031+ }
23032+ } else
23033+#endif
23034+
23035+ info.low_limit = mm->mmap_base;
23036+ info.high_limit = pax_task_size;
23037+
23038+ return vm_unmapped_area(&info);
23039+}
23040+
23041+unsigned long
23042+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
23043+ const unsigned long len, const unsigned long pgoff,
23044+ const unsigned long flags)
23045+{
23046+ struct vm_area_struct *vma;
23047+ struct mm_struct *mm = current->mm;
23048+ unsigned long addr = addr0, pax_task_size = TASK_SIZE;
23049+ struct vm_unmapped_area_info info;
23050+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
23051+
23052+#ifdef CONFIG_PAX_SEGMEXEC
23053+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
23054+ pax_task_size = SEGMEXEC_TASK_SIZE;
23055+#endif
23056+
23057+ pax_task_size -= PAGE_SIZE;
23058+
23059+ /* requested length too big for entire address space */
23060+ if (len > pax_task_size)
23061+ return -ENOMEM;
23062+
23063+ if (flags & MAP_FIXED)
23064+ return addr;
23065+
23066+#ifdef CONFIG_PAX_PAGEEXEC
23067+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
23068+ goto bottomup;
23069+#endif
23070+
23071+#ifdef CONFIG_PAX_RANDMMAP
23072+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
23073+#endif
23074+
23075+ /* requesting a specific address */
23076+ if (addr) {
23077+ addr = PAGE_ALIGN(addr);
23078+ if (pax_task_size - len >= addr) {
23079+ vma = find_vma(mm, addr);
23080+ if (check_heap_stack_gap(vma, addr, len, offset))
23081+ return addr;
23082+ }
23083+ }
23084+
23085+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
23086+ info.length = len;
23087+ info.low_limit = PAGE_SIZE;
23088+ info.high_limit = mm->mmap_base;
23089+ info.align_mask = filp ? get_align_mask() : 0;
23090+ info.align_offset = pgoff << PAGE_SHIFT;
23091+ info.threadstack_offset = offset;
23092+
23093+ addr = vm_unmapped_area(&info);
23094+ if (!(addr & ~PAGE_MASK))
23095+ return addr;
23096+ VM_BUG_ON(addr != -ENOMEM);
23097+
23098+bottomup:
23099+ /*
23100+ * A failed mmap() very likely causes application failure,
23101+ * so fall back to the bottom-up function here. This scenario
23102+ * can happen with large stack limits and large mmap()
23103+ * allocations.
23104+ */
23105+ return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
23106+}
23107diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
23108index dbded5a..ace2781 100644
23109--- a/arch/x86/kernel/sys_x86_64.c
23110+++ b/arch/x86/kernel/sys_x86_64.c
23111@@ -81,8 +81,8 @@ out:
23112 return error;
23113 }
23114
23115-static void find_start_end(unsigned long flags, unsigned long *begin,
23116- unsigned long *end)
23117+static void find_start_end(struct mm_struct *mm, unsigned long flags,
23118+ unsigned long *begin, unsigned long *end)
23119 {
23120 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
23121 unsigned long new_begin;
23122@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
23123 *begin = new_begin;
23124 }
23125 } else {
23126- *begin = TASK_UNMAPPED_BASE;
23127+ *begin = mm->mmap_base;
23128 *end = TASK_SIZE;
23129 }
23130 }
23131@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
23132 struct vm_area_struct *vma;
23133 struct vm_unmapped_area_info info;
23134 unsigned long begin, end;
23135+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
23136
23137 if (flags & MAP_FIXED)
23138 return addr;
23139
23140- find_start_end(flags, &begin, &end);
23141+ find_start_end(mm, flags, &begin, &end);
23142
23143 if (len > end)
23144 return -ENOMEM;
23145
23146+#ifdef CONFIG_PAX_RANDMMAP
23147+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
23148+#endif
23149+
23150 if (addr) {
23151 addr = PAGE_ALIGN(addr);
23152 vma = find_vma(mm, addr);
23153- if (end - len >= addr &&
23154- (!vma || addr + len <= vma->vm_start))
23155+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
23156 return addr;
23157 }
23158
23159@@ -137,6 +141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
23160 info.high_limit = end;
23161 info.align_mask = filp ? get_align_mask() : 0;
23162 info.align_offset = pgoff << PAGE_SHIFT;
23163+ info.threadstack_offset = offset;
23164 return vm_unmapped_area(&info);
23165 }
23166
23167@@ -149,6 +154,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
23168 struct mm_struct *mm = current->mm;
23169 unsigned long addr = addr0;
23170 struct vm_unmapped_area_info info;
23171+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
23172
23173 /* requested length too big for entire address space */
23174 if (len > TASK_SIZE)
23175@@ -161,12 +167,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
23176 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
23177 goto bottomup;
23178
23179+#ifdef CONFIG_PAX_RANDMMAP
23180+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
23181+#endif
23182+
23183 /* requesting a specific address */
23184 if (addr) {
23185 addr = PAGE_ALIGN(addr);
23186 vma = find_vma(mm, addr);
23187- if (TASK_SIZE - len >= addr &&
23188- (!vma || addr + len <= vma->vm_start))
23189+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
23190 return addr;
23191 }
23192
23193@@ -176,6 +185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
23194 info.high_limit = mm->mmap_base;
23195 info.align_mask = filp ? get_align_mask() : 0;
23196 info.align_offset = pgoff << PAGE_SHIFT;
23197+ info.threadstack_offset = offset;
23198 addr = vm_unmapped_area(&info);
23199 if (!(addr & ~PAGE_MASK))
23200 return addr;
23201diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
23202index f84fe00..f41d9f1 100644
23203--- a/arch/x86/kernel/tboot.c
23204+++ b/arch/x86/kernel/tboot.c
23205@@ -220,7 +220,7 @@ static int tboot_setup_sleep(void)
23206
23207 void tboot_shutdown(u32 shutdown_type)
23208 {
23209- void (*shutdown)(void);
23210+ void (* __noreturn shutdown)(void);
23211
23212 if (!tboot_enabled())
23213 return;
23214@@ -242,7 +242,7 @@ void tboot_shutdown(u32 shutdown_type)
23215
23216 switch_to_tboot_pt();
23217
23218- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
23219+ shutdown = (void *)tboot->shutdown_entry;
23220 shutdown();
23221
23222 /* should not reach here */
23223@@ -300,7 +300,7 @@ static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
23224 return 0;
23225 }
23226
23227-static atomic_t ap_wfs_count;
23228+static atomic_unchecked_t ap_wfs_count;
23229
23230 static int tboot_wait_for_aps(int num_aps)
23231 {
23232@@ -324,16 +324,16 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
23233 {
23234 switch (action) {
23235 case CPU_DYING:
23236- atomic_inc(&ap_wfs_count);
23237+ atomic_inc_unchecked(&ap_wfs_count);
23238 if (num_online_cpus() == 1)
23239- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
23240+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
23241 return NOTIFY_BAD;
23242 break;
23243 }
23244 return NOTIFY_OK;
23245 }
23246
23247-static struct notifier_block tboot_cpu_notifier __cpuinitdata =
23248+static struct notifier_block tboot_cpu_notifier =
23249 {
23250 .notifier_call = tboot_cpu_callback,
23251 };
23252@@ -345,7 +345,7 @@ static __init int tboot_late_init(void)
23253
23254 tboot_create_trampoline();
23255
23256- atomic_set(&ap_wfs_count, 0);
23257+ atomic_set_unchecked(&ap_wfs_count, 0);
23258 register_hotcpu_notifier(&tboot_cpu_notifier);
23259
23260 acpi_os_set_prepare_sleep(&tboot_sleep);
23261diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
23262index 24d3c91..d06b473 100644
23263--- a/arch/x86/kernel/time.c
23264+++ b/arch/x86/kernel/time.c
23265@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
23266 {
23267 unsigned long pc = instruction_pointer(regs);
23268
23269- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
23270+ if (!user_mode(regs) && in_lock_functions(pc)) {
23271 #ifdef CONFIG_FRAME_POINTER
23272- return *(unsigned long *)(regs->bp + sizeof(long));
23273+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
23274 #else
23275 unsigned long *sp =
23276 (unsigned long *)kernel_stack_pointer(regs);
23277@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
23278 * or above a saved flags. Eflags has bits 22-31 zero,
23279 * kernel addresses don't.
23280 */
23281+
23282+#ifdef CONFIG_PAX_KERNEXEC
23283+ return ktla_ktva(sp[0]);
23284+#else
23285 if (sp[0] >> 22)
23286 return sp[0];
23287 if (sp[1] >> 22)
23288 return sp[1];
23289 #endif
23290+
23291+#endif
23292 }
23293 return pc;
23294 }
23295diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
23296index f7fec09..9991981 100644
23297--- a/arch/x86/kernel/tls.c
23298+++ b/arch/x86/kernel/tls.c
23299@@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
23300 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
23301 return -EINVAL;
23302
23303+#ifdef CONFIG_PAX_SEGMEXEC
23304+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
23305+ return -EINVAL;
23306+#endif
23307+
23308 set_tls_desc(p, idx, &info, 1);
23309
23310 return 0;
23311@@ -200,7 +205,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
23312
23313 if (kbuf)
23314 info = kbuf;
23315- else if (__copy_from_user(infobuf, ubuf, count))
23316+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
23317 return -EFAULT;
23318 else
23319 info = infobuf;
23320diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
23321index 772e2a8..bad5bf6 100644
23322--- a/arch/x86/kernel/traps.c
23323+++ b/arch/x86/kernel/traps.c
23324@@ -68,12 +68,6 @@
23325 #include <asm/setup.h>
23326
23327 asmlinkage int system_call(void);
23328-
23329-/*
23330- * The IDT has to be page-aligned to simplify the Pentium
23331- * F0 0F bug workaround.
23332- */
23333-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
23334 #endif
23335
23336 DECLARE_BITMAP(used_vectors, NR_VECTORS);
23337@@ -106,11 +100,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
23338 }
23339
23340 static int __kprobes
23341-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
23342+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
23343 struct pt_regs *regs, long error_code)
23344 {
23345 #ifdef CONFIG_X86_32
23346- if (regs->flags & X86_VM_MASK) {
23347+ if (v8086_mode(regs)) {
23348 /*
23349 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
23350 * On nmi (interrupt 2), do_trap should not be called.
23351@@ -123,12 +117,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
23352 return -1;
23353 }
23354 #endif
23355- if (!user_mode(regs)) {
23356+ if (!user_mode_novm(regs)) {
23357 if (!fixup_exception(regs)) {
23358 tsk->thread.error_code = error_code;
23359 tsk->thread.trap_nr = trapnr;
23360+
23361+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23362+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
23363+ str = "PAX: suspicious stack segment fault";
23364+#endif
23365+
23366 die(str, regs, error_code);
23367 }
23368+
23369+#ifdef CONFIG_PAX_REFCOUNT
23370+ if (trapnr == 4)
23371+ pax_report_refcount_overflow(regs);
23372+#endif
23373+
23374 return 0;
23375 }
23376
23377@@ -136,7 +142,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
23378 }
23379
23380 static void __kprobes
23381-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
23382+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
23383 long error_code, siginfo_t *info)
23384 {
23385 struct task_struct *tsk = current;
23386@@ -160,7 +166,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
23387 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
23388 printk_ratelimit()) {
23389 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
23390- tsk->comm, tsk->pid, str,
23391+ tsk->comm, task_pid_nr(tsk), str,
23392 regs->ip, regs->sp, error_code);
23393 print_vma_addr(" in ", regs->ip);
23394 pr_cont("\n");
23395@@ -273,7 +279,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
23396 conditional_sti(regs);
23397
23398 #ifdef CONFIG_X86_32
23399- if (regs->flags & X86_VM_MASK) {
23400+ if (v8086_mode(regs)) {
23401 local_irq_enable();
23402 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
23403 goto exit;
23404@@ -281,18 +287,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
23405 #endif
23406
23407 tsk = current;
23408- if (!user_mode(regs)) {
23409+ if (!user_mode_novm(regs)) {
23410 if (fixup_exception(regs))
23411 goto exit;
23412
23413 tsk->thread.error_code = error_code;
23414 tsk->thread.trap_nr = X86_TRAP_GP;
23415 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
23416- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
23417+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
23418+
23419+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23420+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
23421+ die("PAX: suspicious general protection fault", regs, error_code);
23422+ else
23423+#endif
23424+
23425 die("general protection fault", regs, error_code);
23426+ }
23427 goto exit;
23428 }
23429
23430+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23431+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
23432+ struct mm_struct *mm = tsk->mm;
23433+ unsigned long limit;
23434+
23435+ down_write(&mm->mmap_sem);
23436+ limit = mm->context.user_cs_limit;
23437+ if (limit < TASK_SIZE) {
23438+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
23439+ up_write(&mm->mmap_sem);
23440+ return;
23441+ }
23442+ up_write(&mm->mmap_sem);
23443+ }
23444+#endif
23445+
23446 tsk->thread.error_code = error_code;
23447 tsk->thread.trap_nr = X86_TRAP_GP;
23448
23449@@ -450,7 +480,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
23450 /* It's safe to allow irq's after DR6 has been saved */
23451 preempt_conditional_sti(regs);
23452
23453- if (regs->flags & X86_VM_MASK) {
23454+ if (v8086_mode(regs)) {
23455 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
23456 X86_TRAP_DB);
23457 preempt_conditional_cli(regs);
23458@@ -465,7 +495,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
23459 * We already checked v86 mode above, so we can check for kernel mode
23460 * by just checking the CPL of CS.
23461 */
23462- if ((dr6 & DR_STEP) && !user_mode(regs)) {
23463+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
23464 tsk->thread.debugreg6 &= ~DR_STEP;
23465 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
23466 regs->flags &= ~X86_EFLAGS_TF;
23467@@ -497,7 +527,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
23468 return;
23469 conditional_sti(regs);
23470
23471- if (!user_mode_vm(regs))
23472+ if (!user_mode(regs))
23473 {
23474 if (!fixup_exception(regs)) {
23475 task->thread.error_code = error_code;
23476diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
23477index 2ed8459..7cf329f 100644
23478--- a/arch/x86/kernel/uprobes.c
23479+++ b/arch/x86/kernel/uprobes.c
23480@@ -629,7 +629,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
23481 int ret = NOTIFY_DONE;
23482
23483 /* We are only interested in userspace traps */
23484- if (regs && !user_mode_vm(regs))
23485+ if (regs && !user_mode(regs))
23486 return NOTIFY_DONE;
23487
23488 switch (val) {
23489@@ -719,7 +719,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
23490
23491 if (ncopied != rasize) {
23492 pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
23493- "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
23494+ "%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip);
23495
23496 force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
23497 }
23498diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
23499index b9242ba..50c5edd 100644
23500--- a/arch/x86/kernel/verify_cpu.S
23501+++ b/arch/x86/kernel/verify_cpu.S
23502@@ -20,6 +20,7 @@
23503 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
23504 * arch/x86/kernel/trampoline_64.S: secondary processor verification
23505 * arch/x86/kernel/head_32.S: processor startup
23506+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
23507 *
23508 * verify_cpu, returns the status of longmode and SSE in register %eax.
23509 * 0: Success 1: Failure
23510diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
23511index e8edcf5..27f9344 100644
23512--- a/arch/x86/kernel/vm86_32.c
23513+++ b/arch/x86/kernel/vm86_32.c
23514@@ -44,6 +44,7 @@
23515 #include <linux/ptrace.h>
23516 #include <linux/audit.h>
23517 #include <linux/stddef.h>
23518+#include <linux/grsecurity.h>
23519
23520 #include <asm/uaccess.h>
23521 #include <asm/io.h>
23522@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
23523 do_exit(SIGSEGV);
23524 }
23525
23526- tss = &per_cpu(init_tss, get_cpu());
23527+ tss = init_tss + get_cpu();
23528 current->thread.sp0 = current->thread.saved_sp0;
23529 current->thread.sysenter_cs = __KERNEL_CS;
23530 load_sp0(tss, &current->thread);
23531@@ -214,6 +215,14 @@ SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
23532
23533 if (tsk->thread.saved_sp0)
23534 return -EPERM;
23535+
23536+#ifdef CONFIG_GRKERNSEC_VM86
23537+ if (!capable(CAP_SYS_RAWIO)) {
23538+ gr_handle_vm86();
23539+ return -EPERM;
23540+ }
23541+#endif
23542+
23543 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
23544 offsetof(struct kernel_vm86_struct, vm86plus) -
23545 sizeof(info.regs));
23546@@ -238,6 +247,13 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
23547 int tmp;
23548 struct vm86plus_struct __user *v86;
23549
23550+#ifdef CONFIG_GRKERNSEC_VM86
23551+ if (!capable(CAP_SYS_RAWIO)) {
23552+ gr_handle_vm86();
23553+ return -EPERM;
23554+ }
23555+#endif
23556+
23557 tsk = current;
23558 switch (cmd) {
23559 case VM86_REQUEST_IRQ:
23560@@ -318,7 +334,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
23561 tsk->thread.saved_fs = info->regs32->fs;
23562 tsk->thread.saved_gs = get_user_gs(info->regs32);
23563
23564- tss = &per_cpu(init_tss, get_cpu());
23565+ tss = init_tss + get_cpu();
23566 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
23567 if (cpu_has_sep)
23568 tsk->thread.sysenter_cs = 0;
23569@@ -525,7 +541,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
23570 goto cannot_handle;
23571 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
23572 goto cannot_handle;
23573- intr_ptr = (unsigned long __user *) (i << 2);
23574+ intr_ptr = (__force unsigned long __user *) (i << 2);
23575 if (get_user(segoffs, intr_ptr))
23576 goto cannot_handle;
23577 if ((segoffs >> 16) == BIOSSEG)
23578diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
23579index 10c4f30..57377c2 100644
23580--- a/arch/x86/kernel/vmlinux.lds.S
23581+++ b/arch/x86/kernel/vmlinux.lds.S
23582@@ -26,6 +26,13 @@
23583 #include <asm/page_types.h>
23584 #include <asm/cache.h>
23585 #include <asm/boot.h>
23586+#include <asm/segment.h>
23587+
23588+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23589+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
23590+#else
23591+#define __KERNEL_TEXT_OFFSET 0
23592+#endif
23593
23594 #undef i386 /* in case the preprocessor is a 32bit one */
23595
23596@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
23597
23598 PHDRS {
23599 text PT_LOAD FLAGS(5); /* R_E */
23600+#ifdef CONFIG_X86_32
23601+ module PT_LOAD FLAGS(5); /* R_E */
23602+#endif
23603+#ifdef CONFIG_XEN
23604+ rodata PT_LOAD FLAGS(5); /* R_E */
23605+#else
23606+ rodata PT_LOAD FLAGS(4); /* R__ */
23607+#endif
23608 data PT_LOAD FLAGS(6); /* RW_ */
23609-#ifdef CONFIG_X86_64
23610+ init.begin PT_LOAD FLAGS(6); /* RW_ */
23611 #ifdef CONFIG_SMP
23612 percpu PT_LOAD FLAGS(6); /* RW_ */
23613 #endif
23614+ text.init PT_LOAD FLAGS(5); /* R_E */
23615+ text.exit PT_LOAD FLAGS(5); /* R_E */
23616 init PT_LOAD FLAGS(7); /* RWE */
23617-#endif
23618 note PT_NOTE FLAGS(0); /* ___ */
23619 }
23620
23621 SECTIONS
23622 {
23623 #ifdef CONFIG_X86_32
23624- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
23625- phys_startup_32 = startup_32 - LOAD_OFFSET;
23626+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
23627 #else
23628- . = __START_KERNEL;
23629- phys_startup_64 = startup_64 - LOAD_OFFSET;
23630+ . = __START_KERNEL;
23631 #endif
23632
23633 /* Text and read-only data */
23634- .text : AT(ADDR(.text) - LOAD_OFFSET) {
23635- _text = .;
23636+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
23637 /* bootstrapping code */
23638+#ifdef CONFIG_X86_32
23639+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
23640+#else
23641+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
23642+#endif
23643+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
23644+ _text = .;
23645 HEAD_TEXT
23646 . = ALIGN(8);
23647 _stext = .;
23648@@ -104,13 +124,48 @@ SECTIONS
23649 IRQENTRY_TEXT
23650 *(.fixup)
23651 *(.gnu.warning)
23652- /* End of text section */
23653- _etext = .;
23654 } :text = 0x9090
23655
23656- NOTES :text :note
23657+ . += __KERNEL_TEXT_OFFSET;
23658
23659- EXCEPTION_TABLE(16) :text = 0x9090
23660+#ifdef CONFIG_X86_32
23661+ . = ALIGN(PAGE_SIZE);
23662+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
23663+
23664+#ifdef CONFIG_PAX_KERNEXEC
23665+ MODULES_EXEC_VADDR = .;
23666+ BYTE(0)
23667+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
23668+ . = ALIGN(HPAGE_SIZE) - 1;
23669+ MODULES_EXEC_END = .;
23670+#endif
23671+
23672+ } :module
23673+#endif
23674+
23675+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
23676+ /* End of text section */
23677+ BYTE(0)
23678+ _etext = . - __KERNEL_TEXT_OFFSET;
23679+ }
23680+
23681+#ifdef CONFIG_X86_32
23682+ . = ALIGN(PAGE_SIZE);
23683+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
23684+ *(.idt)
23685+ . = ALIGN(PAGE_SIZE);
23686+ *(.empty_zero_page)
23687+ *(.initial_pg_fixmap)
23688+ *(.initial_pg_pmd)
23689+ *(.initial_page_table)
23690+ *(.swapper_pg_dir)
23691+ } :rodata
23692+#endif
23693+
23694+ . = ALIGN(PAGE_SIZE);
23695+ NOTES :rodata :note
23696+
23697+ EXCEPTION_TABLE(16) :rodata
23698
23699 #if defined(CONFIG_DEBUG_RODATA)
23700 /* .text should occupy whole number of pages */
23701@@ -122,16 +177,20 @@ SECTIONS
23702
23703 /* Data */
23704 .data : AT(ADDR(.data) - LOAD_OFFSET) {
23705+
23706+#ifdef CONFIG_PAX_KERNEXEC
23707+ . = ALIGN(HPAGE_SIZE);
23708+#else
23709+ . = ALIGN(PAGE_SIZE);
23710+#endif
23711+
23712 /* Start of data section */
23713 _sdata = .;
23714
23715 /* init_task */
23716 INIT_TASK_DATA(THREAD_SIZE)
23717
23718-#ifdef CONFIG_X86_32
23719- /* 32 bit has nosave before _edata */
23720 NOSAVE_DATA
23721-#endif
23722
23723 PAGE_ALIGNED_DATA(PAGE_SIZE)
23724
23725@@ -172,12 +231,19 @@ SECTIONS
23726 #endif /* CONFIG_X86_64 */
23727
23728 /* Init code and data - will be freed after init */
23729- . = ALIGN(PAGE_SIZE);
23730 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
23731+ BYTE(0)
23732+
23733+#ifdef CONFIG_PAX_KERNEXEC
23734+ . = ALIGN(HPAGE_SIZE);
23735+#else
23736+ . = ALIGN(PAGE_SIZE);
23737+#endif
23738+
23739 __init_begin = .; /* paired with __init_end */
23740- }
23741+ } :init.begin
23742
23743-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
23744+#ifdef CONFIG_SMP
23745 /*
23746 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
23747 * output PHDR, so the next output section - .init.text - should
23748@@ -186,12 +252,27 @@ SECTIONS
23749 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
23750 #endif
23751
23752- INIT_TEXT_SECTION(PAGE_SIZE)
23753-#ifdef CONFIG_X86_64
23754- :init
23755-#endif
23756+ . = ALIGN(PAGE_SIZE);
23757+ init_begin = .;
23758+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
23759+ VMLINUX_SYMBOL(_sinittext) = .;
23760+ INIT_TEXT
23761+ VMLINUX_SYMBOL(_einittext) = .;
23762+ . = ALIGN(PAGE_SIZE);
23763+ } :text.init
23764
23765- INIT_DATA_SECTION(16)
23766+ /*
23767+ * .exit.text is discard at runtime, not link time, to deal with
23768+ * references from .altinstructions and .eh_frame
23769+ */
23770+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
23771+ EXIT_TEXT
23772+ . = ALIGN(16);
23773+ } :text.exit
23774+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
23775+
23776+ . = ALIGN(PAGE_SIZE);
23777+ INIT_DATA_SECTION(16) :init
23778
23779 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
23780 __x86_cpu_dev_start = .;
23781@@ -253,19 +334,12 @@ SECTIONS
23782 }
23783
23784 . = ALIGN(8);
23785- /*
23786- * .exit.text is discard at runtime, not link time, to deal with
23787- * references from .altinstructions and .eh_frame
23788- */
23789- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
23790- EXIT_TEXT
23791- }
23792
23793 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
23794 EXIT_DATA
23795 }
23796
23797-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
23798+#ifndef CONFIG_SMP
23799 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
23800 #endif
23801
23802@@ -284,16 +358,10 @@ SECTIONS
23803 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
23804 __smp_locks = .;
23805 *(.smp_locks)
23806- . = ALIGN(PAGE_SIZE);
23807 __smp_locks_end = .;
23808+ . = ALIGN(PAGE_SIZE);
23809 }
23810
23811-#ifdef CONFIG_X86_64
23812- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
23813- NOSAVE_DATA
23814- }
23815-#endif
23816-
23817 /* BSS */
23818 . = ALIGN(PAGE_SIZE);
23819 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
23820@@ -309,6 +377,7 @@ SECTIONS
23821 __brk_base = .;
23822 . += 64 * 1024; /* 64k alignment slop space */
23823 *(.brk_reservation) /* areas brk users have reserved */
23824+ . = ALIGN(HPAGE_SIZE);
23825 __brk_limit = .;
23826 }
23827
23828@@ -335,13 +404,12 @@ SECTIONS
23829 * for the boot processor.
23830 */
23831 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
23832-INIT_PER_CPU(gdt_page);
23833 INIT_PER_CPU(irq_stack_union);
23834
23835 /*
23836 * Build-time check on the image size:
23837 */
23838-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
23839+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
23840 "kernel image bigger than KERNEL_IMAGE_SIZE");
23841
23842 #ifdef CONFIG_SMP
23843diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
23844index 9a907a6..f83f921 100644
23845--- a/arch/x86/kernel/vsyscall_64.c
23846+++ b/arch/x86/kernel/vsyscall_64.c
23847@@ -56,15 +56,13 @@
23848 DEFINE_VVAR(int, vgetcpu_mode);
23849 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
23850
23851-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
23852+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
23853
23854 static int __init vsyscall_setup(char *str)
23855 {
23856 if (str) {
23857 if (!strcmp("emulate", str))
23858 vsyscall_mode = EMULATE;
23859- else if (!strcmp("native", str))
23860- vsyscall_mode = NATIVE;
23861 else if (!strcmp("none", str))
23862 vsyscall_mode = NONE;
23863 else
23864@@ -323,8 +321,7 @@ do_ret:
23865 return true;
23866
23867 sigsegv:
23868- force_sig(SIGSEGV, current);
23869- return true;
23870+ do_group_exit(SIGKILL);
23871 }
23872
23873 /*
23874@@ -377,10 +374,7 @@ void __init map_vsyscall(void)
23875 extern char __vvar_page;
23876 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
23877
23878- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
23879- vsyscall_mode == NATIVE
23880- ? PAGE_KERNEL_VSYSCALL
23881- : PAGE_KERNEL_VVAR);
23882+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
23883 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
23884 (unsigned long)VSYSCALL_START);
23885
23886diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
23887index b014d94..6d6ca7b 100644
23888--- a/arch/x86/kernel/x8664_ksyms_64.c
23889+++ b/arch/x86/kernel/x8664_ksyms_64.c
23890@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
23891 EXPORT_SYMBOL(copy_user_generic_unrolled);
23892 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
23893 EXPORT_SYMBOL(__copy_user_nocache);
23894-EXPORT_SYMBOL(_copy_from_user);
23895-EXPORT_SYMBOL(_copy_to_user);
23896
23897 EXPORT_SYMBOL(copy_page);
23898 EXPORT_SYMBOL(clear_page);
23899diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
23900index 45a14db..075bb9b 100644
23901--- a/arch/x86/kernel/x86_init.c
23902+++ b/arch/x86/kernel/x86_init.c
23903@@ -85,7 +85,7 @@ struct x86_init_ops x86_init __initdata = {
23904 },
23905 };
23906
23907-struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
23908+struct x86_cpuinit_ops x86_cpuinit __cpuinitconst = {
23909 .early_percpu_clock_init = x86_init_noop,
23910 .setup_percpu_clockev = setup_secondary_APIC_clock,
23911 };
23912@@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
23913 static void default_nmi_init(void) { };
23914 static int default_i8042_detect(void) { return 1; };
23915
23916-struct x86_platform_ops x86_platform = {
23917+struct x86_platform_ops x86_platform __read_only = {
23918 .calibrate_tsc = native_calibrate_tsc,
23919 .get_wallclock = mach_get_cmos_time,
23920 .set_wallclock = mach_set_rtc_mmss,
23921@@ -107,7 +107,7 @@ struct x86_platform_ops x86_platform = {
23922 };
23923
23924 EXPORT_SYMBOL_GPL(x86_platform);
23925-struct x86_msi_ops x86_msi = {
23926+struct x86_msi_ops x86_msi __read_only = {
23927 .setup_msi_irqs = native_setup_msi_irqs,
23928 .compose_msi_msg = native_compose_msi_msg,
23929 .teardown_msi_irq = native_teardown_msi_irq,
23930@@ -116,7 +116,7 @@ struct x86_msi_ops x86_msi = {
23931 .setup_hpet_msi = default_setup_hpet_msi,
23932 };
23933
23934-struct x86_io_apic_ops x86_io_apic_ops = {
23935+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
23936 .init = native_io_apic_init_mappings,
23937 .read = native_io_apic_read,
23938 .write = native_io_apic_write,
23939diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
23940index ada87a3..afea76d 100644
23941--- a/arch/x86/kernel/xsave.c
23942+++ b/arch/x86/kernel/xsave.c
23943@@ -199,6 +199,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
23944 {
23945 int err;
23946
23947+ buf = (struct xsave_struct __user *)____m(buf);
23948 if (use_xsave())
23949 err = xsave_user(buf);
23950 else if (use_fxsr())
23951@@ -311,6 +312,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
23952 */
23953 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
23954 {
23955+ buf = (void __user *)____m(buf);
23956 if (use_xsave()) {
23957 if ((unsigned long)buf % 64 || fx_only) {
23958 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
23959diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
23960index a20ecb5..d0e2194 100644
23961--- a/arch/x86/kvm/cpuid.c
23962+++ b/arch/x86/kvm/cpuid.c
23963@@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
23964 struct kvm_cpuid2 *cpuid,
23965 struct kvm_cpuid_entry2 __user *entries)
23966 {
23967- int r;
23968+ int r, i;
23969
23970 r = -E2BIG;
23971 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
23972 goto out;
23973 r = -EFAULT;
23974- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
23975- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
23976+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
23977 goto out;
23978+ for (i = 0; i < cpuid->nent; ++i) {
23979+ struct kvm_cpuid_entry2 cpuid_entry;
23980+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
23981+ goto out;
23982+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
23983+ }
23984 vcpu->arch.cpuid_nent = cpuid->nent;
23985 kvm_apic_set_version(vcpu);
23986 kvm_x86_ops->cpuid_update(vcpu);
23987@@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
23988 struct kvm_cpuid2 *cpuid,
23989 struct kvm_cpuid_entry2 __user *entries)
23990 {
23991- int r;
23992+ int r, i;
23993
23994 r = -E2BIG;
23995 if (cpuid->nent < vcpu->arch.cpuid_nent)
23996 goto out;
23997 r = -EFAULT;
23998- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
23999- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
24000+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
24001 goto out;
24002+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
24003+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
24004+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
24005+ goto out;
24006+ }
24007 return 0;
24008
24009 out:
24010diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
24011index 5953dce..f11a7d2 100644
24012--- a/arch/x86/kvm/emulate.c
24013+++ b/arch/x86/kvm/emulate.c
24014@@ -329,6 +329,7 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
24015
24016 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
24017 do { \
24018+ unsigned long _tmp; \
24019 __asm__ __volatile__ ( \
24020 _PRE_EFLAGS("0", "4", "2") \
24021 _op _suffix " %"_x"3,%1; " \
24022@@ -343,8 +344,6 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
24023 /* Raw emulation: instruction has two explicit operands. */
24024 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
24025 do { \
24026- unsigned long _tmp; \
24027- \
24028 switch ((ctxt)->dst.bytes) { \
24029 case 2: \
24030 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
24031@@ -360,7 +359,6 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
24032
24033 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
24034 do { \
24035- unsigned long _tmp; \
24036 switch ((ctxt)->dst.bytes) { \
24037 case 1: \
24038 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
24039diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
24040index 0eee2c8..94a32c3 100644
24041--- a/arch/x86/kvm/lapic.c
24042+++ b/arch/x86/kvm/lapic.c
24043@@ -55,7 +55,7 @@
24044 #define APIC_BUS_CYCLE_NS 1
24045
24046 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
24047-#define apic_debug(fmt, arg...)
24048+#define apic_debug(fmt, arg...) do {} while (0)
24049
24050 #define APIC_LVT_NUM 6
24051 /* 14 is the version for Xeon and Pentium 8.4.8*/
24052diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
24053index da20860..d19fdf5 100644
24054--- a/arch/x86/kvm/paging_tmpl.h
24055+++ b/arch/x86/kvm/paging_tmpl.h
24056@@ -208,7 +208,7 @@ retry_walk:
24057 if (unlikely(kvm_is_error_hva(host_addr)))
24058 goto error;
24059
24060- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
24061+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
24062 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
24063 goto error;
24064 walker->ptep_user[walker->level - 1] = ptep_user;
24065diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
24066index a14a6ea..dc86cf0 100644
24067--- a/arch/x86/kvm/svm.c
24068+++ b/arch/x86/kvm/svm.c
24069@@ -3493,7 +3493,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
24070 int cpu = raw_smp_processor_id();
24071
24072 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
24073+
24074+ pax_open_kernel();
24075 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
24076+ pax_close_kernel();
24077+
24078 load_TR_desc();
24079 }
24080
24081@@ -3894,6 +3898,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
24082 #endif
24083 #endif
24084
24085+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
24086+ __set_fs(current_thread_info()->addr_limit);
24087+#endif
24088+
24089 reload_tss(vcpu);
24090
24091 local_irq_disable();
24092diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
24093index 5402c94..c3bdeee 100644
24094--- a/arch/x86/kvm/vmx.c
24095+++ b/arch/x86/kvm/vmx.c
24096@@ -1311,12 +1311,12 @@ static void vmcs_write64(unsigned long field, u64 value)
24097 #endif
24098 }
24099
24100-static void vmcs_clear_bits(unsigned long field, u32 mask)
24101+static void vmcs_clear_bits(unsigned long field, unsigned long mask)
24102 {
24103 vmcs_writel(field, vmcs_readl(field) & ~mask);
24104 }
24105
24106-static void vmcs_set_bits(unsigned long field, u32 mask)
24107+static void vmcs_set_bits(unsigned long field, unsigned long mask)
24108 {
24109 vmcs_writel(field, vmcs_readl(field) | mask);
24110 }
24111@@ -1517,7 +1517,11 @@ static void reload_tss(void)
24112 struct desc_struct *descs;
24113
24114 descs = (void *)gdt->address;
24115+
24116+ pax_open_kernel();
24117 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
24118+ pax_close_kernel();
24119+
24120 load_TR_desc();
24121 }
24122
24123@@ -1741,6 +1745,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
24124 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
24125 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
24126
24127+#ifdef CONFIG_PAX_PER_CPU_PGD
24128+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
24129+#endif
24130+
24131 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
24132 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
24133 vmx->loaded_vmcs->cpu = cpu;
24134@@ -2935,8 +2943,11 @@ static __init int hardware_setup(void)
24135 if (!cpu_has_vmx_flexpriority())
24136 flexpriority_enabled = 0;
24137
24138- if (!cpu_has_vmx_tpr_shadow())
24139- kvm_x86_ops->update_cr8_intercept = NULL;
24140+ if (!cpu_has_vmx_tpr_shadow()) {
24141+ pax_open_kernel();
24142+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
24143+ pax_close_kernel();
24144+ }
24145
24146 if (enable_ept && !cpu_has_vmx_ept_2m_page())
24147 kvm_disable_largepages();
24148@@ -2947,13 +2958,15 @@ static __init int hardware_setup(void)
24149 if (!cpu_has_vmx_apicv())
24150 enable_apicv = 0;
24151
24152+ pax_open_kernel();
24153 if (enable_apicv)
24154- kvm_x86_ops->update_cr8_intercept = NULL;
24155+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
24156 else {
24157- kvm_x86_ops->hwapic_irr_update = NULL;
24158- kvm_x86_ops->deliver_posted_interrupt = NULL;
24159- kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
24160+ *(void **)&kvm_x86_ops->hwapic_irr_update = NULL;
24161+ *(void **)&kvm_x86_ops->deliver_posted_interrupt = NULL;
24162+ *(void **)&kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
24163 }
24164+ pax_close_kernel();
24165
24166 if (nested)
24167 nested_vmx_setup_ctls_msrs();
24168@@ -4076,7 +4089,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
24169
24170 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
24171 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
24172+
24173+#ifndef CONFIG_PAX_PER_CPU_PGD
24174 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
24175+#endif
24176
24177 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
24178 #ifdef CONFIG_X86_64
24179@@ -4098,7 +4114,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
24180 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
24181 vmx->host_idt_base = dt.address;
24182
24183- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
24184+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
24185
24186 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
24187 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
24188@@ -7030,6 +7046,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
24189 "jmp 2f \n\t"
24190 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
24191 "2: "
24192+
24193+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
24194+ "ljmp %[cs],$3f\n\t"
24195+ "3: "
24196+#endif
24197+
24198 /* Save guest registers, load host registers, keep flags */
24199 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
24200 "pop %0 \n\t"
24201@@ -7082,6 +7104,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
24202 #endif
24203 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
24204 [wordsize]"i"(sizeof(ulong))
24205+
24206+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
24207+ ,[cs]"i"(__KERNEL_CS)
24208+#endif
24209+
24210 : "cc", "memory"
24211 #ifdef CONFIG_X86_64
24212 , "rax", "rbx", "rdi", "rsi"
24213@@ -7095,7 +7122,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
24214 if (debugctlmsr)
24215 update_debugctlmsr(debugctlmsr);
24216
24217-#ifndef CONFIG_X86_64
24218+#ifdef CONFIG_X86_32
24219 /*
24220 * The sysexit path does not restore ds/es, so we must set them to
24221 * a reasonable value ourselves.
24222@@ -7104,8 +7131,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
24223 * may be executed in interrupt context, which saves and restore segments
24224 * around it, nullifying its effect.
24225 */
24226- loadsegment(ds, __USER_DS);
24227- loadsegment(es, __USER_DS);
24228+ loadsegment(ds, __KERNEL_DS);
24229+ loadsegment(es, __KERNEL_DS);
24230+ loadsegment(ss, __KERNEL_DS);
24231+
24232+#ifdef CONFIG_PAX_KERNEXEC
24233+ loadsegment(fs, __KERNEL_PERCPU);
24234+#endif
24235+
24236+#ifdef CONFIG_PAX_MEMORY_UDEREF
24237+ __set_fs(current_thread_info()->addr_limit);
24238+#endif
24239+
24240 #endif
24241
24242 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
24243diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
24244index e8ba99c..ee9d7d9 100644
24245--- a/arch/x86/kvm/x86.c
24246+++ b/arch/x86/kvm/x86.c
24247@@ -1725,8 +1725,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
24248 {
24249 struct kvm *kvm = vcpu->kvm;
24250 int lm = is_long_mode(vcpu);
24251- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
24252- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
24253+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
24254+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
24255 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
24256 : kvm->arch.xen_hvm_config.blob_size_32;
24257 u32 page_num = data & ~PAGE_MASK;
24258@@ -2609,6 +2609,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
24259 if (n < msr_list.nmsrs)
24260 goto out;
24261 r = -EFAULT;
24262+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
24263+ goto out;
24264 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
24265 num_msrs_to_save * sizeof(u32)))
24266 goto out;
24267@@ -5297,7 +5299,7 @@ static struct notifier_block pvclock_gtod_notifier = {
24268 };
24269 #endif
24270
24271-int kvm_arch_init(void *opaque)
24272+int kvm_arch_init(const void *opaque)
24273 {
24274 int r;
24275 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
24276diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
24277index 7114c63..a1018fc 100644
24278--- a/arch/x86/lguest/boot.c
24279+++ b/arch/x86/lguest/boot.c
24280@@ -1201,9 +1201,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
24281 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
24282 * Launcher to reboot us.
24283 */
24284-static void lguest_restart(char *reason)
24285+static __noreturn void lguest_restart(char *reason)
24286 {
24287 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
24288+ BUG();
24289 }
24290
24291 /*G:050
24292diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
24293index 00933d5..3a64af9 100644
24294--- a/arch/x86/lib/atomic64_386_32.S
24295+++ b/arch/x86/lib/atomic64_386_32.S
24296@@ -48,6 +48,10 @@ BEGIN(read)
24297 movl (v), %eax
24298 movl 4(v), %edx
24299 RET_ENDP
24300+BEGIN(read_unchecked)
24301+ movl (v), %eax
24302+ movl 4(v), %edx
24303+RET_ENDP
24304 #undef v
24305
24306 #define v %esi
24307@@ -55,6 +59,10 @@ BEGIN(set)
24308 movl %ebx, (v)
24309 movl %ecx, 4(v)
24310 RET_ENDP
24311+BEGIN(set_unchecked)
24312+ movl %ebx, (v)
24313+ movl %ecx, 4(v)
24314+RET_ENDP
24315 #undef v
24316
24317 #define v %esi
24318@@ -70,6 +78,20 @@ RET_ENDP
24319 BEGIN(add)
24320 addl %eax, (v)
24321 adcl %edx, 4(v)
24322+
24323+#ifdef CONFIG_PAX_REFCOUNT
24324+ jno 0f
24325+ subl %eax, (v)
24326+ sbbl %edx, 4(v)
24327+ int $4
24328+0:
24329+ _ASM_EXTABLE(0b, 0b)
24330+#endif
24331+
24332+RET_ENDP
24333+BEGIN(add_unchecked)
24334+ addl %eax, (v)
24335+ adcl %edx, 4(v)
24336 RET_ENDP
24337 #undef v
24338
24339@@ -77,6 +99,24 @@ RET_ENDP
24340 BEGIN(add_return)
24341 addl (v), %eax
24342 adcl 4(v), %edx
24343+
24344+#ifdef CONFIG_PAX_REFCOUNT
24345+ into
24346+1234:
24347+ _ASM_EXTABLE(1234b, 2f)
24348+#endif
24349+
24350+ movl %eax, (v)
24351+ movl %edx, 4(v)
24352+
24353+#ifdef CONFIG_PAX_REFCOUNT
24354+2:
24355+#endif
24356+
24357+RET_ENDP
24358+BEGIN(add_return_unchecked)
24359+ addl (v), %eax
24360+ adcl 4(v), %edx
24361 movl %eax, (v)
24362 movl %edx, 4(v)
24363 RET_ENDP
24364@@ -86,6 +126,20 @@ RET_ENDP
24365 BEGIN(sub)
24366 subl %eax, (v)
24367 sbbl %edx, 4(v)
24368+
24369+#ifdef CONFIG_PAX_REFCOUNT
24370+ jno 0f
24371+ addl %eax, (v)
24372+ adcl %edx, 4(v)
24373+ int $4
24374+0:
24375+ _ASM_EXTABLE(0b, 0b)
24376+#endif
24377+
24378+RET_ENDP
24379+BEGIN(sub_unchecked)
24380+ subl %eax, (v)
24381+ sbbl %edx, 4(v)
24382 RET_ENDP
24383 #undef v
24384
24385@@ -96,6 +150,27 @@ BEGIN(sub_return)
24386 sbbl $0, %edx
24387 addl (v), %eax
24388 adcl 4(v), %edx
24389+
24390+#ifdef CONFIG_PAX_REFCOUNT
24391+ into
24392+1234:
24393+ _ASM_EXTABLE(1234b, 2f)
24394+#endif
24395+
24396+ movl %eax, (v)
24397+ movl %edx, 4(v)
24398+
24399+#ifdef CONFIG_PAX_REFCOUNT
24400+2:
24401+#endif
24402+
24403+RET_ENDP
24404+BEGIN(sub_return_unchecked)
24405+ negl %edx
24406+ negl %eax
24407+ sbbl $0, %edx
24408+ addl (v), %eax
24409+ adcl 4(v), %edx
24410 movl %eax, (v)
24411 movl %edx, 4(v)
24412 RET_ENDP
24413@@ -105,6 +180,20 @@ RET_ENDP
24414 BEGIN(inc)
24415 addl $1, (v)
24416 adcl $0, 4(v)
24417+
24418+#ifdef CONFIG_PAX_REFCOUNT
24419+ jno 0f
24420+ subl $1, (v)
24421+ sbbl $0, 4(v)
24422+ int $4
24423+0:
24424+ _ASM_EXTABLE(0b, 0b)
24425+#endif
24426+
24427+RET_ENDP
24428+BEGIN(inc_unchecked)
24429+ addl $1, (v)
24430+ adcl $0, 4(v)
24431 RET_ENDP
24432 #undef v
24433
24434@@ -114,6 +203,26 @@ BEGIN(inc_return)
24435 movl 4(v), %edx
24436 addl $1, %eax
24437 adcl $0, %edx
24438+
24439+#ifdef CONFIG_PAX_REFCOUNT
24440+ into
24441+1234:
24442+ _ASM_EXTABLE(1234b, 2f)
24443+#endif
24444+
24445+ movl %eax, (v)
24446+ movl %edx, 4(v)
24447+
24448+#ifdef CONFIG_PAX_REFCOUNT
24449+2:
24450+#endif
24451+
24452+RET_ENDP
24453+BEGIN(inc_return_unchecked)
24454+ movl (v), %eax
24455+ movl 4(v), %edx
24456+ addl $1, %eax
24457+ adcl $0, %edx
24458 movl %eax, (v)
24459 movl %edx, 4(v)
24460 RET_ENDP
24461@@ -123,6 +232,20 @@ RET_ENDP
24462 BEGIN(dec)
24463 subl $1, (v)
24464 sbbl $0, 4(v)
24465+
24466+#ifdef CONFIG_PAX_REFCOUNT
24467+ jno 0f
24468+ addl $1, (v)
24469+ adcl $0, 4(v)
24470+ int $4
24471+0:
24472+ _ASM_EXTABLE(0b, 0b)
24473+#endif
24474+
24475+RET_ENDP
24476+BEGIN(dec_unchecked)
24477+ subl $1, (v)
24478+ sbbl $0, 4(v)
24479 RET_ENDP
24480 #undef v
24481
24482@@ -132,6 +255,26 @@ BEGIN(dec_return)
24483 movl 4(v), %edx
24484 subl $1, %eax
24485 sbbl $0, %edx
24486+
24487+#ifdef CONFIG_PAX_REFCOUNT
24488+ into
24489+1234:
24490+ _ASM_EXTABLE(1234b, 2f)
24491+#endif
24492+
24493+ movl %eax, (v)
24494+ movl %edx, 4(v)
24495+
24496+#ifdef CONFIG_PAX_REFCOUNT
24497+2:
24498+#endif
24499+
24500+RET_ENDP
24501+BEGIN(dec_return_unchecked)
24502+ movl (v), %eax
24503+ movl 4(v), %edx
24504+ subl $1, %eax
24505+ sbbl $0, %edx
24506 movl %eax, (v)
24507 movl %edx, 4(v)
24508 RET_ENDP
24509@@ -143,6 +286,13 @@ BEGIN(add_unless)
24510 adcl %edx, %edi
24511 addl (v), %eax
24512 adcl 4(v), %edx
24513+
24514+#ifdef CONFIG_PAX_REFCOUNT
24515+ into
24516+1234:
24517+ _ASM_EXTABLE(1234b, 2f)
24518+#endif
24519+
24520 cmpl %eax, %ecx
24521 je 3f
24522 1:
24523@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
24524 1:
24525 addl $1, %eax
24526 adcl $0, %edx
24527+
24528+#ifdef CONFIG_PAX_REFCOUNT
24529+ into
24530+1234:
24531+ _ASM_EXTABLE(1234b, 2f)
24532+#endif
24533+
24534 movl %eax, (v)
24535 movl %edx, 4(v)
24536 movl $1, %eax
24537@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
24538 movl 4(v), %edx
24539 subl $1, %eax
24540 sbbl $0, %edx
24541+
24542+#ifdef CONFIG_PAX_REFCOUNT
24543+ into
24544+1234:
24545+ _ASM_EXTABLE(1234b, 1f)
24546+#endif
24547+
24548 js 1f
24549 movl %eax, (v)
24550 movl %edx, 4(v)
24551diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
24552index f5cc9eb..51fa319 100644
24553--- a/arch/x86/lib/atomic64_cx8_32.S
24554+++ b/arch/x86/lib/atomic64_cx8_32.S
24555@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
24556 CFI_STARTPROC
24557
24558 read64 %ecx
24559+ pax_force_retaddr
24560 ret
24561 CFI_ENDPROC
24562 ENDPROC(atomic64_read_cx8)
24563
24564+ENTRY(atomic64_read_unchecked_cx8)
24565+ CFI_STARTPROC
24566+
24567+ read64 %ecx
24568+ pax_force_retaddr
24569+ ret
24570+ CFI_ENDPROC
24571+ENDPROC(atomic64_read_unchecked_cx8)
24572+
24573 ENTRY(atomic64_set_cx8)
24574 CFI_STARTPROC
24575
24576@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
24577 cmpxchg8b (%esi)
24578 jne 1b
24579
24580+ pax_force_retaddr
24581 ret
24582 CFI_ENDPROC
24583 ENDPROC(atomic64_set_cx8)
24584
24585+ENTRY(atomic64_set_unchecked_cx8)
24586+ CFI_STARTPROC
24587+
24588+1:
24589+/* we don't need LOCK_PREFIX since aligned 64-bit writes
24590+ * are atomic on 586 and newer */
24591+ cmpxchg8b (%esi)
24592+ jne 1b
24593+
24594+ pax_force_retaddr
24595+ ret
24596+ CFI_ENDPROC
24597+ENDPROC(atomic64_set_unchecked_cx8)
24598+
24599 ENTRY(atomic64_xchg_cx8)
24600 CFI_STARTPROC
24601
24602@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
24603 cmpxchg8b (%esi)
24604 jne 1b
24605
24606+ pax_force_retaddr
24607 ret
24608 CFI_ENDPROC
24609 ENDPROC(atomic64_xchg_cx8)
24610
24611-.macro addsub_return func ins insc
24612-ENTRY(atomic64_\func\()_return_cx8)
24613+.macro addsub_return func ins insc unchecked=""
24614+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
24615 CFI_STARTPROC
24616 SAVE ebp
24617 SAVE ebx
24618@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
24619 movl %edx, %ecx
24620 \ins\()l %esi, %ebx
24621 \insc\()l %edi, %ecx
24622+
24623+.ifb \unchecked
24624+#ifdef CONFIG_PAX_REFCOUNT
24625+ into
24626+2:
24627+ _ASM_EXTABLE(2b, 3f)
24628+#endif
24629+.endif
24630+
24631 LOCK_PREFIX
24632 cmpxchg8b (%ebp)
24633 jne 1b
24634-
24635-10:
24636 movl %ebx, %eax
24637 movl %ecx, %edx
24638+
24639+.ifb \unchecked
24640+#ifdef CONFIG_PAX_REFCOUNT
24641+3:
24642+#endif
24643+.endif
24644+
24645 RESTORE edi
24646 RESTORE esi
24647 RESTORE ebx
24648 RESTORE ebp
24649+ pax_force_retaddr
24650 ret
24651 CFI_ENDPROC
24652-ENDPROC(atomic64_\func\()_return_cx8)
24653+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
24654 .endm
24655
24656 addsub_return add add adc
24657 addsub_return sub sub sbb
24658+addsub_return add add adc _unchecked
24659+addsub_return sub sub sbb _unchecked
24660
24661-.macro incdec_return func ins insc
24662-ENTRY(atomic64_\func\()_return_cx8)
24663+.macro incdec_return func ins insc unchecked=""
24664+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
24665 CFI_STARTPROC
24666 SAVE ebx
24667
24668@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
24669 movl %edx, %ecx
24670 \ins\()l $1, %ebx
24671 \insc\()l $0, %ecx
24672+
24673+.ifb \unchecked
24674+#ifdef CONFIG_PAX_REFCOUNT
24675+ into
24676+2:
24677+ _ASM_EXTABLE(2b, 3f)
24678+#endif
24679+.endif
24680+
24681 LOCK_PREFIX
24682 cmpxchg8b (%esi)
24683 jne 1b
24684
24685-10:
24686 movl %ebx, %eax
24687 movl %ecx, %edx
24688+
24689+.ifb \unchecked
24690+#ifdef CONFIG_PAX_REFCOUNT
24691+3:
24692+#endif
24693+.endif
24694+
24695 RESTORE ebx
24696+ pax_force_retaddr
24697 ret
24698 CFI_ENDPROC
24699-ENDPROC(atomic64_\func\()_return_cx8)
24700+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
24701 .endm
24702
24703 incdec_return inc add adc
24704 incdec_return dec sub sbb
24705+incdec_return inc add adc _unchecked
24706+incdec_return dec sub sbb _unchecked
24707
24708 ENTRY(atomic64_dec_if_positive_cx8)
24709 CFI_STARTPROC
24710@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
24711 movl %edx, %ecx
24712 subl $1, %ebx
24713 sbb $0, %ecx
24714+
24715+#ifdef CONFIG_PAX_REFCOUNT
24716+ into
24717+1234:
24718+ _ASM_EXTABLE(1234b, 2f)
24719+#endif
24720+
24721 js 2f
24722 LOCK_PREFIX
24723 cmpxchg8b (%esi)
24724@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
24725 movl %ebx, %eax
24726 movl %ecx, %edx
24727 RESTORE ebx
24728+ pax_force_retaddr
24729 ret
24730 CFI_ENDPROC
24731 ENDPROC(atomic64_dec_if_positive_cx8)
24732@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
24733 movl %edx, %ecx
24734 addl %ebp, %ebx
24735 adcl %edi, %ecx
24736+
24737+#ifdef CONFIG_PAX_REFCOUNT
24738+ into
24739+1234:
24740+ _ASM_EXTABLE(1234b, 3f)
24741+#endif
24742+
24743 LOCK_PREFIX
24744 cmpxchg8b (%esi)
24745 jne 1b
24746@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
24747 CFI_ADJUST_CFA_OFFSET -8
24748 RESTORE ebx
24749 RESTORE ebp
24750+ pax_force_retaddr
24751 ret
24752 4:
24753 cmpl %edx, 4(%esp)
24754@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
24755 xorl %ecx, %ecx
24756 addl $1, %ebx
24757 adcl %edx, %ecx
24758+
24759+#ifdef CONFIG_PAX_REFCOUNT
24760+ into
24761+1234:
24762+ _ASM_EXTABLE(1234b, 3f)
24763+#endif
24764+
24765 LOCK_PREFIX
24766 cmpxchg8b (%esi)
24767 jne 1b
24768@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
24769 movl $1, %eax
24770 3:
24771 RESTORE ebx
24772+ pax_force_retaddr
24773 ret
24774 CFI_ENDPROC
24775 ENDPROC(atomic64_inc_not_zero_cx8)
24776diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
24777index e78b8ee..7e173a8 100644
24778--- a/arch/x86/lib/checksum_32.S
24779+++ b/arch/x86/lib/checksum_32.S
24780@@ -29,7 +29,8 @@
24781 #include <asm/dwarf2.h>
24782 #include <asm/errno.h>
24783 #include <asm/asm.h>
24784-
24785+#include <asm/segment.h>
24786+
24787 /*
24788 * computes a partial checksum, e.g. for TCP/UDP fragments
24789 */
24790@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
24791
24792 #define ARGBASE 16
24793 #define FP 12
24794-
24795-ENTRY(csum_partial_copy_generic)
24796+
24797+ENTRY(csum_partial_copy_generic_to_user)
24798 CFI_STARTPROC
24799+
24800+#ifdef CONFIG_PAX_MEMORY_UDEREF
24801+ pushl_cfi %gs
24802+ popl_cfi %es
24803+ jmp csum_partial_copy_generic
24804+#endif
24805+
24806+ENTRY(csum_partial_copy_generic_from_user)
24807+
24808+#ifdef CONFIG_PAX_MEMORY_UDEREF
24809+ pushl_cfi %gs
24810+ popl_cfi %ds
24811+#endif
24812+
24813+ENTRY(csum_partial_copy_generic)
24814 subl $4,%esp
24815 CFI_ADJUST_CFA_OFFSET 4
24816 pushl_cfi %edi
24817@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
24818 jmp 4f
24819 SRC(1: movw (%esi), %bx )
24820 addl $2, %esi
24821-DST( movw %bx, (%edi) )
24822+DST( movw %bx, %es:(%edi) )
24823 addl $2, %edi
24824 addw %bx, %ax
24825 adcl $0, %eax
24826@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
24827 SRC(1: movl (%esi), %ebx )
24828 SRC( movl 4(%esi), %edx )
24829 adcl %ebx, %eax
24830-DST( movl %ebx, (%edi) )
24831+DST( movl %ebx, %es:(%edi) )
24832 adcl %edx, %eax
24833-DST( movl %edx, 4(%edi) )
24834+DST( movl %edx, %es:4(%edi) )
24835
24836 SRC( movl 8(%esi), %ebx )
24837 SRC( movl 12(%esi), %edx )
24838 adcl %ebx, %eax
24839-DST( movl %ebx, 8(%edi) )
24840+DST( movl %ebx, %es:8(%edi) )
24841 adcl %edx, %eax
24842-DST( movl %edx, 12(%edi) )
24843+DST( movl %edx, %es:12(%edi) )
24844
24845 SRC( movl 16(%esi), %ebx )
24846 SRC( movl 20(%esi), %edx )
24847 adcl %ebx, %eax
24848-DST( movl %ebx, 16(%edi) )
24849+DST( movl %ebx, %es:16(%edi) )
24850 adcl %edx, %eax
24851-DST( movl %edx, 20(%edi) )
24852+DST( movl %edx, %es:20(%edi) )
24853
24854 SRC( movl 24(%esi), %ebx )
24855 SRC( movl 28(%esi), %edx )
24856 adcl %ebx, %eax
24857-DST( movl %ebx, 24(%edi) )
24858+DST( movl %ebx, %es:24(%edi) )
24859 adcl %edx, %eax
24860-DST( movl %edx, 28(%edi) )
24861+DST( movl %edx, %es:28(%edi) )
24862
24863 lea 32(%esi), %esi
24864 lea 32(%edi), %edi
24865@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
24866 shrl $2, %edx # This clears CF
24867 SRC(3: movl (%esi), %ebx )
24868 adcl %ebx, %eax
24869-DST( movl %ebx, (%edi) )
24870+DST( movl %ebx, %es:(%edi) )
24871 lea 4(%esi), %esi
24872 lea 4(%edi), %edi
24873 dec %edx
24874@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
24875 jb 5f
24876 SRC( movw (%esi), %cx )
24877 leal 2(%esi), %esi
24878-DST( movw %cx, (%edi) )
24879+DST( movw %cx, %es:(%edi) )
24880 leal 2(%edi), %edi
24881 je 6f
24882 shll $16,%ecx
24883 SRC(5: movb (%esi), %cl )
24884-DST( movb %cl, (%edi) )
24885+DST( movb %cl, %es:(%edi) )
24886 6: addl %ecx, %eax
24887 adcl $0, %eax
24888 7:
24889@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
24890
24891 6001:
24892 movl ARGBASE+20(%esp), %ebx # src_err_ptr
24893- movl $-EFAULT, (%ebx)
24894+ movl $-EFAULT, %ss:(%ebx)
24895
24896 # zero the complete destination - computing the rest
24897 # is too much work
24898@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
24899
24900 6002:
24901 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
24902- movl $-EFAULT,(%ebx)
24903+ movl $-EFAULT,%ss:(%ebx)
24904 jmp 5000b
24905
24906 .previous
24907
24908+ pushl_cfi %ss
24909+ popl_cfi %ds
24910+ pushl_cfi %ss
24911+ popl_cfi %es
24912 popl_cfi %ebx
24913 CFI_RESTORE ebx
24914 popl_cfi %esi
24915@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
24916 popl_cfi %ecx # equivalent to addl $4,%esp
24917 ret
24918 CFI_ENDPROC
24919-ENDPROC(csum_partial_copy_generic)
24920+ENDPROC(csum_partial_copy_generic_to_user)
24921
24922 #else
24923
24924 /* Version for PentiumII/PPro */
24925
24926 #define ROUND1(x) \
24927+ nop; nop; nop; \
24928 SRC(movl x(%esi), %ebx ) ; \
24929 addl %ebx, %eax ; \
24930- DST(movl %ebx, x(%edi) ) ;
24931+ DST(movl %ebx, %es:x(%edi)) ;
24932
24933 #define ROUND(x) \
24934+ nop; nop; nop; \
24935 SRC(movl x(%esi), %ebx ) ; \
24936 adcl %ebx, %eax ; \
24937- DST(movl %ebx, x(%edi) ) ;
24938+ DST(movl %ebx, %es:x(%edi)) ;
24939
24940 #define ARGBASE 12
24941-
24942-ENTRY(csum_partial_copy_generic)
24943+
24944+ENTRY(csum_partial_copy_generic_to_user)
24945 CFI_STARTPROC
24946+
24947+#ifdef CONFIG_PAX_MEMORY_UDEREF
24948+ pushl_cfi %gs
24949+ popl_cfi %es
24950+ jmp csum_partial_copy_generic
24951+#endif
24952+
24953+ENTRY(csum_partial_copy_generic_from_user)
24954+
24955+#ifdef CONFIG_PAX_MEMORY_UDEREF
24956+ pushl_cfi %gs
24957+ popl_cfi %ds
24958+#endif
24959+
24960+ENTRY(csum_partial_copy_generic)
24961 pushl_cfi %ebx
24962 CFI_REL_OFFSET ebx, 0
24963 pushl_cfi %edi
24964@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
24965 subl %ebx, %edi
24966 lea -1(%esi),%edx
24967 andl $-32,%edx
24968- lea 3f(%ebx,%ebx), %ebx
24969+ lea 3f(%ebx,%ebx,2), %ebx
24970 testl %esi, %esi
24971 jmp *%ebx
24972 1: addl $64,%esi
24973@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
24974 jb 5f
24975 SRC( movw (%esi), %dx )
24976 leal 2(%esi), %esi
24977-DST( movw %dx, (%edi) )
24978+DST( movw %dx, %es:(%edi) )
24979 leal 2(%edi), %edi
24980 je 6f
24981 shll $16,%edx
24982 5:
24983 SRC( movb (%esi), %dl )
24984-DST( movb %dl, (%edi) )
24985+DST( movb %dl, %es:(%edi) )
24986 6: addl %edx, %eax
24987 adcl $0, %eax
24988 7:
24989 .section .fixup, "ax"
24990 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
24991- movl $-EFAULT, (%ebx)
24992+ movl $-EFAULT, %ss:(%ebx)
24993 # zero the complete destination (computing the rest is too much work)
24994 movl ARGBASE+8(%esp),%edi # dst
24995 movl ARGBASE+12(%esp),%ecx # len
24996@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
24997 rep; stosb
24998 jmp 7b
24999 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
25000- movl $-EFAULT, (%ebx)
25001+ movl $-EFAULT, %ss:(%ebx)
25002 jmp 7b
25003 .previous
25004
25005+#ifdef CONFIG_PAX_MEMORY_UDEREF
25006+ pushl_cfi %ss
25007+ popl_cfi %ds
25008+ pushl_cfi %ss
25009+ popl_cfi %es
25010+#endif
25011+
25012 popl_cfi %esi
25013 CFI_RESTORE esi
25014 popl_cfi %edi
25015@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
25016 CFI_RESTORE ebx
25017 ret
25018 CFI_ENDPROC
25019-ENDPROC(csum_partial_copy_generic)
25020+ENDPROC(csum_partial_copy_generic_to_user)
25021
25022 #undef ROUND
25023 #undef ROUND1
25024diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
25025index f2145cf..cea889d 100644
25026--- a/arch/x86/lib/clear_page_64.S
25027+++ b/arch/x86/lib/clear_page_64.S
25028@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
25029 movl $4096/8,%ecx
25030 xorl %eax,%eax
25031 rep stosq
25032+ pax_force_retaddr
25033 ret
25034 CFI_ENDPROC
25035 ENDPROC(clear_page_c)
25036@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
25037 movl $4096,%ecx
25038 xorl %eax,%eax
25039 rep stosb
25040+ pax_force_retaddr
25041 ret
25042 CFI_ENDPROC
25043 ENDPROC(clear_page_c_e)
25044@@ -43,6 +45,7 @@ ENTRY(clear_page)
25045 leaq 64(%rdi),%rdi
25046 jnz .Lloop
25047 nop
25048+ pax_force_retaddr
25049 ret
25050 CFI_ENDPROC
25051 .Lclear_page_end:
25052@@ -58,7 +61,7 @@ ENDPROC(clear_page)
25053
25054 #include <asm/cpufeature.h>
25055
25056- .section .altinstr_replacement,"ax"
25057+ .section .altinstr_replacement,"a"
25058 1: .byte 0xeb /* jmp <disp8> */
25059 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
25060 2: .byte 0xeb /* jmp <disp8> */
25061diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
25062index 1e572c5..2a162cd 100644
25063--- a/arch/x86/lib/cmpxchg16b_emu.S
25064+++ b/arch/x86/lib/cmpxchg16b_emu.S
25065@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
25066
25067 popf
25068 mov $1, %al
25069+ pax_force_retaddr
25070 ret
25071
25072 not_same:
25073 popf
25074 xor %al,%al
25075+ pax_force_retaddr
25076 ret
25077
25078 CFI_ENDPROC
25079diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
25080index 176cca6..1166c50 100644
25081--- a/arch/x86/lib/copy_page_64.S
25082+++ b/arch/x86/lib/copy_page_64.S
25083@@ -9,6 +9,7 @@ copy_page_rep:
25084 CFI_STARTPROC
25085 movl $4096/8, %ecx
25086 rep movsq
25087+ pax_force_retaddr
25088 ret
25089 CFI_ENDPROC
25090 ENDPROC(copy_page_rep)
25091@@ -20,12 +21,14 @@ ENDPROC(copy_page_rep)
25092
25093 ENTRY(copy_page)
25094 CFI_STARTPROC
25095- subq $2*8, %rsp
25096- CFI_ADJUST_CFA_OFFSET 2*8
25097+ subq $3*8, %rsp
25098+ CFI_ADJUST_CFA_OFFSET 3*8
25099 movq %rbx, (%rsp)
25100 CFI_REL_OFFSET rbx, 0
25101 movq %r12, 1*8(%rsp)
25102 CFI_REL_OFFSET r12, 1*8
25103+ movq %r13, 2*8(%rsp)
25104+ CFI_REL_OFFSET r13, 2*8
25105
25106 movl $(4096/64)-5, %ecx
25107 .p2align 4
25108@@ -36,7 +39,7 @@ ENTRY(copy_page)
25109 movq 0x8*2(%rsi), %rdx
25110 movq 0x8*3(%rsi), %r8
25111 movq 0x8*4(%rsi), %r9
25112- movq 0x8*5(%rsi), %r10
25113+ movq 0x8*5(%rsi), %r13
25114 movq 0x8*6(%rsi), %r11
25115 movq 0x8*7(%rsi), %r12
25116
25117@@ -47,7 +50,7 @@ ENTRY(copy_page)
25118 movq %rdx, 0x8*2(%rdi)
25119 movq %r8, 0x8*3(%rdi)
25120 movq %r9, 0x8*4(%rdi)
25121- movq %r10, 0x8*5(%rdi)
25122+ movq %r13, 0x8*5(%rdi)
25123 movq %r11, 0x8*6(%rdi)
25124 movq %r12, 0x8*7(%rdi)
25125
25126@@ -66,7 +69,7 @@ ENTRY(copy_page)
25127 movq 0x8*2(%rsi), %rdx
25128 movq 0x8*3(%rsi), %r8
25129 movq 0x8*4(%rsi), %r9
25130- movq 0x8*5(%rsi), %r10
25131+ movq 0x8*5(%rsi), %r13
25132 movq 0x8*6(%rsi), %r11
25133 movq 0x8*7(%rsi), %r12
25134
25135@@ -75,7 +78,7 @@ ENTRY(copy_page)
25136 movq %rdx, 0x8*2(%rdi)
25137 movq %r8, 0x8*3(%rdi)
25138 movq %r9, 0x8*4(%rdi)
25139- movq %r10, 0x8*5(%rdi)
25140+ movq %r13, 0x8*5(%rdi)
25141 movq %r11, 0x8*6(%rdi)
25142 movq %r12, 0x8*7(%rdi)
25143
25144@@ -87,8 +90,11 @@ ENTRY(copy_page)
25145 CFI_RESTORE rbx
25146 movq 1*8(%rsp), %r12
25147 CFI_RESTORE r12
25148- addq $2*8, %rsp
25149- CFI_ADJUST_CFA_OFFSET -2*8
25150+ movq 2*8(%rsp), %r13
25151+ CFI_RESTORE r13
25152+ addq $3*8, %rsp
25153+ CFI_ADJUST_CFA_OFFSET -3*8
25154+ pax_force_retaddr
25155 ret
25156 .Lcopy_page_end:
25157 CFI_ENDPROC
25158@@ -99,7 +105,7 @@ ENDPROC(copy_page)
25159
25160 #include <asm/cpufeature.h>
25161
25162- .section .altinstr_replacement,"ax"
25163+ .section .altinstr_replacement,"a"
25164 1: .byte 0xeb /* jmp <disp8> */
25165 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
25166 2:
25167diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
25168index a30ca15..d25fab6 100644
25169--- a/arch/x86/lib/copy_user_64.S
25170+++ b/arch/x86/lib/copy_user_64.S
25171@@ -18,6 +18,7 @@
25172 #include <asm/alternative-asm.h>
25173 #include <asm/asm.h>
25174 #include <asm/smap.h>
25175+#include <asm/pgtable.h>
25176
25177 /*
25178 * By placing feature2 after feature1 in altinstructions section, we logically
25179@@ -31,7 +32,7 @@
25180 .byte 0xe9 /* 32bit jump */
25181 .long \orig-1f /* by default jump to orig */
25182 1:
25183- .section .altinstr_replacement,"ax"
25184+ .section .altinstr_replacement,"a"
25185 2: .byte 0xe9 /* near jump with 32bit immediate */
25186 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
25187 3: .byte 0xe9 /* near jump with 32bit immediate */
25188@@ -70,47 +71,20 @@
25189 #endif
25190 .endm
25191
25192-/* Standard copy_to_user with segment limit checking */
25193-ENTRY(_copy_to_user)
25194- CFI_STARTPROC
25195- GET_THREAD_INFO(%rax)
25196- movq %rdi,%rcx
25197- addq %rdx,%rcx
25198- jc bad_to_user
25199- cmpq TI_addr_limit(%rax),%rcx
25200- ja bad_to_user
25201- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
25202- copy_user_generic_unrolled,copy_user_generic_string, \
25203- copy_user_enhanced_fast_string
25204- CFI_ENDPROC
25205-ENDPROC(_copy_to_user)
25206-
25207-/* Standard copy_from_user with segment limit checking */
25208-ENTRY(_copy_from_user)
25209- CFI_STARTPROC
25210- GET_THREAD_INFO(%rax)
25211- movq %rsi,%rcx
25212- addq %rdx,%rcx
25213- jc bad_from_user
25214- cmpq TI_addr_limit(%rax),%rcx
25215- ja bad_from_user
25216- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
25217- copy_user_generic_unrolled,copy_user_generic_string, \
25218- copy_user_enhanced_fast_string
25219- CFI_ENDPROC
25220-ENDPROC(_copy_from_user)
25221-
25222 .section .fixup,"ax"
25223 /* must zero dest */
25224 ENTRY(bad_from_user)
25225 bad_from_user:
25226 CFI_STARTPROC
25227+ testl %edx,%edx
25228+ js bad_to_user
25229 movl %edx,%ecx
25230 xorl %eax,%eax
25231 rep
25232 stosb
25233 bad_to_user:
25234 movl %edx,%eax
25235+ pax_force_retaddr
25236 ret
25237 CFI_ENDPROC
25238 ENDPROC(bad_from_user)
25239@@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
25240 jz 17f
25241 1: movq (%rsi),%r8
25242 2: movq 1*8(%rsi),%r9
25243-3: movq 2*8(%rsi),%r10
25244+3: movq 2*8(%rsi),%rax
25245 4: movq 3*8(%rsi),%r11
25246 5: movq %r8,(%rdi)
25247 6: movq %r9,1*8(%rdi)
25248-7: movq %r10,2*8(%rdi)
25249+7: movq %rax,2*8(%rdi)
25250 8: movq %r11,3*8(%rdi)
25251 9: movq 4*8(%rsi),%r8
25252 10: movq 5*8(%rsi),%r9
25253-11: movq 6*8(%rsi),%r10
25254+11: movq 6*8(%rsi),%rax
25255 12: movq 7*8(%rsi),%r11
25256 13: movq %r8,4*8(%rdi)
25257 14: movq %r9,5*8(%rdi)
25258-15: movq %r10,6*8(%rdi)
25259+15: movq %rax,6*8(%rdi)
25260 16: movq %r11,7*8(%rdi)
25261 leaq 64(%rsi),%rsi
25262 leaq 64(%rdi),%rdi
25263@@ -180,6 +154,7 @@ ENTRY(copy_user_generic_unrolled)
25264 jnz 21b
25265 23: xor %eax,%eax
25266 ASM_CLAC
25267+ pax_force_retaddr
25268 ret
25269
25270 .section .fixup,"ax"
25271@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
25272 movsb
25273 4: xorl %eax,%eax
25274 ASM_CLAC
25275+ pax_force_retaddr
25276 ret
25277
25278 .section .fixup,"ax"
25279@@ -286,6 +262,7 @@ ENTRY(copy_user_enhanced_fast_string)
25280 movsb
25281 2: xorl %eax,%eax
25282 ASM_CLAC
25283+ pax_force_retaddr
25284 ret
25285
25286 .section .fixup,"ax"
25287diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
25288index 6a4f43c..f08b4a2 100644
25289--- a/arch/x86/lib/copy_user_nocache_64.S
25290+++ b/arch/x86/lib/copy_user_nocache_64.S
25291@@ -8,6 +8,7 @@
25292
25293 #include <linux/linkage.h>
25294 #include <asm/dwarf2.h>
25295+#include <asm/alternative-asm.h>
25296
25297 #define FIX_ALIGNMENT 1
25298
25299@@ -16,6 +17,7 @@
25300 #include <asm/thread_info.h>
25301 #include <asm/asm.h>
25302 #include <asm/smap.h>
25303+#include <asm/pgtable.h>
25304
25305 .macro ALIGN_DESTINATION
25306 #ifdef FIX_ALIGNMENT
25307@@ -49,6 +51,15 @@
25308 */
25309 ENTRY(__copy_user_nocache)
25310 CFI_STARTPROC
25311+
25312+#ifdef CONFIG_PAX_MEMORY_UDEREF
25313+ mov pax_user_shadow_base,%rcx
25314+ cmp %rcx,%rsi
25315+ jae 1f
25316+ add %rcx,%rsi
25317+1:
25318+#endif
25319+
25320 ASM_STAC
25321 cmpl $8,%edx
25322 jb 20f /* less then 8 bytes, go to byte copy loop */
25323@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
25324 jz 17f
25325 1: movq (%rsi),%r8
25326 2: movq 1*8(%rsi),%r9
25327-3: movq 2*8(%rsi),%r10
25328+3: movq 2*8(%rsi),%rax
25329 4: movq 3*8(%rsi),%r11
25330 5: movnti %r8,(%rdi)
25331 6: movnti %r9,1*8(%rdi)
25332-7: movnti %r10,2*8(%rdi)
25333+7: movnti %rax,2*8(%rdi)
25334 8: movnti %r11,3*8(%rdi)
25335 9: movq 4*8(%rsi),%r8
25336 10: movq 5*8(%rsi),%r9
25337-11: movq 6*8(%rsi),%r10
25338+11: movq 6*8(%rsi),%rax
25339 12: movq 7*8(%rsi),%r11
25340 13: movnti %r8,4*8(%rdi)
25341 14: movnti %r9,5*8(%rdi)
25342-15: movnti %r10,6*8(%rdi)
25343+15: movnti %rax,6*8(%rdi)
25344 16: movnti %r11,7*8(%rdi)
25345 leaq 64(%rsi),%rsi
25346 leaq 64(%rdi),%rdi
25347@@ -99,6 +110,7 @@ ENTRY(__copy_user_nocache)
25348 23: xorl %eax,%eax
25349 ASM_CLAC
25350 sfence
25351+ pax_force_retaddr
25352 ret
25353
25354 .section .fixup,"ax"
25355diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
25356index 2419d5f..953ee51 100644
25357--- a/arch/x86/lib/csum-copy_64.S
25358+++ b/arch/x86/lib/csum-copy_64.S
25359@@ -9,6 +9,7 @@
25360 #include <asm/dwarf2.h>
25361 #include <asm/errno.h>
25362 #include <asm/asm.h>
25363+#include <asm/alternative-asm.h>
25364
25365 /*
25366 * Checksum copy with exception handling.
25367@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
25368 CFI_RESTORE rbp
25369 addq $7*8, %rsp
25370 CFI_ADJUST_CFA_OFFSET -7*8
25371+ pax_force_retaddr 0, 1
25372 ret
25373 CFI_RESTORE_STATE
25374
25375diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
25376index 25b7ae8..169fafc 100644
25377--- a/arch/x86/lib/csum-wrappers_64.c
25378+++ b/arch/x86/lib/csum-wrappers_64.c
25379@@ -52,7 +52,7 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
25380 len -= 2;
25381 }
25382 }
25383- isum = csum_partial_copy_generic((__force const void *)src,
25384+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
25385 dst, len, isum, errp, NULL);
25386 if (unlikely(*errp))
25387 goto out_err;
25388@@ -105,7 +105,7 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
25389 }
25390
25391 *errp = 0;
25392- return csum_partial_copy_generic(src, (void __force *)dst,
25393+ return csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
25394 len, isum, NULL, errp);
25395 }
25396 EXPORT_SYMBOL(csum_partial_copy_to_user);
25397diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
25398index a451235..79fb5cf 100644
25399--- a/arch/x86/lib/getuser.S
25400+++ b/arch/x86/lib/getuser.S
25401@@ -33,17 +33,40 @@
25402 #include <asm/thread_info.h>
25403 #include <asm/asm.h>
25404 #include <asm/smap.h>
25405+#include <asm/segment.h>
25406+#include <asm/pgtable.h>
25407+#include <asm/alternative-asm.h>
25408+
25409+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
25410+#define __copyuser_seg gs;
25411+#else
25412+#define __copyuser_seg
25413+#endif
25414
25415 .text
25416 ENTRY(__get_user_1)
25417 CFI_STARTPROC
25418+
25419+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
25420 GET_THREAD_INFO(%_ASM_DX)
25421 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
25422 jae bad_get_user
25423 ASM_STAC
25424-1: movzbl (%_ASM_AX),%edx
25425+
25426+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25427+ mov pax_user_shadow_base,%_ASM_DX
25428+ cmp %_ASM_DX,%_ASM_AX
25429+ jae 1234f
25430+ add %_ASM_DX,%_ASM_AX
25431+1234:
25432+#endif
25433+
25434+#endif
25435+
25436+1: __copyuser_seg movzbl (%_ASM_AX),%edx
25437 xor %eax,%eax
25438 ASM_CLAC
25439+ pax_force_retaddr
25440 ret
25441 CFI_ENDPROC
25442 ENDPROC(__get_user_1)
25443@@ -51,14 +74,28 @@ ENDPROC(__get_user_1)
25444 ENTRY(__get_user_2)
25445 CFI_STARTPROC
25446 add $1,%_ASM_AX
25447+
25448+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
25449 jc bad_get_user
25450 GET_THREAD_INFO(%_ASM_DX)
25451 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
25452 jae bad_get_user
25453 ASM_STAC
25454-2: movzwl -1(%_ASM_AX),%edx
25455+
25456+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25457+ mov pax_user_shadow_base,%_ASM_DX
25458+ cmp %_ASM_DX,%_ASM_AX
25459+ jae 1234f
25460+ add %_ASM_DX,%_ASM_AX
25461+1234:
25462+#endif
25463+
25464+#endif
25465+
25466+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
25467 xor %eax,%eax
25468 ASM_CLAC
25469+ pax_force_retaddr
25470 ret
25471 CFI_ENDPROC
25472 ENDPROC(__get_user_2)
25473@@ -66,14 +103,28 @@ ENDPROC(__get_user_2)
25474 ENTRY(__get_user_4)
25475 CFI_STARTPROC
25476 add $3,%_ASM_AX
25477+
25478+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
25479 jc bad_get_user
25480 GET_THREAD_INFO(%_ASM_DX)
25481 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
25482 jae bad_get_user
25483 ASM_STAC
25484-3: movl -3(%_ASM_AX),%edx
25485+
25486+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25487+ mov pax_user_shadow_base,%_ASM_DX
25488+ cmp %_ASM_DX,%_ASM_AX
25489+ jae 1234f
25490+ add %_ASM_DX,%_ASM_AX
25491+1234:
25492+#endif
25493+
25494+#endif
25495+
25496+3: __copyuser_seg movl -3(%_ASM_AX),%edx
25497 xor %eax,%eax
25498 ASM_CLAC
25499+ pax_force_retaddr
25500 ret
25501 CFI_ENDPROC
25502 ENDPROC(__get_user_4)
25503@@ -86,10 +137,20 @@ ENTRY(__get_user_8)
25504 GET_THREAD_INFO(%_ASM_DX)
25505 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
25506 jae bad_get_user
25507+
25508+#ifdef CONFIG_PAX_MEMORY_UDEREF
25509+ mov pax_user_shadow_base,%_ASM_DX
25510+ cmp %_ASM_DX,%_ASM_AX
25511+ jae 1234f
25512+ add %_ASM_DX,%_ASM_AX
25513+1234:
25514+#endif
25515+
25516 ASM_STAC
25517 4: movq -7(%_ASM_AX),%rdx
25518 xor %eax,%eax
25519 ASM_CLAC
25520+ pax_force_retaddr
25521 ret
25522 #else
25523 add $7,%_ASM_AX
25524@@ -102,6 +163,7 @@ ENTRY(__get_user_8)
25525 5: movl -3(%_ASM_AX),%ecx
25526 xor %eax,%eax
25527 ASM_CLAC
25528+ pax_force_retaddr
25529 ret
25530 #endif
25531 CFI_ENDPROC
25532@@ -113,6 +175,7 @@ bad_get_user:
25533 xor %edx,%edx
25534 mov $(-EFAULT),%_ASM_AX
25535 ASM_CLAC
25536+ pax_force_retaddr
25537 ret
25538 CFI_ENDPROC
25539 END(bad_get_user)
25540@@ -124,6 +187,7 @@ bad_get_user_8:
25541 xor %ecx,%ecx
25542 mov $(-EFAULT),%_ASM_AX
25543 ASM_CLAC
25544+ pax_force_retaddr
25545 ret
25546 CFI_ENDPROC
25547 END(bad_get_user_8)
25548diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
25549index 54fcffe..7be149e 100644
25550--- a/arch/x86/lib/insn.c
25551+++ b/arch/x86/lib/insn.c
25552@@ -20,8 +20,10 @@
25553
25554 #ifdef __KERNEL__
25555 #include <linux/string.h>
25556+#include <asm/pgtable_types.h>
25557 #else
25558 #include <string.h>
25559+#define ktla_ktva(addr) addr
25560 #endif
25561 #include <asm/inat.h>
25562 #include <asm/insn.h>
25563@@ -53,8 +55,8 @@
25564 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
25565 {
25566 memset(insn, 0, sizeof(*insn));
25567- insn->kaddr = kaddr;
25568- insn->next_byte = kaddr;
25569+ insn->kaddr = ktla_ktva(kaddr);
25570+ insn->next_byte = ktla_ktva(kaddr);
25571 insn->x86_64 = x86_64 ? 1 : 0;
25572 insn->opnd_bytes = 4;
25573 if (x86_64)
25574diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
25575index 05a95e7..326f2fa 100644
25576--- a/arch/x86/lib/iomap_copy_64.S
25577+++ b/arch/x86/lib/iomap_copy_64.S
25578@@ -17,6 +17,7 @@
25579
25580 #include <linux/linkage.h>
25581 #include <asm/dwarf2.h>
25582+#include <asm/alternative-asm.h>
25583
25584 /*
25585 * override generic version in lib/iomap_copy.c
25586@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
25587 CFI_STARTPROC
25588 movl %edx,%ecx
25589 rep movsd
25590+ pax_force_retaddr
25591 ret
25592 CFI_ENDPROC
25593 ENDPROC(__iowrite32_copy)
25594diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
25595index 56313a3..aa84a79 100644
25596--- a/arch/x86/lib/memcpy_64.S
25597+++ b/arch/x86/lib/memcpy_64.S
25598@@ -33,6 +33,7 @@
25599 rep movsq
25600 movl %edx, %ecx
25601 rep movsb
25602+ pax_force_retaddr
25603 ret
25604 .Lmemcpy_e:
25605 .previous
25606@@ -49,6 +50,7 @@
25607 movq %rdi, %rax
25608 movq %rdx, %rcx
25609 rep movsb
25610+ pax_force_retaddr
25611 ret
25612 .Lmemcpy_e_e:
25613 .previous
25614@@ -76,13 +78,13 @@ ENTRY(memcpy)
25615 */
25616 movq 0*8(%rsi), %r8
25617 movq 1*8(%rsi), %r9
25618- movq 2*8(%rsi), %r10
25619+ movq 2*8(%rsi), %rcx
25620 movq 3*8(%rsi), %r11
25621 leaq 4*8(%rsi), %rsi
25622
25623 movq %r8, 0*8(%rdi)
25624 movq %r9, 1*8(%rdi)
25625- movq %r10, 2*8(%rdi)
25626+ movq %rcx, 2*8(%rdi)
25627 movq %r11, 3*8(%rdi)
25628 leaq 4*8(%rdi), %rdi
25629 jae .Lcopy_forward_loop
25630@@ -105,12 +107,12 @@ ENTRY(memcpy)
25631 subq $0x20, %rdx
25632 movq -1*8(%rsi), %r8
25633 movq -2*8(%rsi), %r9
25634- movq -3*8(%rsi), %r10
25635+ movq -3*8(%rsi), %rcx
25636 movq -4*8(%rsi), %r11
25637 leaq -4*8(%rsi), %rsi
25638 movq %r8, -1*8(%rdi)
25639 movq %r9, -2*8(%rdi)
25640- movq %r10, -3*8(%rdi)
25641+ movq %rcx, -3*8(%rdi)
25642 movq %r11, -4*8(%rdi)
25643 leaq -4*8(%rdi), %rdi
25644 jae .Lcopy_backward_loop
25645@@ -130,12 +132,13 @@ ENTRY(memcpy)
25646 */
25647 movq 0*8(%rsi), %r8
25648 movq 1*8(%rsi), %r9
25649- movq -2*8(%rsi, %rdx), %r10
25650+ movq -2*8(%rsi, %rdx), %rcx
25651 movq -1*8(%rsi, %rdx), %r11
25652 movq %r8, 0*8(%rdi)
25653 movq %r9, 1*8(%rdi)
25654- movq %r10, -2*8(%rdi, %rdx)
25655+ movq %rcx, -2*8(%rdi, %rdx)
25656 movq %r11, -1*8(%rdi, %rdx)
25657+ pax_force_retaddr
25658 retq
25659 .p2align 4
25660 .Lless_16bytes:
25661@@ -148,6 +151,7 @@ ENTRY(memcpy)
25662 movq -1*8(%rsi, %rdx), %r9
25663 movq %r8, 0*8(%rdi)
25664 movq %r9, -1*8(%rdi, %rdx)
25665+ pax_force_retaddr
25666 retq
25667 .p2align 4
25668 .Lless_8bytes:
25669@@ -161,6 +165,7 @@ ENTRY(memcpy)
25670 movl -4(%rsi, %rdx), %r8d
25671 movl %ecx, (%rdi)
25672 movl %r8d, -4(%rdi, %rdx)
25673+ pax_force_retaddr
25674 retq
25675 .p2align 4
25676 .Lless_3bytes:
25677@@ -179,6 +184,7 @@ ENTRY(memcpy)
25678 movb %cl, (%rdi)
25679
25680 .Lend:
25681+ pax_force_retaddr
25682 retq
25683 CFI_ENDPROC
25684 ENDPROC(memcpy)
25685diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
25686index 65268a6..c9518d1 100644
25687--- a/arch/x86/lib/memmove_64.S
25688+++ b/arch/x86/lib/memmove_64.S
25689@@ -61,13 +61,13 @@ ENTRY(memmove)
25690 5:
25691 sub $0x20, %rdx
25692 movq 0*8(%rsi), %r11
25693- movq 1*8(%rsi), %r10
25694+ movq 1*8(%rsi), %rcx
25695 movq 2*8(%rsi), %r9
25696 movq 3*8(%rsi), %r8
25697 leaq 4*8(%rsi), %rsi
25698
25699 movq %r11, 0*8(%rdi)
25700- movq %r10, 1*8(%rdi)
25701+ movq %rcx, 1*8(%rdi)
25702 movq %r9, 2*8(%rdi)
25703 movq %r8, 3*8(%rdi)
25704 leaq 4*8(%rdi), %rdi
25705@@ -81,10 +81,10 @@ ENTRY(memmove)
25706 4:
25707 movq %rdx, %rcx
25708 movq -8(%rsi, %rdx), %r11
25709- lea -8(%rdi, %rdx), %r10
25710+ lea -8(%rdi, %rdx), %r9
25711 shrq $3, %rcx
25712 rep movsq
25713- movq %r11, (%r10)
25714+ movq %r11, (%r9)
25715 jmp 13f
25716 .Lmemmove_end_forward:
25717
25718@@ -95,14 +95,14 @@ ENTRY(memmove)
25719 7:
25720 movq %rdx, %rcx
25721 movq (%rsi), %r11
25722- movq %rdi, %r10
25723+ movq %rdi, %r9
25724 leaq -8(%rsi, %rdx), %rsi
25725 leaq -8(%rdi, %rdx), %rdi
25726 shrq $3, %rcx
25727 std
25728 rep movsq
25729 cld
25730- movq %r11, (%r10)
25731+ movq %r11, (%r9)
25732 jmp 13f
25733
25734 /*
25735@@ -127,13 +127,13 @@ ENTRY(memmove)
25736 8:
25737 subq $0x20, %rdx
25738 movq -1*8(%rsi), %r11
25739- movq -2*8(%rsi), %r10
25740+ movq -2*8(%rsi), %rcx
25741 movq -3*8(%rsi), %r9
25742 movq -4*8(%rsi), %r8
25743 leaq -4*8(%rsi), %rsi
25744
25745 movq %r11, -1*8(%rdi)
25746- movq %r10, -2*8(%rdi)
25747+ movq %rcx, -2*8(%rdi)
25748 movq %r9, -3*8(%rdi)
25749 movq %r8, -4*8(%rdi)
25750 leaq -4*8(%rdi), %rdi
25751@@ -151,11 +151,11 @@ ENTRY(memmove)
25752 * Move data from 16 bytes to 31 bytes.
25753 */
25754 movq 0*8(%rsi), %r11
25755- movq 1*8(%rsi), %r10
25756+ movq 1*8(%rsi), %rcx
25757 movq -2*8(%rsi, %rdx), %r9
25758 movq -1*8(%rsi, %rdx), %r8
25759 movq %r11, 0*8(%rdi)
25760- movq %r10, 1*8(%rdi)
25761+ movq %rcx, 1*8(%rdi)
25762 movq %r9, -2*8(%rdi, %rdx)
25763 movq %r8, -1*8(%rdi, %rdx)
25764 jmp 13f
25765@@ -167,9 +167,9 @@ ENTRY(memmove)
25766 * Move data from 8 bytes to 15 bytes.
25767 */
25768 movq 0*8(%rsi), %r11
25769- movq -1*8(%rsi, %rdx), %r10
25770+ movq -1*8(%rsi, %rdx), %r9
25771 movq %r11, 0*8(%rdi)
25772- movq %r10, -1*8(%rdi, %rdx)
25773+ movq %r9, -1*8(%rdi, %rdx)
25774 jmp 13f
25775 10:
25776 cmpq $4, %rdx
25777@@ -178,9 +178,9 @@ ENTRY(memmove)
25778 * Move data from 4 bytes to 7 bytes.
25779 */
25780 movl (%rsi), %r11d
25781- movl -4(%rsi, %rdx), %r10d
25782+ movl -4(%rsi, %rdx), %r9d
25783 movl %r11d, (%rdi)
25784- movl %r10d, -4(%rdi, %rdx)
25785+ movl %r9d, -4(%rdi, %rdx)
25786 jmp 13f
25787 11:
25788 cmp $2, %rdx
25789@@ -189,9 +189,9 @@ ENTRY(memmove)
25790 * Move data from 2 bytes to 3 bytes.
25791 */
25792 movw (%rsi), %r11w
25793- movw -2(%rsi, %rdx), %r10w
25794+ movw -2(%rsi, %rdx), %r9w
25795 movw %r11w, (%rdi)
25796- movw %r10w, -2(%rdi, %rdx)
25797+ movw %r9w, -2(%rdi, %rdx)
25798 jmp 13f
25799 12:
25800 cmp $1, %rdx
25801@@ -202,6 +202,7 @@ ENTRY(memmove)
25802 movb (%rsi), %r11b
25803 movb %r11b, (%rdi)
25804 13:
25805+ pax_force_retaddr
25806 retq
25807 CFI_ENDPROC
25808
25809@@ -210,6 +211,7 @@ ENTRY(memmove)
25810 /* Forward moving data. */
25811 movq %rdx, %rcx
25812 rep movsb
25813+ pax_force_retaddr
25814 retq
25815 .Lmemmove_end_forward_efs:
25816 .previous
25817diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
25818index 2dcb380..963660a 100644
25819--- a/arch/x86/lib/memset_64.S
25820+++ b/arch/x86/lib/memset_64.S
25821@@ -30,6 +30,7 @@
25822 movl %edx,%ecx
25823 rep stosb
25824 movq %r9,%rax
25825+ pax_force_retaddr
25826 ret
25827 .Lmemset_e:
25828 .previous
25829@@ -52,6 +53,7 @@
25830 movq %rdx,%rcx
25831 rep stosb
25832 movq %r9,%rax
25833+ pax_force_retaddr
25834 ret
25835 .Lmemset_e_e:
25836 .previous
25837@@ -59,7 +61,7 @@
25838 ENTRY(memset)
25839 ENTRY(__memset)
25840 CFI_STARTPROC
25841- movq %rdi,%r10
25842+ movq %rdi,%r11
25843
25844 /* expand byte value */
25845 movzbl %sil,%ecx
25846@@ -117,7 +119,8 @@ ENTRY(__memset)
25847 jnz .Lloop_1
25848
25849 .Lende:
25850- movq %r10,%rax
25851+ movq %r11,%rax
25852+ pax_force_retaddr
25853 ret
25854
25855 CFI_RESTORE_STATE
25856diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
25857index c9f2d9b..e7fd2c0 100644
25858--- a/arch/x86/lib/mmx_32.c
25859+++ b/arch/x86/lib/mmx_32.c
25860@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
25861 {
25862 void *p;
25863 int i;
25864+ unsigned long cr0;
25865
25866 if (unlikely(in_interrupt()))
25867 return __memcpy(to, from, len);
25868@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
25869 kernel_fpu_begin();
25870
25871 __asm__ __volatile__ (
25872- "1: prefetch (%0)\n" /* This set is 28 bytes */
25873- " prefetch 64(%0)\n"
25874- " prefetch 128(%0)\n"
25875- " prefetch 192(%0)\n"
25876- " prefetch 256(%0)\n"
25877+ "1: prefetch (%1)\n" /* This set is 28 bytes */
25878+ " prefetch 64(%1)\n"
25879+ " prefetch 128(%1)\n"
25880+ " prefetch 192(%1)\n"
25881+ " prefetch 256(%1)\n"
25882 "2: \n"
25883 ".section .fixup, \"ax\"\n"
25884- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
25885+ "3: \n"
25886+
25887+#ifdef CONFIG_PAX_KERNEXEC
25888+ " movl %%cr0, %0\n"
25889+ " movl %0, %%eax\n"
25890+ " andl $0xFFFEFFFF, %%eax\n"
25891+ " movl %%eax, %%cr0\n"
25892+#endif
25893+
25894+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
25895+
25896+#ifdef CONFIG_PAX_KERNEXEC
25897+ " movl %0, %%cr0\n"
25898+#endif
25899+
25900 " jmp 2b\n"
25901 ".previous\n"
25902 _ASM_EXTABLE(1b, 3b)
25903- : : "r" (from));
25904+ : "=&r" (cr0) : "r" (from) : "ax");
25905
25906 for ( ; i > 5; i--) {
25907 __asm__ __volatile__ (
25908- "1: prefetch 320(%0)\n"
25909- "2: movq (%0), %%mm0\n"
25910- " movq 8(%0), %%mm1\n"
25911- " movq 16(%0), %%mm2\n"
25912- " movq 24(%0), %%mm3\n"
25913- " movq %%mm0, (%1)\n"
25914- " movq %%mm1, 8(%1)\n"
25915- " movq %%mm2, 16(%1)\n"
25916- " movq %%mm3, 24(%1)\n"
25917- " movq 32(%0), %%mm0\n"
25918- " movq 40(%0), %%mm1\n"
25919- " movq 48(%0), %%mm2\n"
25920- " movq 56(%0), %%mm3\n"
25921- " movq %%mm0, 32(%1)\n"
25922- " movq %%mm1, 40(%1)\n"
25923- " movq %%mm2, 48(%1)\n"
25924- " movq %%mm3, 56(%1)\n"
25925+ "1: prefetch 320(%1)\n"
25926+ "2: movq (%1), %%mm0\n"
25927+ " movq 8(%1), %%mm1\n"
25928+ " movq 16(%1), %%mm2\n"
25929+ " movq 24(%1), %%mm3\n"
25930+ " movq %%mm0, (%2)\n"
25931+ " movq %%mm1, 8(%2)\n"
25932+ " movq %%mm2, 16(%2)\n"
25933+ " movq %%mm3, 24(%2)\n"
25934+ " movq 32(%1), %%mm0\n"
25935+ " movq 40(%1), %%mm1\n"
25936+ " movq 48(%1), %%mm2\n"
25937+ " movq 56(%1), %%mm3\n"
25938+ " movq %%mm0, 32(%2)\n"
25939+ " movq %%mm1, 40(%2)\n"
25940+ " movq %%mm2, 48(%2)\n"
25941+ " movq %%mm3, 56(%2)\n"
25942 ".section .fixup, \"ax\"\n"
25943- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
25944+ "3:\n"
25945+
25946+#ifdef CONFIG_PAX_KERNEXEC
25947+ " movl %%cr0, %0\n"
25948+ " movl %0, %%eax\n"
25949+ " andl $0xFFFEFFFF, %%eax\n"
25950+ " movl %%eax, %%cr0\n"
25951+#endif
25952+
25953+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
25954+
25955+#ifdef CONFIG_PAX_KERNEXEC
25956+ " movl %0, %%cr0\n"
25957+#endif
25958+
25959 " jmp 2b\n"
25960 ".previous\n"
25961 _ASM_EXTABLE(1b, 3b)
25962- : : "r" (from), "r" (to) : "memory");
25963+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
25964
25965 from += 64;
25966 to += 64;
25967@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
25968 static void fast_copy_page(void *to, void *from)
25969 {
25970 int i;
25971+ unsigned long cr0;
25972
25973 kernel_fpu_begin();
25974
25975@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
25976 * but that is for later. -AV
25977 */
25978 __asm__ __volatile__(
25979- "1: prefetch (%0)\n"
25980- " prefetch 64(%0)\n"
25981- " prefetch 128(%0)\n"
25982- " prefetch 192(%0)\n"
25983- " prefetch 256(%0)\n"
25984+ "1: prefetch (%1)\n"
25985+ " prefetch 64(%1)\n"
25986+ " prefetch 128(%1)\n"
25987+ " prefetch 192(%1)\n"
25988+ " prefetch 256(%1)\n"
25989 "2: \n"
25990 ".section .fixup, \"ax\"\n"
25991- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
25992+ "3: \n"
25993+
25994+#ifdef CONFIG_PAX_KERNEXEC
25995+ " movl %%cr0, %0\n"
25996+ " movl %0, %%eax\n"
25997+ " andl $0xFFFEFFFF, %%eax\n"
25998+ " movl %%eax, %%cr0\n"
25999+#endif
26000+
26001+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
26002+
26003+#ifdef CONFIG_PAX_KERNEXEC
26004+ " movl %0, %%cr0\n"
26005+#endif
26006+
26007 " jmp 2b\n"
26008 ".previous\n"
26009- _ASM_EXTABLE(1b, 3b) : : "r" (from));
26010+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
26011
26012 for (i = 0; i < (4096-320)/64; i++) {
26013 __asm__ __volatile__ (
26014- "1: prefetch 320(%0)\n"
26015- "2: movq (%0), %%mm0\n"
26016- " movntq %%mm0, (%1)\n"
26017- " movq 8(%0), %%mm1\n"
26018- " movntq %%mm1, 8(%1)\n"
26019- " movq 16(%0), %%mm2\n"
26020- " movntq %%mm2, 16(%1)\n"
26021- " movq 24(%0), %%mm3\n"
26022- " movntq %%mm3, 24(%1)\n"
26023- " movq 32(%0), %%mm4\n"
26024- " movntq %%mm4, 32(%1)\n"
26025- " movq 40(%0), %%mm5\n"
26026- " movntq %%mm5, 40(%1)\n"
26027- " movq 48(%0), %%mm6\n"
26028- " movntq %%mm6, 48(%1)\n"
26029- " movq 56(%0), %%mm7\n"
26030- " movntq %%mm7, 56(%1)\n"
26031+ "1: prefetch 320(%1)\n"
26032+ "2: movq (%1), %%mm0\n"
26033+ " movntq %%mm0, (%2)\n"
26034+ " movq 8(%1), %%mm1\n"
26035+ " movntq %%mm1, 8(%2)\n"
26036+ " movq 16(%1), %%mm2\n"
26037+ " movntq %%mm2, 16(%2)\n"
26038+ " movq 24(%1), %%mm3\n"
26039+ " movntq %%mm3, 24(%2)\n"
26040+ " movq 32(%1), %%mm4\n"
26041+ " movntq %%mm4, 32(%2)\n"
26042+ " movq 40(%1), %%mm5\n"
26043+ " movntq %%mm5, 40(%2)\n"
26044+ " movq 48(%1), %%mm6\n"
26045+ " movntq %%mm6, 48(%2)\n"
26046+ " movq 56(%1), %%mm7\n"
26047+ " movntq %%mm7, 56(%2)\n"
26048 ".section .fixup, \"ax\"\n"
26049- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
26050+ "3:\n"
26051+
26052+#ifdef CONFIG_PAX_KERNEXEC
26053+ " movl %%cr0, %0\n"
26054+ " movl %0, %%eax\n"
26055+ " andl $0xFFFEFFFF, %%eax\n"
26056+ " movl %%eax, %%cr0\n"
26057+#endif
26058+
26059+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
26060+
26061+#ifdef CONFIG_PAX_KERNEXEC
26062+ " movl %0, %%cr0\n"
26063+#endif
26064+
26065 " jmp 2b\n"
26066 ".previous\n"
26067- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
26068+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
26069
26070 from += 64;
26071 to += 64;
26072@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
26073 static void fast_copy_page(void *to, void *from)
26074 {
26075 int i;
26076+ unsigned long cr0;
26077
26078 kernel_fpu_begin();
26079
26080 __asm__ __volatile__ (
26081- "1: prefetch (%0)\n"
26082- " prefetch 64(%0)\n"
26083- " prefetch 128(%0)\n"
26084- " prefetch 192(%0)\n"
26085- " prefetch 256(%0)\n"
26086+ "1: prefetch (%1)\n"
26087+ " prefetch 64(%1)\n"
26088+ " prefetch 128(%1)\n"
26089+ " prefetch 192(%1)\n"
26090+ " prefetch 256(%1)\n"
26091 "2: \n"
26092 ".section .fixup, \"ax\"\n"
26093- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
26094+ "3: \n"
26095+
26096+#ifdef CONFIG_PAX_KERNEXEC
26097+ " movl %%cr0, %0\n"
26098+ " movl %0, %%eax\n"
26099+ " andl $0xFFFEFFFF, %%eax\n"
26100+ " movl %%eax, %%cr0\n"
26101+#endif
26102+
26103+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
26104+
26105+#ifdef CONFIG_PAX_KERNEXEC
26106+ " movl %0, %%cr0\n"
26107+#endif
26108+
26109 " jmp 2b\n"
26110 ".previous\n"
26111- _ASM_EXTABLE(1b, 3b) : : "r" (from));
26112+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
26113
26114 for (i = 0; i < 4096/64; i++) {
26115 __asm__ __volatile__ (
26116- "1: prefetch 320(%0)\n"
26117- "2: movq (%0), %%mm0\n"
26118- " movq 8(%0), %%mm1\n"
26119- " movq 16(%0), %%mm2\n"
26120- " movq 24(%0), %%mm3\n"
26121- " movq %%mm0, (%1)\n"
26122- " movq %%mm1, 8(%1)\n"
26123- " movq %%mm2, 16(%1)\n"
26124- " movq %%mm3, 24(%1)\n"
26125- " movq 32(%0), %%mm0\n"
26126- " movq 40(%0), %%mm1\n"
26127- " movq 48(%0), %%mm2\n"
26128- " movq 56(%0), %%mm3\n"
26129- " movq %%mm0, 32(%1)\n"
26130- " movq %%mm1, 40(%1)\n"
26131- " movq %%mm2, 48(%1)\n"
26132- " movq %%mm3, 56(%1)\n"
26133+ "1: prefetch 320(%1)\n"
26134+ "2: movq (%1), %%mm0\n"
26135+ " movq 8(%1), %%mm1\n"
26136+ " movq 16(%1), %%mm2\n"
26137+ " movq 24(%1), %%mm3\n"
26138+ " movq %%mm0, (%2)\n"
26139+ " movq %%mm1, 8(%2)\n"
26140+ " movq %%mm2, 16(%2)\n"
26141+ " movq %%mm3, 24(%2)\n"
26142+ " movq 32(%1), %%mm0\n"
26143+ " movq 40(%1), %%mm1\n"
26144+ " movq 48(%1), %%mm2\n"
26145+ " movq 56(%1), %%mm3\n"
26146+ " movq %%mm0, 32(%2)\n"
26147+ " movq %%mm1, 40(%2)\n"
26148+ " movq %%mm2, 48(%2)\n"
26149+ " movq %%mm3, 56(%2)\n"
26150 ".section .fixup, \"ax\"\n"
26151- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
26152+ "3:\n"
26153+
26154+#ifdef CONFIG_PAX_KERNEXEC
26155+ " movl %%cr0, %0\n"
26156+ " movl %0, %%eax\n"
26157+ " andl $0xFFFEFFFF, %%eax\n"
26158+ " movl %%eax, %%cr0\n"
26159+#endif
26160+
26161+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
26162+
26163+#ifdef CONFIG_PAX_KERNEXEC
26164+ " movl %0, %%cr0\n"
26165+#endif
26166+
26167 " jmp 2b\n"
26168 ".previous\n"
26169 _ASM_EXTABLE(1b, 3b)
26170- : : "r" (from), "r" (to) : "memory");
26171+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
26172
26173 from += 64;
26174 to += 64;
26175diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
26176index f6d13ee..aca5f0b 100644
26177--- a/arch/x86/lib/msr-reg.S
26178+++ b/arch/x86/lib/msr-reg.S
26179@@ -3,6 +3,7 @@
26180 #include <asm/dwarf2.h>
26181 #include <asm/asm.h>
26182 #include <asm/msr.h>
26183+#include <asm/alternative-asm.h>
26184
26185 #ifdef CONFIG_X86_64
26186 /*
26187@@ -16,7 +17,7 @@ ENTRY(\op\()_safe_regs)
26188 CFI_STARTPROC
26189 pushq_cfi %rbx
26190 pushq_cfi %rbp
26191- movq %rdi, %r10 /* Save pointer */
26192+ movq %rdi, %r9 /* Save pointer */
26193 xorl %r11d, %r11d /* Return value */
26194 movl (%rdi), %eax
26195 movl 4(%rdi), %ecx
26196@@ -27,16 +28,17 @@ ENTRY(\op\()_safe_regs)
26197 movl 28(%rdi), %edi
26198 CFI_REMEMBER_STATE
26199 1: \op
26200-2: movl %eax, (%r10)
26201+2: movl %eax, (%r9)
26202 movl %r11d, %eax /* Return value */
26203- movl %ecx, 4(%r10)
26204- movl %edx, 8(%r10)
26205- movl %ebx, 12(%r10)
26206- movl %ebp, 20(%r10)
26207- movl %esi, 24(%r10)
26208- movl %edi, 28(%r10)
26209+ movl %ecx, 4(%r9)
26210+ movl %edx, 8(%r9)
26211+ movl %ebx, 12(%r9)
26212+ movl %ebp, 20(%r9)
26213+ movl %esi, 24(%r9)
26214+ movl %edi, 28(%r9)
26215 popq_cfi %rbp
26216 popq_cfi %rbx
26217+ pax_force_retaddr
26218 ret
26219 3:
26220 CFI_RESTORE_STATE
26221diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
26222index fc6ba17..d4d989d 100644
26223--- a/arch/x86/lib/putuser.S
26224+++ b/arch/x86/lib/putuser.S
26225@@ -16,7 +16,9 @@
26226 #include <asm/errno.h>
26227 #include <asm/asm.h>
26228 #include <asm/smap.h>
26229-
26230+#include <asm/segment.h>
26231+#include <asm/pgtable.h>
26232+#include <asm/alternative-asm.h>
26233
26234 /*
26235 * __put_user_X
26236@@ -30,57 +32,125 @@
26237 * as they get called from within inline assembly.
26238 */
26239
26240-#define ENTER CFI_STARTPROC ; \
26241- GET_THREAD_INFO(%_ASM_BX)
26242-#define EXIT ASM_CLAC ; \
26243- ret ; \
26244+#define ENTER CFI_STARTPROC
26245+#define EXIT ASM_CLAC ; \
26246+ pax_force_retaddr ; \
26247+ ret ; \
26248 CFI_ENDPROC
26249
26250+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26251+#define _DEST %_ASM_CX,%_ASM_BX
26252+#else
26253+#define _DEST %_ASM_CX
26254+#endif
26255+
26256+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
26257+#define __copyuser_seg gs;
26258+#else
26259+#define __copyuser_seg
26260+#endif
26261+
26262 .text
26263 ENTRY(__put_user_1)
26264 ENTER
26265+
26266+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
26267+ GET_THREAD_INFO(%_ASM_BX)
26268 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
26269 jae bad_put_user
26270 ASM_STAC
26271-1: movb %al,(%_ASM_CX)
26272+
26273+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26274+ mov pax_user_shadow_base,%_ASM_BX
26275+ cmp %_ASM_BX,%_ASM_CX
26276+ jb 1234f
26277+ xor %ebx,%ebx
26278+1234:
26279+#endif
26280+
26281+#endif
26282+
26283+1: __copyuser_seg movb %al,(_DEST)
26284 xor %eax,%eax
26285 EXIT
26286 ENDPROC(__put_user_1)
26287
26288 ENTRY(__put_user_2)
26289 ENTER
26290+
26291+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
26292+ GET_THREAD_INFO(%_ASM_BX)
26293 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
26294 sub $1,%_ASM_BX
26295 cmp %_ASM_BX,%_ASM_CX
26296 jae bad_put_user
26297 ASM_STAC
26298-2: movw %ax,(%_ASM_CX)
26299+
26300+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26301+ mov pax_user_shadow_base,%_ASM_BX
26302+ cmp %_ASM_BX,%_ASM_CX
26303+ jb 1234f
26304+ xor %ebx,%ebx
26305+1234:
26306+#endif
26307+
26308+#endif
26309+
26310+2: __copyuser_seg movw %ax,(_DEST)
26311 xor %eax,%eax
26312 EXIT
26313 ENDPROC(__put_user_2)
26314
26315 ENTRY(__put_user_4)
26316 ENTER
26317+
26318+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
26319+ GET_THREAD_INFO(%_ASM_BX)
26320 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
26321 sub $3,%_ASM_BX
26322 cmp %_ASM_BX,%_ASM_CX
26323 jae bad_put_user
26324 ASM_STAC
26325-3: movl %eax,(%_ASM_CX)
26326+
26327+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26328+ mov pax_user_shadow_base,%_ASM_BX
26329+ cmp %_ASM_BX,%_ASM_CX
26330+ jb 1234f
26331+ xor %ebx,%ebx
26332+1234:
26333+#endif
26334+
26335+#endif
26336+
26337+3: __copyuser_seg movl %eax,(_DEST)
26338 xor %eax,%eax
26339 EXIT
26340 ENDPROC(__put_user_4)
26341
26342 ENTRY(__put_user_8)
26343 ENTER
26344+
26345+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
26346+ GET_THREAD_INFO(%_ASM_BX)
26347 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
26348 sub $7,%_ASM_BX
26349 cmp %_ASM_BX,%_ASM_CX
26350 jae bad_put_user
26351 ASM_STAC
26352-4: mov %_ASM_AX,(%_ASM_CX)
26353+
26354+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26355+ mov pax_user_shadow_base,%_ASM_BX
26356+ cmp %_ASM_BX,%_ASM_CX
26357+ jb 1234f
26358+ xor %ebx,%ebx
26359+1234:
26360+#endif
26361+
26362+#endif
26363+
26364+4: __copyuser_seg mov %_ASM_AX,(_DEST)
26365 #ifdef CONFIG_X86_32
26366-5: movl %edx,4(%_ASM_CX)
26367+5: __copyuser_seg movl %edx,4(_DEST)
26368 #endif
26369 xor %eax,%eax
26370 EXIT
26371diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
26372index 1cad221..de671ee 100644
26373--- a/arch/x86/lib/rwlock.S
26374+++ b/arch/x86/lib/rwlock.S
26375@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
26376 FRAME
26377 0: LOCK_PREFIX
26378 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
26379+
26380+#ifdef CONFIG_PAX_REFCOUNT
26381+ jno 1234f
26382+ LOCK_PREFIX
26383+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
26384+ int $4
26385+1234:
26386+ _ASM_EXTABLE(1234b, 1234b)
26387+#endif
26388+
26389 1: rep; nop
26390 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
26391 jne 1b
26392 LOCK_PREFIX
26393 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
26394+
26395+#ifdef CONFIG_PAX_REFCOUNT
26396+ jno 1234f
26397+ LOCK_PREFIX
26398+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
26399+ int $4
26400+1234:
26401+ _ASM_EXTABLE(1234b, 1234b)
26402+#endif
26403+
26404 jnz 0b
26405 ENDFRAME
26406+ pax_force_retaddr
26407 ret
26408 CFI_ENDPROC
26409 END(__write_lock_failed)
26410@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
26411 FRAME
26412 0: LOCK_PREFIX
26413 READ_LOCK_SIZE(inc) (%__lock_ptr)
26414+
26415+#ifdef CONFIG_PAX_REFCOUNT
26416+ jno 1234f
26417+ LOCK_PREFIX
26418+ READ_LOCK_SIZE(dec) (%__lock_ptr)
26419+ int $4
26420+1234:
26421+ _ASM_EXTABLE(1234b, 1234b)
26422+#endif
26423+
26424 1: rep; nop
26425 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
26426 js 1b
26427 LOCK_PREFIX
26428 READ_LOCK_SIZE(dec) (%__lock_ptr)
26429+
26430+#ifdef CONFIG_PAX_REFCOUNT
26431+ jno 1234f
26432+ LOCK_PREFIX
26433+ READ_LOCK_SIZE(inc) (%__lock_ptr)
26434+ int $4
26435+1234:
26436+ _ASM_EXTABLE(1234b, 1234b)
26437+#endif
26438+
26439 js 0b
26440 ENDFRAME
26441+ pax_force_retaddr
26442 ret
26443 CFI_ENDPROC
26444 END(__read_lock_failed)
26445diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
26446index 5dff5f0..cadebf4 100644
26447--- a/arch/x86/lib/rwsem.S
26448+++ b/arch/x86/lib/rwsem.S
26449@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
26450 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
26451 CFI_RESTORE __ASM_REG(dx)
26452 restore_common_regs
26453+ pax_force_retaddr
26454 ret
26455 CFI_ENDPROC
26456 ENDPROC(call_rwsem_down_read_failed)
26457@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
26458 movq %rax,%rdi
26459 call rwsem_down_write_failed
26460 restore_common_regs
26461+ pax_force_retaddr
26462 ret
26463 CFI_ENDPROC
26464 ENDPROC(call_rwsem_down_write_failed)
26465@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
26466 movq %rax,%rdi
26467 call rwsem_wake
26468 restore_common_regs
26469-1: ret
26470+1: pax_force_retaddr
26471+ ret
26472 CFI_ENDPROC
26473 ENDPROC(call_rwsem_wake)
26474
26475@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
26476 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
26477 CFI_RESTORE __ASM_REG(dx)
26478 restore_common_regs
26479+ pax_force_retaddr
26480 ret
26481 CFI_ENDPROC
26482 ENDPROC(call_rwsem_downgrade_wake)
26483diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
26484index a63efd6..ccecad8 100644
26485--- a/arch/x86/lib/thunk_64.S
26486+++ b/arch/x86/lib/thunk_64.S
26487@@ -8,6 +8,7 @@
26488 #include <linux/linkage.h>
26489 #include <asm/dwarf2.h>
26490 #include <asm/calling.h>
26491+#include <asm/alternative-asm.h>
26492
26493 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
26494 .macro THUNK name, func, put_ret_addr_in_rdi=0
26495@@ -41,5 +42,6 @@
26496 SAVE_ARGS
26497 restore:
26498 RESTORE_ARGS
26499+ pax_force_retaddr
26500 ret
26501 CFI_ENDPROC
26502diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
26503index 3eb18ac..6890bc3 100644
26504--- a/arch/x86/lib/usercopy_32.c
26505+++ b/arch/x86/lib/usercopy_32.c
26506@@ -42,11 +42,13 @@ do { \
26507 int __d0; \
26508 might_fault(); \
26509 __asm__ __volatile__( \
26510+ __COPYUSER_SET_ES \
26511 ASM_STAC "\n" \
26512 "0: rep; stosl\n" \
26513 " movl %2,%0\n" \
26514 "1: rep; stosb\n" \
26515 "2: " ASM_CLAC "\n" \
26516+ __COPYUSER_RESTORE_ES \
26517 ".section .fixup,\"ax\"\n" \
26518 "3: lea 0(%2,%0,4),%0\n" \
26519 " jmp 2b\n" \
26520@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
26521
26522 #ifdef CONFIG_X86_INTEL_USERCOPY
26523 static unsigned long
26524-__copy_user_intel(void __user *to, const void *from, unsigned long size)
26525+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
26526 {
26527 int d0, d1;
26528 __asm__ __volatile__(
26529@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
26530 " .align 2,0x90\n"
26531 "3: movl 0(%4), %%eax\n"
26532 "4: movl 4(%4), %%edx\n"
26533- "5: movl %%eax, 0(%3)\n"
26534- "6: movl %%edx, 4(%3)\n"
26535+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
26536+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
26537 "7: movl 8(%4), %%eax\n"
26538 "8: movl 12(%4),%%edx\n"
26539- "9: movl %%eax, 8(%3)\n"
26540- "10: movl %%edx, 12(%3)\n"
26541+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
26542+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
26543 "11: movl 16(%4), %%eax\n"
26544 "12: movl 20(%4), %%edx\n"
26545- "13: movl %%eax, 16(%3)\n"
26546- "14: movl %%edx, 20(%3)\n"
26547+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
26548+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
26549 "15: movl 24(%4), %%eax\n"
26550 "16: movl 28(%4), %%edx\n"
26551- "17: movl %%eax, 24(%3)\n"
26552- "18: movl %%edx, 28(%3)\n"
26553+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
26554+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
26555 "19: movl 32(%4), %%eax\n"
26556 "20: movl 36(%4), %%edx\n"
26557- "21: movl %%eax, 32(%3)\n"
26558- "22: movl %%edx, 36(%3)\n"
26559+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
26560+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
26561 "23: movl 40(%4), %%eax\n"
26562 "24: movl 44(%4), %%edx\n"
26563- "25: movl %%eax, 40(%3)\n"
26564- "26: movl %%edx, 44(%3)\n"
26565+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
26566+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
26567 "27: movl 48(%4), %%eax\n"
26568 "28: movl 52(%4), %%edx\n"
26569- "29: movl %%eax, 48(%3)\n"
26570- "30: movl %%edx, 52(%3)\n"
26571+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
26572+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
26573 "31: movl 56(%4), %%eax\n"
26574 "32: movl 60(%4), %%edx\n"
26575- "33: movl %%eax, 56(%3)\n"
26576- "34: movl %%edx, 60(%3)\n"
26577+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
26578+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
26579 " addl $-64, %0\n"
26580 " addl $64, %4\n"
26581 " addl $64, %3\n"
26582@@ -149,10 +151,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
26583 " shrl $2, %0\n"
26584 " andl $3, %%eax\n"
26585 " cld\n"
26586+ __COPYUSER_SET_ES
26587 "99: rep; movsl\n"
26588 "36: movl %%eax, %0\n"
26589 "37: rep; movsb\n"
26590 "100:\n"
26591+ __COPYUSER_RESTORE_ES
26592 ".section .fixup,\"ax\"\n"
26593 "101: lea 0(%%eax,%0,4),%0\n"
26594 " jmp 100b\n"
26595@@ -202,46 +206,150 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
26596 }
26597
26598 static unsigned long
26599+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
26600+{
26601+ int d0, d1;
26602+ __asm__ __volatile__(
26603+ " .align 2,0x90\n"
26604+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
26605+ " cmpl $67, %0\n"
26606+ " jbe 3f\n"
26607+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
26608+ " .align 2,0x90\n"
26609+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
26610+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
26611+ "5: movl %%eax, 0(%3)\n"
26612+ "6: movl %%edx, 4(%3)\n"
26613+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
26614+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
26615+ "9: movl %%eax, 8(%3)\n"
26616+ "10: movl %%edx, 12(%3)\n"
26617+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
26618+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
26619+ "13: movl %%eax, 16(%3)\n"
26620+ "14: movl %%edx, 20(%3)\n"
26621+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
26622+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
26623+ "17: movl %%eax, 24(%3)\n"
26624+ "18: movl %%edx, 28(%3)\n"
26625+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
26626+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
26627+ "21: movl %%eax, 32(%3)\n"
26628+ "22: movl %%edx, 36(%3)\n"
26629+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
26630+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
26631+ "25: movl %%eax, 40(%3)\n"
26632+ "26: movl %%edx, 44(%3)\n"
26633+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
26634+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
26635+ "29: movl %%eax, 48(%3)\n"
26636+ "30: movl %%edx, 52(%3)\n"
26637+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
26638+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
26639+ "33: movl %%eax, 56(%3)\n"
26640+ "34: movl %%edx, 60(%3)\n"
26641+ " addl $-64, %0\n"
26642+ " addl $64, %4\n"
26643+ " addl $64, %3\n"
26644+ " cmpl $63, %0\n"
26645+ " ja 1b\n"
26646+ "35: movl %0, %%eax\n"
26647+ " shrl $2, %0\n"
26648+ " andl $3, %%eax\n"
26649+ " cld\n"
26650+ "99: rep; "__copyuser_seg" movsl\n"
26651+ "36: movl %%eax, %0\n"
26652+ "37: rep; "__copyuser_seg" movsb\n"
26653+ "100:\n"
26654+ ".section .fixup,\"ax\"\n"
26655+ "101: lea 0(%%eax,%0,4),%0\n"
26656+ " jmp 100b\n"
26657+ ".previous\n"
26658+ _ASM_EXTABLE(1b,100b)
26659+ _ASM_EXTABLE(2b,100b)
26660+ _ASM_EXTABLE(3b,100b)
26661+ _ASM_EXTABLE(4b,100b)
26662+ _ASM_EXTABLE(5b,100b)
26663+ _ASM_EXTABLE(6b,100b)
26664+ _ASM_EXTABLE(7b,100b)
26665+ _ASM_EXTABLE(8b,100b)
26666+ _ASM_EXTABLE(9b,100b)
26667+ _ASM_EXTABLE(10b,100b)
26668+ _ASM_EXTABLE(11b,100b)
26669+ _ASM_EXTABLE(12b,100b)
26670+ _ASM_EXTABLE(13b,100b)
26671+ _ASM_EXTABLE(14b,100b)
26672+ _ASM_EXTABLE(15b,100b)
26673+ _ASM_EXTABLE(16b,100b)
26674+ _ASM_EXTABLE(17b,100b)
26675+ _ASM_EXTABLE(18b,100b)
26676+ _ASM_EXTABLE(19b,100b)
26677+ _ASM_EXTABLE(20b,100b)
26678+ _ASM_EXTABLE(21b,100b)
26679+ _ASM_EXTABLE(22b,100b)
26680+ _ASM_EXTABLE(23b,100b)
26681+ _ASM_EXTABLE(24b,100b)
26682+ _ASM_EXTABLE(25b,100b)
26683+ _ASM_EXTABLE(26b,100b)
26684+ _ASM_EXTABLE(27b,100b)
26685+ _ASM_EXTABLE(28b,100b)
26686+ _ASM_EXTABLE(29b,100b)
26687+ _ASM_EXTABLE(30b,100b)
26688+ _ASM_EXTABLE(31b,100b)
26689+ _ASM_EXTABLE(32b,100b)
26690+ _ASM_EXTABLE(33b,100b)
26691+ _ASM_EXTABLE(34b,100b)
26692+ _ASM_EXTABLE(35b,100b)
26693+ _ASM_EXTABLE(36b,100b)
26694+ _ASM_EXTABLE(37b,100b)
26695+ _ASM_EXTABLE(99b,101b)
26696+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
26697+ : "1"(to), "2"(from), "0"(size)
26698+ : "eax", "edx", "memory");
26699+ return size;
26700+}
26701+
26702+static unsigned long __size_overflow(3)
26703 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
26704 {
26705 int d0, d1;
26706 __asm__ __volatile__(
26707 " .align 2,0x90\n"
26708- "0: movl 32(%4), %%eax\n"
26709+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
26710 " cmpl $67, %0\n"
26711 " jbe 2f\n"
26712- "1: movl 64(%4), %%eax\n"
26713+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
26714 " .align 2,0x90\n"
26715- "2: movl 0(%4), %%eax\n"
26716- "21: movl 4(%4), %%edx\n"
26717+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
26718+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
26719 " movl %%eax, 0(%3)\n"
26720 " movl %%edx, 4(%3)\n"
26721- "3: movl 8(%4), %%eax\n"
26722- "31: movl 12(%4),%%edx\n"
26723+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
26724+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
26725 " movl %%eax, 8(%3)\n"
26726 " movl %%edx, 12(%3)\n"
26727- "4: movl 16(%4), %%eax\n"
26728- "41: movl 20(%4), %%edx\n"
26729+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
26730+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
26731 " movl %%eax, 16(%3)\n"
26732 " movl %%edx, 20(%3)\n"
26733- "10: movl 24(%4), %%eax\n"
26734- "51: movl 28(%4), %%edx\n"
26735+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
26736+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
26737 " movl %%eax, 24(%3)\n"
26738 " movl %%edx, 28(%3)\n"
26739- "11: movl 32(%4), %%eax\n"
26740- "61: movl 36(%4), %%edx\n"
26741+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
26742+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
26743 " movl %%eax, 32(%3)\n"
26744 " movl %%edx, 36(%3)\n"
26745- "12: movl 40(%4), %%eax\n"
26746- "71: movl 44(%4), %%edx\n"
26747+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
26748+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
26749 " movl %%eax, 40(%3)\n"
26750 " movl %%edx, 44(%3)\n"
26751- "13: movl 48(%4), %%eax\n"
26752- "81: movl 52(%4), %%edx\n"
26753+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
26754+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
26755 " movl %%eax, 48(%3)\n"
26756 " movl %%edx, 52(%3)\n"
26757- "14: movl 56(%4), %%eax\n"
26758- "91: movl 60(%4), %%edx\n"
26759+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
26760+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
26761 " movl %%eax, 56(%3)\n"
26762 " movl %%edx, 60(%3)\n"
26763 " addl $-64, %0\n"
26764@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
26765 " shrl $2, %0\n"
26766 " andl $3, %%eax\n"
26767 " cld\n"
26768- "6: rep; movsl\n"
26769+ "6: rep; "__copyuser_seg" movsl\n"
26770 " movl %%eax,%0\n"
26771- "7: rep; movsb\n"
26772+ "7: rep; "__copyuser_seg" movsb\n"
26773 "8:\n"
26774 ".section .fixup,\"ax\"\n"
26775 "9: lea 0(%%eax,%0,4),%0\n"
26776@@ -298,48 +406,48 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
26777 * hyoshiok@miraclelinux.com
26778 */
26779
26780-static unsigned long __copy_user_zeroing_intel_nocache(void *to,
26781+static unsigned long __size_overflow(3) __copy_user_zeroing_intel_nocache(void *to,
26782 const void __user *from, unsigned long size)
26783 {
26784 int d0, d1;
26785
26786 __asm__ __volatile__(
26787 " .align 2,0x90\n"
26788- "0: movl 32(%4), %%eax\n"
26789+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
26790 " cmpl $67, %0\n"
26791 " jbe 2f\n"
26792- "1: movl 64(%4), %%eax\n"
26793+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
26794 " .align 2,0x90\n"
26795- "2: movl 0(%4), %%eax\n"
26796- "21: movl 4(%4), %%edx\n"
26797+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
26798+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
26799 " movnti %%eax, 0(%3)\n"
26800 " movnti %%edx, 4(%3)\n"
26801- "3: movl 8(%4), %%eax\n"
26802- "31: movl 12(%4),%%edx\n"
26803+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
26804+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
26805 " movnti %%eax, 8(%3)\n"
26806 " movnti %%edx, 12(%3)\n"
26807- "4: movl 16(%4), %%eax\n"
26808- "41: movl 20(%4), %%edx\n"
26809+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
26810+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
26811 " movnti %%eax, 16(%3)\n"
26812 " movnti %%edx, 20(%3)\n"
26813- "10: movl 24(%4), %%eax\n"
26814- "51: movl 28(%4), %%edx\n"
26815+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
26816+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
26817 " movnti %%eax, 24(%3)\n"
26818 " movnti %%edx, 28(%3)\n"
26819- "11: movl 32(%4), %%eax\n"
26820- "61: movl 36(%4), %%edx\n"
26821+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
26822+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
26823 " movnti %%eax, 32(%3)\n"
26824 " movnti %%edx, 36(%3)\n"
26825- "12: movl 40(%4), %%eax\n"
26826- "71: movl 44(%4), %%edx\n"
26827+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
26828+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
26829 " movnti %%eax, 40(%3)\n"
26830 " movnti %%edx, 44(%3)\n"
26831- "13: movl 48(%4), %%eax\n"
26832- "81: movl 52(%4), %%edx\n"
26833+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
26834+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
26835 " movnti %%eax, 48(%3)\n"
26836 " movnti %%edx, 52(%3)\n"
26837- "14: movl 56(%4), %%eax\n"
26838- "91: movl 60(%4), %%edx\n"
26839+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
26840+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
26841 " movnti %%eax, 56(%3)\n"
26842 " movnti %%edx, 60(%3)\n"
26843 " addl $-64, %0\n"
26844@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
26845 " shrl $2, %0\n"
26846 " andl $3, %%eax\n"
26847 " cld\n"
26848- "6: rep; movsl\n"
26849+ "6: rep; "__copyuser_seg" movsl\n"
26850 " movl %%eax,%0\n"
26851- "7: rep; movsb\n"
26852+ "7: rep; "__copyuser_seg" movsb\n"
26853 "8:\n"
26854 ".section .fixup,\"ax\"\n"
26855 "9: lea 0(%%eax,%0,4),%0\n"
26856@@ -392,48 +500,48 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
26857 return size;
26858 }
26859
26860-static unsigned long __copy_user_intel_nocache(void *to,
26861+static unsigned long __size_overflow(3) __copy_user_intel_nocache(void *to,
26862 const void __user *from, unsigned long size)
26863 {
26864 int d0, d1;
26865
26866 __asm__ __volatile__(
26867 " .align 2,0x90\n"
26868- "0: movl 32(%4), %%eax\n"
26869+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
26870 " cmpl $67, %0\n"
26871 " jbe 2f\n"
26872- "1: movl 64(%4), %%eax\n"
26873+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
26874 " .align 2,0x90\n"
26875- "2: movl 0(%4), %%eax\n"
26876- "21: movl 4(%4), %%edx\n"
26877+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
26878+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
26879 " movnti %%eax, 0(%3)\n"
26880 " movnti %%edx, 4(%3)\n"
26881- "3: movl 8(%4), %%eax\n"
26882- "31: movl 12(%4),%%edx\n"
26883+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
26884+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
26885 " movnti %%eax, 8(%3)\n"
26886 " movnti %%edx, 12(%3)\n"
26887- "4: movl 16(%4), %%eax\n"
26888- "41: movl 20(%4), %%edx\n"
26889+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
26890+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
26891 " movnti %%eax, 16(%3)\n"
26892 " movnti %%edx, 20(%3)\n"
26893- "10: movl 24(%4), %%eax\n"
26894- "51: movl 28(%4), %%edx\n"
26895+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
26896+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
26897 " movnti %%eax, 24(%3)\n"
26898 " movnti %%edx, 28(%3)\n"
26899- "11: movl 32(%4), %%eax\n"
26900- "61: movl 36(%4), %%edx\n"
26901+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
26902+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
26903 " movnti %%eax, 32(%3)\n"
26904 " movnti %%edx, 36(%3)\n"
26905- "12: movl 40(%4), %%eax\n"
26906- "71: movl 44(%4), %%edx\n"
26907+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
26908+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
26909 " movnti %%eax, 40(%3)\n"
26910 " movnti %%edx, 44(%3)\n"
26911- "13: movl 48(%4), %%eax\n"
26912- "81: movl 52(%4), %%edx\n"
26913+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
26914+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
26915 " movnti %%eax, 48(%3)\n"
26916 " movnti %%edx, 52(%3)\n"
26917- "14: movl 56(%4), %%eax\n"
26918- "91: movl 60(%4), %%edx\n"
26919+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
26920+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
26921 " movnti %%eax, 56(%3)\n"
26922 " movnti %%edx, 60(%3)\n"
26923 " addl $-64, %0\n"
26924@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
26925 " shrl $2, %0\n"
26926 " andl $3, %%eax\n"
26927 " cld\n"
26928- "6: rep; movsl\n"
26929+ "6: rep; "__copyuser_seg" movsl\n"
26930 " movl %%eax,%0\n"
26931- "7: rep; movsb\n"
26932+ "7: rep; "__copyuser_seg" movsb\n"
26933 "8:\n"
26934 ".section .fixup,\"ax\"\n"
26935 "9: lea 0(%%eax,%0,4),%0\n"
26936@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
26937 */
26938 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
26939 unsigned long size);
26940-unsigned long __copy_user_intel(void __user *to, const void *from,
26941+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
26942+ unsigned long size);
26943+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
26944 unsigned long size);
26945 unsigned long __copy_user_zeroing_intel_nocache(void *to,
26946 const void __user *from, unsigned long size);
26947 #endif /* CONFIG_X86_INTEL_USERCOPY */
26948
26949 /* Generic arbitrary sized copy. */
26950-#define __copy_user(to, from, size) \
26951+#define __copy_user(to, from, size, prefix, set, restore) \
26952 do { \
26953 int __d0, __d1, __d2; \
26954 __asm__ __volatile__( \
26955+ set \
26956 " cmp $7,%0\n" \
26957 " jbe 1f\n" \
26958 " movl %1,%0\n" \
26959 " negl %0\n" \
26960 " andl $7,%0\n" \
26961 " subl %0,%3\n" \
26962- "4: rep; movsb\n" \
26963+ "4: rep; "prefix"movsb\n" \
26964 " movl %3,%0\n" \
26965 " shrl $2,%0\n" \
26966 " andl $3,%3\n" \
26967 " .align 2,0x90\n" \
26968- "0: rep; movsl\n" \
26969+ "0: rep; "prefix"movsl\n" \
26970 " movl %3,%0\n" \
26971- "1: rep; movsb\n" \
26972+ "1: rep; "prefix"movsb\n" \
26973 "2:\n" \
26974+ restore \
26975 ".section .fixup,\"ax\"\n" \
26976 "5: addl %3,%0\n" \
26977 " jmp 2b\n" \
26978@@ -538,14 +650,14 @@ do { \
26979 " negl %0\n" \
26980 " andl $7,%0\n" \
26981 " subl %0,%3\n" \
26982- "4: rep; movsb\n" \
26983+ "4: rep; "__copyuser_seg"movsb\n" \
26984 " movl %3,%0\n" \
26985 " shrl $2,%0\n" \
26986 " andl $3,%3\n" \
26987 " .align 2,0x90\n" \
26988- "0: rep; movsl\n" \
26989+ "0: rep; "__copyuser_seg"movsl\n" \
26990 " movl %3,%0\n" \
26991- "1: rep; movsb\n" \
26992+ "1: rep; "__copyuser_seg"movsb\n" \
26993 "2:\n" \
26994 ".section .fixup,\"ax\"\n" \
26995 "5: addl %3,%0\n" \
26996@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
26997 {
26998 stac();
26999 if (movsl_is_ok(to, from, n))
27000- __copy_user(to, from, n);
27001+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
27002 else
27003- n = __copy_user_intel(to, from, n);
27004+ n = __generic_copy_to_user_intel(to, from, n);
27005 clac();
27006 return n;
27007 }
27008@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
27009 {
27010 stac();
27011 if (movsl_is_ok(to, from, n))
27012- __copy_user(to, from, n);
27013+ __copy_user(to, from, n, __copyuser_seg, "", "");
27014 else
27015- n = __copy_user_intel((void __user *)to,
27016- (const void *)from, n);
27017+ n = __generic_copy_from_user_intel(to, from, n);
27018 clac();
27019 return n;
27020 }
27021@@ -632,60 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
27022 if (n > 64 && cpu_has_xmm2)
27023 n = __copy_user_intel_nocache(to, from, n);
27024 else
27025- __copy_user(to, from, n);
27026+ __copy_user(to, from, n, __copyuser_seg, "", "");
27027 #else
27028- __copy_user(to, from, n);
27029+ __copy_user(to, from, n, __copyuser_seg, "", "");
27030 #endif
27031 clac();
27032 return n;
27033 }
27034 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
27035
27036-/**
27037- * copy_to_user: - Copy a block of data into user space.
27038- * @to: Destination address, in user space.
27039- * @from: Source address, in kernel space.
27040- * @n: Number of bytes to copy.
27041- *
27042- * Context: User context only. This function may sleep.
27043- *
27044- * Copy data from kernel space to user space.
27045- *
27046- * Returns number of bytes that could not be copied.
27047- * On success, this will be zero.
27048- */
27049-unsigned long
27050-copy_to_user(void __user *to, const void *from, unsigned long n)
27051+#ifdef CONFIG_PAX_MEMORY_UDEREF
27052+void __set_fs(mm_segment_t x)
27053 {
27054- if (access_ok(VERIFY_WRITE, to, n))
27055- n = __copy_to_user(to, from, n);
27056- return n;
27057+ switch (x.seg) {
27058+ case 0:
27059+ loadsegment(gs, 0);
27060+ break;
27061+ case TASK_SIZE_MAX:
27062+ loadsegment(gs, __USER_DS);
27063+ break;
27064+ case -1UL:
27065+ loadsegment(gs, __KERNEL_DS);
27066+ break;
27067+ default:
27068+ BUG();
27069+ }
27070 }
27071-EXPORT_SYMBOL(copy_to_user);
27072+EXPORT_SYMBOL(__set_fs);
27073
27074-/**
27075- * copy_from_user: - Copy a block of data from user space.
27076- * @to: Destination address, in kernel space.
27077- * @from: Source address, in user space.
27078- * @n: Number of bytes to copy.
27079- *
27080- * Context: User context only. This function may sleep.
27081- *
27082- * Copy data from user space to kernel space.
27083- *
27084- * Returns number of bytes that could not be copied.
27085- * On success, this will be zero.
27086- *
27087- * If some data could not be copied, this function will pad the copied
27088- * data to the requested size using zero bytes.
27089- */
27090-unsigned long
27091-_copy_from_user(void *to, const void __user *from, unsigned long n)
27092+void set_fs(mm_segment_t x)
27093 {
27094- if (access_ok(VERIFY_READ, from, n))
27095- n = __copy_from_user(to, from, n);
27096- else
27097- memset(to, 0, n);
27098- return n;
27099+ current_thread_info()->addr_limit = x;
27100+ __set_fs(x);
27101 }
27102-EXPORT_SYMBOL(_copy_from_user);
27103+EXPORT_SYMBOL(set_fs);
27104+#endif
27105diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
27106index 906fea3..5646695 100644
27107--- a/arch/x86/lib/usercopy_64.c
27108+++ b/arch/x86/lib/usercopy_64.c
27109@@ -39,7 +39,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
27110 _ASM_EXTABLE(0b,3b)
27111 _ASM_EXTABLE(1b,2b)
27112 : [size8] "=&c"(size), [dst] "=&D" (__d0)
27113- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
27114+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
27115 [zero] "r" (0UL), [eight] "r" (8UL));
27116 clac();
27117 return size;
27118@@ -54,12 +54,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
27119 }
27120 EXPORT_SYMBOL(clear_user);
27121
27122-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
27123+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
27124 {
27125- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
27126- return copy_user_generic((__force void *)to, (__force void *)from, len);
27127- }
27128- return len;
27129+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
27130+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
27131+ return len;
27132 }
27133 EXPORT_SYMBOL(copy_in_user);
27134
27135@@ -69,7 +68,7 @@ EXPORT_SYMBOL(copy_in_user);
27136 * it is not necessary to optimize tail handling.
27137 */
27138 unsigned long
27139-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
27140+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
27141 {
27142 char c;
27143 unsigned zero_len;
27144diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
27145index 903ec1e..c4166b2 100644
27146--- a/arch/x86/mm/extable.c
27147+++ b/arch/x86/mm/extable.c
27148@@ -6,12 +6,24 @@
27149 static inline unsigned long
27150 ex_insn_addr(const struct exception_table_entry *x)
27151 {
27152- return (unsigned long)&x->insn + x->insn;
27153+ unsigned long reloc = 0;
27154+
27155+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27156+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
27157+#endif
27158+
27159+ return (unsigned long)&x->insn + x->insn + reloc;
27160 }
27161 static inline unsigned long
27162 ex_fixup_addr(const struct exception_table_entry *x)
27163 {
27164- return (unsigned long)&x->fixup + x->fixup;
27165+ unsigned long reloc = 0;
27166+
27167+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27168+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
27169+#endif
27170+
27171+ return (unsigned long)&x->fixup + x->fixup + reloc;
27172 }
27173
27174 int fixup_exception(struct pt_regs *regs)
27175@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
27176 unsigned long new_ip;
27177
27178 #ifdef CONFIG_PNPBIOS
27179- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
27180+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
27181 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
27182 extern u32 pnp_bios_is_utter_crap;
27183 pnp_bios_is_utter_crap = 1;
27184@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
27185 i += 4;
27186 p->fixup -= i;
27187 i += 4;
27188+
27189+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27190+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
27191+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
27192+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
27193+#endif
27194+
27195 }
27196 }
27197
27198diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
27199index 654be4a..d36985f 100644
27200--- a/arch/x86/mm/fault.c
27201+++ b/arch/x86/mm/fault.c
27202@@ -14,11 +14,18 @@
27203 #include <linux/hugetlb.h> /* hstate_index_to_shift */
27204 #include <linux/prefetch.h> /* prefetchw */
27205 #include <linux/context_tracking.h> /* exception_enter(), ... */
27206+#include <linux/unistd.h>
27207+#include <linux/compiler.h>
27208
27209 #include <asm/traps.h> /* dotraplinkage, ... */
27210 #include <asm/pgalloc.h> /* pgd_*(), ... */
27211 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
27212 #include <asm/fixmap.h> /* VSYSCALL_START */
27213+#include <asm/tlbflush.h>
27214+
27215+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
27216+#include <asm/stacktrace.h>
27217+#endif
27218
27219 /*
27220 * Page fault error code bits:
27221@@ -56,7 +63,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
27222 int ret = 0;
27223
27224 /* kprobe_running() needs smp_processor_id() */
27225- if (kprobes_built_in() && !user_mode_vm(regs)) {
27226+ if (kprobes_built_in() && !user_mode(regs)) {
27227 preempt_disable();
27228 if (kprobe_running() && kprobe_fault_handler(regs, 14))
27229 ret = 1;
27230@@ -117,7 +124,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
27231 return !instr_lo || (instr_lo>>1) == 1;
27232 case 0x00:
27233 /* Prefetch instruction is 0x0F0D or 0x0F18 */
27234- if (probe_kernel_address(instr, opcode))
27235+ if (user_mode(regs)) {
27236+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
27237+ return 0;
27238+ } else if (probe_kernel_address(instr, opcode))
27239 return 0;
27240
27241 *prefetch = (instr_lo == 0xF) &&
27242@@ -151,7 +161,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
27243 while (instr < max_instr) {
27244 unsigned char opcode;
27245
27246- if (probe_kernel_address(instr, opcode))
27247+ if (user_mode(regs)) {
27248+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
27249+ break;
27250+ } else if (probe_kernel_address(instr, opcode))
27251 break;
27252
27253 instr++;
27254@@ -182,6 +195,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
27255 force_sig_info(si_signo, &info, tsk);
27256 }
27257
27258+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
27259+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
27260+#endif
27261+
27262+#ifdef CONFIG_PAX_EMUTRAMP
27263+static int pax_handle_fetch_fault(struct pt_regs *regs);
27264+#endif
27265+
27266+#ifdef CONFIG_PAX_PAGEEXEC
27267+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
27268+{
27269+ pgd_t *pgd;
27270+ pud_t *pud;
27271+ pmd_t *pmd;
27272+
27273+ pgd = pgd_offset(mm, address);
27274+ if (!pgd_present(*pgd))
27275+ return NULL;
27276+ pud = pud_offset(pgd, address);
27277+ if (!pud_present(*pud))
27278+ return NULL;
27279+ pmd = pmd_offset(pud, address);
27280+ if (!pmd_present(*pmd))
27281+ return NULL;
27282+ return pmd;
27283+}
27284+#endif
27285+
27286 DEFINE_SPINLOCK(pgd_lock);
27287 LIST_HEAD(pgd_list);
27288
27289@@ -232,10 +273,22 @@ void vmalloc_sync_all(void)
27290 for (address = VMALLOC_START & PMD_MASK;
27291 address >= TASK_SIZE && address < FIXADDR_TOP;
27292 address += PMD_SIZE) {
27293+
27294+#ifdef CONFIG_PAX_PER_CPU_PGD
27295+ unsigned long cpu;
27296+#else
27297 struct page *page;
27298+#endif
27299
27300 spin_lock(&pgd_lock);
27301+
27302+#ifdef CONFIG_PAX_PER_CPU_PGD
27303+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
27304+ pgd_t *pgd = get_cpu_pgd(cpu);
27305+ pmd_t *ret;
27306+#else
27307 list_for_each_entry(page, &pgd_list, lru) {
27308+ pgd_t *pgd;
27309 spinlock_t *pgt_lock;
27310 pmd_t *ret;
27311
27312@@ -243,8 +296,14 @@ void vmalloc_sync_all(void)
27313 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
27314
27315 spin_lock(pgt_lock);
27316- ret = vmalloc_sync_one(page_address(page), address);
27317+ pgd = page_address(page);
27318+#endif
27319+
27320+ ret = vmalloc_sync_one(pgd, address);
27321+
27322+#ifndef CONFIG_PAX_PER_CPU_PGD
27323 spin_unlock(pgt_lock);
27324+#endif
27325
27326 if (!ret)
27327 break;
27328@@ -278,6 +337,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
27329 * an interrupt in the middle of a task switch..
27330 */
27331 pgd_paddr = read_cr3();
27332+
27333+#ifdef CONFIG_PAX_PER_CPU_PGD
27334+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
27335+#endif
27336+
27337 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
27338 if (!pmd_k)
27339 return -1;
27340@@ -373,7 +437,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
27341 * happen within a race in page table update. In the later
27342 * case just flush:
27343 */
27344+
27345+#ifdef CONFIG_PAX_PER_CPU_PGD
27346+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
27347+ pgd = pgd_offset_cpu(smp_processor_id(), address);
27348+#else
27349 pgd = pgd_offset(current->active_mm, address);
27350+#endif
27351+
27352 pgd_ref = pgd_offset_k(address);
27353 if (pgd_none(*pgd_ref))
27354 return -1;
27355@@ -543,7 +614,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
27356 static int is_errata100(struct pt_regs *regs, unsigned long address)
27357 {
27358 #ifdef CONFIG_X86_64
27359- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
27360+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
27361 return 1;
27362 #endif
27363 return 0;
27364@@ -570,7 +641,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
27365 }
27366
27367 static const char nx_warning[] = KERN_CRIT
27368-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
27369+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
27370
27371 static void
27372 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
27373@@ -579,15 +650,27 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
27374 if (!oops_may_print())
27375 return;
27376
27377- if (error_code & PF_INSTR) {
27378+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
27379 unsigned int level;
27380
27381 pte_t *pte = lookup_address(address, &level);
27382
27383 if (pte && pte_present(*pte) && !pte_exec(*pte))
27384- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
27385+ printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
27386 }
27387
27388+#ifdef CONFIG_PAX_KERNEXEC
27389+ if (init_mm.start_code <= address && address < init_mm.end_code) {
27390+ if (current->signal->curr_ip)
27391+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
27392+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
27393+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
27394+ else
27395+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
27396+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
27397+ }
27398+#endif
27399+
27400 printk(KERN_ALERT "BUG: unable to handle kernel ");
27401 if (address < PAGE_SIZE)
27402 printk(KERN_CONT "NULL pointer dereference");
27403@@ -750,6 +833,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
27404 return;
27405 }
27406 #endif
27407+
27408+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
27409+ if (pax_is_fetch_fault(regs, error_code, address)) {
27410+
27411+#ifdef CONFIG_PAX_EMUTRAMP
27412+ switch (pax_handle_fetch_fault(regs)) {
27413+ case 2:
27414+ return;
27415+ }
27416+#endif
27417+
27418+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
27419+ do_group_exit(SIGKILL);
27420+ }
27421+#endif
27422+
27423 /* Kernel addresses are always protection faults: */
27424 if (address >= TASK_SIZE)
27425 error_code |= PF_PROT;
27426@@ -835,7 +934,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
27427 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
27428 printk(KERN_ERR
27429 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
27430- tsk->comm, tsk->pid, address);
27431+ tsk->comm, task_pid_nr(tsk), address);
27432 code = BUS_MCEERR_AR;
27433 }
27434 #endif
27435@@ -898,6 +997,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
27436 return 1;
27437 }
27438
27439+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
27440+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
27441+{
27442+ pte_t *pte;
27443+ pmd_t *pmd;
27444+ spinlock_t *ptl;
27445+ unsigned char pte_mask;
27446+
27447+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
27448+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
27449+ return 0;
27450+
27451+ /* PaX: it's our fault, let's handle it if we can */
27452+
27453+ /* PaX: take a look at read faults before acquiring any locks */
27454+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
27455+ /* instruction fetch attempt from a protected page in user mode */
27456+ up_read(&mm->mmap_sem);
27457+
27458+#ifdef CONFIG_PAX_EMUTRAMP
27459+ switch (pax_handle_fetch_fault(regs)) {
27460+ case 2:
27461+ return 1;
27462+ }
27463+#endif
27464+
27465+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
27466+ do_group_exit(SIGKILL);
27467+ }
27468+
27469+ pmd = pax_get_pmd(mm, address);
27470+ if (unlikely(!pmd))
27471+ return 0;
27472+
27473+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
27474+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
27475+ pte_unmap_unlock(pte, ptl);
27476+ return 0;
27477+ }
27478+
27479+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
27480+ /* write attempt to a protected page in user mode */
27481+ pte_unmap_unlock(pte, ptl);
27482+ return 0;
27483+ }
27484+
27485+#ifdef CONFIG_SMP
27486+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
27487+#else
27488+ if (likely(address > get_limit(regs->cs)))
27489+#endif
27490+ {
27491+ set_pte(pte, pte_mkread(*pte));
27492+ __flush_tlb_one(address);
27493+ pte_unmap_unlock(pte, ptl);
27494+ up_read(&mm->mmap_sem);
27495+ return 1;
27496+ }
27497+
27498+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
27499+
27500+ /*
27501+ * PaX: fill DTLB with user rights and retry
27502+ */
27503+ __asm__ __volatile__ (
27504+ "orb %2,(%1)\n"
27505+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
27506+/*
27507+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
27508+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
27509+ * page fault when examined during a TLB load attempt. this is true not only
27510+ * for PTEs holding a non-present entry but also present entries that will
27511+ * raise a page fault (such as those set up by PaX, or the copy-on-write
27512+ * mechanism). in effect it means that we do *not* need to flush the TLBs
27513+ * for our target pages since their PTEs are simply not in the TLBs at all.
27514+
27515+ * the best thing in omitting it is that we gain around 15-20% speed in the
27516+ * fast path of the page fault handler and can get rid of tracing since we
27517+ * can no longer flush unintended entries.
27518+ */
27519+ "invlpg (%0)\n"
27520+#endif
27521+ __copyuser_seg"testb $0,(%0)\n"
27522+ "xorb %3,(%1)\n"
27523+ :
27524+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
27525+ : "memory", "cc");
27526+ pte_unmap_unlock(pte, ptl);
27527+ up_read(&mm->mmap_sem);
27528+ return 1;
27529+}
27530+#endif
27531+
27532 /*
27533 * Handle a spurious fault caused by a stale TLB entry.
27534 *
27535@@ -964,6 +1156,9 @@ int show_unhandled_signals = 1;
27536 static inline int
27537 access_error(unsigned long error_code, struct vm_area_struct *vma)
27538 {
27539+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
27540+ return 1;
27541+
27542 if (error_code & PF_WRITE) {
27543 /* write, present and write, not present: */
27544 if (unlikely(!(vma->vm_flags & VM_WRITE)))
27545@@ -992,7 +1187,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
27546 if (error_code & PF_USER)
27547 return false;
27548
27549- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
27550+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
27551 return false;
27552
27553 return true;
27554@@ -1008,18 +1203,33 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
27555 {
27556 struct vm_area_struct *vma;
27557 struct task_struct *tsk;
27558- unsigned long address;
27559 struct mm_struct *mm;
27560 int fault;
27561 int write = error_code & PF_WRITE;
27562 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
27563 (write ? FAULT_FLAG_WRITE : 0);
27564
27565- tsk = current;
27566- mm = tsk->mm;
27567-
27568 /* Get the faulting address: */
27569- address = read_cr2();
27570+ unsigned long address = read_cr2();
27571+
27572+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
27573+ if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
27574+ if (!search_exception_tables(regs->ip)) {
27575+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
27576+ bad_area_nosemaphore(regs, error_code, address);
27577+ return;
27578+ }
27579+ if (address < pax_user_shadow_base) {
27580+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
27581+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
27582+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
27583+ } else
27584+ address -= pax_user_shadow_base;
27585+ }
27586+#endif
27587+
27588+ tsk = current;
27589+ mm = tsk->mm;
27590
27591 /*
27592 * Detect and handle instructions that would cause a page fault for
27593@@ -1080,7 +1290,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
27594 * User-mode registers count as a user access even for any
27595 * potential system fault or CPU buglet:
27596 */
27597- if (user_mode_vm(regs)) {
27598+ if (user_mode(regs)) {
27599 local_irq_enable();
27600 error_code |= PF_USER;
27601 } else {
27602@@ -1142,6 +1352,11 @@ retry:
27603 might_sleep();
27604 }
27605
27606+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
27607+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
27608+ return;
27609+#endif
27610+
27611 vma = find_vma(mm, address);
27612 if (unlikely(!vma)) {
27613 bad_area(regs, error_code, address);
27614@@ -1153,18 +1368,24 @@ retry:
27615 bad_area(regs, error_code, address);
27616 return;
27617 }
27618- if (error_code & PF_USER) {
27619- /*
27620- * Accessing the stack below %sp is always a bug.
27621- * The large cushion allows instructions like enter
27622- * and pusha to work. ("enter $65535, $31" pushes
27623- * 32 pointers and then decrements %sp by 65535.)
27624- */
27625- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
27626- bad_area(regs, error_code, address);
27627- return;
27628- }
27629+ /*
27630+ * Accessing the stack below %sp is always a bug.
27631+ * The large cushion allows instructions like enter
27632+ * and pusha to work. ("enter $65535, $31" pushes
27633+ * 32 pointers and then decrements %sp by 65535.)
27634+ */
27635+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
27636+ bad_area(regs, error_code, address);
27637+ return;
27638 }
27639+
27640+#ifdef CONFIG_PAX_SEGMEXEC
27641+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
27642+ bad_area(regs, error_code, address);
27643+ return;
27644+ }
27645+#endif
27646+
27647 if (unlikely(expand_stack(vma, address))) {
27648 bad_area(regs, error_code, address);
27649 return;
27650@@ -1230,3 +1451,292 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
27651 __do_page_fault(regs, error_code);
27652 exception_exit(prev_state);
27653 }
27654+
27655+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
27656+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
27657+{
27658+ struct mm_struct *mm = current->mm;
27659+ unsigned long ip = regs->ip;
27660+
27661+ if (v8086_mode(regs))
27662+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
27663+
27664+#ifdef CONFIG_PAX_PAGEEXEC
27665+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
27666+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
27667+ return true;
27668+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
27669+ return true;
27670+ return false;
27671+ }
27672+#endif
27673+
27674+#ifdef CONFIG_PAX_SEGMEXEC
27675+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
27676+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
27677+ return true;
27678+ return false;
27679+ }
27680+#endif
27681+
27682+ return false;
27683+}
27684+#endif
27685+
27686+#ifdef CONFIG_PAX_EMUTRAMP
27687+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
27688+{
27689+ int err;
27690+
27691+ do { /* PaX: libffi trampoline emulation */
27692+ unsigned char mov, jmp;
27693+ unsigned int addr1, addr2;
27694+
27695+#ifdef CONFIG_X86_64
27696+ if ((regs->ip + 9) >> 32)
27697+ break;
27698+#endif
27699+
27700+ err = get_user(mov, (unsigned char __user *)regs->ip);
27701+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
27702+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
27703+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
27704+
27705+ if (err)
27706+ break;
27707+
27708+ if (mov == 0xB8 && jmp == 0xE9) {
27709+ regs->ax = addr1;
27710+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
27711+ return 2;
27712+ }
27713+ } while (0);
27714+
27715+ do { /* PaX: gcc trampoline emulation #1 */
27716+ unsigned char mov1, mov2;
27717+ unsigned short jmp;
27718+ unsigned int addr1, addr2;
27719+
27720+#ifdef CONFIG_X86_64
27721+ if ((regs->ip + 11) >> 32)
27722+ break;
27723+#endif
27724+
27725+ err = get_user(mov1, (unsigned char __user *)regs->ip);
27726+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
27727+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
27728+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
27729+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
27730+
27731+ if (err)
27732+ break;
27733+
27734+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
27735+ regs->cx = addr1;
27736+ regs->ax = addr2;
27737+ regs->ip = addr2;
27738+ return 2;
27739+ }
27740+ } while (0);
27741+
27742+ do { /* PaX: gcc trampoline emulation #2 */
27743+ unsigned char mov, jmp;
27744+ unsigned int addr1, addr2;
27745+
27746+#ifdef CONFIG_X86_64
27747+ if ((regs->ip + 9) >> 32)
27748+ break;
27749+#endif
27750+
27751+ err = get_user(mov, (unsigned char __user *)regs->ip);
27752+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
27753+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
27754+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
27755+
27756+ if (err)
27757+ break;
27758+
27759+ if (mov == 0xB9 && jmp == 0xE9) {
27760+ regs->cx = addr1;
27761+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
27762+ return 2;
27763+ }
27764+ } while (0);
27765+
27766+ return 1; /* PaX in action */
27767+}
27768+
27769+#ifdef CONFIG_X86_64
27770+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
27771+{
27772+ int err;
27773+
27774+ do { /* PaX: libffi trampoline emulation */
27775+ unsigned short mov1, mov2, jmp1;
27776+ unsigned char stcclc, jmp2;
27777+ unsigned long addr1, addr2;
27778+
27779+ err = get_user(mov1, (unsigned short __user *)regs->ip);
27780+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
27781+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
27782+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
27783+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
27784+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
27785+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
27786+
27787+ if (err)
27788+ break;
27789+
27790+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
27791+ regs->r11 = addr1;
27792+ regs->r10 = addr2;
27793+ if (stcclc == 0xF8)
27794+ regs->flags &= ~X86_EFLAGS_CF;
27795+ else
27796+ regs->flags |= X86_EFLAGS_CF;
27797+ regs->ip = addr1;
27798+ return 2;
27799+ }
27800+ } while (0);
27801+
27802+ do { /* PaX: gcc trampoline emulation #1 */
27803+ unsigned short mov1, mov2, jmp1;
27804+ unsigned char jmp2;
27805+ unsigned int addr1;
27806+ unsigned long addr2;
27807+
27808+ err = get_user(mov1, (unsigned short __user *)regs->ip);
27809+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
27810+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
27811+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
27812+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
27813+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
27814+
27815+ if (err)
27816+ break;
27817+
27818+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
27819+ regs->r11 = addr1;
27820+ regs->r10 = addr2;
27821+ regs->ip = addr1;
27822+ return 2;
27823+ }
27824+ } while (0);
27825+
27826+ do { /* PaX: gcc trampoline emulation #2 */
27827+ unsigned short mov1, mov2, jmp1;
27828+ unsigned char jmp2;
27829+ unsigned long addr1, addr2;
27830+
27831+ err = get_user(mov1, (unsigned short __user *)regs->ip);
27832+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
27833+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
27834+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
27835+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
27836+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
27837+
27838+ if (err)
27839+ break;
27840+
27841+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
27842+ regs->r11 = addr1;
27843+ regs->r10 = addr2;
27844+ regs->ip = addr1;
27845+ return 2;
27846+ }
27847+ } while (0);
27848+
27849+ return 1; /* PaX in action */
27850+}
27851+#endif
27852+
27853+/*
27854+ * PaX: decide what to do with offenders (regs->ip = fault address)
27855+ *
27856+ * returns 1 when task should be killed
27857+ * 2 when gcc trampoline was detected
27858+ */
27859+static int pax_handle_fetch_fault(struct pt_regs *regs)
27860+{
27861+ if (v8086_mode(regs))
27862+ return 1;
27863+
27864+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
27865+ return 1;
27866+
27867+#ifdef CONFIG_X86_32
27868+ return pax_handle_fetch_fault_32(regs);
27869+#else
27870+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
27871+ return pax_handle_fetch_fault_32(regs);
27872+ else
27873+ return pax_handle_fetch_fault_64(regs);
27874+#endif
27875+}
27876+#endif
27877+
27878+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
27879+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
27880+{
27881+ long i;
27882+
27883+ printk(KERN_ERR "PAX: bytes at PC: ");
27884+ for (i = 0; i < 20; i++) {
27885+ unsigned char c;
27886+ if (get_user(c, (unsigned char __force_user *)pc+i))
27887+ printk(KERN_CONT "?? ");
27888+ else
27889+ printk(KERN_CONT "%02x ", c);
27890+ }
27891+ printk("\n");
27892+
27893+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
27894+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
27895+ unsigned long c;
27896+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
27897+#ifdef CONFIG_X86_32
27898+ printk(KERN_CONT "???????? ");
27899+#else
27900+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
27901+ printk(KERN_CONT "???????? ???????? ");
27902+ else
27903+ printk(KERN_CONT "???????????????? ");
27904+#endif
27905+ } else {
27906+#ifdef CONFIG_X86_64
27907+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
27908+ printk(KERN_CONT "%08x ", (unsigned int)c);
27909+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
27910+ } else
27911+#endif
27912+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
27913+ }
27914+ }
27915+ printk("\n");
27916+}
27917+#endif
27918+
27919+/**
27920+ * probe_kernel_write(): safely attempt to write to a location
27921+ * @dst: address to write to
27922+ * @src: pointer to the data that shall be written
27923+ * @size: size of the data chunk
27924+ *
27925+ * Safely write to address @dst from the buffer at @src. If a kernel fault
27926+ * happens, handle that and return -EFAULT.
27927+ */
27928+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
27929+{
27930+ long ret;
27931+ mm_segment_t old_fs = get_fs();
27932+
27933+ set_fs(KERNEL_DS);
27934+ pagefault_disable();
27935+ pax_open_kernel();
27936+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
27937+ pax_close_kernel();
27938+ pagefault_enable();
27939+ set_fs(old_fs);
27940+
27941+ return ret ? -EFAULT : 0;
27942+}
27943diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
27944index dd74e46..7d26398 100644
27945--- a/arch/x86/mm/gup.c
27946+++ b/arch/x86/mm/gup.c
27947@@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
27948 addr = start;
27949 len = (unsigned long) nr_pages << PAGE_SHIFT;
27950 end = start + len;
27951- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
27952+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
27953 (void __user *)start, len)))
27954 return 0;
27955
27956diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
27957index 252b8f5..4dcfdc1 100644
27958--- a/arch/x86/mm/highmem_32.c
27959+++ b/arch/x86/mm/highmem_32.c
27960@@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
27961 idx = type + KM_TYPE_NR*smp_processor_id();
27962 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
27963 BUG_ON(!pte_none(*(kmap_pte-idx)));
27964+
27965+ pax_open_kernel();
27966 set_pte(kmap_pte-idx, mk_pte(page, prot));
27967+ pax_close_kernel();
27968+
27969 arch_flush_lazy_mmu_mode();
27970
27971 return (void *)vaddr;
27972diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
27973index ae1aa71..d9bea75 100644
27974--- a/arch/x86/mm/hugetlbpage.c
27975+++ b/arch/x86/mm/hugetlbpage.c
27976@@ -271,23 +271,30 @@ follow_huge_pud(struct mm_struct *mm, unsigned long address,
27977 #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
27978 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
27979 unsigned long addr, unsigned long len,
27980- unsigned long pgoff, unsigned long flags)
27981+ unsigned long pgoff, unsigned long flags, unsigned long offset)
27982 {
27983 struct hstate *h = hstate_file(file);
27984 struct vm_unmapped_area_info info;
27985-
27986+
27987 info.flags = 0;
27988 info.length = len;
27989 info.low_limit = TASK_UNMAPPED_BASE;
27990+
27991+#ifdef CONFIG_PAX_RANDMMAP
27992+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
27993+ info.low_limit += current->mm->delta_mmap;
27994+#endif
27995+
27996 info.high_limit = TASK_SIZE;
27997 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
27998 info.align_offset = 0;
27999+ info.threadstack_offset = offset;
28000 return vm_unmapped_area(&info);
28001 }
28002
28003 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
28004 unsigned long addr0, unsigned long len,
28005- unsigned long pgoff, unsigned long flags)
28006+ unsigned long pgoff, unsigned long flags, unsigned long offset)
28007 {
28008 struct hstate *h = hstate_file(file);
28009 struct vm_unmapped_area_info info;
28010@@ -299,6 +306,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
28011 info.high_limit = current->mm->mmap_base;
28012 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
28013 info.align_offset = 0;
28014+ info.threadstack_offset = offset;
28015 addr = vm_unmapped_area(&info);
28016
28017 /*
28018@@ -311,6 +319,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
28019 VM_BUG_ON(addr != -ENOMEM);
28020 info.flags = 0;
28021 info.low_limit = TASK_UNMAPPED_BASE;
28022+
28023+#ifdef CONFIG_PAX_RANDMMAP
28024+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
28025+ info.low_limit += current->mm->delta_mmap;
28026+#endif
28027+
28028 info.high_limit = TASK_SIZE;
28029 addr = vm_unmapped_area(&info);
28030 }
28031@@ -325,10 +339,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
28032 struct hstate *h = hstate_file(file);
28033 struct mm_struct *mm = current->mm;
28034 struct vm_area_struct *vma;
28035+ unsigned long pax_task_size = TASK_SIZE;
28036+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
28037
28038 if (len & ~huge_page_mask(h))
28039 return -EINVAL;
28040- if (len > TASK_SIZE)
28041+
28042+#ifdef CONFIG_PAX_SEGMEXEC
28043+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
28044+ pax_task_size = SEGMEXEC_TASK_SIZE;
28045+#endif
28046+
28047+ pax_task_size -= PAGE_SIZE;
28048+
28049+ if (len > pax_task_size)
28050 return -ENOMEM;
28051
28052 if (flags & MAP_FIXED) {
28053@@ -337,19 +361,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
28054 return addr;
28055 }
28056
28057+#ifdef CONFIG_PAX_RANDMMAP
28058+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
28059+#endif
28060+
28061 if (addr) {
28062 addr = ALIGN(addr, huge_page_size(h));
28063 vma = find_vma(mm, addr);
28064- if (TASK_SIZE - len >= addr &&
28065- (!vma || addr + len <= vma->vm_start))
28066+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
28067 return addr;
28068 }
28069 if (mm->get_unmapped_area == arch_get_unmapped_area)
28070 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
28071- pgoff, flags);
28072+ pgoff, flags, offset);
28073 else
28074 return hugetlb_get_unmapped_area_topdown(file, addr, len,
28075- pgoff, flags);
28076+ pgoff, flags, offset);
28077 }
28078
28079 #endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
28080diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
28081index 1f34e92..d252637 100644
28082--- a/arch/x86/mm/init.c
28083+++ b/arch/x86/mm/init.c
28084@@ -4,6 +4,7 @@
28085 #include <linux/swap.h>
28086 #include <linux/memblock.h>
28087 #include <linux/bootmem.h> /* for max_low_pfn */
28088+#include <linux/tboot.h>
28089
28090 #include <asm/cacheflush.h>
28091 #include <asm/e820.h>
28092@@ -17,6 +18,8 @@
28093 #include <asm/proto.h>
28094 #include <asm/dma.h> /* for MAX_DMA_PFN */
28095 #include <asm/microcode.h>
28096+#include <asm/desc.h>
28097+#include <asm/bios_ebda.h>
28098
28099 #include "mm_internal.h"
28100
28101@@ -465,7 +468,15 @@ void __init init_mem_mapping(void)
28102 early_ioremap_page_table_range_init();
28103 #endif
28104
28105+#ifdef CONFIG_PAX_PER_CPU_PGD
28106+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
28107+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
28108+ KERNEL_PGD_PTRS);
28109+ load_cr3(get_cpu_pgd(0));
28110+#else
28111 load_cr3(swapper_pg_dir);
28112+#endif
28113+
28114 __flush_tlb_all();
28115
28116 early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
28117@@ -481,10 +492,40 @@ void __init init_mem_mapping(void)
28118 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
28119 * mmio resources as well as potential bios/acpi data regions.
28120 */
28121+
28122+#ifdef CONFIG_GRKERNSEC_KMEM
28123+static unsigned int ebda_start __read_only;
28124+static unsigned int ebda_end __read_only;
28125+#endif
28126+
28127 int devmem_is_allowed(unsigned long pagenr)
28128 {
28129- if (pagenr < 256)
28130+#ifdef CONFIG_GRKERNSEC_KMEM
28131+ /* allow BDA */
28132+ if (!pagenr)
28133 return 1;
28134+ /* allow EBDA */
28135+ if (pagenr >= ebda_start && pagenr < ebda_end)
28136+ return 1;
28137+ /* if tboot is in use, allow access to its hardcoded serial log range */
28138+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
28139+ return 1;
28140+#else
28141+ if (!pagenr)
28142+ return 1;
28143+#ifdef CONFIG_VM86
28144+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
28145+ return 1;
28146+#endif
28147+#endif
28148+
28149+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
28150+ return 1;
28151+#ifdef CONFIG_GRKERNSEC_KMEM
28152+ /* throw out everything else below 1MB */
28153+ if (pagenr <= 256)
28154+ return 0;
28155+#endif
28156 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
28157 return 0;
28158 if (!page_is_ram(pagenr))
28159@@ -538,8 +579,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
28160 #endif
28161 }
28162
28163+#ifdef CONFIG_GRKERNSEC_KMEM
28164+static inline void gr_init_ebda(void)
28165+{
28166+ unsigned int ebda_addr;
28167+ unsigned int ebda_size = 0;
28168+
28169+ ebda_addr = get_bios_ebda();
28170+ if (ebda_addr) {
28171+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
28172+ ebda_size <<= 10;
28173+ }
28174+ if (ebda_addr && ebda_size) {
28175+ ebda_start = ebda_addr >> PAGE_SHIFT;
28176+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
28177+ } else {
28178+ ebda_start = 0x9f000 >> PAGE_SHIFT;
28179+ ebda_end = 0xa0000 >> PAGE_SHIFT;
28180+ }
28181+}
28182+#else
28183+static inline void gr_init_ebda(void) { }
28184+#endif
28185+
28186 void free_initmem(void)
28187 {
28188+#ifdef CONFIG_PAX_KERNEXEC
28189+#ifdef CONFIG_X86_32
28190+ /* PaX: limit KERNEL_CS to actual size */
28191+ unsigned long addr, limit;
28192+ struct desc_struct d;
28193+ int cpu;
28194+#else
28195+ pgd_t *pgd;
28196+ pud_t *pud;
28197+ pmd_t *pmd;
28198+ unsigned long addr, end;
28199+#endif
28200+#endif
28201+
28202+ gr_init_ebda();
28203+
28204+#ifdef CONFIG_PAX_KERNEXEC
28205+#ifdef CONFIG_X86_32
28206+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
28207+ limit = (limit - 1UL) >> PAGE_SHIFT;
28208+
28209+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
28210+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
28211+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
28212+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
28213+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
28214+ }
28215+
28216+ /* PaX: make KERNEL_CS read-only */
28217+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
28218+ if (!paravirt_enabled())
28219+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
28220+/*
28221+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
28222+ pgd = pgd_offset_k(addr);
28223+ pud = pud_offset(pgd, addr);
28224+ pmd = pmd_offset(pud, addr);
28225+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
28226+ }
28227+*/
28228+#ifdef CONFIG_X86_PAE
28229+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
28230+/*
28231+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
28232+ pgd = pgd_offset_k(addr);
28233+ pud = pud_offset(pgd, addr);
28234+ pmd = pmd_offset(pud, addr);
28235+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
28236+ }
28237+*/
28238+#endif
28239+
28240+#ifdef CONFIG_MODULES
28241+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
28242+#endif
28243+
28244+#else
28245+ /* PaX: make kernel code/rodata read-only, rest non-executable */
28246+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
28247+ pgd = pgd_offset_k(addr);
28248+ pud = pud_offset(pgd, addr);
28249+ pmd = pmd_offset(pud, addr);
28250+ if (!pmd_present(*pmd))
28251+ continue;
28252+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
28253+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
28254+ else
28255+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
28256+ }
28257+
28258+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
28259+ end = addr + KERNEL_IMAGE_SIZE;
28260+ for (; addr < end; addr += PMD_SIZE) {
28261+ pgd = pgd_offset_k(addr);
28262+ pud = pud_offset(pgd, addr);
28263+ pmd = pmd_offset(pud, addr);
28264+ if (!pmd_present(*pmd))
28265+ continue;
28266+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
28267+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
28268+ }
28269+#endif
28270+
28271+ flush_tlb_all();
28272+#endif
28273+
28274 free_init_pages("unused kernel memory",
28275 (unsigned long)(&__init_begin),
28276 (unsigned long)(&__init_end));
28277diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
28278index 3ac7e31..89611b7 100644
28279--- a/arch/x86/mm/init_32.c
28280+++ b/arch/x86/mm/init_32.c
28281@@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
28282 bool __read_mostly __vmalloc_start_set = false;
28283
28284 /*
28285- * Creates a middle page table and puts a pointer to it in the
28286- * given global directory entry. This only returns the gd entry
28287- * in non-PAE compilation mode, since the middle layer is folded.
28288- */
28289-static pmd_t * __init one_md_table_init(pgd_t *pgd)
28290-{
28291- pud_t *pud;
28292- pmd_t *pmd_table;
28293-
28294-#ifdef CONFIG_X86_PAE
28295- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
28296- pmd_table = (pmd_t *)alloc_low_page();
28297- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
28298- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
28299- pud = pud_offset(pgd, 0);
28300- BUG_ON(pmd_table != pmd_offset(pud, 0));
28301-
28302- return pmd_table;
28303- }
28304-#endif
28305- pud = pud_offset(pgd, 0);
28306- pmd_table = pmd_offset(pud, 0);
28307-
28308- return pmd_table;
28309-}
28310-
28311-/*
28312 * Create a page table and place a pointer to it in a middle page
28313 * directory entry:
28314 */
28315@@ -98,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
28316 pte_t *page_table = (pte_t *)alloc_low_page();
28317
28318 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
28319+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
28320+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
28321+#else
28322 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
28323+#endif
28324 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
28325 }
28326
28327 return pte_offset_kernel(pmd, 0);
28328 }
28329
28330+static pmd_t * __init one_md_table_init(pgd_t *pgd)
28331+{
28332+ pud_t *pud;
28333+ pmd_t *pmd_table;
28334+
28335+ pud = pud_offset(pgd, 0);
28336+ pmd_table = pmd_offset(pud, 0);
28337+
28338+ return pmd_table;
28339+}
28340+
28341 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
28342 {
28343 int pgd_idx = pgd_index(vaddr);
28344@@ -208,6 +196,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
28345 int pgd_idx, pmd_idx;
28346 unsigned long vaddr;
28347 pgd_t *pgd;
28348+ pud_t *pud;
28349 pmd_t *pmd;
28350 pte_t *pte = NULL;
28351 unsigned long count = page_table_range_init_count(start, end);
28352@@ -222,8 +211,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
28353 pgd = pgd_base + pgd_idx;
28354
28355 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
28356- pmd = one_md_table_init(pgd);
28357- pmd = pmd + pmd_index(vaddr);
28358+ pud = pud_offset(pgd, vaddr);
28359+ pmd = pmd_offset(pud, vaddr);
28360+
28361+#ifdef CONFIG_X86_PAE
28362+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
28363+#endif
28364+
28365 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
28366 pmd++, pmd_idx++) {
28367 pte = page_table_kmap_check(one_page_table_init(pmd),
28368@@ -235,11 +229,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
28369 }
28370 }
28371
28372-static inline int is_kernel_text(unsigned long addr)
28373+static inline int is_kernel_text(unsigned long start, unsigned long end)
28374 {
28375- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
28376- return 1;
28377- return 0;
28378+ if ((start > ktla_ktva((unsigned long)_etext) ||
28379+ end <= ktla_ktva((unsigned long)_stext)) &&
28380+ (start > ktla_ktva((unsigned long)_einittext) ||
28381+ end <= ktla_ktva((unsigned long)_sinittext)) &&
28382+
28383+#ifdef CONFIG_ACPI_SLEEP
28384+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
28385+#endif
28386+
28387+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
28388+ return 0;
28389+ return 1;
28390 }
28391
28392 /*
28393@@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned long start,
28394 unsigned long last_map_addr = end;
28395 unsigned long start_pfn, end_pfn;
28396 pgd_t *pgd_base = swapper_pg_dir;
28397- int pgd_idx, pmd_idx, pte_ofs;
28398+ unsigned int pgd_idx, pmd_idx, pte_ofs;
28399 unsigned long pfn;
28400 pgd_t *pgd;
28401+ pud_t *pud;
28402 pmd_t *pmd;
28403 pte_t *pte;
28404 unsigned pages_2m, pages_4k;
28405@@ -291,8 +295,13 @@ repeat:
28406 pfn = start_pfn;
28407 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
28408 pgd = pgd_base + pgd_idx;
28409- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
28410- pmd = one_md_table_init(pgd);
28411+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
28412+ pud = pud_offset(pgd, 0);
28413+ pmd = pmd_offset(pud, 0);
28414+
28415+#ifdef CONFIG_X86_PAE
28416+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
28417+#endif
28418
28419 if (pfn >= end_pfn)
28420 continue;
28421@@ -304,14 +313,13 @@ repeat:
28422 #endif
28423 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
28424 pmd++, pmd_idx++) {
28425- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
28426+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
28427
28428 /*
28429 * Map with big pages if possible, otherwise
28430 * create normal page tables:
28431 */
28432 if (use_pse) {
28433- unsigned int addr2;
28434 pgprot_t prot = PAGE_KERNEL_LARGE;
28435 /*
28436 * first pass will use the same initial
28437@@ -322,11 +330,7 @@ repeat:
28438 _PAGE_PSE);
28439
28440 pfn &= PMD_MASK >> PAGE_SHIFT;
28441- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
28442- PAGE_OFFSET + PAGE_SIZE-1;
28443-
28444- if (is_kernel_text(addr) ||
28445- is_kernel_text(addr2))
28446+ if (is_kernel_text(address, address + PMD_SIZE))
28447 prot = PAGE_KERNEL_LARGE_EXEC;
28448
28449 pages_2m++;
28450@@ -343,7 +347,7 @@ repeat:
28451 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
28452 pte += pte_ofs;
28453 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
28454- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
28455+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
28456 pgprot_t prot = PAGE_KERNEL;
28457 /*
28458 * first pass will use the same initial
28459@@ -351,7 +355,7 @@ repeat:
28460 */
28461 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
28462
28463- if (is_kernel_text(addr))
28464+ if (is_kernel_text(address, address + PAGE_SIZE))
28465 prot = PAGE_KERNEL_EXEC;
28466
28467 pages_4k++;
28468@@ -474,7 +478,7 @@ void __init native_pagetable_init(void)
28469
28470 pud = pud_offset(pgd, va);
28471 pmd = pmd_offset(pud, va);
28472- if (!pmd_present(*pmd))
28473+ if (!pmd_present(*pmd)) // PAX TODO || pmd_large(*pmd))
28474 break;
28475
28476 /* should not be large page here */
28477@@ -532,12 +536,10 @@ void __init early_ioremap_page_table_range_init(void)
28478
28479 static void __init pagetable_init(void)
28480 {
28481- pgd_t *pgd_base = swapper_pg_dir;
28482-
28483- permanent_kmaps_init(pgd_base);
28484+ permanent_kmaps_init(swapper_pg_dir);
28485 }
28486
28487-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
28488+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
28489 EXPORT_SYMBOL_GPL(__supported_pte_mask);
28490
28491 /* user-defined highmem size */
28492@@ -772,7 +774,7 @@ void __init mem_init(void)
28493 after_bootmem = 1;
28494
28495 codesize = (unsigned long) &_etext - (unsigned long) &_text;
28496- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
28497+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
28498 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
28499
28500 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
28501@@ -813,10 +815,10 @@ void __init mem_init(void)
28502 ((unsigned long)&__init_end -
28503 (unsigned long)&__init_begin) >> 10,
28504
28505- (unsigned long)&_etext, (unsigned long)&_edata,
28506- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
28507+ (unsigned long)&_sdata, (unsigned long)&_edata,
28508+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
28509
28510- (unsigned long)&_text, (unsigned long)&_etext,
28511+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
28512 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
28513
28514 /*
28515@@ -906,6 +908,7 @@ void set_kernel_text_rw(void)
28516 if (!kernel_set_to_readonly)
28517 return;
28518
28519+ start = ktla_ktva(start);
28520 pr_debug("Set kernel text: %lx - %lx for read write\n",
28521 start, start+size);
28522
28523@@ -920,6 +923,7 @@ void set_kernel_text_ro(void)
28524 if (!kernel_set_to_readonly)
28525 return;
28526
28527+ start = ktla_ktva(start);
28528 pr_debug("Set kernel text: %lx - %lx for read only\n",
28529 start, start+size);
28530
28531@@ -948,6 +952,7 @@ void mark_rodata_ro(void)
28532 unsigned long start = PFN_ALIGN(_text);
28533 unsigned long size = PFN_ALIGN(_etext) - start;
28534
28535+ start = ktla_ktva(start);
28536 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
28537 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
28538 size >> 10);
28539diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
28540index bb00c46..f31d2f0 100644
28541--- a/arch/x86/mm/init_64.c
28542+++ b/arch/x86/mm/init_64.c
28543@@ -151,7 +151,7 @@ early_param("gbpages", parse_direct_gbpages_on);
28544 * around without checking the pgd every time.
28545 */
28546
28547-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
28548+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
28549 EXPORT_SYMBOL_GPL(__supported_pte_mask);
28550
28551 int force_personality32;
28552@@ -184,12 +184,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
28553
28554 for (address = start; address <= end; address += PGDIR_SIZE) {
28555 const pgd_t *pgd_ref = pgd_offset_k(address);
28556+
28557+#ifdef CONFIG_PAX_PER_CPU_PGD
28558+ unsigned long cpu;
28559+#else
28560 struct page *page;
28561+#endif
28562
28563 if (pgd_none(*pgd_ref))
28564 continue;
28565
28566 spin_lock(&pgd_lock);
28567+
28568+#ifdef CONFIG_PAX_PER_CPU_PGD
28569+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
28570+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
28571+#else
28572 list_for_each_entry(page, &pgd_list, lru) {
28573 pgd_t *pgd;
28574 spinlock_t *pgt_lock;
28575@@ -198,6 +208,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
28576 /* the pgt_lock only for Xen */
28577 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
28578 spin_lock(pgt_lock);
28579+#endif
28580
28581 if (pgd_none(*pgd))
28582 set_pgd(pgd, *pgd_ref);
28583@@ -205,7 +216,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
28584 BUG_ON(pgd_page_vaddr(*pgd)
28585 != pgd_page_vaddr(*pgd_ref));
28586
28587+#ifndef CONFIG_PAX_PER_CPU_PGD
28588 spin_unlock(pgt_lock);
28589+#endif
28590+
28591 }
28592 spin_unlock(&pgd_lock);
28593 }
28594@@ -238,7 +252,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
28595 {
28596 if (pgd_none(*pgd)) {
28597 pud_t *pud = (pud_t *)spp_getpage();
28598- pgd_populate(&init_mm, pgd, pud);
28599+ pgd_populate_kernel(&init_mm, pgd, pud);
28600 if (pud != pud_offset(pgd, 0))
28601 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
28602 pud, pud_offset(pgd, 0));
28603@@ -250,7 +264,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
28604 {
28605 if (pud_none(*pud)) {
28606 pmd_t *pmd = (pmd_t *) spp_getpage();
28607- pud_populate(&init_mm, pud, pmd);
28608+ pud_populate_kernel(&init_mm, pud, pmd);
28609 if (pmd != pmd_offset(pud, 0))
28610 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
28611 pmd, pmd_offset(pud, 0));
28612@@ -279,7 +293,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
28613 pmd = fill_pmd(pud, vaddr);
28614 pte = fill_pte(pmd, vaddr);
28615
28616+ pax_open_kernel();
28617 set_pte(pte, new_pte);
28618+ pax_close_kernel();
28619
28620 /*
28621 * It's enough to flush this one mapping.
28622@@ -338,14 +354,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
28623 pgd = pgd_offset_k((unsigned long)__va(phys));
28624 if (pgd_none(*pgd)) {
28625 pud = (pud_t *) spp_getpage();
28626- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
28627- _PAGE_USER));
28628+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
28629 }
28630 pud = pud_offset(pgd, (unsigned long)__va(phys));
28631 if (pud_none(*pud)) {
28632 pmd = (pmd_t *) spp_getpage();
28633- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
28634- _PAGE_USER));
28635+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
28636 }
28637 pmd = pmd_offset(pud, phys);
28638 BUG_ON(!pmd_none(*pmd));
28639@@ -586,7 +600,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
28640 prot);
28641
28642 spin_lock(&init_mm.page_table_lock);
28643- pud_populate(&init_mm, pud, pmd);
28644+ pud_populate_kernel(&init_mm, pud, pmd);
28645 spin_unlock(&init_mm.page_table_lock);
28646 }
28647 __flush_tlb_all();
28648@@ -627,7 +641,7 @@ kernel_physical_mapping_init(unsigned long start,
28649 page_size_mask);
28650
28651 spin_lock(&init_mm.page_table_lock);
28652- pgd_populate(&init_mm, pgd, pud);
28653+ pgd_populate_kernel(&init_mm, pgd, pud);
28654 spin_unlock(&init_mm.page_table_lock);
28655 pgd_changed = true;
28656 }
28657@@ -1221,8 +1235,8 @@ int kern_addr_valid(unsigned long addr)
28658 static struct vm_area_struct gate_vma = {
28659 .vm_start = VSYSCALL_START,
28660 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
28661- .vm_page_prot = PAGE_READONLY_EXEC,
28662- .vm_flags = VM_READ | VM_EXEC
28663+ .vm_page_prot = PAGE_READONLY,
28664+ .vm_flags = VM_READ
28665 };
28666
28667 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
28668@@ -1256,7 +1270,7 @@ int in_gate_area_no_mm(unsigned long addr)
28669
28670 const char *arch_vma_name(struct vm_area_struct *vma)
28671 {
28672- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
28673+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
28674 return "[vdso]";
28675 if (vma == &gate_vma)
28676 return "[vsyscall]";
28677diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
28678index 7b179b4..6bd17777 100644
28679--- a/arch/x86/mm/iomap_32.c
28680+++ b/arch/x86/mm/iomap_32.c
28681@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
28682 type = kmap_atomic_idx_push();
28683 idx = type + KM_TYPE_NR * smp_processor_id();
28684 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
28685+
28686+ pax_open_kernel();
28687 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
28688+ pax_close_kernel();
28689+
28690 arch_flush_lazy_mmu_mode();
28691
28692 return (void *)vaddr;
28693diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
28694index 9a1e658..da003f3 100644
28695--- a/arch/x86/mm/ioremap.c
28696+++ b/arch/x86/mm/ioremap.c
28697@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
28698 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
28699 int is_ram = page_is_ram(pfn);
28700
28701- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
28702+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
28703 return NULL;
28704 WARN_ON_ONCE(is_ram);
28705 }
28706@@ -256,7 +256,7 @@ EXPORT_SYMBOL(ioremap_prot);
28707 *
28708 * Caller must ensure there is only one unmapping for the same pointer.
28709 */
28710-void iounmap(volatile void __iomem *addr)
28711+void iounmap(const volatile void __iomem *addr)
28712 {
28713 struct vm_struct *p, *o;
28714
28715@@ -310,6 +310,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
28716
28717 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
28718 if (page_is_ram(start >> PAGE_SHIFT))
28719+#ifdef CONFIG_HIGHMEM
28720+ if ((start >> PAGE_SHIFT) < max_low_pfn)
28721+#endif
28722 return __va(phys);
28723
28724 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
28725@@ -322,6 +325,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
28726 void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
28727 {
28728 if (page_is_ram(phys >> PAGE_SHIFT))
28729+#ifdef CONFIG_HIGHMEM
28730+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
28731+#endif
28732 return;
28733
28734 iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
28735@@ -339,7 +345,7 @@ static int __init early_ioremap_debug_setup(char *str)
28736 early_param("early_ioremap_debug", early_ioremap_debug_setup);
28737
28738 static __initdata int after_paging_init;
28739-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
28740+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
28741
28742 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
28743 {
28744@@ -376,8 +382,7 @@ void __init early_ioremap_init(void)
28745 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
28746
28747 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
28748- memset(bm_pte, 0, sizeof(bm_pte));
28749- pmd_populate_kernel(&init_mm, pmd, bm_pte);
28750+ pmd_populate_user(&init_mm, pmd, bm_pte);
28751
28752 /*
28753 * The boot-ioremap range spans multiple pmds, for which
28754diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
28755index d87dd6d..bf3fa66 100644
28756--- a/arch/x86/mm/kmemcheck/kmemcheck.c
28757+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
28758@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
28759 * memory (e.g. tracked pages)? For now, we need this to avoid
28760 * invoking kmemcheck for PnP BIOS calls.
28761 */
28762- if (regs->flags & X86_VM_MASK)
28763+ if (v8086_mode(regs))
28764 return false;
28765- if (regs->cs != __KERNEL_CS)
28766+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
28767 return false;
28768
28769 pte = kmemcheck_pte_lookup(address);
28770diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
28771index 845df68..1d8d29f 100644
28772--- a/arch/x86/mm/mmap.c
28773+++ b/arch/x86/mm/mmap.c
28774@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
28775 * Leave an at least ~128 MB hole with possible stack randomization.
28776 */
28777 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
28778-#define MAX_GAP (TASK_SIZE/6*5)
28779+#define MAX_GAP (pax_task_size/6*5)
28780
28781 static int mmap_is_legacy(void)
28782 {
28783@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
28784 return rnd << PAGE_SHIFT;
28785 }
28786
28787-static unsigned long mmap_base(void)
28788+static unsigned long mmap_base(struct mm_struct *mm)
28789 {
28790 unsigned long gap = rlimit(RLIMIT_STACK);
28791+ unsigned long pax_task_size = TASK_SIZE;
28792+
28793+#ifdef CONFIG_PAX_SEGMEXEC
28794+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
28795+ pax_task_size = SEGMEXEC_TASK_SIZE;
28796+#endif
28797
28798 if (gap < MIN_GAP)
28799 gap = MIN_GAP;
28800 else if (gap > MAX_GAP)
28801 gap = MAX_GAP;
28802
28803- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
28804+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
28805 }
28806
28807 /*
28808 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
28809 * does, but not when emulating X86_32
28810 */
28811-static unsigned long mmap_legacy_base(void)
28812+static unsigned long mmap_legacy_base(struct mm_struct *mm)
28813 {
28814- if (mmap_is_ia32())
28815+ if (mmap_is_ia32()) {
28816+
28817+#ifdef CONFIG_PAX_SEGMEXEC
28818+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
28819+ return SEGMEXEC_TASK_UNMAPPED_BASE;
28820+ else
28821+#endif
28822+
28823 return TASK_UNMAPPED_BASE;
28824- else
28825+ } else
28826 return TASK_UNMAPPED_BASE + mmap_rnd();
28827 }
28828
28829@@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
28830 void arch_pick_mmap_layout(struct mm_struct *mm)
28831 {
28832 if (mmap_is_legacy()) {
28833- mm->mmap_base = mmap_legacy_base();
28834+ mm->mmap_base = mmap_legacy_base(mm);
28835+
28836+#ifdef CONFIG_PAX_RANDMMAP
28837+ if (mm->pax_flags & MF_PAX_RANDMMAP)
28838+ mm->mmap_base += mm->delta_mmap;
28839+#endif
28840+
28841 mm->get_unmapped_area = arch_get_unmapped_area;
28842 mm->unmap_area = arch_unmap_area;
28843 } else {
28844- mm->mmap_base = mmap_base();
28845+ mm->mmap_base = mmap_base(mm);
28846+
28847+#ifdef CONFIG_PAX_RANDMMAP
28848+ if (mm->pax_flags & MF_PAX_RANDMMAP)
28849+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
28850+#endif
28851+
28852 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
28853 mm->unmap_area = arch_unmap_area_topdown;
28854 }
28855diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
28856index dc0b727..f612039 100644
28857--- a/arch/x86/mm/mmio-mod.c
28858+++ b/arch/x86/mm/mmio-mod.c
28859@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
28860 break;
28861 default:
28862 {
28863- unsigned char *ip = (unsigned char *)instptr;
28864+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
28865 my_trace->opcode = MMIO_UNKNOWN_OP;
28866 my_trace->width = 0;
28867 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
28868@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
28869 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
28870 void __iomem *addr)
28871 {
28872- static atomic_t next_id;
28873+ static atomic_unchecked_t next_id;
28874 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
28875 /* These are page-unaligned. */
28876 struct mmiotrace_map map = {
28877@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
28878 .private = trace
28879 },
28880 .phys = offset,
28881- .id = atomic_inc_return(&next_id)
28882+ .id = atomic_inc_return_unchecked(&next_id)
28883 };
28884 map.map_id = trace->id;
28885
28886@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
28887 ioremap_trace_core(offset, size, addr);
28888 }
28889
28890-static void iounmap_trace_core(volatile void __iomem *addr)
28891+static void iounmap_trace_core(const volatile void __iomem *addr)
28892 {
28893 struct mmiotrace_map map = {
28894 .phys = 0,
28895@@ -328,7 +328,7 @@ not_enabled:
28896 }
28897 }
28898
28899-void mmiotrace_iounmap(volatile void __iomem *addr)
28900+void mmiotrace_iounmap(const volatile void __iomem *addr)
28901 {
28902 might_sleep();
28903 if (is_enabled()) /* recheck and proper locking in *_core() */
28904diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
28905index a71c4e2..301ae44 100644
28906--- a/arch/x86/mm/numa.c
28907+++ b/arch/x86/mm/numa.c
28908@@ -474,7 +474,7 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
28909 return true;
28910 }
28911
28912-static int __init numa_register_memblks(struct numa_meminfo *mi)
28913+static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
28914 {
28915 unsigned long uninitialized_var(pfn_align);
28916 int i, nid;
28917diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
28918index d0b1773..4c3327c 100644
28919--- a/arch/x86/mm/pageattr-test.c
28920+++ b/arch/x86/mm/pageattr-test.c
28921@@ -36,7 +36,7 @@ enum {
28922
28923 static int pte_testbit(pte_t pte)
28924 {
28925- return pte_flags(pte) & _PAGE_UNUSED1;
28926+ return pte_flags(pte) & _PAGE_CPA_TEST;
28927 }
28928
28929 struct split_state {
28930diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
28931index bb32480..aef8278 100644
28932--- a/arch/x86/mm/pageattr.c
28933+++ b/arch/x86/mm/pageattr.c
28934@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
28935 */
28936 #ifdef CONFIG_PCI_BIOS
28937 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
28938- pgprot_val(forbidden) |= _PAGE_NX;
28939+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
28940 #endif
28941
28942 /*
28943@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
28944 * Does not cover __inittext since that is gone later on. On
28945 * 64bit we do not enforce !NX on the low mapping
28946 */
28947- if (within(address, (unsigned long)_text, (unsigned long)_etext))
28948- pgprot_val(forbidden) |= _PAGE_NX;
28949+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
28950+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
28951
28952+#ifdef CONFIG_DEBUG_RODATA
28953 /*
28954 * The .rodata section needs to be read-only. Using the pfn
28955 * catches all aliases.
28956@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
28957 if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
28958 __pa_symbol(__end_rodata) >> PAGE_SHIFT))
28959 pgprot_val(forbidden) |= _PAGE_RW;
28960+#endif
28961
28962 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
28963 /*
28964@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
28965 }
28966 #endif
28967
28968+#ifdef CONFIG_PAX_KERNEXEC
28969+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
28970+ pgprot_val(forbidden) |= _PAGE_RW;
28971+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
28972+ }
28973+#endif
28974+
28975 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
28976
28977 return prot;
28978@@ -400,23 +409,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
28979 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
28980 {
28981 /* change init_mm */
28982+ pax_open_kernel();
28983 set_pte_atomic(kpte, pte);
28984+
28985 #ifdef CONFIG_X86_32
28986 if (!SHARED_KERNEL_PMD) {
28987+
28988+#ifdef CONFIG_PAX_PER_CPU_PGD
28989+ unsigned long cpu;
28990+#else
28991 struct page *page;
28992+#endif
28993
28994+#ifdef CONFIG_PAX_PER_CPU_PGD
28995+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
28996+ pgd_t *pgd = get_cpu_pgd(cpu);
28997+#else
28998 list_for_each_entry(page, &pgd_list, lru) {
28999- pgd_t *pgd;
29000+ pgd_t *pgd = (pgd_t *)page_address(page);
29001+#endif
29002+
29003 pud_t *pud;
29004 pmd_t *pmd;
29005
29006- pgd = (pgd_t *)page_address(page) + pgd_index(address);
29007+ pgd += pgd_index(address);
29008 pud = pud_offset(pgd, address);
29009 pmd = pmd_offset(pud, address);
29010 set_pte_atomic((pte_t *)pmd, pte);
29011 }
29012 }
29013 #endif
29014+ pax_close_kernel();
29015 }
29016
29017 static int
29018diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
29019index 6574388..87e9bef 100644
29020--- a/arch/x86/mm/pat.c
29021+++ b/arch/x86/mm/pat.c
29022@@ -376,7 +376,7 @@ int free_memtype(u64 start, u64 end)
29023
29024 if (!entry) {
29025 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
29026- current->comm, current->pid, start, end - 1);
29027+ current->comm, task_pid_nr(current), start, end - 1);
29028 return -EINVAL;
29029 }
29030
29031@@ -506,8 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29032
29033 while (cursor < to) {
29034 if (!devmem_is_allowed(pfn)) {
29035- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
29036- current->comm, from, to - 1);
29037+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
29038+ current->comm, from, to - 1, cursor);
29039 return 0;
29040 }
29041 cursor += PAGE_SIZE;
29042@@ -577,7 +577,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
29043 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
29044 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
29045 "for [mem %#010Lx-%#010Lx]\n",
29046- current->comm, current->pid,
29047+ current->comm, task_pid_nr(current),
29048 cattr_name(flags),
29049 base, (unsigned long long)(base + size-1));
29050 return -EINVAL;
29051@@ -612,7 +612,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
29052 flags = lookup_memtype(paddr);
29053 if (want_flags != flags) {
29054 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
29055- current->comm, current->pid,
29056+ current->comm, task_pid_nr(current),
29057 cattr_name(want_flags),
29058 (unsigned long long)paddr,
29059 (unsigned long long)(paddr + size - 1),
29060@@ -634,7 +634,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
29061 free_memtype(paddr, paddr + size);
29062 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
29063 " for [mem %#010Lx-%#010Lx], got %s\n",
29064- current->comm, current->pid,
29065+ current->comm, task_pid_nr(current),
29066 cattr_name(want_flags),
29067 (unsigned long long)paddr,
29068 (unsigned long long)(paddr + size - 1),
29069diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
29070index 415f6c4..d319983 100644
29071--- a/arch/x86/mm/pat_rbtree.c
29072+++ b/arch/x86/mm/pat_rbtree.c
29073@@ -160,7 +160,7 @@ success:
29074
29075 failure:
29076 printk(KERN_INFO "%s:%d conflicting memory types "
29077- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
29078+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start,
29079 end, cattr_name(found_type), cattr_name(match->type));
29080 return -EBUSY;
29081 }
29082diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
29083index 9f0614d..92ae64a 100644
29084--- a/arch/x86/mm/pf_in.c
29085+++ b/arch/x86/mm/pf_in.c
29086@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
29087 int i;
29088 enum reason_type rv = OTHERS;
29089
29090- p = (unsigned char *)ins_addr;
29091+ p = (unsigned char *)ktla_ktva(ins_addr);
29092 p += skip_prefix(p, &prf);
29093 p += get_opcode(p, &opcode);
29094
29095@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
29096 struct prefix_bits prf;
29097 int i;
29098
29099- p = (unsigned char *)ins_addr;
29100+ p = (unsigned char *)ktla_ktva(ins_addr);
29101 p += skip_prefix(p, &prf);
29102 p += get_opcode(p, &opcode);
29103
29104@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
29105 struct prefix_bits prf;
29106 int i;
29107
29108- p = (unsigned char *)ins_addr;
29109+ p = (unsigned char *)ktla_ktva(ins_addr);
29110 p += skip_prefix(p, &prf);
29111 p += get_opcode(p, &opcode);
29112
29113@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
29114 struct prefix_bits prf;
29115 int i;
29116
29117- p = (unsigned char *)ins_addr;
29118+ p = (unsigned char *)ktla_ktva(ins_addr);
29119 p += skip_prefix(p, &prf);
29120 p += get_opcode(p, &opcode);
29121 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
29122@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
29123 struct prefix_bits prf;
29124 int i;
29125
29126- p = (unsigned char *)ins_addr;
29127+ p = (unsigned char *)ktla_ktva(ins_addr);
29128 p += skip_prefix(p, &prf);
29129 p += get_opcode(p, &opcode);
29130 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
29131diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
29132index 17fda6a..489c74a 100644
29133--- a/arch/x86/mm/pgtable.c
29134+++ b/arch/x86/mm/pgtable.c
29135@@ -91,10 +91,64 @@ static inline void pgd_list_del(pgd_t *pgd)
29136 list_del(&page->lru);
29137 }
29138
29139-#define UNSHARED_PTRS_PER_PGD \
29140- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
29141+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29142+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
29143
29144+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
29145+{
29146+ unsigned int count = USER_PGD_PTRS;
29147
29148+ while (count--)
29149+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
29150+}
29151+#endif
29152+
29153+#ifdef CONFIG_PAX_PER_CPU_PGD
29154+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
29155+{
29156+ unsigned int count = USER_PGD_PTRS;
29157+
29158+ while (count--) {
29159+ pgd_t pgd;
29160+
29161+#ifdef CONFIG_X86_64
29162+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
29163+#else
29164+ pgd = *src++;
29165+#endif
29166+
29167+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29168+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
29169+#endif
29170+
29171+ *dst++ = pgd;
29172+ }
29173+
29174+}
29175+#endif
29176+
29177+#ifdef CONFIG_X86_64
29178+#define pxd_t pud_t
29179+#define pyd_t pgd_t
29180+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
29181+#define pxd_free(mm, pud) pud_free((mm), (pud))
29182+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
29183+#define pyd_offset(mm, address) pgd_offset((mm), (address))
29184+#define PYD_SIZE PGDIR_SIZE
29185+#else
29186+#define pxd_t pmd_t
29187+#define pyd_t pud_t
29188+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
29189+#define pxd_free(mm, pud) pmd_free((mm), (pud))
29190+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
29191+#define pyd_offset(mm, address) pud_offset((mm), (address))
29192+#define PYD_SIZE PUD_SIZE
29193+#endif
29194+
29195+#ifdef CONFIG_PAX_PER_CPU_PGD
29196+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
29197+static inline void pgd_dtor(pgd_t *pgd) {}
29198+#else
29199 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
29200 {
29201 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
29202@@ -135,6 +189,7 @@ static void pgd_dtor(pgd_t *pgd)
29203 pgd_list_del(pgd);
29204 spin_unlock(&pgd_lock);
29205 }
29206+#endif
29207
29208 /*
29209 * List of all pgd's needed for non-PAE so it can invalidate entries
29210@@ -147,7 +202,7 @@ static void pgd_dtor(pgd_t *pgd)
29211 * -- nyc
29212 */
29213
29214-#ifdef CONFIG_X86_PAE
29215+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
29216 /*
29217 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
29218 * updating the top-level pagetable entries to guarantee the
29219@@ -159,7 +214,7 @@ static void pgd_dtor(pgd_t *pgd)
29220 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
29221 * and initialize the kernel pmds here.
29222 */
29223-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
29224+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
29225
29226 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
29227 {
29228@@ -177,36 +232,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
29229 */
29230 flush_tlb_mm(mm);
29231 }
29232+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
29233+#define PREALLOCATED_PXDS USER_PGD_PTRS
29234 #else /* !CONFIG_X86_PAE */
29235
29236 /* No need to prepopulate any pagetable entries in non-PAE modes. */
29237-#define PREALLOCATED_PMDS 0
29238+#define PREALLOCATED_PXDS 0
29239
29240 #endif /* CONFIG_X86_PAE */
29241
29242-static void free_pmds(pmd_t *pmds[])
29243+static void free_pxds(pxd_t *pxds[])
29244 {
29245 int i;
29246
29247- for(i = 0; i < PREALLOCATED_PMDS; i++)
29248- if (pmds[i])
29249- free_page((unsigned long)pmds[i]);
29250+ for(i = 0; i < PREALLOCATED_PXDS; i++)
29251+ if (pxds[i])
29252+ free_page((unsigned long)pxds[i]);
29253 }
29254
29255-static int preallocate_pmds(pmd_t *pmds[])
29256+static int preallocate_pxds(pxd_t *pxds[])
29257 {
29258 int i;
29259 bool failed = false;
29260
29261- for(i = 0; i < PREALLOCATED_PMDS; i++) {
29262- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
29263- if (pmd == NULL)
29264+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
29265+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
29266+ if (pxd == NULL)
29267 failed = true;
29268- pmds[i] = pmd;
29269+ pxds[i] = pxd;
29270 }
29271
29272 if (failed) {
29273- free_pmds(pmds);
29274+ free_pxds(pxds);
29275 return -ENOMEM;
29276 }
29277
29278@@ -219,51 +276,55 @@ static int preallocate_pmds(pmd_t *pmds[])
29279 * preallocate which never got a corresponding vma will need to be
29280 * freed manually.
29281 */
29282-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
29283+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
29284 {
29285 int i;
29286
29287- for(i = 0; i < PREALLOCATED_PMDS; i++) {
29288+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
29289 pgd_t pgd = pgdp[i];
29290
29291 if (pgd_val(pgd) != 0) {
29292- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
29293+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
29294
29295- pgdp[i] = native_make_pgd(0);
29296+ set_pgd(pgdp + i, native_make_pgd(0));
29297
29298- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
29299- pmd_free(mm, pmd);
29300+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
29301+ pxd_free(mm, pxd);
29302 }
29303 }
29304 }
29305
29306-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
29307+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
29308 {
29309- pud_t *pud;
29310+ pyd_t *pyd;
29311 unsigned long addr;
29312 int i;
29313
29314- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
29315+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
29316 return;
29317
29318- pud = pud_offset(pgd, 0);
29319+#ifdef CONFIG_X86_64
29320+ pyd = pyd_offset(mm, 0L);
29321+#else
29322+ pyd = pyd_offset(pgd, 0L);
29323+#endif
29324
29325- for (addr = i = 0; i < PREALLOCATED_PMDS;
29326- i++, pud++, addr += PUD_SIZE) {
29327- pmd_t *pmd = pmds[i];
29328+ for (addr = i = 0; i < PREALLOCATED_PXDS;
29329+ i++, pyd++, addr += PYD_SIZE) {
29330+ pxd_t *pxd = pxds[i];
29331
29332 if (i >= KERNEL_PGD_BOUNDARY)
29333- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
29334- sizeof(pmd_t) * PTRS_PER_PMD);
29335+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
29336+ sizeof(pxd_t) * PTRS_PER_PMD);
29337
29338- pud_populate(mm, pud, pmd);
29339+ pyd_populate(mm, pyd, pxd);
29340 }
29341 }
29342
29343 pgd_t *pgd_alloc(struct mm_struct *mm)
29344 {
29345 pgd_t *pgd;
29346- pmd_t *pmds[PREALLOCATED_PMDS];
29347+ pxd_t *pxds[PREALLOCATED_PXDS];
29348
29349 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
29350
29351@@ -272,11 +333,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
29352
29353 mm->pgd = pgd;
29354
29355- if (preallocate_pmds(pmds) != 0)
29356+ if (preallocate_pxds(pxds) != 0)
29357 goto out_free_pgd;
29358
29359 if (paravirt_pgd_alloc(mm) != 0)
29360- goto out_free_pmds;
29361+ goto out_free_pxds;
29362
29363 /*
29364 * Make sure that pre-populating the pmds is atomic with
29365@@ -286,14 +347,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
29366 spin_lock(&pgd_lock);
29367
29368 pgd_ctor(mm, pgd);
29369- pgd_prepopulate_pmd(mm, pgd, pmds);
29370+ pgd_prepopulate_pxd(mm, pgd, pxds);
29371
29372 spin_unlock(&pgd_lock);
29373
29374 return pgd;
29375
29376-out_free_pmds:
29377- free_pmds(pmds);
29378+out_free_pxds:
29379+ free_pxds(pxds);
29380 out_free_pgd:
29381 free_page((unsigned long)pgd);
29382 out:
29383@@ -302,7 +363,7 @@ out:
29384
29385 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
29386 {
29387- pgd_mop_up_pmds(mm, pgd);
29388+ pgd_mop_up_pxds(mm, pgd);
29389 pgd_dtor(pgd);
29390 paravirt_pgd_free(mm, pgd);
29391 free_page((unsigned long)pgd);
29392diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
29393index a69bcb8..19068ab 100644
29394--- a/arch/x86/mm/pgtable_32.c
29395+++ b/arch/x86/mm/pgtable_32.c
29396@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
29397 return;
29398 }
29399 pte = pte_offset_kernel(pmd, vaddr);
29400+
29401+ pax_open_kernel();
29402 if (pte_val(pteval))
29403 set_pte_at(&init_mm, vaddr, pte, pteval);
29404 else
29405 pte_clear(&init_mm, vaddr, pte);
29406+ pax_close_kernel();
29407
29408 /*
29409 * It's enough to flush this one mapping.
29410diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
29411index e666cbb..61788c45 100644
29412--- a/arch/x86/mm/physaddr.c
29413+++ b/arch/x86/mm/physaddr.c
29414@@ -10,7 +10,7 @@
29415 #ifdef CONFIG_X86_64
29416
29417 #ifdef CONFIG_DEBUG_VIRTUAL
29418-unsigned long __phys_addr(unsigned long x)
29419+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
29420 {
29421 unsigned long y = x - __START_KERNEL_map;
29422
29423@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
29424 #else
29425
29426 #ifdef CONFIG_DEBUG_VIRTUAL
29427-unsigned long __phys_addr(unsigned long x)
29428+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
29429 {
29430 unsigned long phys_addr = x - PAGE_OFFSET;
29431 /* VMALLOC_* aren't constants */
29432diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
29433index 410531d..0f16030 100644
29434--- a/arch/x86/mm/setup_nx.c
29435+++ b/arch/x86/mm/setup_nx.c
29436@@ -5,8 +5,10 @@
29437 #include <asm/pgtable.h>
29438 #include <asm/proto.h>
29439
29440+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
29441 static int disable_nx __cpuinitdata;
29442
29443+#ifndef CONFIG_PAX_PAGEEXEC
29444 /*
29445 * noexec = on|off
29446 *
29447@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
29448 return 0;
29449 }
29450 early_param("noexec", noexec_setup);
29451+#endif
29452+
29453+#endif
29454
29455 void __cpuinit x86_configure_nx(void)
29456 {
29457+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
29458 if (cpu_has_nx && !disable_nx)
29459 __supported_pte_mask |= _PAGE_NX;
29460 else
29461+#endif
29462 __supported_pte_mask &= ~_PAGE_NX;
29463 }
29464
29465diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
29466index 282375f..e03a98f 100644
29467--- a/arch/x86/mm/tlb.c
29468+++ b/arch/x86/mm/tlb.c
29469@@ -48,7 +48,11 @@ void leave_mm(int cpu)
29470 BUG();
29471 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
29472 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
29473+
29474+#ifndef CONFIG_PAX_PER_CPU_PGD
29475 load_cr3(swapper_pg_dir);
29476+#endif
29477+
29478 }
29479 }
29480 EXPORT_SYMBOL_GPL(leave_mm);
29481diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
29482index 877b9a1..a8ecf42 100644
29483--- a/arch/x86/net/bpf_jit.S
29484+++ b/arch/x86/net/bpf_jit.S
29485@@ -9,6 +9,7 @@
29486 */
29487 #include <linux/linkage.h>
29488 #include <asm/dwarf2.h>
29489+#include <asm/alternative-asm.h>
29490
29491 /*
29492 * Calling convention :
29493@@ -35,6 +36,7 @@ sk_load_word_positive_offset:
29494 jle bpf_slow_path_word
29495 mov (SKBDATA,%rsi),%eax
29496 bswap %eax /* ntohl() */
29497+ pax_force_retaddr
29498 ret
29499
29500 sk_load_half:
29501@@ -52,6 +54,7 @@ sk_load_half_positive_offset:
29502 jle bpf_slow_path_half
29503 movzwl (SKBDATA,%rsi),%eax
29504 rol $8,%ax # ntohs()
29505+ pax_force_retaddr
29506 ret
29507
29508 sk_load_byte:
29509@@ -66,6 +69,7 @@ sk_load_byte_positive_offset:
29510 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
29511 jle bpf_slow_path_byte
29512 movzbl (SKBDATA,%rsi),%eax
29513+ pax_force_retaddr
29514 ret
29515
29516 /**
29517@@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset:
29518 movzbl (SKBDATA,%rsi),%ebx
29519 and $15,%bl
29520 shl $2,%bl
29521+ pax_force_retaddr
29522 ret
29523
29524 /* rsi contains offset and can be scratched */
29525@@ -109,6 +114,7 @@ bpf_slow_path_word:
29526 js bpf_error
29527 mov -12(%rbp),%eax
29528 bswap %eax
29529+ pax_force_retaddr
29530 ret
29531
29532 bpf_slow_path_half:
29533@@ -117,12 +123,14 @@ bpf_slow_path_half:
29534 mov -12(%rbp),%ax
29535 rol $8,%ax
29536 movzwl %ax,%eax
29537+ pax_force_retaddr
29538 ret
29539
29540 bpf_slow_path_byte:
29541 bpf_slow_path_common(1)
29542 js bpf_error
29543 movzbl -12(%rbp),%eax
29544+ pax_force_retaddr
29545 ret
29546
29547 bpf_slow_path_byte_msh:
29548@@ -133,6 +141,7 @@ bpf_slow_path_byte_msh:
29549 and $15,%al
29550 shl $2,%al
29551 xchg %eax,%ebx
29552+ pax_force_retaddr
29553 ret
29554
29555 #define sk_negative_common(SIZE) \
29556@@ -157,6 +166,7 @@ sk_load_word_negative_offset:
29557 sk_negative_common(4)
29558 mov (%rax), %eax
29559 bswap %eax
29560+ pax_force_retaddr
29561 ret
29562
29563 bpf_slow_path_half_neg:
29564@@ -168,6 +178,7 @@ sk_load_half_negative_offset:
29565 mov (%rax),%ax
29566 rol $8,%ax
29567 movzwl %ax,%eax
29568+ pax_force_retaddr
29569 ret
29570
29571 bpf_slow_path_byte_neg:
29572@@ -177,6 +188,7 @@ sk_load_byte_negative_offset:
29573 .globl sk_load_byte_negative_offset
29574 sk_negative_common(1)
29575 movzbl (%rax), %eax
29576+ pax_force_retaddr
29577 ret
29578
29579 bpf_slow_path_byte_msh_neg:
29580@@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset:
29581 and $15,%al
29582 shl $2,%al
29583 xchg %eax,%ebx
29584+ pax_force_retaddr
29585 ret
29586
29587 bpf_error:
29588@@ -197,4 +210,5 @@ bpf_error:
29589 xor %eax,%eax
29590 mov -8(%rbp),%rbx
29591 leaveq
29592+ pax_force_retaddr
29593 ret
29594diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
29595index f66b540..3e88dfb 100644
29596--- a/arch/x86/net/bpf_jit_comp.c
29597+++ b/arch/x86/net/bpf_jit_comp.c
29598@@ -12,6 +12,7 @@
29599 #include <linux/netdevice.h>
29600 #include <linux/filter.h>
29601 #include <linux/if_vlan.h>
29602+#include <linux/random.h>
29603
29604 /*
29605 * Conventions :
29606@@ -49,13 +50,90 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
29607 return ptr + len;
29608 }
29609
29610+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29611+#define MAX_INSTR_CODE_SIZE 96
29612+#else
29613+#define MAX_INSTR_CODE_SIZE 64
29614+#endif
29615+
29616 #define EMIT(bytes, len) do { prog = emit_code(prog, bytes, len); } while (0)
29617
29618 #define EMIT1(b1) EMIT(b1, 1)
29619 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
29620 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
29621 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
29622+
29623+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29624+/* original constant will appear in ecx */
29625+#define DILUTE_CONST_SEQUENCE(_off, _key) \
29626+do { \
29627+ /* mov ecx, randkey */ \
29628+ EMIT1(0xb9); \
29629+ EMIT(_key, 4); \
29630+ /* xor ecx, randkey ^ off */ \
29631+ EMIT2(0x81, 0xf1); \
29632+ EMIT((_key) ^ (_off), 4); \
29633+} while (0)
29634+
29635+#define EMIT1_off32(b1, _off) \
29636+do { \
29637+ switch (b1) { \
29638+ case 0x05: /* add eax, imm32 */ \
29639+ case 0x2d: /* sub eax, imm32 */ \
29640+ case 0x25: /* and eax, imm32 */ \
29641+ case 0x0d: /* or eax, imm32 */ \
29642+ case 0xb8: /* mov eax, imm32 */ \
29643+ case 0x35: /* xor eax, imm32 */ \
29644+ case 0x3d: /* cmp eax, imm32 */ \
29645+ case 0xa9: /* test eax, imm32 */ \
29646+ DILUTE_CONST_SEQUENCE(_off, randkey); \
29647+ EMIT2((b1) - 4, 0xc8); /* convert imm instruction to eax, ecx */\
29648+ break; \
29649+ case 0xbb: /* mov ebx, imm32 */ \
29650+ DILUTE_CONST_SEQUENCE(_off, randkey); \
29651+ /* mov ebx, ecx */ \
29652+ EMIT2(0x89, 0xcb); \
29653+ break; \
29654+ case 0xbe: /* mov esi, imm32 */ \
29655+ DILUTE_CONST_SEQUENCE(_off, randkey); \
29656+ /* mov esi, ecx */ \
29657+ EMIT2(0x89, 0xce); \
29658+ break; \
29659+ case 0xe8: /* call rel imm32, always to known funcs */ \
29660+ EMIT1(b1); \
29661+ EMIT(_off, 4); \
29662+ break; \
29663+ case 0xe9: /* jmp rel imm32 */ \
29664+ EMIT1(b1); \
29665+ EMIT(_off, 4); \
29666+ /* prevent fall-through, we're not called if off = 0 */ \
29667+ EMIT(0xcccccccc, 4); \
29668+ EMIT(0xcccccccc, 4); \
29669+ break; \
29670+ default: \
29671+ BUILD_BUG(); \
29672+ } \
29673+} while (0)
29674+
29675+#define EMIT2_off32(b1, b2, _off) \
29676+do { \
29677+ if ((b1) == 0x8d && (b2) == 0xb3) { /* lea esi, [rbx+imm32] */ \
29678+ EMIT2(0x8d, 0xb3); /* lea esi, [rbx+randkey] */ \
29679+ EMIT(randkey, 4); \
29680+ EMIT2(0x8d, 0xb6); /* lea esi, [esi+off-randkey] */ \
29681+ EMIT((_off) - randkey, 4); \
29682+ } else if ((b1) == 0x69 && (b2) == 0xc0) { /* imul eax, imm32 */\
29683+ DILUTE_CONST_SEQUENCE(_off, randkey); \
29684+ /* imul eax, ecx */ \
29685+ EMIT3(0x0f, 0xaf, 0xc1); \
29686+ } else { \
29687+ BUILD_BUG(); \
29688+ } \
29689+} while (0)
29690+#else
29691 #define EMIT1_off32(b1, off) do { EMIT1(b1); EMIT(off, 4);} while (0)
29692+#define EMIT2_off32(b1, b2, off) do { EMIT2(b1, b2); EMIT(off, 4);} while (0)
29693+#endif
29694
29695 #define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
29696 #define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
29697@@ -90,6 +168,24 @@ do { \
29698 #define X86_JBE 0x76
29699 #define X86_JA 0x77
29700
29701+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29702+#define APPEND_FLOW_VERIFY() \
29703+do { \
29704+ /* mov ecx, randkey */ \
29705+ EMIT1(0xb9); \
29706+ EMIT(randkey, 4); \
29707+ /* cmp ecx, randkey */ \
29708+ EMIT2(0x81, 0xf9); \
29709+ EMIT(randkey, 4); \
29710+ /* jz after 8 int 3s */ \
29711+ EMIT2(0x74, 0x08); \
29712+ EMIT(0xcccccccc, 4); \
29713+ EMIT(0xcccccccc, 4); \
29714+} while (0)
29715+#else
29716+#define APPEND_FLOW_VERIFY() do { } while (0)
29717+#endif
29718+
29719 #define EMIT_COND_JMP(op, offset) \
29720 do { \
29721 if (is_near(offset)) \
29722@@ -97,6 +193,7 @@ do { \
29723 else { \
29724 EMIT2(0x0f, op + 0x10); \
29725 EMIT(offset, 4); /* jxx .+off32 */ \
29726+ APPEND_FLOW_VERIFY(); \
29727 } \
29728 } while (0)
29729
29730@@ -121,6 +218,11 @@ static inline void bpf_flush_icache(void *start, void *end)
29731 set_fs(old_fs);
29732 }
29733
29734+struct bpf_jit_work {
29735+ struct work_struct work;
29736+ void *image;
29737+};
29738+
29739 #define CHOOSE_LOAD_FUNC(K, func) \
29740 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
29741
29742@@ -146,7 +248,7 @@ static int pkt_type_offset(void)
29743
29744 void bpf_jit_compile(struct sk_filter *fp)
29745 {
29746- u8 temp[64];
29747+ u8 temp[MAX_INSTR_CODE_SIZE];
29748 u8 *prog;
29749 unsigned int proglen, oldproglen = 0;
29750 int ilen, i;
29751@@ -159,6 +261,9 @@ void bpf_jit_compile(struct sk_filter *fp)
29752 unsigned int *addrs;
29753 const struct sock_filter *filter = fp->insns;
29754 int flen = fp->len;
29755+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29756+ unsigned int randkey;
29757+#endif
29758
29759 if (!bpf_jit_enable)
29760 return;
29761@@ -167,11 +272,19 @@ void bpf_jit_compile(struct sk_filter *fp)
29762 if (addrs == NULL)
29763 return;
29764
29765+ fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
29766+ if (!fp->work)
29767+ goto out;
29768+
29769+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29770+ randkey = get_random_int();
29771+#endif
29772+
29773 /* Before first pass, make a rough estimation of addrs[]
29774- * each bpf instruction is translated to less than 64 bytes
29775+ * each bpf instruction is translated to less than MAX_INSTR_CODE_SIZE bytes
29776 */
29777 for (proglen = 0, i = 0; i < flen; i++) {
29778- proglen += 64;
29779+ proglen += MAX_INSTR_CODE_SIZE;
29780 addrs[i] = proglen;
29781 }
29782 cleanup_addr = proglen; /* epilogue address */
29783@@ -282,10 +395,8 @@ void bpf_jit_compile(struct sk_filter *fp)
29784 case BPF_S_ALU_MUL_K: /* A *= K */
29785 if (is_imm8(K))
29786 EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
29787- else {
29788- EMIT2(0x69, 0xc0); /* imul imm32,%eax */
29789- EMIT(K, 4);
29790- }
29791+ else
29792+ EMIT2_off32(0x69, 0xc0, K); /* imul imm32,%eax */
29793 break;
29794 case BPF_S_ALU_DIV_X: /* A /= X; */
29795 seen |= SEEN_XREG;
29796@@ -325,13 +436,23 @@ void bpf_jit_compile(struct sk_filter *fp)
29797 break;
29798 case BPF_S_ALU_MOD_K: /* A %= K; */
29799 EMIT2(0x31, 0xd2); /* xor %edx,%edx */
29800+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29801+ DILUTE_CONST_SEQUENCE(K, randkey);
29802+#else
29803 EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
29804+#endif
29805 EMIT2(0xf7, 0xf1); /* div %ecx */
29806 EMIT2(0x89, 0xd0); /* mov %edx,%eax */
29807 break;
29808 case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
29809+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29810+ DILUTE_CONST_SEQUENCE(K, randkey);
29811+ // imul rax, rcx
29812+ EMIT4(0x48, 0x0f, 0xaf, 0xc1);
29813+#else
29814 EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */
29815 EMIT(K, 4);
29816+#endif
29817 EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */
29818 break;
29819 case BPF_S_ALU_AND_X:
29820@@ -602,8 +723,7 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
29821 if (is_imm8(K)) {
29822 EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
29823 } else {
29824- EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */
29825- EMIT(K, 4);
29826+ EMIT2_off32(0x8d, 0xb3, K); /* lea imm32(%rbx),%esi */
29827 }
29828 } else {
29829 EMIT2(0x89,0xde); /* mov %ebx,%esi */
29830@@ -686,17 +806,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
29831 break;
29832 default:
29833 /* hmm, too complex filter, give up with jit compiler */
29834- goto out;
29835+ goto error;
29836 }
29837 ilen = prog - temp;
29838 if (image) {
29839 if (unlikely(proglen + ilen > oldproglen)) {
29840 pr_err("bpb_jit_compile fatal error\n");
29841- kfree(addrs);
29842- module_free(NULL, image);
29843- return;
29844+ module_free_exec(NULL, image);
29845+ goto error;
29846 }
29847+ pax_open_kernel();
29848 memcpy(image + proglen, temp, ilen);
29849+ pax_close_kernel();
29850 }
29851 proglen += ilen;
29852 addrs[i] = proglen;
29853@@ -717,11 +838,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
29854 break;
29855 }
29856 if (proglen == oldproglen) {
29857- image = module_alloc(max_t(unsigned int,
29858- proglen,
29859- sizeof(struct work_struct)));
29860+ image = module_alloc_exec(proglen);
29861 if (!image)
29862- goto out;
29863+ goto error;
29864 }
29865 oldproglen = proglen;
29866 }
29867@@ -732,7 +851,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
29868 if (image) {
29869 bpf_flush_icache(image, image + proglen);
29870 fp->bpf_func = (void *)image;
29871- }
29872+ } else
29873+error:
29874+ kfree(fp->work);
29875+
29876 out:
29877 kfree(addrs);
29878 return;
29879@@ -740,18 +862,20 @@ out:
29880
29881 static void jit_free_defer(struct work_struct *arg)
29882 {
29883- module_free(NULL, arg);
29884+ module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
29885+ kfree(arg);
29886 }
29887
29888 /* run from softirq, we must use a work_struct to call
29889- * module_free() from process context
29890+ * module_free_exec() from process context
29891 */
29892 void bpf_jit_free(struct sk_filter *fp)
29893 {
29894 if (fp->bpf_func != sk_run_filter) {
29895- struct work_struct *work = (struct work_struct *)fp->bpf_func;
29896+ struct work_struct *work = &fp->work->work;
29897
29898 INIT_WORK(work, jit_free_defer);
29899+ fp->work->image = fp->bpf_func;
29900 schedule_work(work);
29901 }
29902 }
29903diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
29904index d6aa6e8..266395a 100644
29905--- a/arch/x86/oprofile/backtrace.c
29906+++ b/arch/x86/oprofile/backtrace.c
29907@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
29908 struct stack_frame_ia32 *fp;
29909 unsigned long bytes;
29910
29911- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
29912+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
29913 if (bytes != sizeof(bufhead))
29914 return NULL;
29915
29916- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
29917+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
29918
29919 oprofile_add_trace(bufhead[0].return_address);
29920
29921@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
29922 struct stack_frame bufhead[2];
29923 unsigned long bytes;
29924
29925- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
29926+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
29927 if (bytes != sizeof(bufhead))
29928 return NULL;
29929
29930@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
29931 {
29932 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
29933
29934- if (!user_mode_vm(regs)) {
29935+ if (!user_mode(regs)) {
29936 unsigned long stack = kernel_stack_pointer(regs);
29937 if (depth)
29938 dump_trace(NULL, regs, (unsigned long *)stack, 0,
29939diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
29940index 48768df..ba9143c 100644
29941--- a/arch/x86/oprofile/nmi_int.c
29942+++ b/arch/x86/oprofile/nmi_int.c
29943@@ -23,6 +23,7 @@
29944 #include <asm/nmi.h>
29945 #include <asm/msr.h>
29946 #include <asm/apic.h>
29947+#include <asm/pgtable.h>
29948
29949 #include "op_counter.h"
29950 #include "op_x86_model.h"
29951@@ -774,8 +775,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
29952 if (ret)
29953 return ret;
29954
29955- if (!model->num_virt_counters)
29956- model->num_virt_counters = model->num_counters;
29957+ if (!model->num_virt_counters) {
29958+ pax_open_kernel();
29959+ *(unsigned int *)&model->num_virt_counters = model->num_counters;
29960+ pax_close_kernel();
29961+ }
29962
29963 mux_init(ops);
29964
29965diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
29966index b2b9443..be58856 100644
29967--- a/arch/x86/oprofile/op_model_amd.c
29968+++ b/arch/x86/oprofile/op_model_amd.c
29969@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
29970 num_counters = AMD64_NUM_COUNTERS;
29971 }
29972
29973- op_amd_spec.num_counters = num_counters;
29974- op_amd_spec.num_controls = num_counters;
29975- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
29976+ pax_open_kernel();
29977+ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
29978+ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
29979+ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
29980+ pax_close_kernel();
29981
29982 return 0;
29983 }
29984diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
29985index d90528e..0127e2b 100644
29986--- a/arch/x86/oprofile/op_model_ppro.c
29987+++ b/arch/x86/oprofile/op_model_ppro.c
29988@@ -19,6 +19,7 @@
29989 #include <asm/msr.h>
29990 #include <asm/apic.h>
29991 #include <asm/nmi.h>
29992+#include <asm/pgtable.h>
29993
29994 #include "op_x86_model.h"
29995 #include "op_counter.h"
29996@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
29997
29998 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
29999
30000- op_arch_perfmon_spec.num_counters = num_counters;
30001- op_arch_perfmon_spec.num_controls = num_counters;
30002+ pax_open_kernel();
30003+ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
30004+ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
30005+ pax_close_kernel();
30006 }
30007
30008 static int arch_perfmon_init(struct oprofile_operations *ignore)
30009diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
30010index 71e8a67..6a313bb 100644
30011--- a/arch/x86/oprofile/op_x86_model.h
30012+++ b/arch/x86/oprofile/op_x86_model.h
30013@@ -52,7 +52,7 @@ struct op_x86_model_spec {
30014 void (*switch_ctrl)(struct op_x86_model_spec const *model,
30015 struct op_msrs const * const msrs);
30016 #endif
30017-};
30018+} __do_const;
30019
30020 struct op_counter_config;
30021
30022diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
30023index e9e6ed5..e47ae67 100644
30024--- a/arch/x86/pci/amd_bus.c
30025+++ b/arch/x86/pci/amd_bus.c
30026@@ -337,7 +337,7 @@ static int __cpuinit amd_cpu_notify(struct notifier_block *self,
30027 return NOTIFY_OK;
30028 }
30029
30030-static struct notifier_block __cpuinitdata amd_cpu_notifier = {
30031+static struct notifier_block amd_cpu_notifier = {
30032 .notifier_call = amd_cpu_notify,
30033 };
30034
30035diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
30036index 372e9b8..e775a6c 100644
30037--- a/arch/x86/pci/irq.c
30038+++ b/arch/x86/pci/irq.c
30039@@ -50,7 +50,7 @@ struct irq_router {
30040 struct irq_router_handler {
30041 u16 vendor;
30042 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
30043-};
30044+} __do_const;
30045
30046 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
30047 void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL;
30048@@ -794,7 +794,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
30049 return 0;
30050 }
30051
30052-static __initdata struct irq_router_handler pirq_routers[] = {
30053+static __initconst const struct irq_router_handler pirq_routers[] = {
30054 { PCI_VENDOR_ID_INTEL, intel_router_probe },
30055 { PCI_VENDOR_ID_AL, ali_router_probe },
30056 { PCI_VENDOR_ID_ITE, ite_router_probe },
30057@@ -821,7 +821,7 @@ static struct pci_dev *pirq_router_dev;
30058 static void __init pirq_find_router(struct irq_router *r)
30059 {
30060 struct irq_routing_table *rt = pirq_table;
30061- struct irq_router_handler *h;
30062+ const struct irq_router_handler *h;
30063
30064 #ifdef CONFIG_PCI_BIOS
30065 if (!rt->signature) {
30066@@ -1094,7 +1094,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
30067 return 0;
30068 }
30069
30070-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
30071+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
30072 {
30073 .callback = fix_broken_hp_bios_irq9,
30074 .ident = "HP Pavilion N5400 Series Laptop",
30075diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
30076index 6eb18c4..20d83de 100644
30077--- a/arch/x86/pci/mrst.c
30078+++ b/arch/x86/pci/mrst.c
30079@@ -238,7 +238,9 @@ int __init pci_mrst_init(void)
30080 printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n");
30081 pci_mmcfg_late_init();
30082 pcibios_enable_irq = mrst_pci_irq_enable;
30083- pci_root_ops = pci_mrst_ops;
30084+ pax_open_kernel();
30085+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
30086+ pax_close_kernel();
30087 pci_soc_mode = 1;
30088 /* Continue with standard init */
30089 return 1;
30090diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
30091index c77b24a..c979855 100644
30092--- a/arch/x86/pci/pcbios.c
30093+++ b/arch/x86/pci/pcbios.c
30094@@ -79,7 +79,7 @@ union bios32 {
30095 static struct {
30096 unsigned long address;
30097 unsigned short segment;
30098-} bios32_indirect = { 0, __KERNEL_CS };
30099+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
30100
30101 /*
30102 * Returns the entry point for the given service, NULL on error
30103@@ -92,37 +92,80 @@ static unsigned long bios32_service(unsigned long service)
30104 unsigned long length; /* %ecx */
30105 unsigned long entry; /* %edx */
30106 unsigned long flags;
30107+ struct desc_struct d, *gdt;
30108
30109 local_irq_save(flags);
30110- __asm__("lcall *(%%edi); cld"
30111+
30112+ gdt = get_cpu_gdt_table(smp_processor_id());
30113+
30114+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
30115+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
30116+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
30117+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
30118+
30119+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
30120 : "=a" (return_code),
30121 "=b" (address),
30122 "=c" (length),
30123 "=d" (entry)
30124 : "0" (service),
30125 "1" (0),
30126- "D" (&bios32_indirect));
30127+ "D" (&bios32_indirect),
30128+ "r"(__PCIBIOS_DS)
30129+ : "memory");
30130+
30131+ pax_open_kernel();
30132+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
30133+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
30134+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
30135+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
30136+ pax_close_kernel();
30137+
30138 local_irq_restore(flags);
30139
30140 switch (return_code) {
30141- case 0:
30142- return address + entry;
30143- case 0x80: /* Not present */
30144- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
30145- return 0;
30146- default: /* Shouldn't happen */
30147- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
30148- service, return_code);
30149+ case 0: {
30150+ int cpu;
30151+ unsigned char flags;
30152+
30153+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
30154+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
30155+ printk(KERN_WARNING "bios32_service: not valid\n");
30156 return 0;
30157+ }
30158+ address = address + PAGE_OFFSET;
30159+ length += 16UL; /* some BIOSs underreport this... */
30160+ flags = 4;
30161+ if (length >= 64*1024*1024) {
30162+ length >>= PAGE_SHIFT;
30163+ flags |= 8;
30164+ }
30165+
30166+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
30167+ gdt = get_cpu_gdt_table(cpu);
30168+ pack_descriptor(&d, address, length, 0x9b, flags);
30169+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
30170+ pack_descriptor(&d, address, length, 0x93, flags);
30171+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
30172+ }
30173+ return entry;
30174+ }
30175+ case 0x80: /* Not present */
30176+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
30177+ return 0;
30178+ default: /* Shouldn't happen */
30179+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
30180+ service, return_code);
30181+ return 0;
30182 }
30183 }
30184
30185 static struct {
30186 unsigned long address;
30187 unsigned short segment;
30188-} pci_indirect = { 0, __KERNEL_CS };
30189+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
30190
30191-static int pci_bios_present;
30192+static int pci_bios_present __read_only;
30193
30194 static int check_pcibios(void)
30195 {
30196@@ -131,11 +174,13 @@ static int check_pcibios(void)
30197 unsigned long flags, pcibios_entry;
30198
30199 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
30200- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
30201+ pci_indirect.address = pcibios_entry;
30202
30203 local_irq_save(flags);
30204- __asm__(
30205- "lcall *(%%edi); cld\n\t"
30206+ __asm__("movw %w6, %%ds\n\t"
30207+ "lcall *%%ss:(%%edi); cld\n\t"
30208+ "push %%ss\n\t"
30209+ "pop %%ds\n\t"
30210 "jc 1f\n\t"
30211 "xor %%ah, %%ah\n"
30212 "1:"
30213@@ -144,7 +189,8 @@ static int check_pcibios(void)
30214 "=b" (ebx),
30215 "=c" (ecx)
30216 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
30217- "D" (&pci_indirect)
30218+ "D" (&pci_indirect),
30219+ "r" (__PCIBIOS_DS)
30220 : "memory");
30221 local_irq_restore(flags);
30222
30223@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30224
30225 switch (len) {
30226 case 1:
30227- __asm__("lcall *(%%esi); cld\n\t"
30228+ __asm__("movw %w6, %%ds\n\t"
30229+ "lcall *%%ss:(%%esi); cld\n\t"
30230+ "push %%ss\n\t"
30231+ "pop %%ds\n\t"
30232 "jc 1f\n\t"
30233 "xor %%ah, %%ah\n"
30234 "1:"
30235@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30236 : "1" (PCIBIOS_READ_CONFIG_BYTE),
30237 "b" (bx),
30238 "D" ((long)reg),
30239- "S" (&pci_indirect));
30240+ "S" (&pci_indirect),
30241+ "r" (__PCIBIOS_DS));
30242 /*
30243 * Zero-extend the result beyond 8 bits, do not trust the
30244 * BIOS having done it:
30245@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30246 *value &= 0xff;
30247 break;
30248 case 2:
30249- __asm__("lcall *(%%esi); cld\n\t"
30250+ __asm__("movw %w6, %%ds\n\t"
30251+ "lcall *%%ss:(%%esi); cld\n\t"
30252+ "push %%ss\n\t"
30253+ "pop %%ds\n\t"
30254 "jc 1f\n\t"
30255 "xor %%ah, %%ah\n"
30256 "1:"
30257@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30258 : "1" (PCIBIOS_READ_CONFIG_WORD),
30259 "b" (bx),
30260 "D" ((long)reg),
30261- "S" (&pci_indirect));
30262+ "S" (&pci_indirect),
30263+ "r" (__PCIBIOS_DS));
30264 /*
30265 * Zero-extend the result beyond 16 bits, do not trust the
30266 * BIOS having done it:
30267@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30268 *value &= 0xffff;
30269 break;
30270 case 4:
30271- __asm__("lcall *(%%esi); cld\n\t"
30272+ __asm__("movw %w6, %%ds\n\t"
30273+ "lcall *%%ss:(%%esi); cld\n\t"
30274+ "push %%ss\n\t"
30275+ "pop %%ds\n\t"
30276 "jc 1f\n\t"
30277 "xor %%ah, %%ah\n"
30278 "1:"
30279@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30280 : "1" (PCIBIOS_READ_CONFIG_DWORD),
30281 "b" (bx),
30282 "D" ((long)reg),
30283- "S" (&pci_indirect));
30284+ "S" (&pci_indirect),
30285+ "r" (__PCIBIOS_DS));
30286 break;
30287 }
30288
30289@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
30290
30291 switch (len) {
30292 case 1:
30293- __asm__("lcall *(%%esi); cld\n\t"
30294+ __asm__("movw %w6, %%ds\n\t"
30295+ "lcall *%%ss:(%%esi); cld\n\t"
30296+ "push %%ss\n\t"
30297+ "pop %%ds\n\t"
30298 "jc 1f\n\t"
30299 "xor %%ah, %%ah\n"
30300 "1:"
30301@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
30302 "c" (value),
30303 "b" (bx),
30304 "D" ((long)reg),
30305- "S" (&pci_indirect));
30306+ "S" (&pci_indirect),
30307+ "r" (__PCIBIOS_DS));
30308 break;
30309 case 2:
30310- __asm__("lcall *(%%esi); cld\n\t"
30311+ __asm__("movw %w6, %%ds\n\t"
30312+ "lcall *%%ss:(%%esi); cld\n\t"
30313+ "push %%ss\n\t"
30314+ "pop %%ds\n\t"
30315 "jc 1f\n\t"
30316 "xor %%ah, %%ah\n"
30317 "1:"
30318@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
30319 "c" (value),
30320 "b" (bx),
30321 "D" ((long)reg),
30322- "S" (&pci_indirect));
30323+ "S" (&pci_indirect),
30324+ "r" (__PCIBIOS_DS));
30325 break;
30326 case 4:
30327- __asm__("lcall *(%%esi); cld\n\t"
30328+ __asm__("movw %w6, %%ds\n\t"
30329+ "lcall *%%ss:(%%esi); cld\n\t"
30330+ "push %%ss\n\t"
30331+ "pop %%ds\n\t"
30332 "jc 1f\n\t"
30333 "xor %%ah, %%ah\n"
30334 "1:"
30335@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
30336 "c" (value),
30337 "b" (bx),
30338 "D" ((long)reg),
30339- "S" (&pci_indirect));
30340+ "S" (&pci_indirect),
30341+ "r" (__PCIBIOS_DS));
30342 break;
30343 }
30344
30345@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
30346
30347 DBG("PCI: Fetching IRQ routing table... ");
30348 __asm__("push %%es\n\t"
30349+ "movw %w8, %%ds\n\t"
30350 "push %%ds\n\t"
30351 "pop %%es\n\t"
30352- "lcall *(%%esi); cld\n\t"
30353+ "lcall *%%ss:(%%esi); cld\n\t"
30354 "pop %%es\n\t"
30355+ "push %%ss\n\t"
30356+ "pop %%ds\n"
30357 "jc 1f\n\t"
30358 "xor %%ah, %%ah\n"
30359 "1:"
30360@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
30361 "1" (0),
30362 "D" ((long) &opt),
30363 "S" (&pci_indirect),
30364- "m" (opt)
30365+ "m" (opt),
30366+ "r" (__PCIBIOS_DS)
30367 : "memory");
30368 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
30369 if (ret & 0xff00)
30370@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
30371 {
30372 int ret;
30373
30374- __asm__("lcall *(%%esi); cld\n\t"
30375+ __asm__("movw %w5, %%ds\n\t"
30376+ "lcall *%%ss:(%%esi); cld\n\t"
30377+ "push %%ss\n\t"
30378+ "pop %%ds\n"
30379 "jc 1f\n\t"
30380 "xor %%ah, %%ah\n"
30381 "1:"
30382@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
30383 : "0" (PCIBIOS_SET_PCI_HW_INT),
30384 "b" ((dev->bus->number << 8) | dev->devfn),
30385 "c" ((irq << 8) | (pin + 10)),
30386- "S" (&pci_indirect));
30387+ "S" (&pci_indirect),
30388+ "r" (__PCIBIOS_DS));
30389 return !(ret & 0xff00);
30390 }
30391 EXPORT_SYMBOL(pcibios_set_irq_routing);
30392diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
30393index 40e4469..0592924 100644
30394--- a/arch/x86/platform/efi/efi_32.c
30395+++ b/arch/x86/platform/efi/efi_32.c
30396@@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
30397 {
30398 struct desc_ptr gdt_descr;
30399
30400+#ifdef CONFIG_PAX_KERNEXEC
30401+ struct desc_struct d;
30402+#endif
30403+
30404 local_irq_save(efi_rt_eflags);
30405
30406 load_cr3(initial_page_table);
30407 __flush_tlb_all();
30408
30409+#ifdef CONFIG_PAX_KERNEXEC
30410+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
30411+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
30412+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
30413+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
30414+#endif
30415+
30416 gdt_descr.address = __pa(get_cpu_gdt_table(0));
30417 gdt_descr.size = GDT_SIZE - 1;
30418 load_gdt(&gdt_descr);
30419@@ -58,11 +69,24 @@ void efi_call_phys_epilog(void)
30420 {
30421 struct desc_ptr gdt_descr;
30422
30423+#ifdef CONFIG_PAX_KERNEXEC
30424+ struct desc_struct d;
30425+
30426+ memset(&d, 0, sizeof d);
30427+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
30428+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
30429+#endif
30430+
30431 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
30432 gdt_descr.size = GDT_SIZE - 1;
30433 load_gdt(&gdt_descr);
30434
30435+#ifdef CONFIG_PAX_PER_CPU_PGD
30436+ load_cr3(get_cpu_pgd(smp_processor_id()));
30437+#else
30438 load_cr3(swapper_pg_dir);
30439+#endif
30440+
30441 __flush_tlb_all();
30442
30443 local_irq_restore(efi_rt_eflags);
30444diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
30445index 39a0e7f1..ecc2f1e 100644
30446--- a/arch/x86/platform/efi/efi_64.c
30447+++ b/arch/x86/platform/efi/efi_64.c
30448@@ -76,6 +76,11 @@ void __init efi_call_phys_prelog(void)
30449 vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
30450 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
30451 }
30452+
30453+#ifdef CONFIG_PAX_PER_CPU_PGD
30454+ load_cr3(swapper_pg_dir);
30455+#endif
30456+
30457 __flush_tlb_all();
30458 }
30459
30460@@ -89,6 +94,11 @@ void __init efi_call_phys_epilog(void)
30461 for (pgd = 0; pgd < n_pgds; pgd++)
30462 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
30463 kfree(save_pgd);
30464+
30465+#ifdef CONFIG_PAX_PER_CPU_PGD
30466+ load_cr3(get_cpu_pgd(smp_processor_id()));
30467+#endif
30468+
30469 __flush_tlb_all();
30470 local_irq_restore(efi_flags);
30471 early_code_mapping_set_exec(0);
30472diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
30473index fbe66e6..eae5e38 100644
30474--- a/arch/x86/platform/efi/efi_stub_32.S
30475+++ b/arch/x86/platform/efi/efi_stub_32.S
30476@@ -6,7 +6,9 @@
30477 */
30478
30479 #include <linux/linkage.h>
30480+#include <linux/init.h>
30481 #include <asm/page_types.h>
30482+#include <asm/segment.h>
30483
30484 /*
30485 * efi_call_phys(void *, ...) is a function with variable parameters.
30486@@ -20,7 +22,7 @@
30487 * service functions will comply with gcc calling convention, too.
30488 */
30489
30490-.text
30491+__INIT
30492 ENTRY(efi_call_phys)
30493 /*
30494 * 0. The function can only be called in Linux kernel. So CS has been
30495@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
30496 * The mapping of lower virtual memory has been created in prelog and
30497 * epilog.
30498 */
30499- movl $1f, %edx
30500- subl $__PAGE_OFFSET, %edx
30501- jmp *%edx
30502+#ifdef CONFIG_PAX_KERNEXEC
30503+ movl $(__KERNEXEC_EFI_DS), %edx
30504+ mov %edx, %ds
30505+ mov %edx, %es
30506+ mov %edx, %ss
30507+ addl $2f,(1f)
30508+ ljmp *(1f)
30509+
30510+__INITDATA
30511+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
30512+.previous
30513+
30514+2:
30515+ subl $2b,(1b)
30516+#else
30517+ jmp 1f-__PAGE_OFFSET
30518 1:
30519+#endif
30520
30521 /*
30522 * 2. Now on the top of stack is the return
30523@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
30524 * parameter 2, ..., param n. To make things easy, we save the return
30525 * address of efi_call_phys in a global variable.
30526 */
30527- popl %edx
30528- movl %edx, saved_return_addr
30529- /* get the function pointer into ECX*/
30530- popl %ecx
30531- movl %ecx, efi_rt_function_ptr
30532- movl $2f, %edx
30533- subl $__PAGE_OFFSET, %edx
30534- pushl %edx
30535+ popl (saved_return_addr)
30536+ popl (efi_rt_function_ptr)
30537
30538 /*
30539 * 3. Clear PG bit in %CR0.
30540@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
30541 /*
30542 * 5. Call the physical function.
30543 */
30544- jmp *%ecx
30545+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
30546
30547-2:
30548 /*
30549 * 6. After EFI runtime service returns, control will return to
30550 * following instruction. We'd better readjust stack pointer first.
30551@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
30552 movl %cr0, %edx
30553 orl $0x80000000, %edx
30554 movl %edx, %cr0
30555- jmp 1f
30556-1:
30557+
30558 /*
30559 * 8. Now restore the virtual mode from flat mode by
30560 * adding EIP with PAGE_OFFSET.
30561 */
30562- movl $1f, %edx
30563- jmp *%edx
30564+#ifdef CONFIG_PAX_KERNEXEC
30565+ movl $(__KERNEL_DS), %edx
30566+ mov %edx, %ds
30567+ mov %edx, %es
30568+ mov %edx, %ss
30569+ ljmp $(__KERNEL_CS),$1f
30570+#else
30571+ jmp 1f+__PAGE_OFFSET
30572+#endif
30573 1:
30574
30575 /*
30576 * 9. Balance the stack. And because EAX contain the return value,
30577 * we'd better not clobber it.
30578 */
30579- leal efi_rt_function_ptr, %edx
30580- movl (%edx), %ecx
30581- pushl %ecx
30582+ pushl (efi_rt_function_ptr)
30583
30584 /*
30585- * 10. Push the saved return address onto the stack and return.
30586+ * 10. Return to the saved return address.
30587 */
30588- leal saved_return_addr, %edx
30589- movl (%edx), %ecx
30590- pushl %ecx
30591- ret
30592+ jmpl *(saved_return_addr)
30593 ENDPROC(efi_call_phys)
30594 .previous
30595
30596-.data
30597+__INITDATA
30598 saved_return_addr:
30599 .long 0
30600 efi_rt_function_ptr:
30601diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
30602index 4c07cca..2c8427d 100644
30603--- a/arch/x86/platform/efi/efi_stub_64.S
30604+++ b/arch/x86/platform/efi/efi_stub_64.S
30605@@ -7,6 +7,7 @@
30606 */
30607
30608 #include <linux/linkage.h>
30609+#include <asm/alternative-asm.h>
30610
30611 #define SAVE_XMM \
30612 mov %rsp, %rax; \
30613@@ -40,6 +41,7 @@ ENTRY(efi_call0)
30614 call *%rdi
30615 addq $32, %rsp
30616 RESTORE_XMM
30617+ pax_force_retaddr 0, 1
30618 ret
30619 ENDPROC(efi_call0)
30620
30621@@ -50,6 +52,7 @@ ENTRY(efi_call1)
30622 call *%rdi
30623 addq $32, %rsp
30624 RESTORE_XMM
30625+ pax_force_retaddr 0, 1
30626 ret
30627 ENDPROC(efi_call1)
30628
30629@@ -60,6 +63,7 @@ ENTRY(efi_call2)
30630 call *%rdi
30631 addq $32, %rsp
30632 RESTORE_XMM
30633+ pax_force_retaddr 0, 1
30634 ret
30635 ENDPROC(efi_call2)
30636
30637@@ -71,6 +75,7 @@ ENTRY(efi_call3)
30638 call *%rdi
30639 addq $32, %rsp
30640 RESTORE_XMM
30641+ pax_force_retaddr 0, 1
30642 ret
30643 ENDPROC(efi_call3)
30644
30645@@ -83,6 +88,7 @@ ENTRY(efi_call4)
30646 call *%rdi
30647 addq $32, %rsp
30648 RESTORE_XMM
30649+ pax_force_retaddr 0, 1
30650 ret
30651 ENDPROC(efi_call4)
30652
30653@@ -96,6 +102,7 @@ ENTRY(efi_call5)
30654 call *%rdi
30655 addq $48, %rsp
30656 RESTORE_XMM
30657+ pax_force_retaddr 0, 1
30658 ret
30659 ENDPROC(efi_call5)
30660
30661@@ -112,5 +119,6 @@ ENTRY(efi_call6)
30662 call *%rdi
30663 addq $48, %rsp
30664 RESTORE_XMM
30665+ pax_force_retaddr 0, 1
30666 ret
30667 ENDPROC(efi_call6)
30668diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
30669index a0a0a43..a48e233 100644
30670--- a/arch/x86/platform/mrst/mrst.c
30671+++ b/arch/x86/platform/mrst/mrst.c
30672@@ -78,13 +78,15 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
30673 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
30674 int sfi_mrtc_num;
30675
30676-static void mrst_power_off(void)
30677+static __noreturn void mrst_power_off(void)
30678 {
30679+ BUG();
30680 }
30681
30682-static void mrst_reboot(void)
30683+static __noreturn void mrst_reboot(void)
30684 {
30685 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
30686+ BUG();
30687 }
30688
30689 /* parse all the mtimer info to a static mtimer array */
30690diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
30691index d6ee929..3637cb5 100644
30692--- a/arch/x86/platform/olpc/olpc_dt.c
30693+++ b/arch/x86/platform/olpc/olpc_dt.c
30694@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
30695 return res;
30696 }
30697
30698-static struct of_pdt_ops prom_olpc_ops __initdata = {
30699+static struct of_pdt_ops prom_olpc_ops __initconst = {
30700 .nextprop = olpc_dt_nextprop,
30701 .getproplen = olpc_dt_getproplen,
30702 .getproperty = olpc_dt_getproperty,
30703diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
30704index 1cf5b30..fd45732 100644
30705--- a/arch/x86/power/cpu.c
30706+++ b/arch/x86/power/cpu.c
30707@@ -137,11 +137,8 @@ static void do_fpu_end(void)
30708 static void fix_processor_context(void)
30709 {
30710 int cpu = smp_processor_id();
30711- struct tss_struct *t = &per_cpu(init_tss, cpu);
30712-#ifdef CONFIG_X86_64
30713- struct desc_struct *desc = get_cpu_gdt_table(cpu);
30714- tss_desc tss;
30715-#endif
30716+ struct tss_struct *t = init_tss + cpu;
30717+
30718 set_tss_desc(cpu, t); /*
30719 * This just modifies memory; should not be
30720 * necessary. But... This is necessary, because
30721@@ -150,10 +147,6 @@ static void fix_processor_context(void)
30722 */
30723
30724 #ifdef CONFIG_X86_64
30725- memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
30726- tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
30727- write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
30728-
30729 syscall_init(); /* This sets MSR_*STAR and related */
30730 #endif
30731 load_TR_desc(); /* This does ltr */
30732diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
30733index a44f457..9140171 100644
30734--- a/arch/x86/realmode/init.c
30735+++ b/arch/x86/realmode/init.c
30736@@ -70,7 +70,13 @@ void __init setup_real_mode(void)
30737 __va(real_mode_header->trampoline_header);
30738
30739 #ifdef CONFIG_X86_32
30740- trampoline_header->start = __pa_symbol(startup_32_smp);
30741+ trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp));
30742+
30743+#ifdef CONFIG_PAX_KERNEXEC
30744+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
30745+#endif
30746+
30747+ trampoline_header->boot_cs = __BOOT_CS;
30748 trampoline_header->gdt_limit = __BOOT_DS + 7;
30749 trampoline_header->gdt_base = __pa_symbol(boot_gdt);
30750 #else
30751@@ -86,7 +92,7 @@ void __init setup_real_mode(void)
30752 *trampoline_cr4_features = read_cr4();
30753
30754 trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
30755- trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
30756+ trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;
30757 trampoline_pgd[511] = init_level4_pgt[511].pgd;
30758 #endif
30759 }
30760diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
30761index 8869287..d577672 100644
30762--- a/arch/x86/realmode/rm/Makefile
30763+++ b/arch/x86/realmode/rm/Makefile
30764@@ -78,5 +78,8 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
30765 $(call cc-option, -fno-unit-at-a-time)) \
30766 $(call cc-option, -fno-stack-protector) \
30767 $(call cc-option, -mpreferred-stack-boundary=2)
30768+ifdef CONSTIFY_PLUGIN
30769+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
30770+endif
30771 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
30772 GCOV_PROFILE := n
30773diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
30774index a28221d..93c40f1 100644
30775--- a/arch/x86/realmode/rm/header.S
30776+++ b/arch/x86/realmode/rm/header.S
30777@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
30778 #endif
30779 /* APM/BIOS reboot */
30780 .long pa_machine_real_restart_asm
30781-#ifdef CONFIG_X86_64
30782+#ifdef CONFIG_X86_32
30783+ .long __KERNEL_CS
30784+#else
30785 .long __KERNEL32_CS
30786 #endif
30787 END(real_mode_header)
30788diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
30789index c1b2791..f9e31c7 100644
30790--- a/arch/x86/realmode/rm/trampoline_32.S
30791+++ b/arch/x86/realmode/rm/trampoline_32.S
30792@@ -25,6 +25,12 @@
30793 #include <asm/page_types.h>
30794 #include "realmode.h"
30795
30796+#ifdef CONFIG_PAX_KERNEXEC
30797+#define ta(X) (X)
30798+#else
30799+#define ta(X) (pa_ ## X)
30800+#endif
30801+
30802 .text
30803 .code16
30804
30805@@ -39,8 +45,6 @@ ENTRY(trampoline_start)
30806
30807 cli # We should be safe anyway
30808
30809- movl tr_start, %eax # where we need to go
30810-
30811 movl $0xA5A5A5A5, trampoline_status
30812 # write marker for master knows we're running
30813
30814@@ -56,7 +60,7 @@ ENTRY(trampoline_start)
30815 movw $1, %dx # protected mode (PE) bit
30816 lmsw %dx # into protected mode
30817
30818- ljmpl $__BOOT_CS, $pa_startup_32
30819+ ljmpl *(trampoline_header)
30820
30821 .section ".text32","ax"
30822 .code32
30823@@ -67,7 +71,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
30824 .balign 8
30825 GLOBAL(trampoline_header)
30826 tr_start: .space 4
30827- tr_gdt_pad: .space 2
30828+ tr_boot_cs: .space 2
30829 tr_gdt: .space 6
30830 END(trampoline_header)
30831
30832diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
30833index bb360dc..3e5945f 100644
30834--- a/arch/x86/realmode/rm/trampoline_64.S
30835+++ b/arch/x86/realmode/rm/trampoline_64.S
30836@@ -107,7 +107,7 @@ ENTRY(startup_32)
30837 wrmsr
30838
30839 # Enable paging and in turn activate Long Mode
30840- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
30841+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
30842 movl %eax, %cr0
30843
30844 /*
30845diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
30846index e812034..c747134 100644
30847--- a/arch/x86/tools/Makefile
30848+++ b/arch/x86/tools/Makefile
30849@@ -37,7 +37,7 @@ $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/in
30850
30851 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
30852
30853-HOST_EXTRACFLAGS += -I$(srctree)/tools/include
30854+HOST_EXTRACFLAGS += -I$(srctree)/tools/include -ggdb
30855 hostprogs-y += relocs
30856 relocs-objs := relocs_32.o relocs_64.o relocs_common.o
30857 relocs: $(obj)/relocs
30858diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
30859index f7bab68..b6d9886 100644
30860--- a/arch/x86/tools/relocs.c
30861+++ b/arch/x86/tools/relocs.c
30862@@ -1,5 +1,7 @@
30863 /* This is included from relocs_32/64.c */
30864
30865+#include "../../../include/generated/autoconf.h"
30866+
30867 #define ElfW(type) _ElfW(ELF_BITS, type)
30868 #define _ElfW(bits, type) __ElfW(bits, type)
30869 #define __ElfW(bits, type) Elf##bits##_##type
30870@@ -11,6 +13,7 @@
30871 #define Elf_Sym ElfW(Sym)
30872
30873 static Elf_Ehdr ehdr;
30874+static Elf_Phdr *phdr;
30875
30876 struct relocs {
30877 uint32_t *offset;
30878@@ -383,9 +386,39 @@ static void read_ehdr(FILE *fp)
30879 }
30880 }
30881
30882+static void read_phdrs(FILE *fp)
30883+{
30884+ unsigned int i;
30885+
30886+ phdr = calloc(ehdr.e_phnum, sizeof(Elf_Phdr));
30887+ if (!phdr) {
30888+ die("Unable to allocate %d program headers\n",
30889+ ehdr.e_phnum);
30890+ }
30891+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
30892+ die("Seek to %d failed: %s\n",
30893+ ehdr.e_phoff, strerror(errno));
30894+ }
30895+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
30896+ die("Cannot read ELF program headers: %s\n",
30897+ strerror(errno));
30898+ }
30899+ for(i = 0; i < ehdr.e_phnum; i++) {
30900+ phdr[i].p_type = elf_word_to_cpu(phdr[i].p_type);
30901+ phdr[i].p_offset = elf_off_to_cpu(phdr[i].p_offset);
30902+ phdr[i].p_vaddr = elf_addr_to_cpu(phdr[i].p_vaddr);
30903+ phdr[i].p_paddr = elf_addr_to_cpu(phdr[i].p_paddr);
30904+ phdr[i].p_filesz = elf_word_to_cpu(phdr[i].p_filesz);
30905+ phdr[i].p_memsz = elf_word_to_cpu(phdr[i].p_memsz);
30906+ phdr[i].p_flags = elf_word_to_cpu(phdr[i].p_flags);
30907+ phdr[i].p_align = elf_word_to_cpu(phdr[i].p_align);
30908+ }
30909+
30910+}
30911+
30912 static void read_shdrs(FILE *fp)
30913 {
30914- int i;
30915+ unsigned int i;
30916 Elf_Shdr shdr;
30917
30918 secs = calloc(ehdr.e_shnum, sizeof(struct section));
30919@@ -420,7 +453,7 @@ static void read_shdrs(FILE *fp)
30920
30921 static void read_strtabs(FILE *fp)
30922 {
30923- int i;
30924+ unsigned int i;
30925 for (i = 0; i < ehdr.e_shnum; i++) {
30926 struct section *sec = &secs[i];
30927 if (sec->shdr.sh_type != SHT_STRTAB) {
30928@@ -445,7 +478,7 @@ static void read_strtabs(FILE *fp)
30929
30930 static void read_symtabs(FILE *fp)
30931 {
30932- int i,j;
30933+ unsigned int i,j;
30934 for (i = 0; i < ehdr.e_shnum; i++) {
30935 struct section *sec = &secs[i];
30936 if (sec->shdr.sh_type != SHT_SYMTAB) {
30937@@ -476,9 +509,11 @@ static void read_symtabs(FILE *fp)
30938 }
30939
30940
30941-static void read_relocs(FILE *fp)
30942+static void read_relocs(FILE *fp, int use_real_mode)
30943 {
30944- int i,j;
30945+ unsigned int i,j;
30946+ uint32_t base;
30947+
30948 for (i = 0; i < ehdr.e_shnum; i++) {
30949 struct section *sec = &secs[i];
30950 if (sec->shdr.sh_type != SHT_REL_TYPE) {
30951@@ -498,9 +533,22 @@ static void read_relocs(FILE *fp)
30952 die("Cannot read symbol table: %s\n",
30953 strerror(errno));
30954 }
30955+ base = 0;
30956+
30957+#ifdef CONFIG_X86_32
30958+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
30959+ if (phdr[j].p_type != PT_LOAD )
30960+ continue;
30961+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
30962+ continue;
30963+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
30964+ break;
30965+ }
30966+#endif
30967+
30968 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
30969 Elf_Rel *rel = &sec->reltab[j];
30970- rel->r_offset = elf_addr_to_cpu(rel->r_offset);
30971+ rel->r_offset = elf_addr_to_cpu(rel->r_offset) + base;
30972 rel->r_info = elf_xword_to_cpu(rel->r_info);
30973 #if (SHT_REL_TYPE == SHT_RELA)
30974 rel->r_addend = elf_xword_to_cpu(rel->r_addend);
30975@@ -512,7 +560,7 @@ static void read_relocs(FILE *fp)
30976
30977 static void print_absolute_symbols(void)
30978 {
30979- int i;
30980+ unsigned int i;
30981 const char *format;
30982
30983 if (ELF_BITS == 64)
30984@@ -525,7 +573,7 @@ static void print_absolute_symbols(void)
30985 for (i = 0; i < ehdr.e_shnum; i++) {
30986 struct section *sec = &secs[i];
30987 char *sym_strtab;
30988- int j;
30989+ unsigned int j;
30990
30991 if (sec->shdr.sh_type != SHT_SYMTAB) {
30992 continue;
30993@@ -552,7 +600,7 @@ static void print_absolute_symbols(void)
30994
30995 static void print_absolute_relocs(void)
30996 {
30997- int i, printed = 0;
30998+ unsigned int i, printed = 0;
30999 const char *format;
31000
31001 if (ELF_BITS == 64)
31002@@ -565,7 +613,7 @@ static void print_absolute_relocs(void)
31003 struct section *sec_applies, *sec_symtab;
31004 char *sym_strtab;
31005 Elf_Sym *sh_symtab;
31006- int j;
31007+ unsigned int j;
31008 if (sec->shdr.sh_type != SHT_REL_TYPE) {
31009 continue;
31010 }
31011@@ -642,13 +690,13 @@ static void add_reloc(struct relocs *r, uint32_t offset)
31012 static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
31013 Elf_Sym *sym, const char *symname))
31014 {
31015- int i;
31016+ unsigned int i;
31017 /* Walk through the relocations */
31018 for (i = 0; i < ehdr.e_shnum; i++) {
31019 char *sym_strtab;
31020 Elf_Sym *sh_symtab;
31021 struct section *sec_applies, *sec_symtab;
31022- int j;
31023+ unsigned int j;
31024 struct section *sec = &secs[i];
31025
31026 if (sec->shdr.sh_type != SHT_REL_TYPE) {
31027@@ -812,6 +860,23 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
31028 {
31029 unsigned r_type = ELF32_R_TYPE(rel->r_info);
31030 int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
31031+ char *sym_strtab = sec->link->link->strtab;
31032+
31033+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
31034+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
31035+ return 0;
31036+
31037+#ifdef CONFIG_PAX_KERNEXEC
31038+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
31039+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
31040+ return 0;
31041+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
31042+ return 0;
31043+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
31044+ return 0;
31045+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
31046+ return 0;
31047+#endif
31048
31049 switch (r_type) {
31050 case R_386_NONE:
31051@@ -950,7 +1015,7 @@ static int write32_as_text(uint32_t v, FILE *f)
31052
31053 static void emit_relocs(int as_text, int use_real_mode)
31054 {
31055- int i;
31056+ unsigned int i;
31057 int (*write_reloc)(uint32_t, FILE *) = write32;
31058 int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
31059 const char *symname);
31060@@ -1026,10 +1091,11 @@ void process(FILE *fp, int use_real_mode, int as_text,
31061 {
31062 regex_init(use_real_mode);
31063 read_ehdr(fp);
31064+ read_phdrs(fp);
31065 read_shdrs(fp);
31066 read_strtabs(fp);
31067 read_symtabs(fp);
31068- read_relocs(fp);
31069+ read_relocs(fp, use_real_mode);
31070 if (ELF_BITS == 64)
31071 percpu_init();
31072 if (show_absolute_syms) {
31073diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
31074index 80ffa5b..a33bd15 100644
31075--- a/arch/x86/um/tls_32.c
31076+++ b/arch/x86/um/tls_32.c
31077@@ -260,7 +260,7 @@ out:
31078 if (unlikely(task == current &&
31079 !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
31080 printk(KERN_ERR "get_tls_entry: task with pid %d got here "
31081- "without flushed TLS.", current->pid);
31082+ "without flushed TLS.", task_pid_nr(current));
31083 }
31084
31085 return 0;
31086diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
31087index fd14be1..e3c79c0 100644
31088--- a/arch/x86/vdso/Makefile
31089+++ b/arch/x86/vdso/Makefile
31090@@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO $@
31091 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
31092 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
31093
31094-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
31095+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
31096 GCOV_PROFILE := n
31097
31098 #
31099diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
31100index 0faad64..39ef157 100644
31101--- a/arch/x86/vdso/vdso32-setup.c
31102+++ b/arch/x86/vdso/vdso32-setup.c
31103@@ -25,6 +25,7 @@
31104 #include <asm/tlbflush.h>
31105 #include <asm/vdso.h>
31106 #include <asm/proto.h>
31107+#include <asm/mman.h>
31108
31109 enum {
31110 VDSO_DISABLED = 0,
31111@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
31112 void enable_sep_cpu(void)
31113 {
31114 int cpu = get_cpu();
31115- struct tss_struct *tss = &per_cpu(init_tss, cpu);
31116+ struct tss_struct *tss = init_tss + cpu;
31117
31118 if (!boot_cpu_has(X86_FEATURE_SEP)) {
31119 put_cpu();
31120@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
31121 gate_vma.vm_start = FIXADDR_USER_START;
31122 gate_vma.vm_end = FIXADDR_USER_END;
31123 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
31124- gate_vma.vm_page_prot = __P101;
31125+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
31126
31127 return 0;
31128 }
31129@@ -330,14 +331,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
31130 if (compat)
31131 addr = VDSO_HIGH_BASE;
31132 else {
31133- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
31134+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
31135 if (IS_ERR_VALUE(addr)) {
31136 ret = addr;
31137 goto up_fail;
31138 }
31139 }
31140
31141- current->mm->context.vdso = (void *)addr;
31142+ current->mm->context.vdso = addr;
31143
31144 if (compat_uses_vma || !compat) {
31145 /*
31146@@ -353,11 +354,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
31147 }
31148
31149 current_thread_info()->sysenter_return =
31150- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
31151+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
31152
31153 up_fail:
31154 if (ret)
31155- current->mm->context.vdso = NULL;
31156+ current->mm->context.vdso = 0;
31157
31158 up_write(&mm->mmap_sem);
31159
31160@@ -404,8 +405,14 @@ __initcall(ia32_binfmt_init);
31161
31162 const char *arch_vma_name(struct vm_area_struct *vma)
31163 {
31164- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
31165+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
31166 return "[vdso]";
31167+
31168+#ifdef CONFIG_PAX_SEGMEXEC
31169+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
31170+ return "[vdso]";
31171+#endif
31172+
31173 return NULL;
31174 }
31175
31176@@ -415,7 +422,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
31177 * Check to see if the corresponding task was created in compat vdso
31178 * mode.
31179 */
31180- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
31181+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
31182 return &gate_vma;
31183 return NULL;
31184 }
31185diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
31186index 431e875..cbb23f3 100644
31187--- a/arch/x86/vdso/vma.c
31188+++ b/arch/x86/vdso/vma.c
31189@@ -16,8 +16,6 @@
31190 #include <asm/vdso.h>
31191 #include <asm/page.h>
31192
31193-unsigned int __read_mostly vdso_enabled = 1;
31194-
31195 extern char vdso_start[], vdso_end[];
31196 extern unsigned short vdso_sync_cpuid;
31197
31198@@ -141,7 +139,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
31199 * unaligned here as a result of stack start randomization.
31200 */
31201 addr = PAGE_ALIGN(addr);
31202- addr = align_vdso_addr(addr);
31203
31204 return addr;
31205 }
31206@@ -154,30 +151,31 @@ static int setup_additional_pages(struct linux_binprm *bprm,
31207 unsigned size)
31208 {
31209 struct mm_struct *mm = current->mm;
31210- unsigned long addr;
31211+ unsigned long addr = 0;
31212 int ret;
31213
31214- if (!vdso_enabled)
31215- return 0;
31216-
31217 down_write(&mm->mmap_sem);
31218+
31219+#ifdef CONFIG_PAX_RANDMMAP
31220+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
31221+#endif
31222+
31223 addr = vdso_addr(mm->start_stack, size);
31224+ addr = align_vdso_addr(addr);
31225 addr = get_unmapped_area(NULL, addr, size, 0, 0);
31226 if (IS_ERR_VALUE(addr)) {
31227 ret = addr;
31228 goto up_fail;
31229 }
31230
31231- current->mm->context.vdso = (void *)addr;
31232+ mm->context.vdso = addr;
31233
31234 ret = install_special_mapping(mm, addr, size,
31235 VM_READ|VM_EXEC|
31236 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
31237 pages);
31238- if (ret) {
31239- current->mm->context.vdso = NULL;
31240- goto up_fail;
31241- }
31242+ if (ret)
31243+ mm->context.vdso = 0;
31244
31245 up_fail:
31246 up_write(&mm->mmap_sem);
31247@@ -197,10 +195,3 @@ int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
31248 vdsox32_size);
31249 }
31250 #endif
31251-
31252-static __init int vdso_setup(char *s)
31253-{
31254- vdso_enabled = simple_strtoul(s, NULL, 0);
31255- return 0;
31256-}
31257-__setup("vdso=", vdso_setup);
31258diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
31259index a492be2..08678da 100644
31260--- a/arch/x86/xen/enlighten.c
31261+++ b/arch/x86/xen/enlighten.c
31262@@ -123,8 +123,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
31263
31264 struct shared_info xen_dummy_shared_info;
31265
31266-void *xen_initial_gdt;
31267-
31268 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
31269 __read_mostly int xen_have_vector_callback;
31270 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
31271@@ -542,8 +540,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
31272 {
31273 unsigned long va = dtr->address;
31274 unsigned int size = dtr->size + 1;
31275- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
31276- unsigned long frames[pages];
31277+ unsigned long frames[65536 / PAGE_SIZE];
31278 int f;
31279
31280 /*
31281@@ -591,8 +588,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
31282 {
31283 unsigned long va = dtr->address;
31284 unsigned int size = dtr->size + 1;
31285- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
31286- unsigned long frames[pages];
31287+ unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
31288 int f;
31289
31290 /*
31291@@ -600,7 +596,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
31292 * 8-byte entries, or 16 4k pages..
31293 */
31294
31295- BUG_ON(size > 65536);
31296+ BUG_ON(size > GDT_SIZE);
31297 BUG_ON(va & ~PAGE_MASK);
31298
31299 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
31300@@ -985,7 +981,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
31301 return 0;
31302 }
31303
31304-static void set_xen_basic_apic_ops(void)
31305+static void __init set_xen_basic_apic_ops(void)
31306 {
31307 apic->read = xen_apic_read;
31308 apic->write = xen_apic_write;
31309@@ -1290,30 +1286,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
31310 #endif
31311 };
31312
31313-static void xen_reboot(int reason)
31314+static __noreturn void xen_reboot(int reason)
31315 {
31316 struct sched_shutdown r = { .reason = reason };
31317
31318- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
31319- BUG();
31320+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
31321+ BUG();
31322 }
31323
31324-static void xen_restart(char *msg)
31325+static __noreturn void xen_restart(char *msg)
31326 {
31327 xen_reboot(SHUTDOWN_reboot);
31328 }
31329
31330-static void xen_emergency_restart(void)
31331+static __noreturn void xen_emergency_restart(void)
31332 {
31333 xen_reboot(SHUTDOWN_reboot);
31334 }
31335
31336-static void xen_machine_halt(void)
31337+static __noreturn void xen_machine_halt(void)
31338 {
31339 xen_reboot(SHUTDOWN_poweroff);
31340 }
31341
31342-static void xen_machine_power_off(void)
31343+static __noreturn void xen_machine_power_off(void)
31344 {
31345 if (pm_power_off)
31346 pm_power_off();
31347@@ -1464,7 +1460,17 @@ asmlinkage void __init xen_start_kernel(void)
31348 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
31349
31350 /* Work out if we support NX */
31351- x86_configure_nx();
31352+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
31353+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
31354+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
31355+ unsigned l, h;
31356+
31357+ __supported_pte_mask |= _PAGE_NX;
31358+ rdmsr(MSR_EFER, l, h);
31359+ l |= EFER_NX;
31360+ wrmsr(MSR_EFER, l, h);
31361+ }
31362+#endif
31363
31364 xen_setup_features();
31365
31366@@ -1495,13 +1501,6 @@ asmlinkage void __init xen_start_kernel(void)
31367
31368 machine_ops = xen_machine_ops;
31369
31370- /*
31371- * The only reliable way to retain the initial address of the
31372- * percpu gdt_page is to remember it here, so we can go and
31373- * mark it RW later, when the initial percpu area is freed.
31374- */
31375- xen_initial_gdt = &per_cpu(gdt_page, 0);
31376-
31377 xen_smp_init();
31378
31379 #ifdef CONFIG_ACPI_NUMA
31380@@ -1700,7 +1699,7 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
31381 return NOTIFY_OK;
31382 }
31383
31384-static struct notifier_block xen_hvm_cpu_notifier __cpuinitdata = {
31385+static struct notifier_block xen_hvm_cpu_notifier = {
31386 .notifier_call = xen_hvm_cpu_notify,
31387 };
31388
31389diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
31390index fdc3ba2..3daee39 100644
31391--- a/arch/x86/xen/mmu.c
31392+++ b/arch/x86/xen/mmu.c
31393@@ -1894,6 +1894,9 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
31394 /* L3_k[510] -> level2_kernel_pgt
31395 * L3_i[511] -> level2_fixmap_pgt */
31396 convert_pfn_mfn(level3_kernel_pgt);
31397+ convert_pfn_mfn(level3_vmalloc_start_pgt);
31398+ convert_pfn_mfn(level3_vmalloc_end_pgt);
31399+ convert_pfn_mfn(level3_vmemmap_pgt);
31400
31401 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
31402 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
31403@@ -1923,8 +1926,12 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
31404 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
31405 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
31406 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
31407+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
31408+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
31409+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
31410 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
31411 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
31412+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
31413 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
31414 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
31415
31416@@ -2108,6 +2115,7 @@ static void __init xen_post_allocator_init(void)
31417 pv_mmu_ops.set_pud = xen_set_pud;
31418 #if PAGETABLE_LEVELS == 4
31419 pv_mmu_ops.set_pgd = xen_set_pgd;
31420+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
31421 #endif
31422
31423 /* This will work as long as patching hasn't happened yet
31424@@ -2186,6 +2194,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
31425 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
31426 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
31427 .set_pgd = xen_set_pgd_hyper,
31428+ .set_pgd_batched = xen_set_pgd_hyper,
31429
31430 .alloc_pud = xen_alloc_pmd_init,
31431 .release_pud = xen_release_pmd_init,
31432diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
31433index d99cae8..18401e1 100644
31434--- a/arch/x86/xen/smp.c
31435+++ b/arch/x86/xen/smp.c
31436@@ -240,11 +240,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
31437 {
31438 BUG_ON(smp_processor_id() != 0);
31439 native_smp_prepare_boot_cpu();
31440-
31441- /* We've switched to the "real" per-cpu gdt, so make sure the
31442- old memory can be recycled */
31443- make_lowmem_page_readwrite(xen_initial_gdt);
31444-
31445 xen_filter_cpu_maps();
31446 xen_setup_vcpu_info_placement();
31447 }
31448@@ -314,7 +309,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
31449 ctxt->user_regs.ss = __KERNEL_DS;
31450 #ifdef CONFIG_X86_32
31451 ctxt->user_regs.fs = __KERNEL_PERCPU;
31452- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
31453+ savesegment(gs, ctxt->user_regs.gs);
31454 #else
31455 ctxt->gs_base_kernel = per_cpu_offset(cpu);
31456 #endif
31457@@ -324,8 +319,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
31458
31459 {
31460 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
31461- ctxt->user_regs.ds = __USER_DS;
31462- ctxt->user_regs.es = __USER_DS;
31463+ ctxt->user_regs.ds = __KERNEL_DS;
31464+ ctxt->user_regs.es = __KERNEL_DS;
31465
31466 xen_copy_trap_info(ctxt->trap_ctxt);
31467
31468@@ -370,13 +365,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle)
31469 int rc;
31470
31471 per_cpu(current_task, cpu) = idle;
31472+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
31473 #ifdef CONFIG_X86_32
31474 irq_ctx_init(cpu);
31475 #else
31476 clear_tsk_thread_flag(idle, TIF_FORK);
31477- per_cpu(kernel_stack, cpu) =
31478- (unsigned long)task_stack_page(idle) -
31479- KERNEL_STACK_OFFSET + THREAD_SIZE;
31480+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
31481 #endif
31482 xen_setup_runstate_info(cpu);
31483 xen_setup_timer(cpu);
31484@@ -651,7 +645,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
31485
31486 void __init xen_smp_init(void)
31487 {
31488- smp_ops = xen_smp_ops;
31489+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
31490 xen_fill_possible_map();
31491 xen_init_spinlocks();
31492 }
31493diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
31494index 33ca6e4..0ded929 100644
31495--- a/arch/x86/xen/xen-asm_32.S
31496+++ b/arch/x86/xen/xen-asm_32.S
31497@@ -84,14 +84,14 @@ ENTRY(xen_iret)
31498 ESP_OFFSET=4 # bytes pushed onto stack
31499
31500 /*
31501- * Store vcpu_info pointer for easy access. Do it this way to
31502- * avoid having to reload %fs
31503+ * Store vcpu_info pointer for easy access.
31504 */
31505 #ifdef CONFIG_SMP
31506- GET_THREAD_INFO(%eax)
31507- movl %ss:TI_cpu(%eax), %eax
31508- movl %ss:__per_cpu_offset(,%eax,4), %eax
31509- mov %ss:xen_vcpu(%eax), %eax
31510+ push %fs
31511+ mov $(__KERNEL_PERCPU), %eax
31512+ mov %eax, %fs
31513+ mov PER_CPU_VAR(xen_vcpu), %eax
31514+ pop %fs
31515 #else
31516 movl %ss:xen_vcpu, %eax
31517 #endif
31518diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
31519index 7faed58..ba4427c 100644
31520--- a/arch/x86/xen/xen-head.S
31521+++ b/arch/x86/xen/xen-head.S
31522@@ -19,6 +19,17 @@ ENTRY(startup_xen)
31523 #ifdef CONFIG_X86_32
31524 mov %esi,xen_start_info
31525 mov $init_thread_union+THREAD_SIZE,%esp
31526+#ifdef CONFIG_SMP
31527+ movl $cpu_gdt_table,%edi
31528+ movl $__per_cpu_load,%eax
31529+ movw %ax,__KERNEL_PERCPU + 2(%edi)
31530+ rorl $16,%eax
31531+ movb %al,__KERNEL_PERCPU + 4(%edi)
31532+ movb %ah,__KERNEL_PERCPU + 7(%edi)
31533+ movl $__per_cpu_end - 1,%eax
31534+ subl $__per_cpu_start,%eax
31535+ movw %ax,__KERNEL_PERCPU + 0(%edi)
31536+#endif
31537 #else
31538 mov %rsi,xen_start_info
31539 mov $init_thread_union+THREAD_SIZE,%rsp
31540diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
31541index a95b417..b6dbd0b 100644
31542--- a/arch/x86/xen/xen-ops.h
31543+++ b/arch/x86/xen/xen-ops.h
31544@@ -10,8 +10,6 @@
31545 extern const char xen_hypervisor_callback[];
31546 extern const char xen_failsafe_callback[];
31547
31548-extern void *xen_initial_gdt;
31549-
31550 struct trap_info;
31551 void xen_copy_trap_info(struct trap_info *traps);
31552
31553diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
31554index 525bd3d..ef888b1 100644
31555--- a/arch/xtensa/variants/dc232b/include/variant/core.h
31556+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
31557@@ -119,9 +119,9 @@
31558 ----------------------------------------------------------------------*/
31559
31560 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
31561-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
31562 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
31563 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
31564+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
31565
31566 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
31567 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
31568diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
31569index 2f33760..835e50a 100644
31570--- a/arch/xtensa/variants/fsf/include/variant/core.h
31571+++ b/arch/xtensa/variants/fsf/include/variant/core.h
31572@@ -11,6 +11,7 @@
31573 #ifndef _XTENSA_CORE_H
31574 #define _XTENSA_CORE_H
31575
31576+#include <linux/const.h>
31577
31578 /****************************************************************************
31579 Parameters Useful for Any Code, USER or PRIVILEGED
31580@@ -112,9 +113,9 @@
31581 ----------------------------------------------------------------------*/
31582
31583 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
31584-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
31585 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
31586 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
31587+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
31588
31589 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
31590 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
31591diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
31592index af00795..2bb8105 100644
31593--- a/arch/xtensa/variants/s6000/include/variant/core.h
31594+++ b/arch/xtensa/variants/s6000/include/variant/core.h
31595@@ -11,6 +11,7 @@
31596 #ifndef _XTENSA_CORE_CONFIGURATION_H
31597 #define _XTENSA_CORE_CONFIGURATION_H
31598
31599+#include <linux/const.h>
31600
31601 /****************************************************************************
31602 Parameters Useful for Any Code, USER or PRIVILEGED
31603@@ -118,9 +119,9 @@
31604 ----------------------------------------------------------------------*/
31605
31606 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
31607-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
31608 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
31609 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
31610+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
31611
31612 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
31613 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
31614diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
31615index 58916af..eb9dbcf6 100644
31616--- a/block/blk-iopoll.c
31617+++ b/block/blk-iopoll.c
31618@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
31619 }
31620 EXPORT_SYMBOL(blk_iopoll_complete);
31621
31622-static void blk_iopoll_softirq(struct softirq_action *h)
31623+static void blk_iopoll_softirq(void)
31624 {
31625 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
31626 int rearm = 0, budget = blk_iopoll_budget;
31627@@ -209,7 +209,7 @@ static int __cpuinit blk_iopoll_cpu_notify(struct notifier_block *self,
31628 return NOTIFY_OK;
31629 }
31630
31631-static struct notifier_block __cpuinitdata blk_iopoll_cpu_notifier = {
31632+static struct notifier_block blk_iopoll_cpu_notifier = {
31633 .notifier_call = blk_iopoll_cpu_notify,
31634 };
31635
31636diff --git a/block/blk-map.c b/block/blk-map.c
31637index 623e1cd..ca1e109 100644
31638--- a/block/blk-map.c
31639+++ b/block/blk-map.c
31640@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
31641 if (!len || !kbuf)
31642 return -EINVAL;
31643
31644- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
31645+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
31646 if (do_copy)
31647 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
31648 else
31649diff --git a/block/blk-softirq.c b/block/blk-softirq.c
31650index 467c8de..f3628c5 100644
31651--- a/block/blk-softirq.c
31652+++ b/block/blk-softirq.c
31653@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
31654 * Softirq action handler - move entries to local list and loop over them
31655 * while passing them to the queue registered handler.
31656 */
31657-static void blk_done_softirq(struct softirq_action *h)
31658+static void blk_done_softirq(void)
31659 {
31660 struct list_head *cpu_list, local_list;
31661
31662@@ -98,7 +98,7 @@ static int __cpuinit blk_cpu_notify(struct notifier_block *self,
31663 return NOTIFY_OK;
31664 }
31665
31666-static struct notifier_block __cpuinitdata blk_cpu_notifier = {
31667+static struct notifier_block blk_cpu_notifier = {
31668 .notifier_call = blk_cpu_notify,
31669 };
31670
31671diff --git a/block/bsg.c b/block/bsg.c
31672index 420a5a9..23834aa 100644
31673--- a/block/bsg.c
31674+++ b/block/bsg.c
31675@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
31676 struct sg_io_v4 *hdr, struct bsg_device *bd,
31677 fmode_t has_write_perm)
31678 {
31679+ unsigned char tmpcmd[sizeof(rq->__cmd)];
31680+ unsigned char *cmdptr;
31681+
31682 if (hdr->request_len > BLK_MAX_CDB) {
31683 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
31684 if (!rq->cmd)
31685 return -ENOMEM;
31686- }
31687+ cmdptr = rq->cmd;
31688+ } else
31689+ cmdptr = tmpcmd;
31690
31691- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
31692+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
31693 hdr->request_len))
31694 return -EFAULT;
31695
31696+ if (cmdptr != rq->cmd)
31697+ memcpy(rq->cmd, cmdptr, hdr->request_len);
31698+
31699 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
31700 if (blk_verify_command(rq->cmd, has_write_perm))
31701 return -EPERM;
31702diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
31703index 7c668c8..db3521c 100644
31704--- a/block/compat_ioctl.c
31705+++ b/block/compat_ioctl.c
31706@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
31707 err |= __get_user(f->spec1, &uf->spec1);
31708 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
31709 err |= __get_user(name, &uf->name);
31710- f->name = compat_ptr(name);
31711+ f->name = (void __force_kernel *)compat_ptr(name);
31712 if (err) {
31713 err = -EFAULT;
31714 goto out;
31715diff --git a/block/genhd.c b/block/genhd.c
31716index cdeb527..10aa34d 100644
31717--- a/block/genhd.c
31718+++ b/block/genhd.c
31719@@ -467,21 +467,24 @@ static char *bdevt_str(dev_t devt, char *buf)
31720
31721 /*
31722 * Register device numbers dev..(dev+range-1)
31723- * range must be nonzero
31724+ * Noop if @range is zero.
31725 * The hash chain is sorted on range, so that subranges can override.
31726 */
31727 void blk_register_region(dev_t devt, unsigned long range, struct module *module,
31728 struct kobject *(*probe)(dev_t, int *, void *),
31729 int (*lock)(dev_t, void *), void *data)
31730 {
31731- kobj_map(bdev_map, devt, range, module, probe, lock, data);
31732+ if (range)
31733+ kobj_map(bdev_map, devt, range, module, probe, lock, data);
31734 }
31735
31736 EXPORT_SYMBOL(blk_register_region);
31737
31738+/* undo blk_register_region(), noop if @range is zero */
31739 void blk_unregister_region(dev_t devt, unsigned long range)
31740 {
31741- kobj_unmap(bdev_map, devt, range);
31742+ if (range)
31743+ kobj_unmap(bdev_map, devt, range);
31744 }
31745
31746 EXPORT_SYMBOL(blk_unregister_region);
31747diff --git a/block/partitions/efi.c b/block/partitions/efi.c
31748index c85fc89..51e690b 100644
31749--- a/block/partitions/efi.c
31750+++ b/block/partitions/efi.c
31751@@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
31752 if (!gpt)
31753 return NULL;
31754
31755+ if (!le32_to_cpu(gpt->num_partition_entries))
31756+ return NULL;
31757+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
31758+ if (!pte)
31759+ return NULL;
31760+
31761 count = le32_to_cpu(gpt->num_partition_entries) *
31762 le32_to_cpu(gpt->sizeof_partition_entry);
31763- if (!count)
31764- return NULL;
31765- pte = kmalloc(count, GFP_KERNEL);
31766- if (!pte)
31767- return NULL;
31768-
31769 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
31770 (u8 *) pte,
31771 count) < count) {
31772diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
31773index a5ffcc9..3cedc9c 100644
31774--- a/block/scsi_ioctl.c
31775+++ b/block/scsi_ioctl.c
31776@@ -224,8 +224,20 @@ EXPORT_SYMBOL(blk_verify_command);
31777 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
31778 struct sg_io_hdr *hdr, fmode_t mode)
31779 {
31780- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
31781+ unsigned char tmpcmd[sizeof(rq->__cmd)];
31782+ unsigned char *cmdptr;
31783+
31784+ if (rq->cmd != rq->__cmd)
31785+ cmdptr = rq->cmd;
31786+ else
31787+ cmdptr = tmpcmd;
31788+
31789+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
31790 return -EFAULT;
31791+
31792+ if (cmdptr != rq->cmd)
31793+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
31794+
31795 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
31796 return -EPERM;
31797
31798@@ -434,6 +446,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
31799 int err;
31800 unsigned int in_len, out_len, bytes, opcode, cmdlen;
31801 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
31802+ unsigned char tmpcmd[sizeof(rq->__cmd)];
31803+ unsigned char *cmdptr;
31804
31805 if (!sic)
31806 return -EINVAL;
31807@@ -467,9 +481,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
31808 */
31809 err = -EFAULT;
31810 rq->cmd_len = cmdlen;
31811- if (copy_from_user(rq->cmd, sic->data, cmdlen))
31812+
31813+ if (rq->cmd != rq->__cmd)
31814+ cmdptr = rq->cmd;
31815+ else
31816+ cmdptr = tmpcmd;
31817+
31818+ if (copy_from_user(cmdptr, sic->data, cmdlen))
31819 goto error;
31820
31821+ if (rq->cmd != cmdptr)
31822+ memcpy(rq->cmd, cmdptr, cmdlen);
31823+
31824 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
31825 goto error;
31826
31827diff --git a/crypto/cryptd.c b/crypto/cryptd.c
31828index 7bdd61b..afec999 100644
31829--- a/crypto/cryptd.c
31830+++ b/crypto/cryptd.c
31831@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
31832
31833 struct cryptd_blkcipher_request_ctx {
31834 crypto_completion_t complete;
31835-};
31836+} __no_const;
31837
31838 struct cryptd_hash_ctx {
31839 struct crypto_shash *child;
31840@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
31841
31842 struct cryptd_aead_request_ctx {
31843 crypto_completion_t complete;
31844-};
31845+} __no_const;
31846
31847 static void cryptd_queue_worker(struct work_struct *work);
31848
31849diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
31850index b2c99dc..476c9fb 100644
31851--- a/crypto/pcrypt.c
31852+++ b/crypto/pcrypt.c
31853@@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
31854 int ret;
31855
31856 pinst->kobj.kset = pcrypt_kset;
31857- ret = kobject_add(&pinst->kobj, NULL, name);
31858+ ret = kobject_add(&pinst->kobj, NULL, "%s", name);
31859 if (!ret)
31860 kobject_uevent(&pinst->kobj, KOBJ_ADD);
31861
31862@@ -455,8 +455,8 @@ static int pcrypt_init_padata(struct padata_pcrypt *pcrypt,
31863
31864 get_online_cpus();
31865
31866- pcrypt->wq = alloc_workqueue(name,
31867- WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
31868+ pcrypt->wq = alloc_workqueue("%s",
31869+ WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1, name);
31870 if (!pcrypt->wq)
31871 goto err;
31872
31873diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
31874index f220d64..d359ad6 100644
31875--- a/drivers/acpi/apei/apei-internal.h
31876+++ b/drivers/acpi/apei/apei-internal.h
31877@@ -20,7 +20,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
31878 struct apei_exec_ins_type {
31879 u32 flags;
31880 apei_exec_ins_func_t run;
31881-};
31882+} __do_const;
31883
31884 struct apei_exec_context {
31885 u32 ip;
31886diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
31887index 33dc6a0..4b24b47 100644
31888--- a/drivers/acpi/apei/cper.c
31889+++ b/drivers/acpi/apei/cper.c
31890@@ -39,12 +39,12 @@
31891 */
31892 u64 cper_next_record_id(void)
31893 {
31894- static atomic64_t seq;
31895+ static atomic64_unchecked_t seq;
31896
31897- if (!atomic64_read(&seq))
31898- atomic64_set(&seq, ((u64)get_seconds()) << 32);
31899+ if (!atomic64_read_unchecked(&seq))
31900+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
31901
31902- return atomic64_inc_return(&seq);
31903+ return atomic64_inc_return_unchecked(&seq);
31904 }
31905 EXPORT_SYMBOL_GPL(cper_next_record_id);
31906
31907diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
31908index be60399..778b33e8 100644
31909--- a/drivers/acpi/bgrt.c
31910+++ b/drivers/acpi/bgrt.c
31911@@ -87,8 +87,10 @@ static int __init bgrt_init(void)
31912 return -ENODEV;
31913
31914 sysfs_bin_attr_init(&image_attr);
31915- image_attr.private = bgrt_image;
31916- image_attr.size = bgrt_image_size;
31917+ pax_open_kernel();
31918+ *(void **)&image_attr.private = bgrt_image;
31919+ *(size_t *)&image_attr.size = bgrt_image_size;
31920+ pax_close_kernel();
31921
31922 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
31923 if (!bgrt_kobj)
31924diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
31925index cb96296..b81293b 100644
31926--- a/drivers/acpi/blacklist.c
31927+++ b/drivers/acpi/blacklist.c
31928@@ -52,7 +52,7 @@ struct acpi_blacklist_item {
31929 u32 is_critical_error;
31930 };
31931
31932-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
31933+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
31934
31935 /*
31936 * POLICY: If *anything* doesn't work, put it on the blacklist.
31937@@ -193,7 +193,7 @@ static int __init dmi_disable_osi_win7(const struct dmi_system_id *d)
31938 return 0;
31939 }
31940
31941-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
31942+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
31943 {
31944 .callback = dmi_disable_osi_vista,
31945 .ident = "Fujitsu Siemens",
31946diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
31947index 7586544..636a2f0 100644
31948--- a/drivers/acpi/ec_sys.c
31949+++ b/drivers/acpi/ec_sys.c
31950@@ -12,6 +12,7 @@
31951 #include <linux/acpi.h>
31952 #include <linux/debugfs.h>
31953 #include <linux/module.h>
31954+#include <linux/uaccess.h>
31955 #include "internal.h"
31956
31957 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
31958@@ -34,7 +35,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
31959 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
31960 */
31961 unsigned int size = EC_SPACE_SIZE;
31962- u8 *data = (u8 *) buf;
31963+ u8 data;
31964 loff_t init_off = *off;
31965 int err = 0;
31966
31967@@ -47,9 +48,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
31968 size = count;
31969
31970 while (size) {
31971- err = ec_read(*off, &data[*off - init_off]);
31972+ err = ec_read(*off, &data);
31973 if (err)
31974 return err;
31975+ if (put_user(data, &buf[*off - init_off]))
31976+ return -EFAULT;
31977 *off += 1;
31978 size--;
31979 }
31980@@ -65,7 +68,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
31981
31982 unsigned int size = count;
31983 loff_t init_off = *off;
31984- u8 *data = (u8 *) buf;
31985 int err = 0;
31986
31987 if (*off >= EC_SPACE_SIZE)
31988@@ -76,7 +78,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
31989 }
31990
31991 while (size) {
31992- u8 byte_write = data[*off - init_off];
31993+ u8 byte_write;
31994+ if (get_user(byte_write, &buf[*off - init_off]))
31995+ return -EFAULT;
31996 err = ec_write(*off, byte_write);
31997 if (err)
31998 return err;
31999diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
32000index eb133c7..f571552 100644
32001--- a/drivers/acpi/processor_idle.c
32002+++ b/drivers/acpi/processor_idle.c
32003@@ -994,7 +994,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
32004 {
32005 int i, count = CPUIDLE_DRIVER_STATE_START;
32006 struct acpi_processor_cx *cx;
32007- struct cpuidle_state *state;
32008+ cpuidle_state_no_const *state;
32009 struct cpuidle_driver *drv = &acpi_idle_driver;
32010
32011 if (!pr->flags.power_setup_done)
32012diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
32013index fcae5fa..e9f71ea 100644
32014--- a/drivers/acpi/sysfs.c
32015+++ b/drivers/acpi/sysfs.c
32016@@ -423,11 +423,11 @@ static u32 num_counters;
32017 static struct attribute **all_attrs;
32018 static u32 acpi_gpe_count;
32019
32020-static struct attribute_group interrupt_stats_attr_group = {
32021+static attribute_group_no_const interrupt_stats_attr_group = {
32022 .name = "interrupts",
32023 };
32024
32025-static struct kobj_attribute *counter_attrs;
32026+static kobj_attribute_no_const *counter_attrs;
32027
32028 static void delete_gpe_attr_array(void)
32029 {
32030diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
32031index a70ff15..f1ff44e 100644
32032--- a/drivers/ata/libahci.c
32033+++ b/drivers/ata/libahci.c
32034@@ -1230,7 +1230,7 @@ int ahci_kick_engine(struct ata_port *ap)
32035 }
32036 EXPORT_SYMBOL_GPL(ahci_kick_engine);
32037
32038-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
32039+static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
32040 struct ata_taskfile *tf, int is_cmd, u16 flags,
32041 unsigned long timeout_msec)
32042 {
32043diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
32044index adf002a..39bb8f9 100644
32045--- a/drivers/ata/libata-core.c
32046+++ b/drivers/ata/libata-core.c
32047@@ -4792,7 +4792,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
32048 struct ata_port *ap;
32049 unsigned int tag;
32050
32051- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
32052+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
32053 ap = qc->ap;
32054
32055 qc->flags = 0;
32056@@ -4808,7 +4808,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
32057 struct ata_port *ap;
32058 struct ata_link *link;
32059
32060- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
32061+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
32062 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
32063 ap = qc->ap;
32064 link = qc->dev->link;
32065@@ -5926,6 +5926,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
32066 return;
32067
32068 spin_lock(&lock);
32069+ pax_open_kernel();
32070
32071 for (cur = ops->inherits; cur; cur = cur->inherits) {
32072 void **inherit = (void **)cur;
32073@@ -5939,8 +5940,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
32074 if (IS_ERR(*pp))
32075 *pp = NULL;
32076
32077- ops->inherits = NULL;
32078+ *(struct ata_port_operations **)&ops->inherits = NULL;
32079
32080+ pax_close_kernel();
32081 spin_unlock(&lock);
32082 }
32083
32084diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
32085index 7638121..357a965 100644
32086--- a/drivers/ata/pata_arasan_cf.c
32087+++ b/drivers/ata/pata_arasan_cf.c
32088@@ -865,7 +865,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
32089 /* Handle platform specific quirks */
32090 if (quirk) {
32091 if (quirk & CF_BROKEN_PIO) {
32092- ap->ops->set_piomode = NULL;
32093+ pax_open_kernel();
32094+ *(void **)&ap->ops->set_piomode = NULL;
32095+ pax_close_kernel();
32096 ap->pio_mask = 0;
32097 }
32098 if (quirk & CF_BROKEN_MWDMA)
32099diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
32100index f9b983a..887b9d8 100644
32101--- a/drivers/atm/adummy.c
32102+++ b/drivers/atm/adummy.c
32103@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
32104 vcc->pop(vcc, skb);
32105 else
32106 dev_kfree_skb_any(skb);
32107- atomic_inc(&vcc->stats->tx);
32108+ atomic_inc_unchecked(&vcc->stats->tx);
32109
32110 return 0;
32111 }
32112diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
32113index 77a7480d..05cde58 100644
32114--- a/drivers/atm/ambassador.c
32115+++ b/drivers/atm/ambassador.c
32116@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
32117 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
32118
32119 // VC layer stats
32120- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
32121+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
32122
32123 // free the descriptor
32124 kfree (tx_descr);
32125@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
32126 dump_skb ("<<<", vc, skb);
32127
32128 // VC layer stats
32129- atomic_inc(&atm_vcc->stats->rx);
32130+ atomic_inc_unchecked(&atm_vcc->stats->rx);
32131 __net_timestamp(skb);
32132 // end of our responsibility
32133 atm_vcc->push (atm_vcc, skb);
32134@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
32135 } else {
32136 PRINTK (KERN_INFO, "dropped over-size frame");
32137 // should we count this?
32138- atomic_inc(&atm_vcc->stats->rx_drop);
32139+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
32140 }
32141
32142 } else {
32143@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
32144 }
32145
32146 if (check_area (skb->data, skb->len)) {
32147- atomic_inc(&atm_vcc->stats->tx_err);
32148+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
32149 return -ENOMEM; // ?
32150 }
32151
32152diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
32153index 0e3f8f9..765a7a5 100644
32154--- a/drivers/atm/atmtcp.c
32155+++ b/drivers/atm/atmtcp.c
32156@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
32157 if (vcc->pop) vcc->pop(vcc,skb);
32158 else dev_kfree_skb(skb);
32159 if (dev_data) return 0;
32160- atomic_inc(&vcc->stats->tx_err);
32161+ atomic_inc_unchecked(&vcc->stats->tx_err);
32162 return -ENOLINK;
32163 }
32164 size = skb->len+sizeof(struct atmtcp_hdr);
32165@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
32166 if (!new_skb) {
32167 if (vcc->pop) vcc->pop(vcc,skb);
32168 else dev_kfree_skb(skb);
32169- atomic_inc(&vcc->stats->tx_err);
32170+ atomic_inc_unchecked(&vcc->stats->tx_err);
32171 return -ENOBUFS;
32172 }
32173 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
32174@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
32175 if (vcc->pop) vcc->pop(vcc,skb);
32176 else dev_kfree_skb(skb);
32177 out_vcc->push(out_vcc,new_skb);
32178- atomic_inc(&vcc->stats->tx);
32179- atomic_inc(&out_vcc->stats->rx);
32180+ atomic_inc_unchecked(&vcc->stats->tx);
32181+ atomic_inc_unchecked(&out_vcc->stats->rx);
32182 return 0;
32183 }
32184
32185@@ -299,7 +299,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
32186 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
32187 read_unlock(&vcc_sklist_lock);
32188 if (!out_vcc) {
32189- atomic_inc(&vcc->stats->tx_err);
32190+ atomic_inc_unchecked(&vcc->stats->tx_err);
32191 goto done;
32192 }
32193 skb_pull(skb,sizeof(struct atmtcp_hdr));
32194@@ -311,8 +311,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
32195 __net_timestamp(new_skb);
32196 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
32197 out_vcc->push(out_vcc,new_skb);
32198- atomic_inc(&vcc->stats->tx);
32199- atomic_inc(&out_vcc->stats->rx);
32200+ atomic_inc_unchecked(&vcc->stats->tx);
32201+ atomic_inc_unchecked(&out_vcc->stats->rx);
32202 done:
32203 if (vcc->pop) vcc->pop(vcc,skb);
32204 else dev_kfree_skb(skb);
32205diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
32206index b1955ba..b179940 100644
32207--- a/drivers/atm/eni.c
32208+++ b/drivers/atm/eni.c
32209@@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
32210 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
32211 vcc->dev->number);
32212 length = 0;
32213- atomic_inc(&vcc->stats->rx_err);
32214+ atomic_inc_unchecked(&vcc->stats->rx_err);
32215 }
32216 else {
32217 length = ATM_CELL_SIZE-1; /* no HEC */
32218@@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
32219 size);
32220 }
32221 eff = length = 0;
32222- atomic_inc(&vcc->stats->rx_err);
32223+ atomic_inc_unchecked(&vcc->stats->rx_err);
32224 }
32225 else {
32226 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
32227@@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
32228 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
32229 vcc->dev->number,vcc->vci,length,size << 2,descr);
32230 length = eff = 0;
32231- atomic_inc(&vcc->stats->rx_err);
32232+ atomic_inc_unchecked(&vcc->stats->rx_err);
32233 }
32234 }
32235 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
32236@@ -767,7 +767,7 @@ rx_dequeued++;
32237 vcc->push(vcc,skb);
32238 pushed++;
32239 }
32240- atomic_inc(&vcc->stats->rx);
32241+ atomic_inc_unchecked(&vcc->stats->rx);
32242 }
32243 wake_up(&eni_dev->rx_wait);
32244 }
32245@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
32246 PCI_DMA_TODEVICE);
32247 if (vcc->pop) vcc->pop(vcc,skb);
32248 else dev_kfree_skb_irq(skb);
32249- atomic_inc(&vcc->stats->tx);
32250+ atomic_inc_unchecked(&vcc->stats->tx);
32251 wake_up(&eni_dev->tx_wait);
32252 dma_complete++;
32253 }
32254diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
32255index b41c948..a002b17 100644
32256--- a/drivers/atm/firestream.c
32257+++ b/drivers/atm/firestream.c
32258@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
32259 }
32260 }
32261
32262- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
32263+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
32264
32265 fs_dprintk (FS_DEBUG_TXMEM, "i");
32266 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
32267@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
32268 #endif
32269 skb_put (skb, qe->p1 & 0xffff);
32270 ATM_SKB(skb)->vcc = atm_vcc;
32271- atomic_inc(&atm_vcc->stats->rx);
32272+ atomic_inc_unchecked(&atm_vcc->stats->rx);
32273 __net_timestamp(skb);
32274 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
32275 atm_vcc->push (atm_vcc, skb);
32276@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
32277 kfree (pe);
32278 }
32279 if (atm_vcc)
32280- atomic_inc(&atm_vcc->stats->rx_drop);
32281+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
32282 break;
32283 case 0x1f: /* Reassembly abort: no buffers. */
32284 /* Silently increment error counter. */
32285 if (atm_vcc)
32286- atomic_inc(&atm_vcc->stats->rx_drop);
32287+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
32288 break;
32289 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
32290 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
32291diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
32292index 204814e..cede831 100644
32293--- a/drivers/atm/fore200e.c
32294+++ b/drivers/atm/fore200e.c
32295@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
32296 #endif
32297 /* check error condition */
32298 if (*entry->status & STATUS_ERROR)
32299- atomic_inc(&vcc->stats->tx_err);
32300+ atomic_inc_unchecked(&vcc->stats->tx_err);
32301 else
32302- atomic_inc(&vcc->stats->tx);
32303+ atomic_inc_unchecked(&vcc->stats->tx);
32304 }
32305 }
32306
32307@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
32308 if (skb == NULL) {
32309 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
32310
32311- atomic_inc(&vcc->stats->rx_drop);
32312+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32313 return -ENOMEM;
32314 }
32315
32316@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
32317
32318 dev_kfree_skb_any(skb);
32319
32320- atomic_inc(&vcc->stats->rx_drop);
32321+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32322 return -ENOMEM;
32323 }
32324
32325 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
32326
32327 vcc->push(vcc, skb);
32328- atomic_inc(&vcc->stats->rx);
32329+ atomic_inc_unchecked(&vcc->stats->rx);
32330
32331 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
32332
32333@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
32334 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
32335 fore200e->atm_dev->number,
32336 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
32337- atomic_inc(&vcc->stats->rx_err);
32338+ atomic_inc_unchecked(&vcc->stats->rx_err);
32339 }
32340 }
32341
32342@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
32343 goto retry_here;
32344 }
32345
32346- atomic_inc(&vcc->stats->tx_err);
32347+ atomic_inc_unchecked(&vcc->stats->tx_err);
32348
32349 fore200e->tx_sat++;
32350 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
32351diff --git a/drivers/atm/he.c b/drivers/atm/he.c
32352index 507362a..a845e57 100644
32353--- a/drivers/atm/he.c
32354+++ b/drivers/atm/he.c
32355@@ -1698,7 +1698,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
32356
32357 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
32358 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
32359- atomic_inc(&vcc->stats->rx_drop);
32360+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32361 goto return_host_buffers;
32362 }
32363
32364@@ -1725,7 +1725,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
32365 RBRQ_LEN_ERR(he_dev->rbrq_head)
32366 ? "LEN_ERR" : "",
32367 vcc->vpi, vcc->vci);
32368- atomic_inc(&vcc->stats->rx_err);
32369+ atomic_inc_unchecked(&vcc->stats->rx_err);
32370 goto return_host_buffers;
32371 }
32372
32373@@ -1777,7 +1777,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
32374 vcc->push(vcc, skb);
32375 spin_lock(&he_dev->global_lock);
32376
32377- atomic_inc(&vcc->stats->rx);
32378+ atomic_inc_unchecked(&vcc->stats->rx);
32379
32380 return_host_buffers:
32381 ++pdus_assembled;
32382@@ -2103,7 +2103,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
32383 tpd->vcc->pop(tpd->vcc, tpd->skb);
32384 else
32385 dev_kfree_skb_any(tpd->skb);
32386- atomic_inc(&tpd->vcc->stats->tx_err);
32387+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
32388 }
32389 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
32390 return;
32391@@ -2515,7 +2515,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
32392 vcc->pop(vcc, skb);
32393 else
32394 dev_kfree_skb_any(skb);
32395- atomic_inc(&vcc->stats->tx_err);
32396+ atomic_inc_unchecked(&vcc->stats->tx_err);
32397 return -EINVAL;
32398 }
32399
32400@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
32401 vcc->pop(vcc, skb);
32402 else
32403 dev_kfree_skb_any(skb);
32404- atomic_inc(&vcc->stats->tx_err);
32405+ atomic_inc_unchecked(&vcc->stats->tx_err);
32406 return -EINVAL;
32407 }
32408 #endif
32409@@ -2538,7 +2538,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
32410 vcc->pop(vcc, skb);
32411 else
32412 dev_kfree_skb_any(skb);
32413- atomic_inc(&vcc->stats->tx_err);
32414+ atomic_inc_unchecked(&vcc->stats->tx_err);
32415 spin_unlock_irqrestore(&he_dev->global_lock, flags);
32416 return -ENOMEM;
32417 }
32418@@ -2580,7 +2580,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
32419 vcc->pop(vcc, skb);
32420 else
32421 dev_kfree_skb_any(skb);
32422- atomic_inc(&vcc->stats->tx_err);
32423+ atomic_inc_unchecked(&vcc->stats->tx_err);
32424 spin_unlock_irqrestore(&he_dev->global_lock, flags);
32425 return -ENOMEM;
32426 }
32427@@ -2611,7 +2611,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
32428 __enqueue_tpd(he_dev, tpd, cid);
32429 spin_unlock_irqrestore(&he_dev->global_lock, flags);
32430
32431- atomic_inc(&vcc->stats->tx);
32432+ atomic_inc_unchecked(&vcc->stats->tx);
32433
32434 return 0;
32435 }
32436diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
32437index 1dc0519..1aadaf7 100644
32438--- a/drivers/atm/horizon.c
32439+++ b/drivers/atm/horizon.c
32440@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
32441 {
32442 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
32443 // VC layer stats
32444- atomic_inc(&vcc->stats->rx);
32445+ atomic_inc_unchecked(&vcc->stats->rx);
32446 __net_timestamp(skb);
32447 // end of our responsibility
32448 vcc->push (vcc, skb);
32449@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
32450 dev->tx_iovec = NULL;
32451
32452 // VC layer stats
32453- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
32454+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
32455
32456 // free the skb
32457 hrz_kfree_skb (skb);
32458diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
32459index 272f009..a18ba55 100644
32460--- a/drivers/atm/idt77252.c
32461+++ b/drivers/atm/idt77252.c
32462@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
32463 else
32464 dev_kfree_skb(skb);
32465
32466- atomic_inc(&vcc->stats->tx);
32467+ atomic_inc_unchecked(&vcc->stats->tx);
32468 }
32469
32470 atomic_dec(&scq->used);
32471@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32472 if ((sb = dev_alloc_skb(64)) == NULL) {
32473 printk("%s: Can't allocate buffers for aal0.\n",
32474 card->name);
32475- atomic_add(i, &vcc->stats->rx_drop);
32476+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
32477 break;
32478 }
32479 if (!atm_charge(vcc, sb->truesize)) {
32480 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
32481 card->name);
32482- atomic_add(i - 1, &vcc->stats->rx_drop);
32483+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
32484 dev_kfree_skb(sb);
32485 break;
32486 }
32487@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32488 ATM_SKB(sb)->vcc = vcc;
32489 __net_timestamp(sb);
32490 vcc->push(vcc, sb);
32491- atomic_inc(&vcc->stats->rx);
32492+ atomic_inc_unchecked(&vcc->stats->rx);
32493
32494 cell += ATM_CELL_PAYLOAD;
32495 }
32496@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32497 "(CDC: %08x)\n",
32498 card->name, len, rpp->len, readl(SAR_REG_CDC));
32499 recycle_rx_pool_skb(card, rpp);
32500- atomic_inc(&vcc->stats->rx_err);
32501+ atomic_inc_unchecked(&vcc->stats->rx_err);
32502 return;
32503 }
32504 if (stat & SAR_RSQE_CRC) {
32505 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
32506 recycle_rx_pool_skb(card, rpp);
32507- atomic_inc(&vcc->stats->rx_err);
32508+ atomic_inc_unchecked(&vcc->stats->rx_err);
32509 return;
32510 }
32511 if (skb_queue_len(&rpp->queue) > 1) {
32512@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32513 RXPRINTK("%s: Can't alloc RX skb.\n",
32514 card->name);
32515 recycle_rx_pool_skb(card, rpp);
32516- atomic_inc(&vcc->stats->rx_err);
32517+ atomic_inc_unchecked(&vcc->stats->rx_err);
32518 return;
32519 }
32520 if (!atm_charge(vcc, skb->truesize)) {
32521@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32522 __net_timestamp(skb);
32523
32524 vcc->push(vcc, skb);
32525- atomic_inc(&vcc->stats->rx);
32526+ atomic_inc_unchecked(&vcc->stats->rx);
32527
32528 return;
32529 }
32530@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32531 __net_timestamp(skb);
32532
32533 vcc->push(vcc, skb);
32534- atomic_inc(&vcc->stats->rx);
32535+ atomic_inc_unchecked(&vcc->stats->rx);
32536
32537 if (skb->truesize > SAR_FB_SIZE_3)
32538 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
32539@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
32540 if (vcc->qos.aal != ATM_AAL0) {
32541 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
32542 card->name, vpi, vci);
32543- atomic_inc(&vcc->stats->rx_drop);
32544+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32545 goto drop;
32546 }
32547
32548 if ((sb = dev_alloc_skb(64)) == NULL) {
32549 printk("%s: Can't allocate buffers for AAL0.\n",
32550 card->name);
32551- atomic_inc(&vcc->stats->rx_err);
32552+ atomic_inc_unchecked(&vcc->stats->rx_err);
32553 goto drop;
32554 }
32555
32556@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
32557 ATM_SKB(sb)->vcc = vcc;
32558 __net_timestamp(sb);
32559 vcc->push(vcc, sb);
32560- atomic_inc(&vcc->stats->rx);
32561+ atomic_inc_unchecked(&vcc->stats->rx);
32562
32563 drop:
32564 skb_pull(queue, 64);
32565@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
32566
32567 if (vc == NULL) {
32568 printk("%s: NULL connection in send().\n", card->name);
32569- atomic_inc(&vcc->stats->tx_err);
32570+ atomic_inc_unchecked(&vcc->stats->tx_err);
32571 dev_kfree_skb(skb);
32572 return -EINVAL;
32573 }
32574 if (!test_bit(VCF_TX, &vc->flags)) {
32575 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
32576- atomic_inc(&vcc->stats->tx_err);
32577+ atomic_inc_unchecked(&vcc->stats->tx_err);
32578 dev_kfree_skb(skb);
32579 return -EINVAL;
32580 }
32581@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
32582 break;
32583 default:
32584 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
32585- atomic_inc(&vcc->stats->tx_err);
32586+ atomic_inc_unchecked(&vcc->stats->tx_err);
32587 dev_kfree_skb(skb);
32588 return -EINVAL;
32589 }
32590
32591 if (skb_shinfo(skb)->nr_frags != 0) {
32592 printk("%s: No scatter-gather yet.\n", card->name);
32593- atomic_inc(&vcc->stats->tx_err);
32594+ atomic_inc_unchecked(&vcc->stats->tx_err);
32595 dev_kfree_skb(skb);
32596 return -EINVAL;
32597 }
32598@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
32599
32600 err = queue_skb(card, vc, skb, oam);
32601 if (err) {
32602- atomic_inc(&vcc->stats->tx_err);
32603+ atomic_inc_unchecked(&vcc->stats->tx_err);
32604 dev_kfree_skb(skb);
32605 return err;
32606 }
32607@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
32608 skb = dev_alloc_skb(64);
32609 if (!skb) {
32610 printk("%s: Out of memory in send_oam().\n", card->name);
32611- atomic_inc(&vcc->stats->tx_err);
32612+ atomic_inc_unchecked(&vcc->stats->tx_err);
32613 return -ENOMEM;
32614 }
32615 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
32616diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
32617index 4217f29..88f547a 100644
32618--- a/drivers/atm/iphase.c
32619+++ b/drivers/atm/iphase.c
32620@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
32621 status = (u_short) (buf_desc_ptr->desc_mode);
32622 if (status & (RX_CER | RX_PTE | RX_OFL))
32623 {
32624- atomic_inc(&vcc->stats->rx_err);
32625+ atomic_inc_unchecked(&vcc->stats->rx_err);
32626 IF_ERR(printk("IA: bad packet, dropping it");)
32627 if (status & RX_CER) {
32628 IF_ERR(printk(" cause: packet CRC error\n");)
32629@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
32630 len = dma_addr - buf_addr;
32631 if (len > iadev->rx_buf_sz) {
32632 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
32633- atomic_inc(&vcc->stats->rx_err);
32634+ atomic_inc_unchecked(&vcc->stats->rx_err);
32635 goto out_free_desc;
32636 }
32637
32638@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
32639 ia_vcc = INPH_IA_VCC(vcc);
32640 if (ia_vcc == NULL)
32641 {
32642- atomic_inc(&vcc->stats->rx_err);
32643+ atomic_inc_unchecked(&vcc->stats->rx_err);
32644 atm_return(vcc, skb->truesize);
32645 dev_kfree_skb_any(skb);
32646 goto INCR_DLE;
32647@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
32648 if ((length > iadev->rx_buf_sz) || (length >
32649 (skb->len - sizeof(struct cpcs_trailer))))
32650 {
32651- atomic_inc(&vcc->stats->rx_err);
32652+ atomic_inc_unchecked(&vcc->stats->rx_err);
32653 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
32654 length, skb->len);)
32655 atm_return(vcc, skb->truesize);
32656@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
32657
32658 IF_RX(printk("rx_dle_intr: skb push");)
32659 vcc->push(vcc,skb);
32660- atomic_inc(&vcc->stats->rx);
32661+ atomic_inc_unchecked(&vcc->stats->rx);
32662 iadev->rx_pkt_cnt++;
32663 }
32664 INCR_DLE:
32665@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
32666 {
32667 struct k_sonet_stats *stats;
32668 stats = &PRIV(_ia_dev[board])->sonet_stats;
32669- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
32670- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
32671- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
32672- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
32673- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
32674- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
32675- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
32676- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
32677- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
32678+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
32679+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
32680+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
32681+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
32682+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
32683+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
32684+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
32685+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
32686+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
32687 }
32688 ia_cmds.status = 0;
32689 break;
32690@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
32691 if ((desc == 0) || (desc > iadev->num_tx_desc))
32692 {
32693 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
32694- atomic_inc(&vcc->stats->tx);
32695+ atomic_inc_unchecked(&vcc->stats->tx);
32696 if (vcc->pop)
32697 vcc->pop(vcc, skb);
32698 else
32699@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
32700 ATM_DESC(skb) = vcc->vci;
32701 skb_queue_tail(&iadev->tx_dma_q, skb);
32702
32703- atomic_inc(&vcc->stats->tx);
32704+ atomic_inc_unchecked(&vcc->stats->tx);
32705 iadev->tx_pkt_cnt++;
32706 /* Increment transaction counter */
32707 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
32708
32709 #if 0
32710 /* add flow control logic */
32711- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
32712+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
32713 if (iavcc->vc_desc_cnt > 10) {
32714 vcc->tx_quota = vcc->tx_quota * 3 / 4;
32715 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
32716diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
32717index fa7d701..1e404c7 100644
32718--- a/drivers/atm/lanai.c
32719+++ b/drivers/atm/lanai.c
32720@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
32721 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
32722 lanai_endtx(lanai, lvcc);
32723 lanai_free_skb(lvcc->tx.atmvcc, skb);
32724- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
32725+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
32726 }
32727
32728 /* Try to fill the buffer - don't call unless there is backlog */
32729@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
32730 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
32731 __net_timestamp(skb);
32732 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
32733- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
32734+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
32735 out:
32736 lvcc->rx.buf.ptr = end;
32737 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
32738@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
32739 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
32740 "vcc %d\n", lanai->number, (unsigned int) s, vci);
32741 lanai->stats.service_rxnotaal5++;
32742- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
32743+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
32744 return 0;
32745 }
32746 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
32747@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
32748 int bytes;
32749 read_unlock(&vcc_sklist_lock);
32750 DPRINTK("got trashed rx pdu on vci %d\n", vci);
32751- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
32752+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
32753 lvcc->stats.x.aal5.service_trash++;
32754 bytes = (SERVICE_GET_END(s) * 16) -
32755 (((unsigned long) lvcc->rx.buf.ptr) -
32756@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
32757 }
32758 if (s & SERVICE_STREAM) {
32759 read_unlock(&vcc_sklist_lock);
32760- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
32761+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
32762 lvcc->stats.x.aal5.service_stream++;
32763 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
32764 "PDU on VCI %d!\n", lanai->number, vci);
32765@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
32766 return 0;
32767 }
32768 DPRINTK("got rx crc error on vci %d\n", vci);
32769- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
32770+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
32771 lvcc->stats.x.aal5.service_rxcrc++;
32772 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
32773 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
32774diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
32775index 6587dc2..149833d 100644
32776--- a/drivers/atm/nicstar.c
32777+++ b/drivers/atm/nicstar.c
32778@@ -1641,7 +1641,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
32779 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
32780 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
32781 card->index);
32782- atomic_inc(&vcc->stats->tx_err);
32783+ atomic_inc_unchecked(&vcc->stats->tx_err);
32784 dev_kfree_skb_any(skb);
32785 return -EINVAL;
32786 }
32787@@ -1649,7 +1649,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
32788 if (!vc->tx) {
32789 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
32790 card->index);
32791- atomic_inc(&vcc->stats->tx_err);
32792+ atomic_inc_unchecked(&vcc->stats->tx_err);
32793 dev_kfree_skb_any(skb);
32794 return -EINVAL;
32795 }
32796@@ -1657,14 +1657,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
32797 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
32798 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
32799 card->index);
32800- atomic_inc(&vcc->stats->tx_err);
32801+ atomic_inc_unchecked(&vcc->stats->tx_err);
32802 dev_kfree_skb_any(skb);
32803 return -EINVAL;
32804 }
32805
32806 if (skb_shinfo(skb)->nr_frags != 0) {
32807 printk("nicstar%d: No scatter-gather yet.\n", card->index);
32808- atomic_inc(&vcc->stats->tx_err);
32809+ atomic_inc_unchecked(&vcc->stats->tx_err);
32810 dev_kfree_skb_any(skb);
32811 return -EINVAL;
32812 }
32813@@ -1712,11 +1712,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
32814 }
32815
32816 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
32817- atomic_inc(&vcc->stats->tx_err);
32818+ atomic_inc_unchecked(&vcc->stats->tx_err);
32819 dev_kfree_skb_any(skb);
32820 return -EIO;
32821 }
32822- atomic_inc(&vcc->stats->tx);
32823+ atomic_inc_unchecked(&vcc->stats->tx);
32824
32825 return 0;
32826 }
32827@@ -2033,14 +2033,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32828 printk
32829 ("nicstar%d: Can't allocate buffers for aal0.\n",
32830 card->index);
32831- atomic_add(i, &vcc->stats->rx_drop);
32832+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
32833 break;
32834 }
32835 if (!atm_charge(vcc, sb->truesize)) {
32836 RXPRINTK
32837 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
32838 card->index);
32839- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
32840+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
32841 dev_kfree_skb_any(sb);
32842 break;
32843 }
32844@@ -2055,7 +2055,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32845 ATM_SKB(sb)->vcc = vcc;
32846 __net_timestamp(sb);
32847 vcc->push(vcc, sb);
32848- atomic_inc(&vcc->stats->rx);
32849+ atomic_inc_unchecked(&vcc->stats->rx);
32850 cell += ATM_CELL_PAYLOAD;
32851 }
32852
32853@@ -2072,7 +2072,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32854 if (iovb == NULL) {
32855 printk("nicstar%d: Out of iovec buffers.\n",
32856 card->index);
32857- atomic_inc(&vcc->stats->rx_drop);
32858+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32859 recycle_rx_buf(card, skb);
32860 return;
32861 }
32862@@ -2096,7 +2096,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32863 small or large buffer itself. */
32864 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
32865 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
32866- atomic_inc(&vcc->stats->rx_err);
32867+ atomic_inc_unchecked(&vcc->stats->rx_err);
32868 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
32869 NS_MAX_IOVECS);
32870 NS_PRV_IOVCNT(iovb) = 0;
32871@@ -2116,7 +2116,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32872 ("nicstar%d: Expected a small buffer, and this is not one.\n",
32873 card->index);
32874 which_list(card, skb);
32875- atomic_inc(&vcc->stats->rx_err);
32876+ atomic_inc_unchecked(&vcc->stats->rx_err);
32877 recycle_rx_buf(card, skb);
32878 vc->rx_iov = NULL;
32879 recycle_iov_buf(card, iovb);
32880@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32881 ("nicstar%d: Expected a large buffer, and this is not one.\n",
32882 card->index);
32883 which_list(card, skb);
32884- atomic_inc(&vcc->stats->rx_err);
32885+ atomic_inc_unchecked(&vcc->stats->rx_err);
32886 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
32887 NS_PRV_IOVCNT(iovb));
32888 vc->rx_iov = NULL;
32889@@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32890 printk(" - PDU size mismatch.\n");
32891 else
32892 printk(".\n");
32893- atomic_inc(&vcc->stats->rx_err);
32894+ atomic_inc_unchecked(&vcc->stats->rx_err);
32895 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
32896 NS_PRV_IOVCNT(iovb));
32897 vc->rx_iov = NULL;
32898@@ -2166,7 +2166,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32899 /* skb points to a small buffer */
32900 if (!atm_charge(vcc, skb->truesize)) {
32901 push_rxbufs(card, skb);
32902- atomic_inc(&vcc->stats->rx_drop);
32903+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32904 } else {
32905 skb_put(skb, len);
32906 dequeue_sm_buf(card, skb);
32907@@ -2176,7 +2176,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32908 ATM_SKB(skb)->vcc = vcc;
32909 __net_timestamp(skb);
32910 vcc->push(vcc, skb);
32911- atomic_inc(&vcc->stats->rx);
32912+ atomic_inc_unchecked(&vcc->stats->rx);
32913 }
32914 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
32915 struct sk_buff *sb;
32916@@ -2187,7 +2187,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32917 if (len <= NS_SMBUFSIZE) {
32918 if (!atm_charge(vcc, sb->truesize)) {
32919 push_rxbufs(card, sb);
32920- atomic_inc(&vcc->stats->rx_drop);
32921+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32922 } else {
32923 skb_put(sb, len);
32924 dequeue_sm_buf(card, sb);
32925@@ -2197,7 +2197,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32926 ATM_SKB(sb)->vcc = vcc;
32927 __net_timestamp(sb);
32928 vcc->push(vcc, sb);
32929- atomic_inc(&vcc->stats->rx);
32930+ atomic_inc_unchecked(&vcc->stats->rx);
32931 }
32932
32933 push_rxbufs(card, skb);
32934@@ -2206,7 +2206,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32935
32936 if (!atm_charge(vcc, skb->truesize)) {
32937 push_rxbufs(card, skb);
32938- atomic_inc(&vcc->stats->rx_drop);
32939+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32940 } else {
32941 dequeue_lg_buf(card, skb);
32942 #ifdef NS_USE_DESTRUCTORS
32943@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32944 ATM_SKB(skb)->vcc = vcc;
32945 __net_timestamp(skb);
32946 vcc->push(vcc, skb);
32947- atomic_inc(&vcc->stats->rx);
32948+ atomic_inc_unchecked(&vcc->stats->rx);
32949 }
32950
32951 push_rxbufs(card, sb);
32952@@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32953 printk
32954 ("nicstar%d: Out of huge buffers.\n",
32955 card->index);
32956- atomic_inc(&vcc->stats->rx_drop);
32957+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32958 recycle_iovec_rx_bufs(card,
32959 (struct iovec *)
32960 iovb->data,
32961@@ -2291,7 +2291,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32962 card->hbpool.count++;
32963 } else
32964 dev_kfree_skb_any(hb);
32965- atomic_inc(&vcc->stats->rx_drop);
32966+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32967 } else {
32968 /* Copy the small buffer to the huge buffer */
32969 sb = (struct sk_buff *)iov->iov_base;
32970@@ -2328,7 +2328,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32971 #endif /* NS_USE_DESTRUCTORS */
32972 __net_timestamp(hb);
32973 vcc->push(vcc, hb);
32974- atomic_inc(&vcc->stats->rx);
32975+ atomic_inc_unchecked(&vcc->stats->rx);
32976 }
32977 }
32978
32979diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
32980index 32784d1..4a8434a 100644
32981--- a/drivers/atm/solos-pci.c
32982+++ b/drivers/atm/solos-pci.c
32983@@ -838,7 +838,7 @@ void solos_bh(unsigned long card_arg)
32984 }
32985 atm_charge(vcc, skb->truesize);
32986 vcc->push(vcc, skb);
32987- atomic_inc(&vcc->stats->rx);
32988+ atomic_inc_unchecked(&vcc->stats->rx);
32989 break;
32990
32991 case PKT_STATUS:
32992@@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_card *card)
32993 vcc = SKB_CB(oldskb)->vcc;
32994
32995 if (vcc) {
32996- atomic_inc(&vcc->stats->tx);
32997+ atomic_inc_unchecked(&vcc->stats->tx);
32998 solos_pop(vcc, oldskb);
32999 } else {
33000 dev_kfree_skb_irq(oldskb);
33001diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
33002index 0215934..ce9f5b1 100644
33003--- a/drivers/atm/suni.c
33004+++ b/drivers/atm/suni.c
33005@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
33006
33007
33008 #define ADD_LIMITED(s,v) \
33009- atomic_add((v),&stats->s); \
33010- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
33011+ atomic_add_unchecked((v),&stats->s); \
33012+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
33013
33014
33015 static void suni_hz(unsigned long from_timer)
33016diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
33017index 5120a96..e2572bd 100644
33018--- a/drivers/atm/uPD98402.c
33019+++ b/drivers/atm/uPD98402.c
33020@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
33021 struct sonet_stats tmp;
33022 int error = 0;
33023
33024- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
33025+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
33026 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
33027 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
33028 if (zero && !error) {
33029@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
33030
33031
33032 #define ADD_LIMITED(s,v) \
33033- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
33034- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
33035- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
33036+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
33037+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
33038+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
33039
33040
33041 static void stat_event(struct atm_dev *dev)
33042@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
33043 if (reason & uPD98402_INT_PFM) stat_event(dev);
33044 if (reason & uPD98402_INT_PCO) {
33045 (void) GET(PCOCR); /* clear interrupt cause */
33046- atomic_add(GET(HECCT),
33047+ atomic_add_unchecked(GET(HECCT),
33048 &PRIV(dev)->sonet_stats.uncorr_hcs);
33049 }
33050 if ((reason & uPD98402_INT_RFO) &&
33051@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
33052 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
33053 uPD98402_INT_LOS),PIMR); /* enable them */
33054 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
33055- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
33056- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
33057- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
33058+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
33059+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
33060+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
33061 return 0;
33062 }
33063
33064diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
33065index 969c3c2..9b72956 100644
33066--- a/drivers/atm/zatm.c
33067+++ b/drivers/atm/zatm.c
33068@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
33069 }
33070 if (!size) {
33071 dev_kfree_skb_irq(skb);
33072- if (vcc) atomic_inc(&vcc->stats->rx_err);
33073+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
33074 continue;
33075 }
33076 if (!atm_charge(vcc,skb->truesize)) {
33077@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
33078 skb->len = size;
33079 ATM_SKB(skb)->vcc = vcc;
33080 vcc->push(vcc,skb);
33081- atomic_inc(&vcc->stats->rx);
33082+ atomic_inc_unchecked(&vcc->stats->rx);
33083 }
33084 zout(pos & 0xffff,MTA(mbx));
33085 #if 0 /* probably a stupid idea */
33086@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
33087 skb_queue_head(&zatm_vcc->backlog,skb);
33088 break;
33089 }
33090- atomic_inc(&vcc->stats->tx);
33091+ atomic_inc_unchecked(&vcc->stats->tx);
33092 wake_up(&zatm_vcc->tx_wait);
33093 }
33094
33095diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
33096index d78b204..ecc1929 100644
33097--- a/drivers/base/attribute_container.c
33098+++ b/drivers/base/attribute_container.c
33099@@ -167,7 +167,7 @@ attribute_container_add_device(struct device *dev,
33100 ic->classdev.parent = get_device(dev);
33101 ic->classdev.class = cont->class;
33102 cont->class->dev_release = attribute_container_release;
33103- dev_set_name(&ic->classdev, dev_name(dev));
33104+ dev_set_name(&ic->classdev, "%s", dev_name(dev));
33105 if (fn)
33106 fn(cont, dev, &ic->classdev);
33107 else
33108diff --git a/drivers/base/bus.c b/drivers/base/bus.c
33109index d414331..b4dd4ba 100644
33110--- a/drivers/base/bus.c
33111+++ b/drivers/base/bus.c
33112@@ -1163,7 +1163,7 @@ int subsys_interface_register(struct subsys_interface *sif)
33113 return -EINVAL;
33114
33115 mutex_lock(&subsys->p->mutex);
33116- list_add_tail(&sif->node, &subsys->p->interfaces);
33117+ pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
33118 if (sif->add_dev) {
33119 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
33120 while ((dev = subsys_dev_iter_next(&iter)))
33121@@ -1188,7 +1188,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
33122 subsys = sif->subsys;
33123
33124 mutex_lock(&subsys->p->mutex);
33125- list_del_init(&sif->node);
33126+ pax_list_del_init((struct list_head *)&sif->node);
33127 if (sif->remove_dev) {
33128 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
33129 while ((dev = subsys_dev_iter_next(&iter)))
33130diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
33131index 7413d06..79155fa 100644
33132--- a/drivers/base/devtmpfs.c
33133+++ b/drivers/base/devtmpfs.c
33134@@ -354,7 +354,7 @@ int devtmpfs_mount(const char *mntdir)
33135 if (!thread)
33136 return 0;
33137
33138- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
33139+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
33140 if (err)
33141 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
33142 else
33143@@ -380,11 +380,11 @@ static int devtmpfsd(void *p)
33144 *err = sys_unshare(CLONE_NEWNS);
33145 if (*err)
33146 goto out;
33147- *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
33148+ *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
33149 if (*err)
33150 goto out;
33151- sys_chdir("/.."); /* will traverse into overmounted root */
33152- sys_chroot(".");
33153+ sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
33154+ sys_chroot((char __force_user *)".");
33155 complete(&setup_done);
33156 while (1) {
33157 spin_lock(&req_lock);
33158diff --git a/drivers/base/node.c b/drivers/base/node.c
33159index 7616a77c..8f57f51 100644
33160--- a/drivers/base/node.c
33161+++ b/drivers/base/node.c
33162@@ -626,7 +626,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
33163 struct node_attr {
33164 struct device_attribute attr;
33165 enum node_states state;
33166-};
33167+} __do_const;
33168
33169 static ssize_t show_node_state(struct device *dev,
33170 struct device_attribute *attr, char *buf)
33171diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
33172index 7072404..76dcebd 100644
33173--- a/drivers/base/power/domain.c
33174+++ b/drivers/base/power/domain.c
33175@@ -1850,7 +1850,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
33176 {
33177 struct cpuidle_driver *cpuidle_drv;
33178 struct gpd_cpu_data *cpu_data;
33179- struct cpuidle_state *idle_state;
33180+ cpuidle_state_no_const *idle_state;
33181 int ret = 0;
33182
33183 if (IS_ERR_OR_NULL(genpd) || state < 0)
33184@@ -1918,7 +1918,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
33185 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
33186 {
33187 struct gpd_cpu_data *cpu_data;
33188- struct cpuidle_state *idle_state;
33189+ cpuidle_state_no_const *idle_state;
33190 int ret = 0;
33191
33192 if (IS_ERR_OR_NULL(genpd))
33193diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
33194index a53ebd2..8f73eeb 100644
33195--- a/drivers/base/power/sysfs.c
33196+++ b/drivers/base/power/sysfs.c
33197@@ -185,7 +185,7 @@ static ssize_t rtpm_status_show(struct device *dev,
33198 return -EIO;
33199 }
33200 }
33201- return sprintf(buf, p);
33202+ return sprintf(buf, "%s", p);
33203 }
33204
33205 static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
33206diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
33207index 79715e7..df06b3b 100644
33208--- a/drivers/base/power/wakeup.c
33209+++ b/drivers/base/power/wakeup.c
33210@@ -29,14 +29,14 @@ bool events_check_enabled __read_mostly;
33211 * They need to be modified together atomically, so it's better to use one
33212 * atomic variable to hold them both.
33213 */
33214-static atomic_t combined_event_count = ATOMIC_INIT(0);
33215+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
33216
33217 #define IN_PROGRESS_BITS (sizeof(int) * 4)
33218 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
33219
33220 static void split_counters(unsigned int *cnt, unsigned int *inpr)
33221 {
33222- unsigned int comb = atomic_read(&combined_event_count);
33223+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
33224
33225 *cnt = (comb >> IN_PROGRESS_BITS);
33226 *inpr = comb & MAX_IN_PROGRESS;
33227@@ -395,7 +395,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
33228 ws->start_prevent_time = ws->last_time;
33229
33230 /* Increment the counter of events in progress. */
33231- cec = atomic_inc_return(&combined_event_count);
33232+ cec = atomic_inc_return_unchecked(&combined_event_count);
33233
33234 trace_wakeup_source_activate(ws->name, cec);
33235 }
33236@@ -521,7 +521,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
33237 * Increment the counter of registered wakeup events and decrement the
33238 * couter of wakeup events in progress simultaneously.
33239 */
33240- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
33241+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
33242 trace_wakeup_source_deactivate(ws->name, cec);
33243
33244 split_counters(&cnt, &inpr);
33245diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
33246index e8d11b6..7b1b36f 100644
33247--- a/drivers/base/syscore.c
33248+++ b/drivers/base/syscore.c
33249@@ -21,7 +21,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
33250 void register_syscore_ops(struct syscore_ops *ops)
33251 {
33252 mutex_lock(&syscore_ops_lock);
33253- list_add_tail(&ops->node, &syscore_ops_list);
33254+ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
33255 mutex_unlock(&syscore_ops_lock);
33256 }
33257 EXPORT_SYMBOL_GPL(register_syscore_ops);
33258@@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
33259 void unregister_syscore_ops(struct syscore_ops *ops)
33260 {
33261 mutex_lock(&syscore_ops_lock);
33262- list_del(&ops->node);
33263+ pax_list_del((struct list_head *)&ops->node);
33264 mutex_unlock(&syscore_ops_lock);
33265 }
33266 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
33267diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
33268index 62b6c2c..4a11354 100644
33269--- a/drivers/block/cciss.c
33270+++ b/drivers/block/cciss.c
33271@@ -1189,6 +1189,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
33272 int err;
33273 u32 cp;
33274
33275+ memset(&arg64, 0, sizeof(arg64));
33276+
33277 err = 0;
33278 err |=
33279 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
33280@@ -3010,7 +3012,7 @@ static void start_io(ctlr_info_t *h)
33281 while (!list_empty(&h->reqQ)) {
33282 c = list_entry(h->reqQ.next, CommandList_struct, list);
33283 /* can't do anything if fifo is full */
33284- if ((h->access.fifo_full(h))) {
33285+ if ((h->access->fifo_full(h))) {
33286 dev_warn(&h->pdev->dev, "fifo full\n");
33287 break;
33288 }
33289@@ -3020,7 +3022,7 @@ static void start_io(ctlr_info_t *h)
33290 h->Qdepth--;
33291
33292 /* Tell the controller execute command */
33293- h->access.submit_command(h, c);
33294+ h->access->submit_command(h, c);
33295
33296 /* Put job onto the completed Q */
33297 addQ(&h->cmpQ, c);
33298@@ -3446,17 +3448,17 @@ startio:
33299
33300 static inline unsigned long get_next_completion(ctlr_info_t *h)
33301 {
33302- return h->access.command_completed(h);
33303+ return h->access->command_completed(h);
33304 }
33305
33306 static inline int interrupt_pending(ctlr_info_t *h)
33307 {
33308- return h->access.intr_pending(h);
33309+ return h->access->intr_pending(h);
33310 }
33311
33312 static inline long interrupt_not_for_us(ctlr_info_t *h)
33313 {
33314- return ((h->access.intr_pending(h) == 0) ||
33315+ return ((h->access->intr_pending(h) == 0) ||
33316 (h->interrupts_enabled == 0));
33317 }
33318
33319@@ -3489,7 +3491,7 @@ static inline u32 next_command(ctlr_info_t *h)
33320 u32 a;
33321
33322 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
33323- return h->access.command_completed(h);
33324+ return h->access->command_completed(h);
33325
33326 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
33327 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
33328@@ -4046,7 +4048,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
33329 trans_support & CFGTBL_Trans_use_short_tags);
33330
33331 /* Change the access methods to the performant access methods */
33332- h->access = SA5_performant_access;
33333+ h->access = &SA5_performant_access;
33334 h->transMethod = CFGTBL_Trans_Performant;
33335
33336 return;
33337@@ -4319,7 +4321,7 @@ static int cciss_pci_init(ctlr_info_t *h)
33338 if (prod_index < 0)
33339 return -ENODEV;
33340 h->product_name = products[prod_index].product_name;
33341- h->access = *(products[prod_index].access);
33342+ h->access = products[prod_index].access;
33343
33344 if (cciss_board_disabled(h)) {
33345 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
33346@@ -5051,7 +5053,7 @@ reinit_after_soft_reset:
33347 }
33348
33349 /* make sure the board interrupts are off */
33350- h->access.set_intr_mask(h, CCISS_INTR_OFF);
33351+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
33352 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
33353 if (rc)
33354 goto clean2;
33355@@ -5101,7 +5103,7 @@ reinit_after_soft_reset:
33356 * fake ones to scoop up any residual completions.
33357 */
33358 spin_lock_irqsave(&h->lock, flags);
33359- h->access.set_intr_mask(h, CCISS_INTR_OFF);
33360+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
33361 spin_unlock_irqrestore(&h->lock, flags);
33362 free_irq(h->intr[h->intr_mode], h);
33363 rc = cciss_request_irq(h, cciss_msix_discard_completions,
33364@@ -5121,9 +5123,9 @@ reinit_after_soft_reset:
33365 dev_info(&h->pdev->dev, "Board READY.\n");
33366 dev_info(&h->pdev->dev,
33367 "Waiting for stale completions to drain.\n");
33368- h->access.set_intr_mask(h, CCISS_INTR_ON);
33369+ h->access->set_intr_mask(h, CCISS_INTR_ON);
33370 msleep(10000);
33371- h->access.set_intr_mask(h, CCISS_INTR_OFF);
33372+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
33373
33374 rc = controller_reset_failed(h->cfgtable);
33375 if (rc)
33376@@ -5146,7 +5148,7 @@ reinit_after_soft_reset:
33377 cciss_scsi_setup(h);
33378
33379 /* Turn the interrupts on so we can service requests */
33380- h->access.set_intr_mask(h, CCISS_INTR_ON);
33381+ h->access->set_intr_mask(h, CCISS_INTR_ON);
33382
33383 /* Get the firmware version */
33384 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
33385@@ -5218,7 +5220,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
33386 kfree(flush_buf);
33387 if (return_code != IO_OK)
33388 dev_warn(&h->pdev->dev, "Error flushing cache\n");
33389- h->access.set_intr_mask(h, CCISS_INTR_OFF);
33390+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
33391 free_irq(h->intr[h->intr_mode], h);
33392 }
33393
33394diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
33395index 7fda30e..eb5dfe0 100644
33396--- a/drivers/block/cciss.h
33397+++ b/drivers/block/cciss.h
33398@@ -101,7 +101,7 @@ struct ctlr_info
33399 /* information about each logical volume */
33400 drive_info_struct *drv[CISS_MAX_LUN];
33401
33402- struct access_method access;
33403+ struct access_method *access;
33404
33405 /* queue and queue Info */
33406 struct list_head reqQ;
33407diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
33408index 639d26b..fd6ad1f 100644
33409--- a/drivers/block/cpqarray.c
33410+++ b/drivers/block/cpqarray.c
33411@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
33412 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
33413 goto Enomem4;
33414 }
33415- hba[i]->access.set_intr_mask(hba[i], 0);
33416+ hba[i]->access->set_intr_mask(hba[i], 0);
33417 if (request_irq(hba[i]->intr, do_ida_intr,
33418 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
33419 {
33420@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
33421 add_timer(&hba[i]->timer);
33422
33423 /* Enable IRQ now that spinlock and rate limit timer are set up */
33424- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
33425+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
33426
33427 for(j=0; j<NWD; j++) {
33428 struct gendisk *disk = ida_gendisk[i][j];
33429@@ -694,7 +694,7 @@ DBGINFO(
33430 for(i=0; i<NR_PRODUCTS; i++) {
33431 if (board_id == products[i].board_id) {
33432 c->product_name = products[i].product_name;
33433- c->access = *(products[i].access);
33434+ c->access = products[i].access;
33435 break;
33436 }
33437 }
33438@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
33439 hba[ctlr]->intr = intr;
33440 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
33441 hba[ctlr]->product_name = products[j].product_name;
33442- hba[ctlr]->access = *(products[j].access);
33443+ hba[ctlr]->access = products[j].access;
33444 hba[ctlr]->ctlr = ctlr;
33445 hba[ctlr]->board_id = board_id;
33446 hba[ctlr]->pci_dev = NULL; /* not PCI */
33447@@ -978,7 +978,7 @@ static void start_io(ctlr_info_t *h)
33448
33449 while((c = h->reqQ) != NULL) {
33450 /* Can't do anything if we're busy */
33451- if (h->access.fifo_full(h) == 0)
33452+ if (h->access->fifo_full(h) == 0)
33453 return;
33454
33455 /* Get the first entry from the request Q */
33456@@ -986,7 +986,7 @@ static void start_io(ctlr_info_t *h)
33457 h->Qdepth--;
33458
33459 /* Tell the controller to do our bidding */
33460- h->access.submit_command(h, c);
33461+ h->access->submit_command(h, c);
33462
33463 /* Get onto the completion Q */
33464 addQ(&h->cmpQ, c);
33465@@ -1048,7 +1048,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
33466 unsigned long flags;
33467 __u32 a,a1;
33468
33469- istat = h->access.intr_pending(h);
33470+ istat = h->access->intr_pending(h);
33471 /* Is this interrupt for us? */
33472 if (istat == 0)
33473 return IRQ_NONE;
33474@@ -1059,7 +1059,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
33475 */
33476 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
33477 if (istat & FIFO_NOT_EMPTY) {
33478- while((a = h->access.command_completed(h))) {
33479+ while((a = h->access->command_completed(h))) {
33480 a1 = a; a &= ~3;
33481 if ((c = h->cmpQ) == NULL)
33482 {
33483@@ -1193,6 +1193,7 @@ out_passthru:
33484 ida_pci_info_struct pciinfo;
33485
33486 if (!arg) return -EINVAL;
33487+ memset(&pciinfo, 0, sizeof(pciinfo));
33488 pciinfo.bus = host->pci_dev->bus->number;
33489 pciinfo.dev_fn = host->pci_dev->devfn;
33490 pciinfo.board_id = host->board_id;
33491@@ -1447,11 +1448,11 @@ static int sendcmd(
33492 /*
33493 * Disable interrupt
33494 */
33495- info_p->access.set_intr_mask(info_p, 0);
33496+ info_p->access->set_intr_mask(info_p, 0);
33497 /* Make sure there is room in the command FIFO */
33498 /* Actually it should be completely empty at this time. */
33499 for (i = 200000; i > 0; i--) {
33500- temp = info_p->access.fifo_full(info_p);
33501+ temp = info_p->access->fifo_full(info_p);
33502 if (temp != 0) {
33503 break;
33504 }
33505@@ -1464,7 +1465,7 @@ DBG(
33506 /*
33507 * Send the cmd
33508 */
33509- info_p->access.submit_command(info_p, c);
33510+ info_p->access->submit_command(info_p, c);
33511 complete = pollcomplete(ctlr);
33512
33513 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
33514@@ -1547,9 +1548,9 @@ static int revalidate_allvol(ctlr_info_t *host)
33515 * we check the new geometry. Then turn interrupts back on when
33516 * we're done.
33517 */
33518- host->access.set_intr_mask(host, 0);
33519+ host->access->set_intr_mask(host, 0);
33520 getgeometry(ctlr);
33521- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
33522+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
33523
33524 for(i=0; i<NWD; i++) {
33525 struct gendisk *disk = ida_gendisk[ctlr][i];
33526@@ -1589,7 +1590,7 @@ static int pollcomplete(int ctlr)
33527 /* Wait (up to 2 seconds) for a command to complete */
33528
33529 for (i = 200000; i > 0; i--) {
33530- done = hba[ctlr]->access.command_completed(hba[ctlr]);
33531+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
33532 if (done == 0) {
33533 udelay(10); /* a short fixed delay */
33534 } else
33535diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
33536index be73e9d..7fbf140 100644
33537--- a/drivers/block/cpqarray.h
33538+++ b/drivers/block/cpqarray.h
33539@@ -99,7 +99,7 @@ struct ctlr_info {
33540 drv_info_t drv[NWD];
33541 struct proc_dir_entry *proc;
33542
33543- struct access_method access;
33544+ struct access_method *access;
33545
33546 cmdlist_t *reqQ;
33547 cmdlist_t *cmpQ;
33548diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
33549index f943aac..99bfd19 100644
33550--- a/drivers/block/drbd/drbd_int.h
33551+++ b/drivers/block/drbd/drbd_int.h
33552@@ -582,7 +582,7 @@ struct drbd_epoch {
33553 struct drbd_tconn *tconn;
33554 struct list_head list;
33555 unsigned int barrier_nr;
33556- atomic_t epoch_size; /* increased on every request added. */
33557+ atomic_unchecked_t epoch_size; /* increased on every request added. */
33558 atomic_t active; /* increased on every req. added, and dec on every finished. */
33559 unsigned long flags;
33560 };
33561@@ -1021,7 +1021,7 @@ struct drbd_conf {
33562 unsigned int al_tr_number;
33563 int al_tr_cycle;
33564 wait_queue_head_t seq_wait;
33565- atomic_t packet_seq;
33566+ atomic_unchecked_t packet_seq;
33567 unsigned int peer_seq;
33568 spinlock_t peer_seq_lock;
33569 unsigned int minor;
33570@@ -1562,7 +1562,7 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
33571 char __user *uoptval;
33572 int err;
33573
33574- uoptval = (char __user __force *)optval;
33575+ uoptval = (char __force_user *)optval;
33576
33577 set_fs(KERNEL_DS);
33578 if (level == SOL_SOCKET)
33579diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
33580index a5dca6a..bb27967 100644
33581--- a/drivers/block/drbd/drbd_main.c
33582+++ b/drivers/block/drbd/drbd_main.c
33583@@ -1317,7 +1317,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
33584 p->sector = sector;
33585 p->block_id = block_id;
33586 p->blksize = blksize;
33587- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
33588+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
33589 return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
33590 }
33591
33592@@ -1619,7 +1619,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
33593 return -EIO;
33594 p->sector = cpu_to_be64(req->i.sector);
33595 p->block_id = (unsigned long)req;
33596- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
33597+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
33598 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
33599 if (mdev->state.conn >= C_SYNC_SOURCE &&
33600 mdev->state.conn <= C_PAUSED_SYNC_T)
33601@@ -2574,8 +2574,8 @@ void conn_destroy(struct kref *kref)
33602 {
33603 struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);
33604
33605- if (atomic_read(&tconn->current_epoch->epoch_size) != 0)
33606- conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
33607+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size) != 0)
33608+ conn_err(tconn, "epoch_size:%d\n", atomic_read_unchecked(&tconn->current_epoch->epoch_size));
33609 kfree(tconn->current_epoch);
33610
33611 idr_destroy(&tconn->volumes);
33612diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
33613index 4222aff..1f79506 100644
33614--- a/drivers/block/drbd/drbd_receiver.c
33615+++ b/drivers/block/drbd/drbd_receiver.c
33616@@ -834,7 +834,7 @@ int drbd_connected(struct drbd_conf *mdev)
33617 {
33618 int err;
33619
33620- atomic_set(&mdev->packet_seq, 0);
33621+ atomic_set_unchecked(&mdev->packet_seq, 0);
33622 mdev->peer_seq = 0;
33623
33624 mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
33625@@ -1193,7 +1193,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
33626 do {
33627 next_epoch = NULL;
33628
33629- epoch_size = atomic_read(&epoch->epoch_size);
33630+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
33631
33632 switch (ev & ~EV_CLEANUP) {
33633 case EV_PUT:
33634@@ -1233,7 +1233,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
33635 rv = FE_DESTROYED;
33636 } else {
33637 epoch->flags = 0;
33638- atomic_set(&epoch->epoch_size, 0);
33639+ atomic_set_unchecked(&epoch->epoch_size, 0);
33640 /* atomic_set(&epoch->active, 0); is already zero */
33641 if (rv == FE_STILL_LIVE)
33642 rv = FE_RECYCLED;
33643@@ -1451,7 +1451,7 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
33644 conn_wait_active_ee_empty(tconn);
33645 drbd_flush(tconn);
33646
33647- if (atomic_read(&tconn->current_epoch->epoch_size)) {
33648+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
33649 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
33650 if (epoch)
33651 break;
33652@@ -1464,11 +1464,11 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
33653 }
33654
33655 epoch->flags = 0;
33656- atomic_set(&epoch->epoch_size, 0);
33657+ atomic_set_unchecked(&epoch->epoch_size, 0);
33658 atomic_set(&epoch->active, 0);
33659
33660 spin_lock(&tconn->epoch_lock);
33661- if (atomic_read(&tconn->current_epoch->epoch_size)) {
33662+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
33663 list_add(&epoch->list, &tconn->current_epoch->list);
33664 tconn->current_epoch = epoch;
33665 tconn->epochs++;
33666@@ -2172,7 +2172,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
33667
33668 err = wait_for_and_update_peer_seq(mdev, peer_seq);
33669 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
33670- atomic_inc(&tconn->current_epoch->epoch_size);
33671+ atomic_inc_unchecked(&tconn->current_epoch->epoch_size);
33672 err2 = drbd_drain_block(mdev, pi->size);
33673 if (!err)
33674 err = err2;
33675@@ -2206,7 +2206,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
33676
33677 spin_lock(&tconn->epoch_lock);
33678 peer_req->epoch = tconn->current_epoch;
33679- atomic_inc(&peer_req->epoch->epoch_size);
33680+ atomic_inc_unchecked(&peer_req->epoch->epoch_size);
33681 atomic_inc(&peer_req->epoch->active);
33682 spin_unlock(&tconn->epoch_lock);
33683
33684@@ -4347,7 +4347,7 @@ struct data_cmd {
33685 int expect_payload;
33686 size_t pkt_size;
33687 int (*fn)(struct drbd_tconn *, struct packet_info *);
33688-};
33689+} __do_const;
33690
33691 static struct data_cmd drbd_cmd_handler[] = {
33692 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
33693@@ -4467,7 +4467,7 @@ static void conn_disconnect(struct drbd_tconn *tconn)
33694 if (!list_empty(&tconn->current_epoch->list))
33695 conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
33696 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
33697- atomic_set(&tconn->current_epoch->epoch_size, 0);
33698+ atomic_set_unchecked(&tconn->current_epoch->epoch_size, 0);
33699 tconn->send.seen_any_write_yet = false;
33700
33701 conn_info(tconn, "Connection closed\n");
33702@@ -5223,7 +5223,7 @@ static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
33703 struct asender_cmd {
33704 size_t pkt_size;
33705 int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
33706-};
33707+} __do_const;
33708
33709 static struct asender_cmd asender_tbl[] = {
33710 [P_PING] = { 0, got_Ping },
33711diff --git a/drivers/block/loop.c b/drivers/block/loop.c
33712index d92d50f..a7e9d97 100644
33713--- a/drivers/block/loop.c
33714+++ b/drivers/block/loop.c
33715@@ -232,7 +232,7 @@ static int __do_lo_send_write(struct file *file,
33716
33717 file_start_write(file);
33718 set_fs(get_ds());
33719- bw = file->f_op->write(file, buf, len, &pos);
33720+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
33721 set_fs(old_fs);
33722 file_end_write(file);
33723 if (likely(bw == len))
33724diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
33725index f5d0ea1..c62380a 100644
33726--- a/drivers/block/pktcdvd.c
33727+++ b/drivers/block/pktcdvd.c
33728@@ -84,7 +84,7 @@
33729 #define MAX_SPEED 0xffff
33730
33731 #define ZONE(sector, pd) (((sector) + (pd)->offset) & \
33732- ~(sector_t)((pd)->settings.size - 1))
33733+ ~(sector_t)((pd)->settings.size - 1UL))
33734
33735 static DEFINE_MUTEX(pktcdvd_mutex);
33736 static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
33737diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
33738index 8a3aff7..d7538c2 100644
33739--- a/drivers/cdrom/cdrom.c
33740+++ b/drivers/cdrom/cdrom.c
33741@@ -416,7 +416,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
33742 ENSURE(reset, CDC_RESET);
33743 ENSURE(generic_packet, CDC_GENERIC_PACKET);
33744 cdi->mc_flags = 0;
33745- cdo->n_minors = 0;
33746 cdi->options = CDO_USE_FFLAGS;
33747
33748 if (autoclose==1 && CDROM_CAN(CDC_CLOSE_TRAY))
33749@@ -436,8 +435,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
33750 else
33751 cdi->cdda_method = CDDA_OLD;
33752
33753- if (!cdo->generic_packet)
33754- cdo->generic_packet = cdrom_dummy_generic_packet;
33755+ if (!cdo->generic_packet) {
33756+ pax_open_kernel();
33757+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
33758+ pax_close_kernel();
33759+ }
33760
33761 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
33762 mutex_lock(&cdrom_mutex);
33763@@ -458,7 +460,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
33764 if (cdi->exit)
33765 cdi->exit(cdi);
33766
33767- cdi->ops->n_minors--;
33768 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
33769 }
33770
33771@@ -2107,7 +2108,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
33772 */
33773 nr = nframes;
33774 do {
33775- cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
33776+ cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
33777 if (cgc.buffer)
33778 break;
33779
33780@@ -3429,7 +3430,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
33781 struct cdrom_device_info *cdi;
33782 int ret;
33783
33784- ret = scnprintf(info + *pos, max_size - *pos, header);
33785+ ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
33786 if (!ret)
33787 return 1;
33788
33789diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
33790index 4afcb65..a68a32d 100644
33791--- a/drivers/cdrom/gdrom.c
33792+++ b/drivers/cdrom/gdrom.c
33793@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
33794 .audio_ioctl = gdrom_audio_ioctl,
33795 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
33796 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
33797- .n_minors = 1,
33798 };
33799
33800 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
33801diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
33802index 3bb6fa3..34013fb 100644
33803--- a/drivers/char/Kconfig
33804+++ b/drivers/char/Kconfig
33805@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
33806
33807 config DEVKMEM
33808 bool "/dev/kmem virtual device support"
33809- default y
33810+ default n
33811+ depends on !GRKERNSEC_KMEM
33812 help
33813 Say Y here if you want to support the /dev/kmem device. The
33814 /dev/kmem device is rarely used, but can be used for certain
33815@@ -582,6 +583,7 @@ config DEVPORT
33816 bool
33817 depends on !M68K
33818 depends on ISA || PCI
33819+ depends on !GRKERNSEC_KMEM
33820 default y
33821
33822 source "drivers/s390/char/Kconfig"
33823diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
33824index a48e05b..6bac831 100644
33825--- a/drivers/char/agp/compat_ioctl.c
33826+++ b/drivers/char/agp/compat_ioctl.c
33827@@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user
33828 return -ENOMEM;
33829 }
33830
33831- if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
33832+ if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
33833 sizeof(*usegment) * ureserve.seg_count)) {
33834 kfree(usegment);
33835 kfree(ksegment);
33836diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
33837index 2e04433..771f2cc 100644
33838--- a/drivers/char/agp/frontend.c
33839+++ b/drivers/char/agp/frontend.c
33840@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
33841 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
33842 return -EFAULT;
33843
33844- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
33845+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
33846 return -EFAULT;
33847
33848 client = agp_find_client_by_pid(reserve.pid);
33849@@ -847,7 +847,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
33850 if (segment == NULL)
33851 return -ENOMEM;
33852
33853- if (copy_from_user(segment, (void __user *) reserve.seg_list,
33854+ if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
33855 sizeof(struct agp_segment) * reserve.seg_count)) {
33856 kfree(segment);
33857 return -EFAULT;
33858diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
33859index 4f94375..413694e 100644
33860--- a/drivers/char/genrtc.c
33861+++ b/drivers/char/genrtc.c
33862@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
33863 switch (cmd) {
33864
33865 case RTC_PLL_GET:
33866+ memset(&pll, 0, sizeof(pll));
33867 if (get_rtc_pll(&pll))
33868 return -EINVAL;
33869 else
33870diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
33871index d784650..e8bfd69 100644
33872--- a/drivers/char/hpet.c
33873+++ b/drivers/char/hpet.c
33874@@ -559,7 +559,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
33875 }
33876
33877 static int
33878-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
33879+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
33880 struct hpet_info *info)
33881 {
33882 struct hpet_timer __iomem *timer;
33883diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
33884index 86fe45c..c0ea948 100644
33885--- a/drivers/char/hw_random/intel-rng.c
33886+++ b/drivers/char/hw_random/intel-rng.c
33887@@ -314,7 +314,7 @@ PFX "RNG, try using the 'no_fwh_detect' option.\n";
33888
33889 if (no_fwh_detect)
33890 return -ENODEV;
33891- printk(warning);
33892+ printk("%s", warning);
33893 return -EBUSY;
33894 }
33895
33896diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
33897index 4445fa1..7c6de37 100644
33898--- a/drivers/char/ipmi/ipmi_msghandler.c
33899+++ b/drivers/char/ipmi/ipmi_msghandler.c
33900@@ -420,7 +420,7 @@ struct ipmi_smi {
33901 struct proc_dir_entry *proc_dir;
33902 char proc_dir_name[10];
33903
33904- atomic_t stats[IPMI_NUM_STATS];
33905+ atomic_unchecked_t stats[IPMI_NUM_STATS];
33906
33907 /*
33908 * run_to_completion duplicate of smb_info, smi_info
33909@@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
33910
33911
33912 #define ipmi_inc_stat(intf, stat) \
33913- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
33914+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
33915 #define ipmi_get_stat(intf, stat) \
33916- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
33917+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
33918
33919 static int is_lan_addr(struct ipmi_addr *addr)
33920 {
33921@@ -2883,7 +2883,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
33922 INIT_LIST_HEAD(&intf->cmd_rcvrs);
33923 init_waitqueue_head(&intf->waitq);
33924 for (i = 0; i < IPMI_NUM_STATS; i++)
33925- atomic_set(&intf->stats[i], 0);
33926+ atomic_set_unchecked(&intf->stats[i], 0);
33927
33928 intf->proc_dir = NULL;
33929
33930diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
33931index af4b23f..79806fc 100644
33932--- a/drivers/char/ipmi/ipmi_si_intf.c
33933+++ b/drivers/char/ipmi/ipmi_si_intf.c
33934@@ -275,7 +275,7 @@ struct smi_info {
33935 unsigned char slave_addr;
33936
33937 /* Counters and things for the proc filesystem. */
33938- atomic_t stats[SI_NUM_STATS];
33939+ atomic_unchecked_t stats[SI_NUM_STATS];
33940
33941 struct task_struct *thread;
33942
33943@@ -284,9 +284,9 @@ struct smi_info {
33944 };
33945
33946 #define smi_inc_stat(smi, stat) \
33947- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
33948+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
33949 #define smi_get_stat(smi, stat) \
33950- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
33951+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
33952
33953 #define SI_MAX_PARMS 4
33954
33955@@ -3258,7 +3258,7 @@ static int try_smi_init(struct smi_info *new_smi)
33956 atomic_set(&new_smi->req_events, 0);
33957 new_smi->run_to_completion = 0;
33958 for (i = 0; i < SI_NUM_STATS; i++)
33959- atomic_set(&new_smi->stats[i], 0);
33960+ atomic_set_unchecked(&new_smi->stats[i], 0);
33961
33962 new_smi->interrupt_disabled = 1;
33963 atomic_set(&new_smi->stop_operation, 0);
33964diff --git a/drivers/char/mem.c b/drivers/char/mem.c
33965index 1ccbe94..6ad651a 100644
33966--- a/drivers/char/mem.c
33967+++ b/drivers/char/mem.c
33968@@ -18,6 +18,7 @@
33969 #include <linux/raw.h>
33970 #include <linux/tty.h>
33971 #include <linux/capability.h>
33972+#include <linux/security.h>
33973 #include <linux/ptrace.h>
33974 #include <linux/device.h>
33975 #include <linux/highmem.h>
33976@@ -38,6 +39,10 @@
33977
33978 #define DEVPORT_MINOR 4
33979
33980+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
33981+extern const struct file_operations grsec_fops;
33982+#endif
33983+
33984 static inline unsigned long size_inside_page(unsigned long start,
33985 unsigned long size)
33986 {
33987@@ -69,9 +74,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
33988
33989 while (cursor < to) {
33990 if (!devmem_is_allowed(pfn)) {
33991+#ifdef CONFIG_GRKERNSEC_KMEM
33992+ gr_handle_mem_readwrite(from, to);
33993+#else
33994 printk(KERN_INFO
33995 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
33996 current->comm, from, to);
33997+#endif
33998 return 0;
33999 }
34000 cursor += PAGE_SIZE;
34001@@ -79,6 +88,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
34002 }
34003 return 1;
34004 }
34005+#elif defined(CONFIG_GRKERNSEC_KMEM)
34006+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
34007+{
34008+ return 0;
34009+}
34010 #else
34011 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
34012 {
34013@@ -121,6 +135,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
34014
34015 while (count > 0) {
34016 unsigned long remaining;
34017+ char *temp;
34018
34019 sz = size_inside_page(p, count);
34020
34021@@ -136,7 +151,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
34022 if (!ptr)
34023 return -EFAULT;
34024
34025- remaining = copy_to_user(buf, ptr, sz);
34026+#ifdef CONFIG_PAX_USERCOPY
34027+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
34028+ if (!temp) {
34029+ unxlate_dev_mem_ptr(p, ptr);
34030+ return -ENOMEM;
34031+ }
34032+ memcpy(temp, ptr, sz);
34033+#else
34034+ temp = ptr;
34035+#endif
34036+
34037+ remaining = copy_to_user(buf, temp, sz);
34038+
34039+#ifdef CONFIG_PAX_USERCOPY
34040+ kfree(temp);
34041+#endif
34042+
34043 unxlate_dev_mem_ptr(p, ptr);
34044 if (remaining)
34045 return -EFAULT;
34046@@ -379,7 +410,7 @@ static ssize_t read_oldmem(struct file *file, char __user *buf,
34047 else
34048 csize = count;
34049
34050- rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
34051+ rc = copy_oldmem_page(pfn, (char __force_kernel *)buf, csize, offset, 1);
34052 if (rc < 0)
34053 return rc;
34054 buf += csize;
34055@@ -399,9 +430,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
34056 size_t count, loff_t *ppos)
34057 {
34058 unsigned long p = *ppos;
34059- ssize_t low_count, read, sz;
34060+ ssize_t low_count, read, sz, err = 0;
34061 char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
34062- int err = 0;
34063
34064 read = 0;
34065 if (p < (unsigned long) high_memory) {
34066@@ -423,6 +453,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
34067 }
34068 #endif
34069 while (low_count > 0) {
34070+ char *temp;
34071+
34072 sz = size_inside_page(p, low_count);
34073
34074 /*
34075@@ -432,7 +464,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
34076 */
34077 kbuf = xlate_dev_kmem_ptr((char *)p);
34078
34079- if (copy_to_user(buf, kbuf, sz))
34080+#ifdef CONFIG_PAX_USERCOPY
34081+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
34082+ if (!temp)
34083+ return -ENOMEM;
34084+ memcpy(temp, kbuf, sz);
34085+#else
34086+ temp = kbuf;
34087+#endif
34088+
34089+ err = copy_to_user(buf, temp, sz);
34090+
34091+#ifdef CONFIG_PAX_USERCOPY
34092+ kfree(temp);
34093+#endif
34094+
34095+ if (err)
34096 return -EFAULT;
34097 buf += sz;
34098 p += sz;
34099@@ -869,6 +916,9 @@ static const struct memdev {
34100 #ifdef CONFIG_CRASH_DUMP
34101 [12] = { "oldmem", 0, &oldmem_fops, NULL },
34102 #endif
34103+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
34104+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
34105+#endif
34106 };
34107
34108 static int memory_open(struct inode *inode, struct file *filp)
34109@@ -940,7 +990,7 @@ static int __init chr_dev_init(void)
34110 continue;
34111
34112 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
34113- NULL, devlist[minor].name);
34114+ NULL, "%s", devlist[minor].name);
34115 }
34116
34117 return tty_init();
34118diff --git a/drivers/char/mwave/tp3780i.c b/drivers/char/mwave/tp3780i.c
34119index c689697..04e6d6a2 100644
34120--- a/drivers/char/mwave/tp3780i.c
34121+++ b/drivers/char/mwave/tp3780i.c
34122@@ -479,6 +479,7 @@ int tp3780I_QueryAbilities(THINKPAD_BD_DATA * pBDData, MW_ABILITIES * pAbilities
34123 PRINTK_2(TRACE_TP3780I,
34124 "tp3780i::tp3780I_QueryAbilities entry pBDData %p\n", pBDData);
34125
34126+ memset(pAbilities, 0, sizeof(*pAbilities));
34127 /* fill out standard constant fields */
34128 pAbilities->instr_per_sec = pBDData->rDspSettings.uIps;
34129 pAbilities->data_size = pBDData->rDspSettings.uDStoreSize;
34130diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
34131index 9df78e2..01ba9ae 100644
34132--- a/drivers/char/nvram.c
34133+++ b/drivers/char/nvram.c
34134@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
34135
34136 spin_unlock_irq(&rtc_lock);
34137
34138- if (copy_to_user(buf, contents, tmp - contents))
34139+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
34140 return -EFAULT;
34141
34142 *ppos = i;
34143diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
34144index 5c5cc00..ac9edb7 100644
34145--- a/drivers/char/pcmcia/synclink_cs.c
34146+++ b/drivers/char/pcmcia/synclink_cs.c
34147@@ -2345,9 +2345,9 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
34148
34149 if (debug_level >= DEBUG_LEVEL_INFO)
34150 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
34151- __FILE__, __LINE__, info->device_name, port->count);
34152+ __FILE__, __LINE__, info->device_name, atomic_read(&port->count));
34153
34154- WARN_ON(!port->count);
34155+ WARN_ON(!atomic_read(&port->count));
34156
34157 if (tty_port_close_start(port, tty, filp) == 0)
34158 goto cleanup;
34159@@ -2365,7 +2365,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
34160 cleanup:
34161 if (debug_level >= DEBUG_LEVEL_INFO)
34162 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
34163- tty->driver->name, port->count);
34164+ tty->driver->name, atomic_read(&port->count));
34165 }
34166
34167 /* Wait until the transmitter is empty.
34168@@ -2507,7 +2507,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
34169
34170 if (debug_level >= DEBUG_LEVEL_INFO)
34171 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
34172- __FILE__, __LINE__, tty->driver->name, port->count);
34173+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
34174
34175 /* If port is closing, signal caller to try again */
34176 if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
34177@@ -2527,11 +2527,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
34178 goto cleanup;
34179 }
34180 spin_lock(&port->lock);
34181- port->count++;
34182+ atomic_inc(&port->count);
34183 spin_unlock(&port->lock);
34184 spin_unlock_irqrestore(&info->netlock, flags);
34185
34186- if (port->count == 1) {
34187+ if (atomic_read(&port->count) == 1) {
34188 /* 1st open on this device, init hardware */
34189 retval = startup(info, tty);
34190 if (retval < 0)
34191@@ -3920,7 +3920,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
34192 unsigned short new_crctype;
34193
34194 /* return error if TTY interface open */
34195- if (info->port.count)
34196+ if (atomic_read(&info->port.count))
34197 return -EBUSY;
34198
34199 switch (encoding)
34200@@ -4024,7 +4024,7 @@ static int hdlcdev_open(struct net_device *dev)
34201
34202 /* arbitrate between network and tty opens */
34203 spin_lock_irqsave(&info->netlock, flags);
34204- if (info->port.count != 0 || info->netcount != 0) {
34205+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
34206 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
34207 spin_unlock_irqrestore(&info->netlock, flags);
34208 return -EBUSY;
34209@@ -4114,7 +4114,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
34210 printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
34211
34212 /* return error if TTY interface open */
34213- if (info->port.count)
34214+ if (atomic_read(&info->port.count))
34215 return -EBUSY;
34216
34217 if (cmd != SIOCWANDEV)
34218diff --git a/drivers/char/random.c b/drivers/char/random.c
34219index 35487e8..dac8bd1 100644
34220--- a/drivers/char/random.c
34221+++ b/drivers/char/random.c
34222@@ -272,8 +272,13 @@
34223 /*
34224 * Configuration information
34225 */
34226+#ifdef CONFIG_GRKERNSEC_RANDNET
34227+#define INPUT_POOL_WORDS 512
34228+#define OUTPUT_POOL_WORDS 128
34229+#else
34230 #define INPUT_POOL_WORDS 128
34231 #define OUTPUT_POOL_WORDS 32
34232+#endif
34233 #define SEC_XFER_SIZE 512
34234 #define EXTRACT_SIZE 10
34235
34236@@ -313,10 +318,17 @@ static struct poolinfo {
34237 int poolwords;
34238 int tap1, tap2, tap3, tap4, tap5;
34239 } poolinfo_table[] = {
34240+#ifdef CONFIG_GRKERNSEC_RANDNET
34241+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
34242+ { 512, 411, 308, 208, 104, 1 },
34243+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
34244+ { 128, 103, 76, 51, 25, 1 },
34245+#else
34246 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
34247 { 128, 103, 76, 51, 25, 1 },
34248 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
34249 { 32, 26, 20, 14, 7, 1 },
34250+#endif
34251 #if 0
34252 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
34253 { 2048, 1638, 1231, 819, 411, 1 },
34254@@ -524,8 +536,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
34255 input_rotate += i ? 7 : 14;
34256 }
34257
34258- ACCESS_ONCE(r->input_rotate) = input_rotate;
34259- ACCESS_ONCE(r->add_ptr) = i;
34260+ ACCESS_ONCE_RW(r->input_rotate) = input_rotate;
34261+ ACCESS_ONCE_RW(r->add_ptr) = i;
34262 smp_wmb();
34263
34264 if (out)
34265@@ -1032,7 +1044,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
34266
34267 extract_buf(r, tmp);
34268 i = min_t(int, nbytes, EXTRACT_SIZE);
34269- if (copy_to_user(buf, tmp, i)) {
34270+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
34271 ret = -EFAULT;
34272 break;
34273 }
34274@@ -1368,7 +1380,7 @@ EXPORT_SYMBOL(generate_random_uuid);
34275 #include <linux/sysctl.h>
34276
34277 static int min_read_thresh = 8, min_write_thresh;
34278-static int max_read_thresh = INPUT_POOL_WORDS * 32;
34279+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
34280 static int max_write_thresh = INPUT_POOL_WORDS * 32;
34281 static char sysctl_bootid[16];
34282
34283@@ -1384,7 +1396,7 @@ static char sysctl_bootid[16];
34284 static int proc_do_uuid(ctl_table *table, int write,
34285 void __user *buffer, size_t *lenp, loff_t *ppos)
34286 {
34287- ctl_table fake_table;
34288+ ctl_table_no_const fake_table;
34289 unsigned char buf[64], tmp_uuid[16], *uuid;
34290
34291 uuid = table->data;
34292diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
34293index bf2349db..5456d53 100644
34294--- a/drivers/char/sonypi.c
34295+++ b/drivers/char/sonypi.c
34296@@ -54,6 +54,7 @@
34297
34298 #include <asm/uaccess.h>
34299 #include <asm/io.h>
34300+#include <asm/local.h>
34301
34302 #include <linux/sonypi.h>
34303
34304@@ -490,7 +491,7 @@ static struct sonypi_device {
34305 spinlock_t fifo_lock;
34306 wait_queue_head_t fifo_proc_list;
34307 struct fasync_struct *fifo_async;
34308- int open_count;
34309+ local_t open_count;
34310 int model;
34311 struct input_dev *input_jog_dev;
34312 struct input_dev *input_key_dev;
34313@@ -897,7 +898,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
34314 static int sonypi_misc_release(struct inode *inode, struct file *file)
34315 {
34316 mutex_lock(&sonypi_device.lock);
34317- sonypi_device.open_count--;
34318+ local_dec(&sonypi_device.open_count);
34319 mutex_unlock(&sonypi_device.lock);
34320 return 0;
34321 }
34322@@ -906,9 +907,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
34323 {
34324 mutex_lock(&sonypi_device.lock);
34325 /* Flush input queue on first open */
34326- if (!sonypi_device.open_count)
34327+ if (!local_read(&sonypi_device.open_count))
34328 kfifo_reset(&sonypi_device.fifo);
34329- sonypi_device.open_count++;
34330+ local_inc(&sonypi_device.open_count);
34331 mutex_unlock(&sonypi_device.lock);
34332
34333 return 0;
34334diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
34335index 64420b3..5c40b56 100644
34336--- a/drivers/char/tpm/tpm_acpi.c
34337+++ b/drivers/char/tpm/tpm_acpi.c
34338@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
34339 virt = acpi_os_map_memory(start, len);
34340 if (!virt) {
34341 kfree(log->bios_event_log);
34342+ log->bios_event_log = NULL;
34343 printk("%s: ERROR - Unable to map memory\n", __func__);
34344 return -EIO;
34345 }
34346
34347- memcpy_fromio(log->bios_event_log, virt, len);
34348+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
34349
34350 acpi_os_unmap_memory(virt, len);
34351 return 0;
34352diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
34353index 84ddc55..1d32f1e 100644
34354--- a/drivers/char/tpm/tpm_eventlog.c
34355+++ b/drivers/char/tpm/tpm_eventlog.c
34356@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
34357 event = addr;
34358
34359 if ((event->event_type == 0 && event->event_size == 0) ||
34360- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
34361+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
34362 return NULL;
34363
34364 return addr;
34365@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
34366 return NULL;
34367
34368 if ((event->event_type == 0 && event->event_size == 0) ||
34369- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
34370+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
34371 return NULL;
34372
34373 (*pos)++;
34374@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
34375 int i;
34376
34377 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
34378- seq_putc(m, data[i]);
34379+ if (!seq_putc(m, data[i]))
34380+ return -EFAULT;
34381
34382 return 0;
34383 }
34384diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
34385index 1b456fe..2510242 100644
34386--- a/drivers/char/virtio_console.c
34387+++ b/drivers/char/virtio_console.c
34388@@ -679,7 +679,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
34389 if (to_user) {
34390 ssize_t ret;
34391
34392- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
34393+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
34394 if (ret)
34395 return -EFAULT;
34396 } else {
34397@@ -778,7 +778,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
34398 if (!port_has_data(port) && !port->host_connected)
34399 return 0;
34400
34401- return fill_readbuf(port, ubuf, count, true);
34402+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
34403 }
34404
34405 static int wait_port_writable(struct port *port, bool nonblock)
34406diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
34407index a33f46f..a720eed 100644
34408--- a/drivers/clk/clk-composite.c
34409+++ b/drivers/clk/clk-composite.c
34410@@ -122,7 +122,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
34411 struct clk *clk;
34412 struct clk_init_data init;
34413 struct clk_composite *composite;
34414- struct clk_ops *clk_composite_ops;
34415+ clk_ops_no_const *clk_composite_ops;
34416
34417 composite = kzalloc(sizeof(*composite), GFP_KERNEL);
34418 if (!composite) {
34419diff --git a/drivers/clk/socfpga/clk.c b/drivers/clk/socfpga/clk.c
34420index bd11315..2f14eae 100644
34421--- a/drivers/clk/socfpga/clk.c
34422+++ b/drivers/clk/socfpga/clk.c
34423@@ -135,8 +135,10 @@ static __init struct clk *socfpga_clk_init(struct device_node *node,
34424 if (strcmp(clk_name, "main_pll") || strcmp(clk_name, "periph_pll") ||
34425 strcmp(clk_name, "sdram_pll")) {
34426 socfpga_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
34427- clk_pll_ops.enable = clk_gate_ops.enable;
34428- clk_pll_ops.disable = clk_gate_ops.disable;
34429+ pax_open_kernel();
34430+ *(void **)&clk_pll_ops.enable = clk_gate_ops.enable;
34431+ *(void **)&clk_pll_ops.disable = clk_gate_ops.disable;
34432+ pax_close_kernel();
34433 }
34434
34435 clk = clk_register(NULL, &socfpga_clk->hw.hw);
34436diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
34437index a2b2541..bc1e7ff 100644
34438--- a/drivers/clocksource/arm_arch_timer.c
34439+++ b/drivers/clocksource/arm_arch_timer.c
34440@@ -264,7 +264,7 @@ static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
34441 return NOTIFY_OK;
34442 }
34443
34444-static struct notifier_block arch_timer_cpu_nb __cpuinitdata = {
34445+static struct notifier_block arch_timer_cpu_nb = {
34446 .notifier_call = arch_timer_cpu_notify,
34447 };
34448
34449diff --git a/drivers/clocksource/metag_generic.c b/drivers/clocksource/metag_generic.c
34450index ade7513..069445f 100644
34451--- a/drivers/clocksource/metag_generic.c
34452+++ b/drivers/clocksource/metag_generic.c
34453@@ -169,7 +169,7 @@ static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
34454 return NOTIFY_OK;
34455 }
34456
34457-static struct notifier_block __cpuinitdata arch_timer_cpu_nb = {
34458+static struct notifier_block arch_timer_cpu_nb = {
34459 .notifier_call = arch_timer_cpu_notify,
34460 };
34461
34462diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
34463index edc089e..bc7c0bc 100644
34464--- a/drivers/cpufreq/acpi-cpufreq.c
34465+++ b/drivers/cpufreq/acpi-cpufreq.c
34466@@ -172,7 +172,7 @@ static ssize_t show_global_boost(struct kobject *kobj,
34467 return sprintf(buf, "%u\n", boost_enabled);
34468 }
34469
34470-static struct global_attr global_boost = __ATTR(boost, 0644,
34471+static global_attr_no_const global_boost = __ATTR(boost, 0644,
34472 show_global_boost,
34473 store_global_boost);
34474
34475@@ -705,8 +705,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
34476 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
34477 per_cpu(acfreq_data, cpu) = data;
34478
34479- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
34480- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
34481+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
34482+ pax_open_kernel();
34483+ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
34484+ pax_close_kernel();
34485+ }
34486
34487 result = acpi_processor_register_performance(data->acpi_data, cpu);
34488 if (result)
34489@@ -832,7 +835,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
34490 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
34491 break;
34492 case ACPI_ADR_SPACE_FIXED_HARDWARE:
34493- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
34494+ pax_open_kernel();
34495+ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
34496+ pax_close_kernel();
34497 policy->cur = get_cur_freq_on_cpu(cpu);
34498 break;
34499 default:
34500@@ -843,8 +848,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
34501 acpi_processor_notify_smm(THIS_MODULE);
34502
34503 /* Check for APERF/MPERF support in hardware */
34504- if (boot_cpu_has(X86_FEATURE_APERFMPERF))
34505- acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
34506+ if (boot_cpu_has(X86_FEATURE_APERFMPERF)) {
34507+ pax_open_kernel();
34508+ *(void **)&acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
34509+ pax_close_kernel();
34510+ }
34511
34512 pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
34513 for (i = 0; i < perf->state_count; i++)
34514diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
34515index 2d53f47..eb3803e 100644
34516--- a/drivers/cpufreq/cpufreq.c
34517+++ b/drivers/cpufreq/cpufreq.c
34518@@ -1851,7 +1851,7 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
34519 return NOTIFY_OK;
34520 }
34521
34522-static struct notifier_block __refdata cpufreq_cpu_notifier = {
34523+static struct notifier_block cpufreq_cpu_notifier = {
34524 .notifier_call = cpufreq_cpu_callback,
34525 };
34526
34527@@ -1883,8 +1883,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
34528
34529 pr_debug("trying to register driver %s\n", driver_data->name);
34530
34531- if (driver_data->setpolicy)
34532- driver_data->flags |= CPUFREQ_CONST_LOOPS;
34533+ if (driver_data->setpolicy) {
34534+ pax_open_kernel();
34535+ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
34536+ pax_close_kernel();
34537+ }
34538
34539 write_lock_irqsave(&cpufreq_driver_lock, flags);
34540 if (cpufreq_driver) {
34541diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
34542index dc9b72e..11c0302 100644
34543--- a/drivers/cpufreq/cpufreq_governor.c
34544+++ b/drivers/cpufreq/cpufreq_governor.c
34545@@ -238,7 +238,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
34546 struct dbs_data *dbs_data;
34547 struct od_cpu_dbs_info_s *od_dbs_info = NULL;
34548 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
34549- struct od_ops *od_ops = NULL;
34550+ const struct od_ops *od_ops = NULL;
34551 struct od_dbs_tuners *od_tuners = NULL;
34552 struct cs_dbs_tuners *cs_tuners = NULL;
34553 struct cpu_dbs_common_info *cpu_cdbs;
34554@@ -301,7 +301,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
34555
34556 if ((cdata->governor == GOV_CONSERVATIVE) &&
34557 (!policy->governor->initialized)) {
34558- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
34559+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
34560
34561 cpufreq_register_notifier(cs_ops->notifier_block,
34562 CPUFREQ_TRANSITION_NOTIFIER);
34563@@ -318,7 +318,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
34564
34565 if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
34566 (policy->governor->initialized == 1)) {
34567- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
34568+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
34569
34570 cpufreq_unregister_notifier(cs_ops->notifier_block,
34571 CPUFREQ_TRANSITION_NOTIFIER);
34572diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
34573index e16a961..0e68927 100644
34574--- a/drivers/cpufreq/cpufreq_governor.h
34575+++ b/drivers/cpufreq/cpufreq_governor.h
34576@@ -204,7 +204,7 @@ struct common_dbs_data {
34577 void (*exit)(struct dbs_data *dbs_data);
34578
34579 /* Governor specific ops, see below */
34580- void *gov_ops;
34581+ const void *gov_ops;
34582 };
34583
34584 /* Governer Per policy data */
34585diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
34586index 93eb5cb..f8ab572 100644
34587--- a/drivers/cpufreq/cpufreq_ondemand.c
34588+++ b/drivers/cpufreq/cpufreq_ondemand.c
34589@@ -615,14 +615,18 @@ void od_register_powersave_bias_handler(unsigned int (*f)
34590 (struct cpufreq_policy *, unsigned int, unsigned int),
34591 unsigned int powersave_bias)
34592 {
34593- od_ops.powersave_bias_target = f;
34594+ pax_open_kernel();
34595+ *(void **)&od_ops.powersave_bias_target = f;
34596+ pax_close_kernel();
34597 od_set_powersave_bias(powersave_bias);
34598 }
34599 EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
34600
34601 void od_unregister_powersave_bias_handler(void)
34602 {
34603- od_ops.powersave_bias_target = generic_powersave_bias_target;
34604+ pax_open_kernel();
34605+ *(void **)&od_ops.powersave_bias_target = generic_powersave_bias_target;
34606+ pax_close_kernel();
34607 od_set_powersave_bias(0);
34608 }
34609 EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
34610diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
34611index 591b6fb..2a01183 100644
34612--- a/drivers/cpufreq/cpufreq_stats.c
34613+++ b/drivers/cpufreq/cpufreq_stats.c
34614@@ -367,7 +367,7 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
34615 }
34616
34617 /* priority=1 so this will get called before cpufreq_remove_dev */
34618-static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
34619+static struct notifier_block cpufreq_stat_cpu_notifier = {
34620 .notifier_call = cpufreq_stat_cpu_callback,
34621 .priority = 1,
34622 };
34623diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
34624index 421ef37..e708530c 100644
34625--- a/drivers/cpufreq/p4-clockmod.c
34626+++ b/drivers/cpufreq/p4-clockmod.c
34627@@ -160,10 +160,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
34628 case 0x0F: /* Core Duo */
34629 case 0x16: /* Celeron Core */
34630 case 0x1C: /* Atom */
34631- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34632+ pax_open_kernel();
34633+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34634+ pax_close_kernel();
34635 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
34636 case 0x0D: /* Pentium M (Dothan) */
34637- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34638+ pax_open_kernel();
34639+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34640+ pax_close_kernel();
34641 /* fall through */
34642 case 0x09: /* Pentium M (Banias) */
34643 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
34644@@ -175,7 +179,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
34645
34646 /* on P-4s, the TSC runs with constant frequency independent whether
34647 * throttling is active or not. */
34648- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34649+ pax_open_kernel();
34650+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34651+ pax_close_kernel();
34652
34653 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
34654 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
34655diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
34656index c71ee14..7c2e183 100644
34657--- a/drivers/cpufreq/sparc-us3-cpufreq.c
34658+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
34659@@ -18,14 +18,12 @@
34660 #include <asm/head.h>
34661 #include <asm/timer.h>
34662
34663-static struct cpufreq_driver *cpufreq_us3_driver;
34664-
34665 struct us3_freq_percpu_info {
34666 struct cpufreq_frequency_table table[4];
34667 };
34668
34669 /* Indexed by cpu number. */
34670-static struct us3_freq_percpu_info *us3_freq_table;
34671+static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
34672
34673 /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
34674 * in the Safari config register.
34675@@ -186,12 +184,25 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
34676
34677 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
34678 {
34679- if (cpufreq_us3_driver)
34680- us3_set_cpu_divider_index(policy, 0);
34681+ us3_set_cpu_divider_index(policy->cpu, 0);
34682
34683 return 0;
34684 }
34685
34686+static int __init us3_freq_init(void);
34687+static void __exit us3_freq_exit(void);
34688+
34689+static struct cpufreq_driver cpufreq_us3_driver = {
34690+ .init = us3_freq_cpu_init,
34691+ .verify = us3_freq_verify,
34692+ .target = us3_freq_target,
34693+ .get = us3_freq_get,
34694+ .exit = us3_freq_cpu_exit,
34695+ .owner = THIS_MODULE,
34696+ .name = "UltraSPARC-III",
34697+
34698+};
34699+
34700 static int __init us3_freq_init(void)
34701 {
34702 unsigned long manuf, impl, ver;
34703@@ -208,57 +219,15 @@ static int __init us3_freq_init(void)
34704 (impl == CHEETAH_IMPL ||
34705 impl == CHEETAH_PLUS_IMPL ||
34706 impl == JAGUAR_IMPL ||
34707- impl == PANTHER_IMPL)) {
34708- struct cpufreq_driver *driver;
34709-
34710- ret = -ENOMEM;
34711- driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
34712- if (!driver)
34713- goto err_out;
34714-
34715- us3_freq_table = kzalloc(
34716- (NR_CPUS * sizeof(struct us3_freq_percpu_info)),
34717- GFP_KERNEL);
34718- if (!us3_freq_table)
34719- goto err_out;
34720-
34721- driver->init = us3_freq_cpu_init;
34722- driver->verify = us3_freq_verify;
34723- driver->target = us3_freq_target;
34724- driver->get = us3_freq_get;
34725- driver->exit = us3_freq_cpu_exit;
34726- driver->owner = THIS_MODULE,
34727- strcpy(driver->name, "UltraSPARC-III");
34728-
34729- cpufreq_us3_driver = driver;
34730- ret = cpufreq_register_driver(driver);
34731- if (ret)
34732- goto err_out;
34733-
34734- return 0;
34735-
34736-err_out:
34737- if (driver) {
34738- kfree(driver);
34739- cpufreq_us3_driver = NULL;
34740- }
34741- kfree(us3_freq_table);
34742- us3_freq_table = NULL;
34743- return ret;
34744- }
34745+ impl == PANTHER_IMPL))
34746+ return cpufreq_register_driver(&cpufreq_us3_driver);
34747
34748 return -ENODEV;
34749 }
34750
34751 static void __exit us3_freq_exit(void)
34752 {
34753- if (cpufreq_us3_driver) {
34754- cpufreq_unregister_driver(cpufreq_us3_driver);
34755- kfree(cpufreq_us3_driver);
34756- cpufreq_us3_driver = NULL;
34757- kfree(us3_freq_table);
34758- us3_freq_table = NULL;
34759- }
34760+ cpufreq_unregister_driver(&cpufreq_us3_driver);
34761 }
34762
34763 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
34764diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
34765index 618e6f4..e89d915 100644
34766--- a/drivers/cpufreq/speedstep-centrino.c
34767+++ b/drivers/cpufreq/speedstep-centrino.c
34768@@ -353,8 +353,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
34769 !cpu_has(cpu, X86_FEATURE_EST))
34770 return -ENODEV;
34771
34772- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
34773- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
34774+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
34775+ pax_open_kernel();
34776+ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
34777+ pax_close_kernel();
34778+ }
34779
34780 if (policy->cpu != 0)
34781 return -ENODEV;
34782diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
34783index c3a93fe..e808f24 100644
34784--- a/drivers/cpuidle/cpuidle.c
34785+++ b/drivers/cpuidle/cpuidle.c
34786@@ -254,7 +254,7 @@ static int poll_idle(struct cpuidle_device *dev,
34787
34788 static void poll_idle_init(struct cpuidle_driver *drv)
34789 {
34790- struct cpuidle_state *state = &drv->states[0];
34791+ cpuidle_state_no_const *state = &drv->states[0];
34792
34793 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
34794 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
34795diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
34796index ea2f8e7..70ac501 100644
34797--- a/drivers/cpuidle/governor.c
34798+++ b/drivers/cpuidle/governor.c
34799@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
34800 mutex_lock(&cpuidle_lock);
34801 if (__cpuidle_find_governor(gov->name) == NULL) {
34802 ret = 0;
34803- list_add_tail(&gov->governor_list, &cpuidle_governors);
34804+ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
34805 if (!cpuidle_curr_governor ||
34806 cpuidle_curr_governor->rating < gov->rating)
34807 cpuidle_switch_governor(gov);
34808@@ -135,7 +135,7 @@ void cpuidle_unregister_governor(struct cpuidle_governor *gov)
34809 new_gov = cpuidle_replace_governor(gov->rating);
34810 cpuidle_switch_governor(new_gov);
34811 }
34812- list_del(&gov->governor_list);
34813+ pax_list_del((struct list_head *)&gov->governor_list);
34814 mutex_unlock(&cpuidle_lock);
34815 }
34816
34817diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
34818index 428754a..8bdf9cc 100644
34819--- a/drivers/cpuidle/sysfs.c
34820+++ b/drivers/cpuidle/sysfs.c
34821@@ -131,7 +131,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
34822 NULL
34823 };
34824
34825-static struct attribute_group cpuidle_attr_group = {
34826+static attribute_group_no_const cpuidle_attr_group = {
34827 .attrs = cpuidle_default_attrs,
34828 .name = "cpuidle",
34829 };
34830diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
34831index 3b36797..db0b0c0 100644
34832--- a/drivers/devfreq/devfreq.c
34833+++ b/drivers/devfreq/devfreq.c
34834@@ -477,7 +477,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
34835 GFP_KERNEL);
34836 devfreq->last_stat_updated = jiffies;
34837
34838- dev_set_name(&devfreq->dev, dev_name(dev));
34839+ dev_set_name(&devfreq->dev, "%s", dev_name(dev));
34840 err = device_register(&devfreq->dev);
34841 if (err) {
34842 put_device(&devfreq->dev);
34843@@ -588,7 +588,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
34844 goto err_out;
34845 }
34846
34847- list_add(&governor->node, &devfreq_governor_list);
34848+ pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
34849
34850 list_for_each_entry(devfreq, &devfreq_list, node) {
34851 int ret = 0;
34852@@ -676,7 +676,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
34853 }
34854 }
34855
34856- list_del(&governor->node);
34857+ pax_list_del((struct list_head *)&governor->node);
34858 err_out:
34859 mutex_unlock(&devfreq_list_lock);
34860
34861diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c
34862index b70709b..1d8d02a 100644
34863--- a/drivers/dma/sh/shdma.c
34864+++ b/drivers/dma/sh/shdma.c
34865@@ -476,7 +476,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
34866 return ret;
34867 }
34868
34869-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
34870+static struct notifier_block sh_dmae_nmi_notifier = {
34871 .notifier_call = sh_dmae_nmi_handler,
34872
34873 /* Run before NMI debug handler and KGDB */
34874diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
34875index 67610a6..30f8a11 100644
34876--- a/drivers/edac/edac_mc_sysfs.c
34877+++ b/drivers/edac/edac_mc_sysfs.c
34878@@ -148,7 +148,7 @@ static const char * const edac_caps[] = {
34879 struct dev_ch_attribute {
34880 struct device_attribute attr;
34881 int channel;
34882-};
34883+} __do_const;
34884
34885 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
34886 struct dev_ch_attribute dev_attr_legacy_##_name = \
34887@@ -1003,14 +1003,16 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
34888 }
34889
34890 if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
34891+ pax_open_kernel();
34892 if (mci->get_sdram_scrub_rate) {
34893- dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
34894- dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
34895+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
34896+ *(void **)&dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
34897 }
34898 if (mci->set_sdram_scrub_rate) {
34899- dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
34900- dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
34901+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
34902+ *(void **)&dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
34903 }
34904+ pax_close_kernel();
34905 err = device_create_file(&mci->dev,
34906 &dev_attr_sdram_scrub_rate);
34907 if (err) {
34908diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
34909index e8658e4..22746d6 100644
34910--- a/drivers/edac/edac_pci_sysfs.c
34911+++ b/drivers/edac/edac_pci_sysfs.c
34912@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
34913 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
34914 static int edac_pci_poll_msec = 1000; /* one second workq period */
34915
34916-static atomic_t pci_parity_count = ATOMIC_INIT(0);
34917-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
34918+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
34919+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
34920
34921 static struct kobject *edac_pci_top_main_kobj;
34922 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
34923@@ -235,7 +235,7 @@ struct edac_pci_dev_attribute {
34924 void *value;
34925 ssize_t(*show) (void *, char *);
34926 ssize_t(*store) (void *, const char *, size_t);
34927-};
34928+} __do_const;
34929
34930 /* Set of show/store abstract level functions for PCI Parity object */
34931 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
34932@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34933 edac_printk(KERN_CRIT, EDAC_PCI,
34934 "Signaled System Error on %s\n",
34935 pci_name(dev));
34936- atomic_inc(&pci_nonparity_count);
34937+ atomic_inc_unchecked(&pci_nonparity_count);
34938 }
34939
34940 if (status & (PCI_STATUS_PARITY)) {
34941@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34942 "Master Data Parity Error on %s\n",
34943 pci_name(dev));
34944
34945- atomic_inc(&pci_parity_count);
34946+ atomic_inc_unchecked(&pci_parity_count);
34947 }
34948
34949 if (status & (PCI_STATUS_DETECTED_PARITY)) {
34950@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34951 "Detected Parity Error on %s\n",
34952 pci_name(dev));
34953
34954- atomic_inc(&pci_parity_count);
34955+ atomic_inc_unchecked(&pci_parity_count);
34956 }
34957 }
34958
34959@@ -618,7 +618,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34960 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
34961 "Signaled System Error on %s\n",
34962 pci_name(dev));
34963- atomic_inc(&pci_nonparity_count);
34964+ atomic_inc_unchecked(&pci_nonparity_count);
34965 }
34966
34967 if (status & (PCI_STATUS_PARITY)) {
34968@@ -626,7 +626,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34969 "Master Data Parity Error on "
34970 "%s\n", pci_name(dev));
34971
34972- atomic_inc(&pci_parity_count);
34973+ atomic_inc_unchecked(&pci_parity_count);
34974 }
34975
34976 if (status & (PCI_STATUS_DETECTED_PARITY)) {
34977@@ -634,7 +634,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34978 "Detected Parity Error on %s\n",
34979 pci_name(dev));
34980
34981- atomic_inc(&pci_parity_count);
34982+ atomic_inc_unchecked(&pci_parity_count);
34983 }
34984 }
34985 }
34986@@ -672,7 +672,7 @@ void edac_pci_do_parity_check(void)
34987 if (!check_pci_errors)
34988 return;
34989
34990- before_count = atomic_read(&pci_parity_count);
34991+ before_count = atomic_read_unchecked(&pci_parity_count);
34992
34993 /* scan all PCI devices looking for a Parity Error on devices and
34994 * bridges.
34995@@ -684,7 +684,7 @@ void edac_pci_do_parity_check(void)
34996 /* Only if operator has selected panic on PCI Error */
34997 if (edac_pci_get_panic_on_pe()) {
34998 /* If the count is different 'after' from 'before' */
34999- if (before_count != atomic_read(&pci_parity_count))
35000+ if (before_count != atomic_read_unchecked(&pci_parity_count))
35001 panic("EDAC: PCI Parity Error");
35002 }
35003 }
35004diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
35005index 51b7e3a..aa8a3e8 100644
35006--- a/drivers/edac/mce_amd.h
35007+++ b/drivers/edac/mce_amd.h
35008@@ -77,7 +77,7 @@ struct amd_decoder_ops {
35009 bool (*mc0_mce)(u16, u8);
35010 bool (*mc1_mce)(u16, u8);
35011 bool (*mc2_mce)(u16, u8);
35012-};
35013+} __no_const;
35014
35015 void amd_report_gart_errors(bool);
35016 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
35017diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
35018index 57ea7f4..789e3c3 100644
35019--- a/drivers/firewire/core-card.c
35020+++ b/drivers/firewire/core-card.c
35021@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
35022
35023 void fw_core_remove_card(struct fw_card *card)
35024 {
35025- struct fw_card_driver dummy_driver = dummy_driver_template;
35026+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
35027
35028 card->driver->update_phy_reg(card, 4,
35029 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
35030diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
35031index 664a6ff..af13580 100644
35032--- a/drivers/firewire/core-device.c
35033+++ b/drivers/firewire/core-device.c
35034@@ -232,7 +232,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
35035 struct config_rom_attribute {
35036 struct device_attribute attr;
35037 u32 key;
35038-};
35039+} __do_const;
35040
35041 static ssize_t show_immediate(struct device *dev,
35042 struct device_attribute *dattr, char *buf)
35043diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
35044index 28a94c7..58da63a 100644
35045--- a/drivers/firewire/core-transaction.c
35046+++ b/drivers/firewire/core-transaction.c
35047@@ -38,6 +38,7 @@
35048 #include <linux/timer.h>
35049 #include <linux/types.h>
35050 #include <linux/workqueue.h>
35051+#include <linux/sched.h>
35052
35053 #include <asm/byteorder.h>
35054
35055diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
35056index 515a42c..5ecf3ba 100644
35057--- a/drivers/firewire/core.h
35058+++ b/drivers/firewire/core.h
35059@@ -111,6 +111,7 @@ struct fw_card_driver {
35060
35061 int (*stop_iso)(struct fw_iso_context *ctx);
35062 };
35063+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
35064
35065 void fw_card_initialize(struct fw_card *card,
35066 const struct fw_card_driver *driver, struct device *device);
35067diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
35068index 94a58a0..f5eba42 100644
35069--- a/drivers/firmware/dmi-id.c
35070+++ b/drivers/firmware/dmi-id.c
35071@@ -16,7 +16,7 @@
35072 struct dmi_device_attribute{
35073 struct device_attribute dev_attr;
35074 int field;
35075-};
35076+} __do_const;
35077 #define to_dmi_dev_attr(_dev_attr) \
35078 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
35079
35080diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
35081index b95159b..841ae55 100644
35082--- a/drivers/firmware/dmi_scan.c
35083+++ b/drivers/firmware/dmi_scan.c
35084@@ -497,11 +497,6 @@ void __init dmi_scan_machine(void)
35085 }
35086 }
35087 else {
35088- /*
35089- * no iounmap() for that ioremap(); it would be a no-op, but
35090- * it's so early in setup that sucker gets confused into doing
35091- * what it shouldn't if we actually call it.
35092- */
35093 p = dmi_ioremap(0xF0000, 0x10000);
35094 if (p == NULL)
35095 goto error;
35096@@ -786,7 +781,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
35097 if (buf == NULL)
35098 return -1;
35099
35100- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
35101+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
35102
35103 iounmap(buf);
35104 return 0;
35105diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
35106index 5145fa3..0d3babd 100644
35107--- a/drivers/firmware/efi/efi.c
35108+++ b/drivers/firmware/efi/efi.c
35109@@ -65,14 +65,16 @@ static struct attribute_group efi_subsys_attr_group = {
35110 };
35111
35112 static struct efivars generic_efivars;
35113-static struct efivar_operations generic_ops;
35114+static efivar_operations_no_const generic_ops __read_only;
35115
35116 static int generic_ops_register(void)
35117 {
35118- generic_ops.get_variable = efi.get_variable;
35119- generic_ops.set_variable = efi.set_variable;
35120- generic_ops.get_next_variable = efi.get_next_variable;
35121- generic_ops.query_variable_store = efi_query_variable_store;
35122+ pax_open_kernel();
35123+ *(void **)&generic_ops.get_variable = efi.get_variable;
35124+ *(void **)&generic_ops.set_variable = efi.set_variable;
35125+ *(void **)&generic_ops.get_next_variable = efi.get_next_variable;
35126+ *(void **)&generic_ops.query_variable_store = efi_query_variable_store;
35127+ pax_close_kernel();
35128
35129 return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
35130 }
35131diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
35132index 8bd1bb6..c48b0c6 100644
35133--- a/drivers/firmware/efi/efivars.c
35134+++ b/drivers/firmware/efi/efivars.c
35135@@ -452,7 +452,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
35136 static int
35137 create_efivars_bin_attributes(void)
35138 {
35139- struct bin_attribute *attr;
35140+ bin_attribute_no_const *attr;
35141 int error;
35142
35143 /* new_var */
35144diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
35145index 2a90ba6..07f3733 100644
35146--- a/drivers/firmware/google/memconsole.c
35147+++ b/drivers/firmware/google/memconsole.c
35148@@ -147,7 +147,9 @@ static int __init memconsole_init(void)
35149 if (!found_memconsole())
35150 return -ENODEV;
35151
35152- memconsole_bin_attr.size = memconsole_length;
35153+ pax_open_kernel();
35154+ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
35155+ pax_close_kernel();
35156
35157 ret = sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
35158
35159diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
35160index e16d932..f0206ef 100644
35161--- a/drivers/gpio/gpio-ich.c
35162+++ b/drivers/gpio/gpio-ich.c
35163@@ -69,7 +69,7 @@ struct ichx_desc {
35164 /* Some chipsets have quirks, let these use their own request/get */
35165 int (*request)(struct gpio_chip *chip, unsigned offset);
35166 int (*get)(struct gpio_chip *chip, unsigned offset);
35167-};
35168+} __do_const;
35169
35170 static struct {
35171 spinlock_t lock;
35172diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
35173index 9902732..64b62dd 100644
35174--- a/drivers/gpio/gpio-vr41xx.c
35175+++ b/drivers/gpio/gpio-vr41xx.c
35176@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
35177 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
35178 maskl, pendl, maskh, pendh);
35179
35180- atomic_inc(&irq_err_count);
35181+ atomic_inc_unchecked(&irq_err_count);
35182
35183 return -EINVAL;
35184 }
35185diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
35186index ed1334e..ee0dd42 100644
35187--- a/drivers/gpu/drm/drm_crtc_helper.c
35188+++ b/drivers/gpu/drm/drm_crtc_helper.c
35189@@ -321,7 +321,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
35190 struct drm_crtc *tmp;
35191 int crtc_mask = 1;
35192
35193- WARN(!crtc, "checking null crtc?\n");
35194+ BUG_ON(!crtc);
35195
35196 dev = crtc->dev;
35197
35198diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
35199index 9cc247f..36aa285 100644
35200--- a/drivers/gpu/drm/drm_drv.c
35201+++ b/drivers/gpu/drm/drm_drv.c
35202@@ -306,7 +306,7 @@ module_exit(drm_core_exit);
35203 /**
35204 * Copy and IOCTL return string to user space
35205 */
35206-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
35207+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
35208 {
35209 int len;
35210
35211@@ -376,7 +376,7 @@ long drm_ioctl(struct file *filp,
35212 struct drm_file *file_priv = filp->private_data;
35213 struct drm_device *dev;
35214 const struct drm_ioctl_desc *ioctl = NULL;
35215- drm_ioctl_t *func;
35216+ drm_ioctl_no_const_t func;
35217 unsigned int nr = DRM_IOCTL_NR(cmd);
35218 int retcode = -EINVAL;
35219 char stack_kdata[128];
35220@@ -389,7 +389,7 @@ long drm_ioctl(struct file *filp,
35221 return -ENODEV;
35222
35223 atomic_inc(&dev->ioctl_count);
35224- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
35225+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
35226 ++file_priv->ioctl_count;
35227
35228 if ((nr >= DRM_CORE_IOCTL_COUNT) &&
35229diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
35230index 429e07d..e681a2c 100644
35231--- a/drivers/gpu/drm/drm_fops.c
35232+++ b/drivers/gpu/drm/drm_fops.c
35233@@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
35234 }
35235
35236 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
35237- atomic_set(&dev->counts[i], 0);
35238+ atomic_set_unchecked(&dev->counts[i], 0);
35239
35240 dev->sigdata.lock = NULL;
35241
35242@@ -135,7 +135,7 @@ int drm_open(struct inode *inode, struct file *filp)
35243 if (drm_device_is_unplugged(dev))
35244 return -ENODEV;
35245
35246- if (!dev->open_count++)
35247+ if (local_inc_return(&dev->open_count) == 1)
35248 need_setup = 1;
35249 mutex_lock(&dev->struct_mutex);
35250 old_imapping = inode->i_mapping;
35251@@ -151,7 +151,7 @@ int drm_open(struct inode *inode, struct file *filp)
35252 retcode = drm_open_helper(inode, filp, dev);
35253 if (retcode)
35254 goto err_undo;
35255- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
35256+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
35257 if (need_setup) {
35258 retcode = drm_setup(dev);
35259 if (retcode)
35260@@ -166,7 +166,7 @@ err_undo:
35261 iput(container_of(dev->dev_mapping, struct inode, i_data));
35262 dev->dev_mapping = old_mapping;
35263 mutex_unlock(&dev->struct_mutex);
35264- dev->open_count--;
35265+ local_dec(&dev->open_count);
35266 return retcode;
35267 }
35268 EXPORT_SYMBOL(drm_open);
35269@@ -441,7 +441,7 @@ int drm_release(struct inode *inode, struct file *filp)
35270
35271 mutex_lock(&drm_global_mutex);
35272
35273- DRM_DEBUG("open_count = %d\n", dev->open_count);
35274+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
35275
35276 if (dev->driver->preclose)
35277 dev->driver->preclose(dev, file_priv);
35278@@ -450,10 +450,10 @@ int drm_release(struct inode *inode, struct file *filp)
35279 * Begin inline drm_release
35280 */
35281
35282- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
35283+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
35284 task_pid_nr(current),
35285 (long)old_encode_dev(file_priv->minor->device),
35286- dev->open_count);
35287+ local_read(&dev->open_count));
35288
35289 /* Release any auth tokens that might point to this file_priv,
35290 (do that under the drm_global_mutex) */
35291@@ -550,8 +550,8 @@ int drm_release(struct inode *inode, struct file *filp)
35292 * End inline drm_release
35293 */
35294
35295- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
35296- if (!--dev->open_count) {
35297+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
35298+ if (local_dec_and_test(&dev->open_count)) {
35299 if (atomic_read(&dev->ioctl_count)) {
35300 DRM_ERROR("Device busy: %d\n",
35301 atomic_read(&dev->ioctl_count));
35302diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
35303index f731116..629842c 100644
35304--- a/drivers/gpu/drm/drm_global.c
35305+++ b/drivers/gpu/drm/drm_global.c
35306@@ -36,7 +36,7 @@
35307 struct drm_global_item {
35308 struct mutex mutex;
35309 void *object;
35310- int refcount;
35311+ atomic_t refcount;
35312 };
35313
35314 static struct drm_global_item glob[DRM_GLOBAL_NUM];
35315@@ -49,7 +49,7 @@ void drm_global_init(void)
35316 struct drm_global_item *item = &glob[i];
35317 mutex_init(&item->mutex);
35318 item->object = NULL;
35319- item->refcount = 0;
35320+ atomic_set(&item->refcount, 0);
35321 }
35322 }
35323
35324@@ -59,7 +59,7 @@ void drm_global_release(void)
35325 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
35326 struct drm_global_item *item = &glob[i];
35327 BUG_ON(item->object != NULL);
35328- BUG_ON(item->refcount != 0);
35329+ BUG_ON(atomic_read(&item->refcount) != 0);
35330 }
35331 }
35332
35333@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
35334 void *object;
35335
35336 mutex_lock(&item->mutex);
35337- if (item->refcount == 0) {
35338+ if (atomic_read(&item->refcount) == 0) {
35339 item->object = kzalloc(ref->size, GFP_KERNEL);
35340 if (unlikely(item->object == NULL)) {
35341 ret = -ENOMEM;
35342@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
35343 goto out_err;
35344
35345 }
35346- ++item->refcount;
35347+ atomic_inc(&item->refcount);
35348 ref->object = item->object;
35349 object = item->object;
35350 mutex_unlock(&item->mutex);
35351@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
35352 struct drm_global_item *item = &glob[ref->global_type];
35353
35354 mutex_lock(&item->mutex);
35355- BUG_ON(item->refcount == 0);
35356+ BUG_ON(atomic_read(&item->refcount) == 0);
35357 BUG_ON(ref->object != item->object);
35358- if (--item->refcount == 0) {
35359+ if (atomic_dec_and_test(&item->refcount)) {
35360 ref->release(ref);
35361 item->object = NULL;
35362 }
35363diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
35364index d4b20ce..77a8d41 100644
35365--- a/drivers/gpu/drm/drm_info.c
35366+++ b/drivers/gpu/drm/drm_info.c
35367@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
35368 struct drm_local_map *map;
35369 struct drm_map_list *r_list;
35370
35371- /* Hardcoded from _DRM_FRAME_BUFFER,
35372- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
35373- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
35374- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
35375+ static const char * const types[] = {
35376+ [_DRM_FRAME_BUFFER] = "FB",
35377+ [_DRM_REGISTERS] = "REG",
35378+ [_DRM_SHM] = "SHM",
35379+ [_DRM_AGP] = "AGP",
35380+ [_DRM_SCATTER_GATHER] = "SG",
35381+ [_DRM_CONSISTENT] = "PCI",
35382+ [_DRM_GEM] = "GEM" };
35383 const char *type;
35384 int i;
35385
35386@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
35387 map = r_list->map;
35388 if (!map)
35389 continue;
35390- if (map->type < 0 || map->type > 5)
35391+ if (map->type >= ARRAY_SIZE(types))
35392 type = "??";
35393 else
35394 type = types[map->type];
35395@@ -253,7 +257,11 @@ int drm_vma_info(struct seq_file *m, void *data)
35396 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
35397 vma->vm_flags & VM_LOCKED ? 'l' : '-',
35398 vma->vm_flags & VM_IO ? 'i' : '-',
35399+#ifdef CONFIG_GRKERNSEC_HIDESYM
35400+ 0);
35401+#else
35402 vma->vm_pgoff);
35403+#endif
35404
35405 #if defined(__i386__)
35406 pgprot = pgprot_val(vma->vm_page_prot);
35407diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
35408index 2f4c434..dd12cd2 100644
35409--- a/drivers/gpu/drm/drm_ioc32.c
35410+++ b/drivers/gpu/drm/drm_ioc32.c
35411@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
35412 request = compat_alloc_user_space(nbytes);
35413 if (!access_ok(VERIFY_WRITE, request, nbytes))
35414 return -EFAULT;
35415- list = (struct drm_buf_desc *) (request + 1);
35416+ list = (struct drm_buf_desc __user *) (request + 1);
35417
35418 if (__put_user(count, &request->count)
35419 || __put_user(list, &request->list))
35420@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
35421 request = compat_alloc_user_space(nbytes);
35422 if (!access_ok(VERIFY_WRITE, request, nbytes))
35423 return -EFAULT;
35424- list = (struct drm_buf_pub *) (request + 1);
35425+ list = (struct drm_buf_pub __user *) (request + 1);
35426
35427 if (__put_user(count, &request->count)
35428 || __put_user(list, &request->list))
35429@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
35430 return 0;
35431 }
35432
35433-drm_ioctl_compat_t *drm_compat_ioctls[] = {
35434+drm_ioctl_compat_t drm_compat_ioctls[] = {
35435 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
35436 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
35437 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
35438@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
35439 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
35440 {
35441 unsigned int nr = DRM_IOCTL_NR(cmd);
35442- drm_ioctl_compat_t *fn;
35443 int ret;
35444
35445 /* Assume that ioctls without an explicit compat routine will just
35446@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
35447 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
35448 return drm_ioctl(filp, cmd, arg);
35449
35450- fn = drm_compat_ioctls[nr];
35451-
35452- if (fn != NULL)
35453- ret = (*fn) (filp, cmd, arg);
35454+ if (drm_compat_ioctls[nr] != NULL)
35455+ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
35456 else
35457 ret = drm_ioctl(filp, cmd, arg);
35458
35459diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
35460index e77bd8b..1571b85 100644
35461--- a/drivers/gpu/drm/drm_ioctl.c
35462+++ b/drivers/gpu/drm/drm_ioctl.c
35463@@ -252,7 +252,7 @@ int drm_getstats(struct drm_device *dev, void *data,
35464 stats->data[i].value =
35465 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
35466 else
35467- stats->data[i].value = atomic_read(&dev->counts[i]);
35468+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
35469 stats->data[i].type = dev->types[i];
35470 }
35471
35472diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
35473index d752c96..fe08455 100644
35474--- a/drivers/gpu/drm/drm_lock.c
35475+++ b/drivers/gpu/drm/drm_lock.c
35476@@ -86,7 +86,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
35477 if (drm_lock_take(&master->lock, lock->context)) {
35478 master->lock.file_priv = file_priv;
35479 master->lock.lock_time = jiffies;
35480- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
35481+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
35482 break; /* Got lock */
35483 }
35484
35485@@ -157,7 +157,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
35486 return -EINVAL;
35487 }
35488
35489- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
35490+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
35491
35492 if (drm_lock_free(&master->lock, lock->context)) {
35493 /* FIXME: Should really bail out here. */
35494diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
35495index 16f3ec5..b28f9ca 100644
35496--- a/drivers/gpu/drm/drm_stub.c
35497+++ b/drivers/gpu/drm/drm_stub.c
35498@@ -501,7 +501,7 @@ void drm_unplug_dev(struct drm_device *dev)
35499
35500 drm_device_set_unplugged(dev);
35501
35502- if (dev->open_count == 0) {
35503+ if (local_read(&dev->open_count) == 0) {
35504 drm_put_dev(dev);
35505 }
35506 mutex_unlock(&drm_global_mutex);
35507diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
35508index 0229665..f61329c 100644
35509--- a/drivers/gpu/drm/drm_sysfs.c
35510+++ b/drivers/gpu/drm/drm_sysfs.c
35511@@ -499,7 +499,7 @@ EXPORT_SYMBOL(drm_sysfs_hotplug_event);
35512 int drm_sysfs_device_add(struct drm_minor *minor)
35513 {
35514 int err;
35515- char *minor_str;
35516+ const char *minor_str;
35517
35518 minor->kdev.parent = minor->dev->dev;
35519
35520diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
35521index 004ecdf..db1f6e0 100644
35522--- a/drivers/gpu/drm/i810/i810_dma.c
35523+++ b/drivers/gpu/drm/i810/i810_dma.c
35524@@ -945,8 +945,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
35525 dma->buflist[vertex->idx],
35526 vertex->discard, vertex->used);
35527
35528- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
35529- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
35530+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
35531+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
35532 sarea_priv->last_enqueue = dev_priv->counter - 1;
35533 sarea_priv->last_dispatch = (int)hw_status[5];
35534
35535@@ -1106,8 +1106,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
35536 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
35537 mc->last_render);
35538
35539- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
35540- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
35541+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
35542+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
35543 sarea_priv->last_enqueue = dev_priv->counter - 1;
35544 sarea_priv->last_dispatch = (int)hw_status[5];
35545
35546diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
35547index 6e0acad..93c8289 100644
35548--- a/drivers/gpu/drm/i810/i810_drv.h
35549+++ b/drivers/gpu/drm/i810/i810_drv.h
35550@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
35551 int page_flipping;
35552
35553 wait_queue_head_t irq_queue;
35554- atomic_t irq_received;
35555- atomic_t irq_emitted;
35556+ atomic_unchecked_t irq_received;
35557+ atomic_unchecked_t irq_emitted;
35558
35559 int front_offset;
35560 } drm_i810_private_t;
35561diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
35562index e913d32..4d9b351 100644
35563--- a/drivers/gpu/drm/i915/i915_debugfs.c
35564+++ b/drivers/gpu/drm/i915/i915_debugfs.c
35565@@ -499,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
35566 I915_READ(GTIMR));
35567 }
35568 seq_printf(m, "Interrupts received: %d\n",
35569- atomic_read(&dev_priv->irq_received));
35570+ atomic_read_unchecked(&dev_priv->irq_received));
35571 for_each_ring(ring, dev_priv, i) {
35572 if (IS_GEN6(dev) || IS_GEN7(dev)) {
35573 seq_printf(m,
35574diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
35575index 3b315ba..aac280f 100644
35576--- a/drivers/gpu/drm/i915/i915_dma.c
35577+++ b/drivers/gpu/drm/i915/i915_dma.c
35578@@ -1259,7 +1259,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
35579 bool can_switch;
35580
35581 spin_lock(&dev->count_lock);
35582- can_switch = (dev->open_count == 0);
35583+ can_switch = (local_read(&dev->open_count) == 0);
35584 spin_unlock(&dev->count_lock);
35585 return can_switch;
35586 }
35587diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
35588index 9669a0b..bb65176 100644
35589--- a/drivers/gpu/drm/i915/i915_drv.h
35590+++ b/drivers/gpu/drm/i915/i915_drv.h
35591@@ -915,7 +915,7 @@ typedef struct drm_i915_private {
35592 drm_dma_handle_t *status_page_dmah;
35593 struct resource mch_res;
35594
35595- atomic_t irq_received;
35596+ atomic_unchecked_t irq_received;
35597
35598 /* protects the irq masks */
35599 spinlock_t irq_lock;
35600@@ -1811,7 +1811,7 @@ extern struct i2c_adapter *intel_gmbus_get_adapter(
35601 struct drm_i915_private *dev_priv, unsigned port);
35602 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
35603 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
35604-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
35605+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
35606 {
35607 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
35608 }
35609diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
35610index 117ce38..eefd237 100644
35611--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
35612+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
35613@@ -727,9 +727,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
35614
35615 static int
35616 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
35617- int count)
35618+ unsigned int count)
35619 {
35620- int i;
35621+ unsigned int i;
35622 int relocs_total = 0;
35623 int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
35624
35625diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
35626index 3c59584..500f2e9 100644
35627--- a/drivers/gpu/drm/i915/i915_ioc32.c
35628+++ b/drivers/gpu/drm/i915/i915_ioc32.c
35629@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
35630 (unsigned long)request);
35631 }
35632
35633-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
35634+static drm_ioctl_compat_t i915_compat_ioctls[] = {
35635 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
35636 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
35637 [DRM_I915_GETPARAM] = compat_i915_getparam,
35638@@ -202,18 +202,15 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
35639 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
35640 {
35641 unsigned int nr = DRM_IOCTL_NR(cmd);
35642- drm_ioctl_compat_t *fn = NULL;
35643 int ret;
35644
35645 if (nr < DRM_COMMAND_BASE)
35646 return drm_compat_ioctl(filp, cmd, arg);
35647
35648- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
35649- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
35650-
35651- if (fn != NULL)
35652+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls)) {
35653+ drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
35654 ret = (*fn) (filp, cmd, arg);
35655- else
35656+ } else
35657 ret = drm_ioctl(filp, cmd, arg);
35658
35659 return ret;
35660diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
35661index 0aa2ef0..77c03d0 100644
35662--- a/drivers/gpu/drm/i915/i915_irq.c
35663+++ b/drivers/gpu/drm/i915/i915_irq.c
35664@@ -679,7 +679,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
35665 int pipe;
35666 u32 pipe_stats[I915_MAX_PIPES];
35667
35668- atomic_inc(&dev_priv->irq_received);
35669+ atomic_inc_unchecked(&dev_priv->irq_received);
35670
35671 while (true) {
35672 iir = I915_READ(VLV_IIR);
35673@@ -844,7 +844,7 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
35674 irqreturn_t ret = IRQ_NONE;
35675 int i;
35676
35677- atomic_inc(&dev_priv->irq_received);
35678+ atomic_inc_unchecked(&dev_priv->irq_received);
35679
35680 /* disable master interrupt before clearing iir */
35681 de_ier = I915_READ(DEIER);
35682@@ -934,7 +934,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
35683 int ret = IRQ_NONE;
35684 u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
35685
35686- atomic_inc(&dev_priv->irq_received);
35687+ atomic_inc_unchecked(&dev_priv->irq_received);
35688
35689 /* disable master interrupt before clearing iir */
35690 de_ier = I915_READ(DEIER);
35691@@ -2098,7 +2098,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
35692 {
35693 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
35694
35695- atomic_set(&dev_priv->irq_received, 0);
35696+ atomic_set_unchecked(&dev_priv->irq_received, 0);
35697
35698 I915_WRITE(HWSTAM, 0xeffe);
35699
35700@@ -2133,7 +2133,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
35701 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
35702 int pipe;
35703
35704- atomic_set(&dev_priv->irq_received, 0);
35705+ atomic_set_unchecked(&dev_priv->irq_received, 0);
35706
35707 /* VLV magic */
35708 I915_WRITE(VLV_IMR, 0);
35709@@ -2420,7 +2420,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
35710 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
35711 int pipe;
35712
35713- atomic_set(&dev_priv->irq_received, 0);
35714+ atomic_set_unchecked(&dev_priv->irq_received, 0);
35715
35716 for_each_pipe(pipe)
35717 I915_WRITE(PIPESTAT(pipe), 0);
35718@@ -2499,7 +2499,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
35719 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
35720 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
35721
35722- atomic_inc(&dev_priv->irq_received);
35723+ atomic_inc_unchecked(&dev_priv->irq_received);
35724
35725 iir = I915_READ16(IIR);
35726 if (iir == 0)
35727@@ -2574,7 +2574,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
35728 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
35729 int pipe;
35730
35731- atomic_set(&dev_priv->irq_received, 0);
35732+ atomic_set_unchecked(&dev_priv->irq_received, 0);
35733
35734 if (I915_HAS_HOTPLUG(dev)) {
35735 I915_WRITE(PORT_HOTPLUG_EN, 0);
35736@@ -2673,7 +2673,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
35737 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
35738 int pipe, ret = IRQ_NONE;
35739
35740- atomic_inc(&dev_priv->irq_received);
35741+ atomic_inc_unchecked(&dev_priv->irq_received);
35742
35743 iir = I915_READ(IIR);
35744 do {
35745@@ -2800,7 +2800,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
35746 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
35747 int pipe;
35748
35749- atomic_set(&dev_priv->irq_received, 0);
35750+ atomic_set_unchecked(&dev_priv->irq_received, 0);
35751
35752 I915_WRITE(PORT_HOTPLUG_EN, 0);
35753 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
35754@@ -2907,7 +2907,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
35755 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
35756 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
35757
35758- atomic_inc(&dev_priv->irq_received);
35759+ atomic_inc_unchecked(&dev_priv->irq_received);
35760
35761 iir = I915_READ(IIR);
35762
35763diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
35764index 56746dc..b5a214f 100644
35765--- a/drivers/gpu/drm/i915/intel_display.c
35766+++ b/drivers/gpu/drm/i915/intel_display.c
35767@@ -8919,13 +8919,13 @@ struct intel_quirk {
35768 int subsystem_vendor;
35769 int subsystem_device;
35770 void (*hook)(struct drm_device *dev);
35771-};
35772+} __do_const;
35773
35774 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
35775 struct intel_dmi_quirk {
35776 void (*hook)(struct drm_device *dev);
35777 const struct dmi_system_id (*dmi_id_list)[];
35778-};
35779+} __do_const;
35780
35781 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
35782 {
35783@@ -8933,18 +8933,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
35784 return 1;
35785 }
35786
35787-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
35788+static const struct dmi_system_id intel_dmi_quirks_table[] = {
35789 {
35790- .dmi_id_list = &(const struct dmi_system_id[]) {
35791- {
35792- .callback = intel_dmi_reverse_brightness,
35793- .ident = "NCR Corporation",
35794- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
35795- DMI_MATCH(DMI_PRODUCT_NAME, ""),
35796- },
35797- },
35798- { } /* terminating entry */
35799+ .callback = intel_dmi_reverse_brightness,
35800+ .ident = "NCR Corporation",
35801+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
35802+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
35803 },
35804+ },
35805+ { } /* terminating entry */
35806+};
35807+
35808+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
35809+ {
35810+ .dmi_id_list = &intel_dmi_quirks_table,
35811 .hook = quirk_invert_brightness,
35812 },
35813 };
35814diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
35815index 54558a0..2d97005 100644
35816--- a/drivers/gpu/drm/mga/mga_drv.h
35817+++ b/drivers/gpu/drm/mga/mga_drv.h
35818@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
35819 u32 clear_cmd;
35820 u32 maccess;
35821
35822- atomic_t vbl_received; /**< Number of vblanks received. */
35823+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
35824 wait_queue_head_t fence_queue;
35825- atomic_t last_fence_retired;
35826+ atomic_unchecked_t last_fence_retired;
35827 u32 next_fence_to_post;
35828
35829 unsigned int fb_cpp;
35830diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
35831index 709e90d..89a1c0d 100644
35832--- a/drivers/gpu/drm/mga/mga_ioc32.c
35833+++ b/drivers/gpu/drm/mga/mga_ioc32.c
35834@@ -189,7 +189,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
35835 return 0;
35836 }
35837
35838-drm_ioctl_compat_t *mga_compat_ioctls[] = {
35839+drm_ioctl_compat_t mga_compat_ioctls[] = {
35840 [DRM_MGA_INIT] = compat_mga_init,
35841 [DRM_MGA_GETPARAM] = compat_mga_getparam,
35842 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
35843@@ -207,18 +207,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
35844 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
35845 {
35846 unsigned int nr = DRM_IOCTL_NR(cmd);
35847- drm_ioctl_compat_t *fn = NULL;
35848 int ret;
35849
35850 if (nr < DRM_COMMAND_BASE)
35851 return drm_compat_ioctl(filp, cmd, arg);
35852
35853- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
35854- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
35855-
35856- if (fn != NULL)
35857+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) {
35858+ drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
35859 ret = (*fn) (filp, cmd, arg);
35860- else
35861+ } else
35862 ret = drm_ioctl(filp, cmd, arg);
35863
35864 return ret;
35865diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
35866index 598c281..60d590e 100644
35867--- a/drivers/gpu/drm/mga/mga_irq.c
35868+++ b/drivers/gpu/drm/mga/mga_irq.c
35869@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
35870 if (crtc != 0)
35871 return 0;
35872
35873- return atomic_read(&dev_priv->vbl_received);
35874+ return atomic_read_unchecked(&dev_priv->vbl_received);
35875 }
35876
35877
35878@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
35879 /* VBLANK interrupt */
35880 if (status & MGA_VLINEPEN) {
35881 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
35882- atomic_inc(&dev_priv->vbl_received);
35883+ atomic_inc_unchecked(&dev_priv->vbl_received);
35884 drm_handle_vblank(dev, 0);
35885 handled = 1;
35886 }
35887@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
35888 if ((prim_start & ~0x03) != (prim_end & ~0x03))
35889 MGA_WRITE(MGA_PRIMEND, prim_end);
35890
35891- atomic_inc(&dev_priv->last_fence_retired);
35892+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
35893 DRM_WAKEUP(&dev_priv->fence_queue);
35894 handled = 1;
35895 }
35896@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
35897 * using fences.
35898 */
35899 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
35900- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
35901+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
35902 - *sequence) <= (1 << 23)));
35903
35904 *sequence = cur_fence;
35905diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
35906index 6aa2137..fe8dc55 100644
35907--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
35908+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
35909@@ -965,7 +965,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
35910 struct bit_table {
35911 const char id;
35912 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
35913-};
35914+} __no_const;
35915
35916 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
35917
35918diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
35919index f2b30f8..d0f9a95 100644
35920--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
35921+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
35922@@ -92,7 +92,7 @@ struct nouveau_drm {
35923 struct drm_global_reference mem_global_ref;
35924 struct ttm_bo_global_ref bo_global_ref;
35925 struct ttm_bo_device bdev;
35926- atomic_t validate_sequence;
35927+ atomic_unchecked_t validate_sequence;
35928 int (*move)(struct nouveau_channel *,
35929 struct ttm_buffer_object *,
35930 struct ttm_mem_reg *, struct ttm_mem_reg *);
35931diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
35932index b4b4d0c..b7edc15 100644
35933--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
35934+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
35935@@ -322,7 +322,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
35936 int ret, i;
35937 struct nouveau_bo *res_bo = NULL;
35938
35939- sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
35940+ sequence = atomic_add_return_unchecked(1, &drm->ttm.validate_sequence);
35941 retry:
35942 if (++trycnt > 100000) {
35943 NV_ERROR(cli, "%s failed and gave up.\n", __func__);
35944@@ -359,7 +359,7 @@ retry:
35945 if (ret) {
35946 validate_fini(op, NULL);
35947 if (unlikely(ret == -EAGAIN)) {
35948- sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
35949+ sequence = atomic_add_return_unchecked(1, &drm->ttm.validate_sequence);
35950 ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
35951 sequence);
35952 if (!ret)
35953diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
35954index 08214bc..9208577 100644
35955--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
35956+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
35957@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
35958 unsigned long arg)
35959 {
35960 unsigned int nr = DRM_IOCTL_NR(cmd);
35961- drm_ioctl_compat_t *fn = NULL;
35962+ drm_ioctl_compat_t fn = NULL;
35963 int ret;
35964
35965 if (nr < DRM_COMMAND_BASE)
35966diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
35967index 25d3495..d81aaf6 100644
35968--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
35969+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
35970@@ -62,7 +62,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
35971 bool can_switch;
35972
35973 spin_lock(&dev->count_lock);
35974- can_switch = (dev->open_count == 0);
35975+ can_switch = (local_read(&dev->open_count) == 0);
35976 spin_unlock(&dev->count_lock);
35977 return can_switch;
35978 }
35979diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
35980index 489cb8c..0b8d0d3 100644
35981--- a/drivers/gpu/drm/qxl/qxl_ttm.c
35982+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
35983@@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
35984 }
35985 }
35986
35987-static struct vm_operations_struct qxl_ttm_vm_ops;
35988+static vm_operations_struct_no_const qxl_ttm_vm_ops __read_only;
35989 static const struct vm_operations_struct *ttm_vm_ops;
35990
35991 static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
35992@@ -147,8 +147,10 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
35993 return r;
35994 if (unlikely(ttm_vm_ops == NULL)) {
35995 ttm_vm_ops = vma->vm_ops;
35996+ pax_open_kernel();
35997 qxl_ttm_vm_ops = *ttm_vm_ops;
35998 qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
35999+ pax_close_kernel();
36000 }
36001 vma->vm_ops = &qxl_ttm_vm_ops;
36002 return 0;
36003@@ -556,25 +558,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
36004 static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
36005 {
36006 #if defined(CONFIG_DEBUG_FS)
36007- static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
36008- static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
36009- unsigned i;
36010+ static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES] = {
36011+ {
36012+ .name = "qxl_mem_mm",
36013+ .show = &qxl_mm_dump_table,
36014+ },
36015+ {
36016+ .name = "qxl_surf_mm",
36017+ .show = &qxl_mm_dump_table,
36018+ }
36019+ };
36020
36021- for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
36022- if (i == 0)
36023- sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
36024- else
36025- sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
36026- qxl_mem_types_list[i].name = qxl_mem_types_names[i];
36027- qxl_mem_types_list[i].show = &qxl_mm_dump_table;
36028- qxl_mem_types_list[i].driver_features = 0;
36029- if (i == 0)
36030- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
36031- else
36032- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
36033+ pax_open_kernel();
36034+ *(void **)&qxl_mem_types_list[0].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
36035+ *(void **)&qxl_mem_types_list[1].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
36036+ pax_close_kernel();
36037
36038- }
36039- return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
36040+ return qxl_debugfs_add_files(qdev, qxl_mem_types_list, QXL_DEBUGFS_MEM_TYPES);
36041 #else
36042 return 0;
36043 #endif
36044diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
36045index d4660cf..70dbe65 100644
36046--- a/drivers/gpu/drm/r128/r128_cce.c
36047+++ b/drivers/gpu/drm/r128/r128_cce.c
36048@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
36049
36050 /* GH: Simple idle check.
36051 */
36052- atomic_set(&dev_priv->idle_count, 0);
36053+ atomic_set_unchecked(&dev_priv->idle_count, 0);
36054
36055 /* We don't support anything other than bus-mastering ring mode,
36056 * but the ring can be in either AGP or PCI space for the ring
36057diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
36058index 930c71b..499aded 100644
36059--- a/drivers/gpu/drm/r128/r128_drv.h
36060+++ b/drivers/gpu/drm/r128/r128_drv.h
36061@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
36062 int is_pci;
36063 unsigned long cce_buffers_offset;
36064
36065- atomic_t idle_count;
36066+ atomic_unchecked_t idle_count;
36067
36068 int page_flipping;
36069 int current_page;
36070 u32 crtc_offset;
36071 u32 crtc_offset_cntl;
36072
36073- atomic_t vbl_received;
36074+ atomic_unchecked_t vbl_received;
36075
36076 u32 color_fmt;
36077 unsigned int front_offset;
36078diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
36079index a954c54..9cc595c 100644
36080--- a/drivers/gpu/drm/r128/r128_ioc32.c
36081+++ b/drivers/gpu/drm/r128/r128_ioc32.c
36082@@ -177,7 +177,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
36083 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
36084 }
36085
36086-drm_ioctl_compat_t *r128_compat_ioctls[] = {
36087+drm_ioctl_compat_t r128_compat_ioctls[] = {
36088 [DRM_R128_INIT] = compat_r128_init,
36089 [DRM_R128_DEPTH] = compat_r128_depth,
36090 [DRM_R128_STIPPLE] = compat_r128_stipple,
36091@@ -196,18 +196,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
36092 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
36093 {
36094 unsigned int nr = DRM_IOCTL_NR(cmd);
36095- drm_ioctl_compat_t *fn = NULL;
36096 int ret;
36097
36098 if (nr < DRM_COMMAND_BASE)
36099 return drm_compat_ioctl(filp, cmd, arg);
36100
36101- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls))
36102- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
36103-
36104- if (fn != NULL)
36105+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls)) {
36106+ drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
36107 ret = (*fn) (filp, cmd, arg);
36108- else
36109+ } else
36110 ret = drm_ioctl(filp, cmd, arg);
36111
36112 return ret;
36113diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
36114index 2ea4f09..d391371 100644
36115--- a/drivers/gpu/drm/r128/r128_irq.c
36116+++ b/drivers/gpu/drm/r128/r128_irq.c
36117@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
36118 if (crtc != 0)
36119 return 0;
36120
36121- return atomic_read(&dev_priv->vbl_received);
36122+ return atomic_read_unchecked(&dev_priv->vbl_received);
36123 }
36124
36125 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
36126@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
36127 /* VBLANK interrupt */
36128 if (status & R128_CRTC_VBLANK_INT) {
36129 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
36130- atomic_inc(&dev_priv->vbl_received);
36131+ atomic_inc_unchecked(&dev_priv->vbl_received);
36132 drm_handle_vblank(dev, 0);
36133 return IRQ_HANDLED;
36134 }
36135diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
36136index 19bb7e6..de7e2a2 100644
36137--- a/drivers/gpu/drm/r128/r128_state.c
36138+++ b/drivers/gpu/drm/r128/r128_state.c
36139@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
36140
36141 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
36142 {
36143- if (atomic_read(&dev_priv->idle_count) == 0)
36144+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
36145 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
36146 else
36147- atomic_set(&dev_priv->idle_count, 0);
36148+ atomic_set_unchecked(&dev_priv->idle_count, 0);
36149 }
36150
36151 #endif
36152diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
36153index 5a82b6b..9e69c73 100644
36154--- a/drivers/gpu/drm/radeon/mkregtable.c
36155+++ b/drivers/gpu/drm/radeon/mkregtable.c
36156@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
36157 regex_t mask_rex;
36158 regmatch_t match[4];
36159 char buf[1024];
36160- size_t end;
36161+ long end;
36162 int len;
36163 int done = 0;
36164 int r;
36165 unsigned o;
36166 struct offset *offset;
36167 char last_reg_s[10];
36168- int last_reg;
36169+ unsigned long last_reg;
36170
36171 if (regcomp
36172 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
36173diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
36174index b0dc0b6..a9bfe9c 100644
36175--- a/drivers/gpu/drm/radeon/radeon_device.c
36176+++ b/drivers/gpu/drm/radeon/radeon_device.c
36177@@ -1014,7 +1014,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
36178 bool can_switch;
36179
36180 spin_lock(&dev->count_lock);
36181- can_switch = (dev->open_count == 0);
36182+ can_switch = (local_read(&dev->open_count) == 0);
36183 spin_unlock(&dev->count_lock);
36184 return can_switch;
36185 }
36186diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
36187index b369d42..8dd04eb 100644
36188--- a/drivers/gpu/drm/radeon/radeon_drv.h
36189+++ b/drivers/gpu/drm/radeon/radeon_drv.h
36190@@ -258,7 +258,7 @@ typedef struct drm_radeon_private {
36191
36192 /* SW interrupt */
36193 wait_queue_head_t swi_queue;
36194- atomic_t swi_emitted;
36195+ atomic_unchecked_t swi_emitted;
36196 int vblank_crtc;
36197 uint32_t irq_enable_reg;
36198 uint32_t r500_disp_irq_reg;
36199diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
36200index c180df8..5fd8186 100644
36201--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
36202+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
36203@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
36204 request = compat_alloc_user_space(sizeof(*request));
36205 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
36206 || __put_user(req32.param, &request->param)
36207- || __put_user((void __user *)(unsigned long)req32.value,
36208+ || __put_user((unsigned long)req32.value,
36209 &request->value))
36210 return -EFAULT;
36211
36212@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
36213 #define compat_radeon_cp_setparam NULL
36214 #endif /* X86_64 || IA64 */
36215
36216-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
36217+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
36218 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
36219 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
36220 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
36221@@ -393,18 +393,15 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
36222 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
36223 {
36224 unsigned int nr = DRM_IOCTL_NR(cmd);
36225- drm_ioctl_compat_t *fn = NULL;
36226 int ret;
36227
36228 if (nr < DRM_COMMAND_BASE)
36229 return drm_compat_ioctl(filp, cmd, arg);
36230
36231- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
36232- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
36233-
36234- if (fn != NULL)
36235+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls)) {
36236+ drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
36237 ret = (*fn) (filp, cmd, arg);
36238- else
36239+ } else
36240 ret = drm_ioctl(filp, cmd, arg);
36241
36242 return ret;
36243diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
36244index 8d68e97..9dcfed8 100644
36245--- a/drivers/gpu/drm/radeon/radeon_irq.c
36246+++ b/drivers/gpu/drm/radeon/radeon_irq.c
36247@@ -226,8 +226,8 @@ static int radeon_emit_irq(struct drm_device * dev)
36248 unsigned int ret;
36249 RING_LOCALS;
36250
36251- atomic_inc(&dev_priv->swi_emitted);
36252- ret = atomic_read(&dev_priv->swi_emitted);
36253+ atomic_inc_unchecked(&dev_priv->swi_emitted);
36254+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
36255
36256 BEGIN_RING(4);
36257 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
36258@@ -353,7 +353,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
36259 drm_radeon_private_t *dev_priv =
36260 (drm_radeon_private_t *) dev->dev_private;
36261
36262- atomic_set(&dev_priv->swi_emitted, 0);
36263+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
36264 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
36265
36266 dev->max_vblank_count = 0x001fffff;
36267diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
36268index 4d20910..6726b6d 100644
36269--- a/drivers/gpu/drm/radeon/radeon_state.c
36270+++ b/drivers/gpu/drm/radeon/radeon_state.c
36271@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
36272 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
36273 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
36274
36275- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
36276+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
36277 sarea_priv->nbox * sizeof(depth_boxes[0])))
36278 return -EFAULT;
36279
36280@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
36281 {
36282 drm_radeon_private_t *dev_priv = dev->dev_private;
36283 drm_radeon_getparam_t *param = data;
36284- int value;
36285+ int value = 0;
36286
36287 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
36288
36289diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
36290index 6c0ce89..57a2529 100644
36291--- a/drivers/gpu/drm/radeon/radeon_ttm.c
36292+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
36293@@ -782,7 +782,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
36294 man->size = size >> PAGE_SHIFT;
36295 }
36296
36297-static struct vm_operations_struct radeon_ttm_vm_ops;
36298+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
36299 static const struct vm_operations_struct *ttm_vm_ops = NULL;
36300
36301 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
36302@@ -823,8 +823,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
36303 }
36304 if (unlikely(ttm_vm_ops == NULL)) {
36305 ttm_vm_ops = vma->vm_ops;
36306+ pax_open_kernel();
36307 radeon_ttm_vm_ops = *ttm_vm_ops;
36308 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
36309+ pax_close_kernel();
36310 }
36311 vma->vm_ops = &radeon_ttm_vm_ops;
36312 return 0;
36313@@ -853,38 +855,33 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
36314 static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
36315 {
36316 #if defined(CONFIG_DEBUG_FS)
36317- static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2];
36318- static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32];
36319+ static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2] = {
36320+ {
36321+ .name = "radeon_vram_mm",
36322+ .show = &radeon_mm_dump_table,
36323+ },
36324+ {
36325+ .name = "radeon_gtt_mm",
36326+ .show = &radeon_mm_dump_table,
36327+ },
36328+ {
36329+ .name = "ttm_page_pool",
36330+ .show = &ttm_page_alloc_debugfs,
36331+ },
36332+ {
36333+ .name = "ttm_dma_page_pool",
36334+ .show = &ttm_dma_page_alloc_debugfs,
36335+ },
36336+ };
36337 unsigned i;
36338
36339- for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
36340- if (i == 0)
36341- sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
36342- else
36343- sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
36344- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
36345- radeon_mem_types_list[i].show = &radeon_mm_dump_table;
36346- radeon_mem_types_list[i].driver_features = 0;
36347- if (i == 0)
36348- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
36349- else
36350- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
36351-
36352- }
36353- /* Add ttm page pool to debugfs */
36354- sprintf(radeon_mem_types_names[i], "ttm_page_pool");
36355- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
36356- radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
36357- radeon_mem_types_list[i].driver_features = 0;
36358- radeon_mem_types_list[i++].data = NULL;
36359+ pax_open_kernel();
36360+ *(void **)&radeon_mem_types_list[0].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
36361+ *(void **)&radeon_mem_types_list[1].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
36362+ pax_close_kernel();
36363 #ifdef CONFIG_SWIOTLB
36364- if (swiotlb_nr_tbl()) {
36365- sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
36366- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
36367- radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
36368- radeon_mem_types_list[i].driver_features = 0;
36369- radeon_mem_types_list[i++].data = NULL;
36370- }
36371+ if (swiotlb_nr_tbl())
36372+ i++;
36373 #endif
36374 return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);
36375
36376diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
36377index 55880d5..9e95342 100644
36378--- a/drivers/gpu/drm/radeon/rs690.c
36379+++ b/drivers/gpu/drm/radeon/rs690.c
36380@@ -327,9 +327,11 @@ static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
36381 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
36382 rdev->pm.sideport_bandwidth.full)
36383 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
36384- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
36385+ read_delay_latency.full = dfixed_const(800 * 1000);
36386 read_delay_latency.full = dfixed_div(read_delay_latency,
36387 rdev->pm.igp_sideport_mclk);
36388+ a.full = dfixed_const(370);
36389+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
36390 } else {
36391 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
36392 rdev->pm.k8_bandwidth.full)
36393diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
36394index dbc2def..0a9f710 100644
36395--- a/drivers/gpu/drm/ttm/ttm_memory.c
36396+++ b/drivers/gpu/drm/ttm/ttm_memory.c
36397@@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
36398 zone->glob = glob;
36399 glob->zone_kernel = zone;
36400 ret = kobject_init_and_add(
36401- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
36402+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
36403 if (unlikely(ret != 0)) {
36404 kobject_put(&zone->kobj);
36405 return ret;
36406@@ -347,7 +347,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
36407 zone->glob = glob;
36408 glob->zone_dma32 = zone;
36409 ret = kobject_init_and_add(
36410- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
36411+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
36412 if (unlikely(ret != 0)) {
36413 kobject_put(&zone->kobj);
36414 return ret;
36415diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
36416index bd2a3b4..122d9ad 100644
36417--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
36418+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
36419@@ -394,9 +394,9 @@ static int ttm_pool_get_num_unused_pages(void)
36420 static int ttm_pool_mm_shrink(struct shrinker *shrink,
36421 struct shrink_control *sc)
36422 {
36423- static atomic_t start_pool = ATOMIC_INIT(0);
36424+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
36425 unsigned i;
36426- unsigned pool_offset = atomic_add_return(1, &start_pool);
36427+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
36428 struct ttm_page_pool *pool;
36429 int shrink_pages = sc->nr_to_scan;
36430
36431diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
36432index dc0c065..58a0782 100644
36433--- a/drivers/gpu/drm/udl/udl_fb.c
36434+++ b/drivers/gpu/drm/udl/udl_fb.c
36435@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
36436 fb_deferred_io_cleanup(info);
36437 kfree(info->fbdefio);
36438 info->fbdefio = NULL;
36439- info->fbops->fb_mmap = udl_fb_mmap;
36440 }
36441
36442 pr_warn("released /dev/fb%d user=%d count=%d\n",
36443diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
36444index 893a650..6190d3b 100644
36445--- a/drivers/gpu/drm/via/via_drv.h
36446+++ b/drivers/gpu/drm/via/via_drv.h
36447@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
36448 typedef uint32_t maskarray_t[5];
36449
36450 typedef struct drm_via_irq {
36451- atomic_t irq_received;
36452+ atomic_unchecked_t irq_received;
36453 uint32_t pending_mask;
36454 uint32_t enable_mask;
36455 wait_queue_head_t irq_queue;
36456@@ -75,7 +75,7 @@ typedef struct drm_via_private {
36457 struct timeval last_vblank;
36458 int last_vblank_valid;
36459 unsigned usec_per_vblank;
36460- atomic_t vbl_received;
36461+ atomic_unchecked_t vbl_received;
36462 drm_via_state_t hc_state;
36463 char pci_buf[VIA_PCI_BUF_SIZE];
36464 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
36465diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
36466index ac98964..5dbf512 100644
36467--- a/drivers/gpu/drm/via/via_irq.c
36468+++ b/drivers/gpu/drm/via/via_irq.c
36469@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
36470 if (crtc != 0)
36471 return 0;
36472
36473- return atomic_read(&dev_priv->vbl_received);
36474+ return atomic_read_unchecked(&dev_priv->vbl_received);
36475 }
36476
36477 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
36478@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
36479
36480 status = VIA_READ(VIA_REG_INTERRUPT);
36481 if (status & VIA_IRQ_VBLANK_PENDING) {
36482- atomic_inc(&dev_priv->vbl_received);
36483- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
36484+ atomic_inc_unchecked(&dev_priv->vbl_received);
36485+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
36486 do_gettimeofday(&cur_vblank);
36487 if (dev_priv->last_vblank_valid) {
36488 dev_priv->usec_per_vblank =
36489@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
36490 dev_priv->last_vblank = cur_vblank;
36491 dev_priv->last_vblank_valid = 1;
36492 }
36493- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
36494+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
36495 DRM_DEBUG("US per vblank is: %u\n",
36496 dev_priv->usec_per_vblank);
36497 }
36498@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
36499
36500 for (i = 0; i < dev_priv->num_irqs; ++i) {
36501 if (status & cur_irq->pending_mask) {
36502- atomic_inc(&cur_irq->irq_received);
36503+ atomic_inc_unchecked(&cur_irq->irq_received);
36504 DRM_WAKEUP(&cur_irq->irq_queue);
36505 handled = 1;
36506 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
36507@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
36508 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
36509 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
36510 masks[irq][4]));
36511- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
36512+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
36513 } else {
36514 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
36515 (((cur_irq_sequence =
36516- atomic_read(&cur_irq->irq_received)) -
36517+ atomic_read_unchecked(&cur_irq->irq_received)) -
36518 *sequence) <= (1 << 23)));
36519 }
36520 *sequence = cur_irq_sequence;
36521@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
36522 }
36523
36524 for (i = 0; i < dev_priv->num_irqs; ++i) {
36525- atomic_set(&cur_irq->irq_received, 0);
36526+ atomic_set_unchecked(&cur_irq->irq_received, 0);
36527 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
36528 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
36529 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
36530@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
36531 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
36532 case VIA_IRQ_RELATIVE:
36533 irqwait->request.sequence +=
36534- atomic_read(&cur_irq->irq_received);
36535+ atomic_read_unchecked(&cur_irq->irq_received);
36536 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
36537 case VIA_IRQ_ABSOLUTE:
36538 break;
36539diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
36540index 13aeda7..4a952d1 100644
36541--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
36542+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
36543@@ -290,7 +290,7 @@ struct vmw_private {
36544 * Fencing and IRQs.
36545 */
36546
36547- atomic_t marker_seq;
36548+ atomic_unchecked_t marker_seq;
36549 wait_queue_head_t fence_queue;
36550 wait_queue_head_t fifo_queue;
36551 int fence_queue_waiters; /* Protected by hw_mutex */
36552diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
36553index 3eb1486..0a47ee9 100644
36554--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
36555+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
36556@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
36557 (unsigned int) min,
36558 (unsigned int) fifo->capabilities);
36559
36560- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
36561+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
36562 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
36563 vmw_marker_queue_init(&fifo->marker_queue);
36564 return vmw_fifo_send_fence(dev_priv, &dummy);
36565@@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
36566 if (reserveable)
36567 iowrite32(bytes, fifo_mem +
36568 SVGA_FIFO_RESERVED);
36569- return fifo_mem + (next_cmd >> 2);
36570+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
36571 } else {
36572 need_bounce = true;
36573 }
36574@@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
36575
36576 fm = vmw_fifo_reserve(dev_priv, bytes);
36577 if (unlikely(fm == NULL)) {
36578- *seqno = atomic_read(&dev_priv->marker_seq);
36579+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
36580 ret = -ENOMEM;
36581 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
36582 false, 3*HZ);
36583@@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
36584 }
36585
36586 do {
36587- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
36588+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
36589 } while (*seqno == 0);
36590
36591 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
36592diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
36593index c509d40..3b640c3 100644
36594--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
36595+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
36596@@ -138,7 +138,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
36597 int ret;
36598
36599 num_clips = arg->num_clips;
36600- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
36601+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
36602
36603 if (unlikely(num_clips == 0))
36604 return 0;
36605@@ -222,7 +222,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
36606 int ret;
36607
36608 num_clips = arg->num_clips;
36609- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
36610+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
36611
36612 if (unlikely(num_clips == 0))
36613 return 0;
36614diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
36615index 4640adb..e1384ed 100644
36616--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
36617+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
36618@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
36619 * emitted. Then the fence is stale and signaled.
36620 */
36621
36622- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
36623+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
36624 > VMW_FENCE_WRAP);
36625
36626 return ret;
36627@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
36628
36629 if (fifo_idle)
36630 down_read(&fifo_state->rwsem);
36631- signal_seq = atomic_read(&dev_priv->marker_seq);
36632+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
36633 ret = 0;
36634
36635 for (;;) {
36636diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
36637index 8a8725c2..afed796 100644
36638--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
36639+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
36640@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
36641 while (!vmw_lag_lt(queue, us)) {
36642 spin_lock(&queue->lock);
36643 if (list_empty(&queue->head))
36644- seqno = atomic_read(&dev_priv->marker_seq);
36645+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
36646 else {
36647 marker = list_first_entry(&queue->head,
36648 struct vmw_marker, head);
36649diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
36650index 264f550..f92dd8c 100644
36651--- a/drivers/hid/hid-core.c
36652+++ b/drivers/hid/hid-core.c
36653@@ -2269,7 +2269,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
36654
36655 int hid_add_device(struct hid_device *hdev)
36656 {
36657- static atomic_t id = ATOMIC_INIT(0);
36658+ static atomic_unchecked_t id = ATOMIC_INIT(0);
36659 int ret;
36660
36661 if (WARN_ON(hdev->status & HID_STAT_ADDED))
36662@@ -2303,7 +2303,7 @@ int hid_add_device(struct hid_device *hdev)
36663 /* XXX hack, any other cleaner solution after the driver core
36664 * is converted to allow more than 20 bytes as the device name? */
36665 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
36666- hdev->vendor, hdev->product, atomic_inc_return(&id));
36667+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
36668
36669 hid_debug_register(hdev, dev_name(&hdev->dev));
36670 ret = device_add(&hdev->dev);
36671diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
36672index 90124ff..3761764 100644
36673--- a/drivers/hid/hid-wiimote-debug.c
36674+++ b/drivers/hid/hid-wiimote-debug.c
36675@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
36676 else if (size == 0)
36677 return -EIO;
36678
36679- if (copy_to_user(u, buf, size))
36680+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
36681 return -EFAULT;
36682
36683 *off += size;
36684diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
36685index 0b122f8..b1d8160 100644
36686--- a/drivers/hv/channel.c
36687+++ b/drivers/hv/channel.c
36688@@ -394,8 +394,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
36689 int ret = 0;
36690 int t;
36691
36692- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
36693- atomic_inc(&vmbus_connection.next_gpadl_handle);
36694+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
36695+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
36696
36697 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
36698 if (ret)
36699diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
36700index ae49237..380d4c9 100644
36701--- a/drivers/hv/hv.c
36702+++ b/drivers/hv/hv.c
36703@@ -112,7 +112,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
36704 u64 output_address = (output) ? virt_to_phys(output) : 0;
36705 u32 output_address_hi = output_address >> 32;
36706 u32 output_address_lo = output_address & 0xFFFFFFFF;
36707- void *hypercall_page = hv_context.hypercall_page;
36708+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
36709
36710 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
36711 "=a"(hv_status_lo) : "d" (control_hi),
36712diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
36713index 12f2f9e..679603c 100644
36714--- a/drivers/hv/hyperv_vmbus.h
36715+++ b/drivers/hv/hyperv_vmbus.h
36716@@ -591,7 +591,7 @@ enum vmbus_connect_state {
36717 struct vmbus_connection {
36718 enum vmbus_connect_state conn_state;
36719
36720- atomic_t next_gpadl_handle;
36721+ atomic_unchecked_t next_gpadl_handle;
36722
36723 /*
36724 * Represents channel interrupts. Each bit position represents a
36725diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
36726index bf421e0..ce2c897 100644
36727--- a/drivers/hv/vmbus_drv.c
36728+++ b/drivers/hv/vmbus_drv.c
36729@@ -668,10 +668,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
36730 {
36731 int ret = 0;
36732
36733- static atomic_t device_num = ATOMIC_INIT(0);
36734+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
36735
36736 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
36737- atomic_inc_return(&device_num));
36738+ atomic_inc_return_unchecked(&device_num));
36739
36740 child_device_obj->device.bus = &hv_bus;
36741 child_device_obj->device.parent = &hv_acpi_dev->dev;
36742diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
36743index 6351aba..dc4aaf4 100644
36744--- a/drivers/hwmon/acpi_power_meter.c
36745+++ b/drivers/hwmon/acpi_power_meter.c
36746@@ -117,7 +117,7 @@ struct sensor_template {
36747 struct device_attribute *devattr,
36748 const char *buf, size_t count);
36749 int index;
36750-};
36751+} __do_const;
36752
36753 /* Averaging interval */
36754 static int update_avg_interval(struct acpi_power_meter_resource *resource)
36755@@ -629,7 +629,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
36756 struct sensor_template *attrs)
36757 {
36758 struct device *dev = &resource->acpi_dev->dev;
36759- struct sensor_device_attribute *sensors =
36760+ sensor_device_attribute_no_const *sensors =
36761 &resource->sensors[resource->num_sensors];
36762 int res = 0;
36763
36764diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
36765index 62c2e32..8f2859a 100644
36766--- a/drivers/hwmon/applesmc.c
36767+++ b/drivers/hwmon/applesmc.c
36768@@ -1084,7 +1084,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
36769 {
36770 struct applesmc_node_group *grp;
36771 struct applesmc_dev_attr *node;
36772- struct attribute *attr;
36773+ attribute_no_const *attr;
36774 int ret, i;
36775
36776 for (grp = groups; grp->format; grp++) {
36777diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
36778index b25c643..a13460d 100644
36779--- a/drivers/hwmon/asus_atk0110.c
36780+++ b/drivers/hwmon/asus_atk0110.c
36781@@ -152,10 +152,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
36782 struct atk_sensor_data {
36783 struct list_head list;
36784 struct atk_data *data;
36785- struct device_attribute label_attr;
36786- struct device_attribute input_attr;
36787- struct device_attribute limit1_attr;
36788- struct device_attribute limit2_attr;
36789+ device_attribute_no_const label_attr;
36790+ device_attribute_no_const input_attr;
36791+ device_attribute_no_const limit1_attr;
36792+ device_attribute_no_const limit2_attr;
36793 char label_attr_name[ATTR_NAME_SIZE];
36794 char input_attr_name[ATTR_NAME_SIZE];
36795 char limit1_attr_name[ATTR_NAME_SIZE];
36796@@ -275,7 +275,7 @@ static ssize_t atk_name_show(struct device *dev,
36797 static struct device_attribute atk_name_attr =
36798 __ATTR(name, 0444, atk_name_show, NULL);
36799
36800-static void atk_init_attribute(struct device_attribute *attr, char *name,
36801+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
36802 sysfs_show_func show)
36803 {
36804 sysfs_attr_init(&attr->attr);
36805diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
36806index 658ce3a..0d0c2f3 100644
36807--- a/drivers/hwmon/coretemp.c
36808+++ b/drivers/hwmon/coretemp.c
36809@@ -790,7 +790,7 @@ static int __cpuinit coretemp_cpu_callback(struct notifier_block *nfb,
36810 return NOTIFY_OK;
36811 }
36812
36813-static struct notifier_block coretemp_cpu_notifier __refdata = {
36814+static struct notifier_block coretemp_cpu_notifier = {
36815 .notifier_call = coretemp_cpu_callback,
36816 };
36817
36818diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
36819index 1429f6e..ee03d59 100644
36820--- a/drivers/hwmon/ibmaem.c
36821+++ b/drivers/hwmon/ibmaem.c
36822@@ -926,7 +926,7 @@ static int aem_register_sensors(struct aem_data *data,
36823 struct aem_rw_sensor_template *rw)
36824 {
36825 struct device *dev = &data->pdev->dev;
36826- struct sensor_device_attribute *sensors = data->sensors;
36827+ sensor_device_attribute_no_const *sensors = data->sensors;
36828 int err;
36829
36830 /* Set up read-only sensors */
36831diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
36832index 52b77af..aed1ddf 100644
36833--- a/drivers/hwmon/iio_hwmon.c
36834+++ b/drivers/hwmon/iio_hwmon.c
36835@@ -73,7 +73,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
36836 {
36837 struct device *dev = &pdev->dev;
36838 struct iio_hwmon_state *st;
36839- struct sensor_device_attribute *a;
36840+ sensor_device_attribute_no_const *a;
36841 int ret, i;
36842 int in_i = 1, temp_i = 1, curr_i = 1;
36843 enum iio_chan_type type;
36844diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
36845index 9add6092..ee7ba3f 100644
36846--- a/drivers/hwmon/pmbus/pmbus_core.c
36847+++ b/drivers/hwmon/pmbus/pmbus_core.c
36848@@ -781,7 +781,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
36849 return 0;
36850 }
36851
36852-static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
36853+static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr,
36854 const char *name,
36855 umode_t mode,
36856 ssize_t (*show)(struct device *dev,
36857@@ -798,7 +798,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
36858 dev_attr->store = store;
36859 }
36860
36861-static void pmbus_attr_init(struct sensor_device_attribute *a,
36862+static void pmbus_attr_init(sensor_device_attribute_no_const *a,
36863 const char *name,
36864 umode_t mode,
36865 ssize_t (*show)(struct device *dev,
36866@@ -820,7 +820,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
36867 u16 reg, u8 mask)
36868 {
36869 struct pmbus_boolean *boolean;
36870- struct sensor_device_attribute *a;
36871+ sensor_device_attribute_no_const *a;
36872
36873 boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
36874 if (!boolean)
36875@@ -845,7 +845,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
36876 bool update, bool readonly)
36877 {
36878 struct pmbus_sensor *sensor;
36879- struct device_attribute *a;
36880+ device_attribute_no_const *a;
36881
36882 sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
36883 if (!sensor)
36884@@ -876,7 +876,7 @@ static int pmbus_add_label(struct pmbus_data *data,
36885 const char *lstring, int index)
36886 {
36887 struct pmbus_label *label;
36888- struct device_attribute *a;
36889+ device_attribute_no_const *a;
36890
36891 label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
36892 if (!label)
36893diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
36894index 2507f90..1645765 100644
36895--- a/drivers/hwmon/sht15.c
36896+++ b/drivers/hwmon/sht15.c
36897@@ -169,7 +169,7 @@ struct sht15_data {
36898 int supply_uv;
36899 bool supply_uv_valid;
36900 struct work_struct update_supply_work;
36901- atomic_t interrupt_handled;
36902+ atomic_unchecked_t interrupt_handled;
36903 };
36904
36905 /**
36906@@ -542,13 +542,13 @@ static int sht15_measurement(struct sht15_data *data,
36907 ret = gpio_direction_input(data->pdata->gpio_data);
36908 if (ret)
36909 return ret;
36910- atomic_set(&data->interrupt_handled, 0);
36911+ atomic_set_unchecked(&data->interrupt_handled, 0);
36912
36913 enable_irq(gpio_to_irq(data->pdata->gpio_data));
36914 if (gpio_get_value(data->pdata->gpio_data) == 0) {
36915 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
36916 /* Only relevant if the interrupt hasn't occurred. */
36917- if (!atomic_read(&data->interrupt_handled))
36918+ if (!atomic_read_unchecked(&data->interrupt_handled))
36919 schedule_work(&data->read_work);
36920 }
36921 ret = wait_event_timeout(data->wait_queue,
36922@@ -820,7 +820,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
36923
36924 /* First disable the interrupt */
36925 disable_irq_nosync(irq);
36926- atomic_inc(&data->interrupt_handled);
36927+ atomic_inc_unchecked(&data->interrupt_handled);
36928 /* Then schedule a reading work struct */
36929 if (data->state != SHT15_READING_NOTHING)
36930 schedule_work(&data->read_work);
36931@@ -842,11 +842,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
36932 * If not, then start the interrupt again - care here as could
36933 * have gone low in meantime so verify it hasn't!
36934 */
36935- atomic_set(&data->interrupt_handled, 0);
36936+ atomic_set_unchecked(&data->interrupt_handled, 0);
36937 enable_irq(gpio_to_irq(data->pdata->gpio_data));
36938 /* If still not occurred or another handler was scheduled */
36939 if (gpio_get_value(data->pdata->gpio_data)
36940- || atomic_read(&data->interrupt_handled))
36941+ || atomic_read_unchecked(&data->interrupt_handled))
36942 return;
36943 }
36944
36945diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
36946index 76f157b..9c0db1b 100644
36947--- a/drivers/hwmon/via-cputemp.c
36948+++ b/drivers/hwmon/via-cputemp.c
36949@@ -296,7 +296,7 @@ static int __cpuinit via_cputemp_cpu_callback(struct notifier_block *nfb,
36950 return NOTIFY_OK;
36951 }
36952
36953-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
36954+static struct notifier_block via_cputemp_cpu_notifier = {
36955 .notifier_call = via_cputemp_cpu_callback,
36956 };
36957
36958diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
36959index 07f01ac..d79ad3d 100644
36960--- a/drivers/i2c/busses/i2c-amd756-s4882.c
36961+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
36962@@ -43,7 +43,7 @@
36963 extern struct i2c_adapter amd756_smbus;
36964
36965 static struct i2c_adapter *s4882_adapter;
36966-static struct i2c_algorithm *s4882_algo;
36967+static i2c_algorithm_no_const *s4882_algo;
36968
36969 /* Wrapper access functions for multiplexed SMBus */
36970 static DEFINE_MUTEX(amd756_lock);
36971diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
36972index 2ca268d..c6acbdf 100644
36973--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
36974+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
36975@@ -41,7 +41,7 @@
36976 extern struct i2c_adapter *nforce2_smbus;
36977
36978 static struct i2c_adapter *s4985_adapter;
36979-static struct i2c_algorithm *s4985_algo;
36980+static i2c_algorithm_no_const *s4985_algo;
36981
36982 /* Wrapper access functions for multiplexed SMBus */
36983 static DEFINE_MUTEX(nforce2_lock);
36984diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
36985index c3ccdea..5b3dc1a 100644
36986--- a/drivers/i2c/i2c-dev.c
36987+++ b/drivers/i2c/i2c-dev.c
36988@@ -271,7 +271,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
36989 break;
36990 }
36991
36992- data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
36993+ data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
36994 rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
36995 if (IS_ERR(rdwr_pa[i].buf)) {
36996 res = PTR_ERR(rdwr_pa[i].buf);
36997diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
36998index 2ff6204..218c16e 100644
36999--- a/drivers/ide/ide-cd.c
37000+++ b/drivers/ide/ide-cd.c
37001@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
37002 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
37003 if ((unsigned long)buf & alignment
37004 || blk_rq_bytes(rq) & q->dma_pad_mask
37005- || object_is_on_stack(buf))
37006+ || object_starts_on_stack(buf))
37007 drive->dma = 0;
37008 }
37009 }
37010diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
37011index e145931..08bfc59 100644
37012--- a/drivers/iio/industrialio-core.c
37013+++ b/drivers/iio/industrialio-core.c
37014@@ -506,7 +506,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
37015 }
37016
37017 static
37018-int __iio_device_attr_init(struct device_attribute *dev_attr,
37019+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
37020 const char *postfix,
37021 struct iio_chan_spec const *chan,
37022 ssize_t (*readfunc)(struct device *dev,
37023diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
37024index 784b97c..c9ceadf 100644
37025--- a/drivers/infiniband/core/cm.c
37026+++ b/drivers/infiniband/core/cm.c
37027@@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
37028
37029 struct cm_counter_group {
37030 struct kobject obj;
37031- atomic_long_t counter[CM_ATTR_COUNT];
37032+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
37033 };
37034
37035 struct cm_counter_attribute {
37036@@ -1395,7 +1395,7 @@ static void cm_dup_req_handler(struct cm_work *work,
37037 struct ib_mad_send_buf *msg = NULL;
37038 int ret;
37039
37040- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
37041+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
37042 counter[CM_REQ_COUNTER]);
37043
37044 /* Quick state check to discard duplicate REQs. */
37045@@ -1779,7 +1779,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
37046 if (!cm_id_priv)
37047 return;
37048
37049- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
37050+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
37051 counter[CM_REP_COUNTER]);
37052 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
37053 if (ret)
37054@@ -1946,7 +1946,7 @@ static int cm_rtu_handler(struct cm_work *work)
37055 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
37056 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
37057 spin_unlock_irq(&cm_id_priv->lock);
37058- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
37059+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
37060 counter[CM_RTU_COUNTER]);
37061 goto out;
37062 }
37063@@ -2129,7 +2129,7 @@ static int cm_dreq_handler(struct cm_work *work)
37064 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
37065 dreq_msg->local_comm_id);
37066 if (!cm_id_priv) {
37067- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
37068+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
37069 counter[CM_DREQ_COUNTER]);
37070 cm_issue_drep(work->port, work->mad_recv_wc);
37071 return -EINVAL;
37072@@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_work *work)
37073 case IB_CM_MRA_REP_RCVD:
37074 break;
37075 case IB_CM_TIMEWAIT:
37076- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
37077+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
37078 counter[CM_DREQ_COUNTER]);
37079 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
37080 goto unlock;
37081@@ -2168,7 +2168,7 @@ static int cm_dreq_handler(struct cm_work *work)
37082 cm_free_msg(msg);
37083 goto deref;
37084 case IB_CM_DREQ_RCVD:
37085- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
37086+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
37087 counter[CM_DREQ_COUNTER]);
37088 goto unlock;
37089 default:
37090@@ -2535,7 +2535,7 @@ static int cm_mra_handler(struct cm_work *work)
37091 ib_modify_mad(cm_id_priv->av.port->mad_agent,
37092 cm_id_priv->msg, timeout)) {
37093 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
37094- atomic_long_inc(&work->port->
37095+ atomic_long_inc_unchecked(&work->port->
37096 counter_group[CM_RECV_DUPLICATES].
37097 counter[CM_MRA_COUNTER]);
37098 goto out;
37099@@ -2544,7 +2544,7 @@ static int cm_mra_handler(struct cm_work *work)
37100 break;
37101 case IB_CM_MRA_REQ_RCVD:
37102 case IB_CM_MRA_REP_RCVD:
37103- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
37104+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
37105 counter[CM_MRA_COUNTER]);
37106 /* fall through */
37107 default:
37108@@ -2706,7 +2706,7 @@ static int cm_lap_handler(struct cm_work *work)
37109 case IB_CM_LAP_IDLE:
37110 break;
37111 case IB_CM_MRA_LAP_SENT:
37112- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
37113+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
37114 counter[CM_LAP_COUNTER]);
37115 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
37116 goto unlock;
37117@@ -2722,7 +2722,7 @@ static int cm_lap_handler(struct cm_work *work)
37118 cm_free_msg(msg);
37119 goto deref;
37120 case IB_CM_LAP_RCVD:
37121- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
37122+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
37123 counter[CM_LAP_COUNTER]);
37124 goto unlock;
37125 default:
37126@@ -3006,7 +3006,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
37127 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
37128 if (cur_cm_id_priv) {
37129 spin_unlock_irq(&cm.lock);
37130- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
37131+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
37132 counter[CM_SIDR_REQ_COUNTER]);
37133 goto out; /* Duplicate message. */
37134 }
37135@@ -3218,10 +3218,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
37136 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
37137 msg->retries = 1;
37138
37139- atomic_long_add(1 + msg->retries,
37140+ atomic_long_add_unchecked(1 + msg->retries,
37141 &port->counter_group[CM_XMIT].counter[attr_index]);
37142 if (msg->retries)
37143- atomic_long_add(msg->retries,
37144+ atomic_long_add_unchecked(msg->retries,
37145 &port->counter_group[CM_XMIT_RETRIES].
37146 counter[attr_index]);
37147
37148@@ -3431,7 +3431,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
37149 }
37150
37151 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
37152- atomic_long_inc(&port->counter_group[CM_RECV].
37153+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
37154 counter[attr_id - CM_ATTR_ID_OFFSET]);
37155
37156 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
37157@@ -3636,7 +3636,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
37158 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
37159
37160 return sprintf(buf, "%ld\n",
37161- atomic_long_read(&group->counter[cm_attr->index]));
37162+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
37163 }
37164
37165 static const struct sysfs_ops cm_counter_ops = {
37166diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
37167index 9f5ad7c..588cd84 100644
37168--- a/drivers/infiniband/core/fmr_pool.c
37169+++ b/drivers/infiniband/core/fmr_pool.c
37170@@ -98,8 +98,8 @@ struct ib_fmr_pool {
37171
37172 struct task_struct *thread;
37173
37174- atomic_t req_ser;
37175- atomic_t flush_ser;
37176+ atomic_unchecked_t req_ser;
37177+ atomic_unchecked_t flush_ser;
37178
37179 wait_queue_head_t force_wait;
37180 };
37181@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
37182 struct ib_fmr_pool *pool = pool_ptr;
37183
37184 do {
37185- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
37186+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
37187 ib_fmr_batch_release(pool);
37188
37189- atomic_inc(&pool->flush_ser);
37190+ atomic_inc_unchecked(&pool->flush_ser);
37191 wake_up_interruptible(&pool->force_wait);
37192
37193 if (pool->flush_function)
37194@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
37195 }
37196
37197 set_current_state(TASK_INTERRUPTIBLE);
37198- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
37199+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
37200 !kthread_should_stop())
37201 schedule();
37202 __set_current_state(TASK_RUNNING);
37203@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
37204 pool->dirty_watermark = params->dirty_watermark;
37205 pool->dirty_len = 0;
37206 spin_lock_init(&pool->pool_lock);
37207- atomic_set(&pool->req_ser, 0);
37208- atomic_set(&pool->flush_ser, 0);
37209+ atomic_set_unchecked(&pool->req_ser, 0);
37210+ atomic_set_unchecked(&pool->flush_ser, 0);
37211 init_waitqueue_head(&pool->force_wait);
37212
37213 pool->thread = kthread_run(ib_fmr_cleanup_thread,
37214@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
37215 }
37216 spin_unlock_irq(&pool->pool_lock);
37217
37218- serial = atomic_inc_return(&pool->req_ser);
37219+ serial = atomic_inc_return_unchecked(&pool->req_ser);
37220 wake_up_process(pool->thread);
37221
37222 if (wait_event_interruptible(pool->force_wait,
37223- atomic_read(&pool->flush_ser) - serial >= 0))
37224+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
37225 return -EINTR;
37226
37227 return 0;
37228@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
37229 } else {
37230 list_add_tail(&fmr->list, &pool->dirty_list);
37231 if (++pool->dirty_len >= pool->dirty_watermark) {
37232- atomic_inc(&pool->req_ser);
37233+ atomic_inc_unchecked(&pool->req_ser);
37234 wake_up_process(pool->thread);
37235 }
37236 }
37237diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
37238index 4cb8eb2..146bf60 100644
37239--- a/drivers/infiniband/hw/cxgb4/mem.c
37240+++ b/drivers/infiniband/hw/cxgb4/mem.c
37241@@ -249,7 +249,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
37242 int err;
37243 struct fw_ri_tpte tpt;
37244 u32 stag_idx;
37245- static atomic_t key;
37246+ static atomic_unchecked_t key;
37247
37248 if (c4iw_fatal_error(rdev))
37249 return -EIO;
37250@@ -266,7 +266,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
37251 if (rdev->stats.stag.cur > rdev->stats.stag.max)
37252 rdev->stats.stag.max = rdev->stats.stag.cur;
37253 mutex_unlock(&rdev->stats.lock);
37254- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
37255+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
37256 }
37257 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
37258 __func__, stag_state, type, pdid, stag_idx);
37259diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
37260index 79b3dbc..96e5fcc 100644
37261--- a/drivers/infiniband/hw/ipath/ipath_rc.c
37262+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
37263@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
37264 struct ib_atomic_eth *ateth;
37265 struct ipath_ack_entry *e;
37266 u64 vaddr;
37267- atomic64_t *maddr;
37268+ atomic64_unchecked_t *maddr;
37269 u64 sdata;
37270 u32 rkey;
37271 u8 next;
37272@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
37273 IB_ACCESS_REMOTE_ATOMIC)))
37274 goto nack_acc_unlck;
37275 /* Perform atomic OP and save result. */
37276- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
37277+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
37278 sdata = be64_to_cpu(ateth->swap_data);
37279 e = &qp->s_ack_queue[qp->r_head_ack_queue];
37280 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
37281- (u64) atomic64_add_return(sdata, maddr) - sdata :
37282+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
37283 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
37284 be64_to_cpu(ateth->compare_data),
37285 sdata);
37286diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
37287index 1f95bba..9530f87 100644
37288--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
37289+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
37290@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
37291 unsigned long flags;
37292 struct ib_wc wc;
37293 u64 sdata;
37294- atomic64_t *maddr;
37295+ atomic64_unchecked_t *maddr;
37296 enum ib_wc_status send_status;
37297
37298 /*
37299@@ -382,11 +382,11 @@ again:
37300 IB_ACCESS_REMOTE_ATOMIC)))
37301 goto acc_err;
37302 /* Perform atomic OP and save result. */
37303- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
37304+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
37305 sdata = wqe->wr.wr.atomic.compare_add;
37306 *(u64 *) sqp->s_sge.sge.vaddr =
37307 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
37308- (u64) atomic64_add_return(sdata, maddr) - sdata :
37309+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
37310 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
37311 sdata, wqe->wr.wr.atomic.swap);
37312 goto send_comp;
37313diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
37314index 9d3e5c1..d9afe4a 100644
37315--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
37316+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
37317@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
37318 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
37319 }
37320
37321-int mthca_QUERY_FW(struct mthca_dev *dev)
37322+int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
37323 {
37324 struct mthca_mailbox *mailbox;
37325 u32 *outbox;
37326diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
37327index ed9a989..e0c5871 100644
37328--- a/drivers/infiniband/hw/mthca/mthca_mr.c
37329+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
37330@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
37331 return key;
37332 }
37333
37334-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
37335+int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
37336 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
37337 {
37338 struct mthca_mailbox *mailbox;
37339diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
37340index 4291410..d2ab1fb 100644
37341--- a/drivers/infiniband/hw/nes/nes.c
37342+++ b/drivers/infiniband/hw/nes/nes.c
37343@@ -98,7 +98,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
37344 LIST_HEAD(nes_adapter_list);
37345 static LIST_HEAD(nes_dev_list);
37346
37347-atomic_t qps_destroyed;
37348+atomic_unchecked_t qps_destroyed;
37349
37350 static unsigned int ee_flsh_adapter;
37351 static unsigned int sysfs_nonidx_addr;
37352@@ -269,7 +269,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
37353 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
37354 struct nes_adapter *nesadapter = nesdev->nesadapter;
37355
37356- atomic_inc(&qps_destroyed);
37357+ atomic_inc_unchecked(&qps_destroyed);
37358
37359 /* Free the control structures */
37360
37361diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
37362index 33cc589..3bd6538 100644
37363--- a/drivers/infiniband/hw/nes/nes.h
37364+++ b/drivers/infiniband/hw/nes/nes.h
37365@@ -177,17 +177,17 @@ extern unsigned int nes_debug_level;
37366 extern unsigned int wqm_quanta;
37367 extern struct list_head nes_adapter_list;
37368
37369-extern atomic_t cm_connects;
37370-extern atomic_t cm_accepts;
37371-extern atomic_t cm_disconnects;
37372-extern atomic_t cm_closes;
37373-extern atomic_t cm_connecteds;
37374-extern atomic_t cm_connect_reqs;
37375-extern atomic_t cm_rejects;
37376-extern atomic_t mod_qp_timouts;
37377-extern atomic_t qps_created;
37378-extern atomic_t qps_destroyed;
37379-extern atomic_t sw_qps_destroyed;
37380+extern atomic_unchecked_t cm_connects;
37381+extern atomic_unchecked_t cm_accepts;
37382+extern atomic_unchecked_t cm_disconnects;
37383+extern atomic_unchecked_t cm_closes;
37384+extern atomic_unchecked_t cm_connecteds;
37385+extern atomic_unchecked_t cm_connect_reqs;
37386+extern atomic_unchecked_t cm_rejects;
37387+extern atomic_unchecked_t mod_qp_timouts;
37388+extern atomic_unchecked_t qps_created;
37389+extern atomic_unchecked_t qps_destroyed;
37390+extern atomic_unchecked_t sw_qps_destroyed;
37391 extern u32 mh_detected;
37392 extern u32 mh_pauses_sent;
37393 extern u32 cm_packets_sent;
37394@@ -196,16 +196,16 @@ extern u32 cm_packets_created;
37395 extern u32 cm_packets_received;
37396 extern u32 cm_packets_dropped;
37397 extern u32 cm_packets_retrans;
37398-extern atomic_t cm_listens_created;
37399-extern atomic_t cm_listens_destroyed;
37400+extern atomic_unchecked_t cm_listens_created;
37401+extern atomic_unchecked_t cm_listens_destroyed;
37402 extern u32 cm_backlog_drops;
37403-extern atomic_t cm_loopbacks;
37404-extern atomic_t cm_nodes_created;
37405-extern atomic_t cm_nodes_destroyed;
37406-extern atomic_t cm_accel_dropped_pkts;
37407-extern atomic_t cm_resets_recvd;
37408-extern atomic_t pau_qps_created;
37409-extern atomic_t pau_qps_destroyed;
37410+extern atomic_unchecked_t cm_loopbacks;
37411+extern atomic_unchecked_t cm_nodes_created;
37412+extern atomic_unchecked_t cm_nodes_destroyed;
37413+extern atomic_unchecked_t cm_accel_dropped_pkts;
37414+extern atomic_unchecked_t cm_resets_recvd;
37415+extern atomic_unchecked_t pau_qps_created;
37416+extern atomic_unchecked_t pau_qps_destroyed;
37417
37418 extern u32 int_mod_timer_init;
37419 extern u32 int_mod_cq_depth_256;
37420diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
37421index 24b9f1a..00fd004 100644
37422--- a/drivers/infiniband/hw/nes/nes_cm.c
37423+++ b/drivers/infiniband/hw/nes/nes_cm.c
37424@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
37425 u32 cm_packets_retrans;
37426 u32 cm_packets_created;
37427 u32 cm_packets_received;
37428-atomic_t cm_listens_created;
37429-atomic_t cm_listens_destroyed;
37430+atomic_unchecked_t cm_listens_created;
37431+atomic_unchecked_t cm_listens_destroyed;
37432 u32 cm_backlog_drops;
37433-atomic_t cm_loopbacks;
37434-atomic_t cm_nodes_created;
37435-atomic_t cm_nodes_destroyed;
37436-atomic_t cm_accel_dropped_pkts;
37437-atomic_t cm_resets_recvd;
37438+atomic_unchecked_t cm_loopbacks;
37439+atomic_unchecked_t cm_nodes_created;
37440+atomic_unchecked_t cm_nodes_destroyed;
37441+atomic_unchecked_t cm_accel_dropped_pkts;
37442+atomic_unchecked_t cm_resets_recvd;
37443
37444 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
37445 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
37446@@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
37447
37448 static struct nes_cm_core *g_cm_core;
37449
37450-atomic_t cm_connects;
37451-atomic_t cm_accepts;
37452-atomic_t cm_disconnects;
37453-atomic_t cm_closes;
37454-atomic_t cm_connecteds;
37455-atomic_t cm_connect_reqs;
37456-atomic_t cm_rejects;
37457+atomic_unchecked_t cm_connects;
37458+atomic_unchecked_t cm_accepts;
37459+atomic_unchecked_t cm_disconnects;
37460+atomic_unchecked_t cm_closes;
37461+atomic_unchecked_t cm_connecteds;
37462+atomic_unchecked_t cm_connect_reqs;
37463+atomic_unchecked_t cm_rejects;
37464
37465 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
37466 {
37467@@ -1272,7 +1272,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
37468 kfree(listener);
37469 listener = NULL;
37470 ret = 0;
37471- atomic_inc(&cm_listens_destroyed);
37472+ atomic_inc_unchecked(&cm_listens_destroyed);
37473 } else {
37474 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
37475 }
37476@@ -1466,7 +1466,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
37477 cm_node->rem_mac);
37478
37479 add_hte_node(cm_core, cm_node);
37480- atomic_inc(&cm_nodes_created);
37481+ atomic_inc_unchecked(&cm_nodes_created);
37482
37483 return cm_node;
37484 }
37485@@ -1524,7 +1524,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
37486 }
37487
37488 atomic_dec(&cm_core->node_cnt);
37489- atomic_inc(&cm_nodes_destroyed);
37490+ atomic_inc_unchecked(&cm_nodes_destroyed);
37491 nesqp = cm_node->nesqp;
37492 if (nesqp) {
37493 nesqp->cm_node = NULL;
37494@@ -1588,7 +1588,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
37495
37496 static void drop_packet(struct sk_buff *skb)
37497 {
37498- atomic_inc(&cm_accel_dropped_pkts);
37499+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
37500 dev_kfree_skb_any(skb);
37501 }
37502
37503@@ -1651,7 +1651,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
37504 {
37505
37506 int reset = 0; /* whether to send reset in case of err.. */
37507- atomic_inc(&cm_resets_recvd);
37508+ atomic_inc_unchecked(&cm_resets_recvd);
37509 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
37510 " refcnt=%d\n", cm_node, cm_node->state,
37511 atomic_read(&cm_node->ref_count));
37512@@ -2292,7 +2292,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
37513 rem_ref_cm_node(cm_node->cm_core, cm_node);
37514 return NULL;
37515 }
37516- atomic_inc(&cm_loopbacks);
37517+ atomic_inc_unchecked(&cm_loopbacks);
37518 loopbackremotenode->loopbackpartner = cm_node;
37519 loopbackremotenode->tcp_cntxt.rcv_wscale =
37520 NES_CM_DEFAULT_RCV_WND_SCALE;
37521@@ -2567,7 +2567,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
37522 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
37523 else {
37524 rem_ref_cm_node(cm_core, cm_node);
37525- atomic_inc(&cm_accel_dropped_pkts);
37526+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
37527 dev_kfree_skb_any(skb);
37528 }
37529 break;
37530@@ -2875,7 +2875,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
37531
37532 if ((cm_id) && (cm_id->event_handler)) {
37533 if (issue_disconn) {
37534- atomic_inc(&cm_disconnects);
37535+ atomic_inc_unchecked(&cm_disconnects);
37536 cm_event.event = IW_CM_EVENT_DISCONNECT;
37537 cm_event.status = disconn_status;
37538 cm_event.local_addr = cm_id->local_addr;
37539@@ -2897,7 +2897,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
37540 }
37541
37542 if (issue_close) {
37543- atomic_inc(&cm_closes);
37544+ atomic_inc_unchecked(&cm_closes);
37545 nes_disconnect(nesqp, 1);
37546
37547 cm_id->provider_data = nesqp;
37548@@ -3033,7 +3033,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
37549
37550 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
37551 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
37552- atomic_inc(&cm_accepts);
37553+ atomic_inc_unchecked(&cm_accepts);
37554
37555 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
37556 netdev_refcnt_read(nesvnic->netdev));
37557@@ -3228,7 +3228,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
37558 struct nes_cm_core *cm_core;
37559 u8 *start_buff;
37560
37561- atomic_inc(&cm_rejects);
37562+ atomic_inc_unchecked(&cm_rejects);
37563 cm_node = (struct nes_cm_node *)cm_id->provider_data;
37564 loopback = cm_node->loopbackpartner;
37565 cm_core = cm_node->cm_core;
37566@@ -3288,7 +3288,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
37567 ntohl(cm_id->local_addr.sin_addr.s_addr),
37568 ntohs(cm_id->local_addr.sin_port));
37569
37570- atomic_inc(&cm_connects);
37571+ atomic_inc_unchecked(&cm_connects);
37572 nesqp->active_conn = 1;
37573
37574 /* cache the cm_id in the qp */
37575@@ -3398,7 +3398,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
37576 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
37577 return err;
37578 }
37579- atomic_inc(&cm_listens_created);
37580+ atomic_inc_unchecked(&cm_listens_created);
37581 }
37582
37583 cm_id->add_ref(cm_id);
37584@@ -3499,7 +3499,7 @@ static void cm_event_connected(struct nes_cm_event *event)
37585
37586 if (nesqp->destroyed)
37587 return;
37588- atomic_inc(&cm_connecteds);
37589+ atomic_inc_unchecked(&cm_connecteds);
37590 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
37591 " local port 0x%04X. jiffies = %lu.\n",
37592 nesqp->hwqp.qp_id,
37593@@ -3679,7 +3679,7 @@ static void cm_event_reset(struct nes_cm_event *event)
37594
37595 cm_id->add_ref(cm_id);
37596 ret = cm_id->event_handler(cm_id, &cm_event);
37597- atomic_inc(&cm_closes);
37598+ atomic_inc_unchecked(&cm_closes);
37599 cm_event.event = IW_CM_EVENT_CLOSE;
37600 cm_event.status = 0;
37601 cm_event.provider_data = cm_id->provider_data;
37602@@ -3715,7 +3715,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
37603 return;
37604 cm_id = cm_node->cm_id;
37605
37606- atomic_inc(&cm_connect_reqs);
37607+ atomic_inc_unchecked(&cm_connect_reqs);
37608 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
37609 cm_node, cm_id, jiffies);
37610
37611@@ -3755,7 +3755,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
37612 return;
37613 cm_id = cm_node->cm_id;
37614
37615- atomic_inc(&cm_connect_reqs);
37616+ atomic_inc_unchecked(&cm_connect_reqs);
37617 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
37618 cm_node, cm_id, jiffies);
37619
37620diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
37621index 4166452..fc952c3 100644
37622--- a/drivers/infiniband/hw/nes/nes_mgt.c
37623+++ b/drivers/infiniband/hw/nes/nes_mgt.c
37624@@ -40,8 +40,8 @@
37625 #include "nes.h"
37626 #include "nes_mgt.h"
37627
37628-atomic_t pau_qps_created;
37629-atomic_t pau_qps_destroyed;
37630+atomic_unchecked_t pau_qps_created;
37631+atomic_unchecked_t pau_qps_destroyed;
37632
37633 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
37634 {
37635@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
37636 {
37637 struct sk_buff *skb;
37638 unsigned long flags;
37639- atomic_inc(&pau_qps_destroyed);
37640+ atomic_inc_unchecked(&pau_qps_destroyed);
37641
37642 /* Free packets that have not yet been forwarded */
37643 /* Lock is acquired by skb_dequeue when removing the skb */
37644@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
37645 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
37646 skb_queue_head_init(&nesqp->pau_list);
37647 spin_lock_init(&nesqp->pau_lock);
37648- atomic_inc(&pau_qps_created);
37649+ atomic_inc_unchecked(&pau_qps_created);
37650 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
37651 }
37652
37653diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
37654index 49eb511..a774366 100644
37655--- a/drivers/infiniband/hw/nes/nes_nic.c
37656+++ b/drivers/infiniband/hw/nes/nes_nic.c
37657@@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
37658 target_stat_values[++index] = mh_detected;
37659 target_stat_values[++index] = mh_pauses_sent;
37660 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
37661- target_stat_values[++index] = atomic_read(&cm_connects);
37662- target_stat_values[++index] = atomic_read(&cm_accepts);
37663- target_stat_values[++index] = atomic_read(&cm_disconnects);
37664- target_stat_values[++index] = atomic_read(&cm_connecteds);
37665- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
37666- target_stat_values[++index] = atomic_read(&cm_rejects);
37667- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
37668- target_stat_values[++index] = atomic_read(&qps_created);
37669- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
37670- target_stat_values[++index] = atomic_read(&qps_destroyed);
37671- target_stat_values[++index] = atomic_read(&cm_closes);
37672+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
37673+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
37674+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
37675+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
37676+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
37677+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
37678+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
37679+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
37680+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
37681+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
37682+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
37683 target_stat_values[++index] = cm_packets_sent;
37684 target_stat_values[++index] = cm_packets_bounced;
37685 target_stat_values[++index] = cm_packets_created;
37686 target_stat_values[++index] = cm_packets_received;
37687 target_stat_values[++index] = cm_packets_dropped;
37688 target_stat_values[++index] = cm_packets_retrans;
37689- target_stat_values[++index] = atomic_read(&cm_listens_created);
37690- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
37691+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
37692+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
37693 target_stat_values[++index] = cm_backlog_drops;
37694- target_stat_values[++index] = atomic_read(&cm_loopbacks);
37695- target_stat_values[++index] = atomic_read(&cm_nodes_created);
37696- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
37697- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
37698- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
37699+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
37700+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
37701+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
37702+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
37703+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
37704 target_stat_values[++index] = nesadapter->free_4kpbl;
37705 target_stat_values[++index] = nesadapter->free_256pbl;
37706 target_stat_values[++index] = int_mod_timer_init;
37707 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
37708 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
37709 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
37710- target_stat_values[++index] = atomic_read(&pau_qps_created);
37711- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
37712+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
37713+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
37714 }
37715
37716 /**
37717diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
37718index 8f67fe2..8960859 100644
37719--- a/drivers/infiniband/hw/nes/nes_verbs.c
37720+++ b/drivers/infiniband/hw/nes/nes_verbs.c
37721@@ -46,9 +46,9 @@
37722
37723 #include <rdma/ib_umem.h>
37724
37725-atomic_t mod_qp_timouts;
37726-atomic_t qps_created;
37727-atomic_t sw_qps_destroyed;
37728+atomic_unchecked_t mod_qp_timouts;
37729+atomic_unchecked_t qps_created;
37730+atomic_unchecked_t sw_qps_destroyed;
37731
37732 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
37733
37734@@ -1134,7 +1134,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
37735 if (init_attr->create_flags)
37736 return ERR_PTR(-EINVAL);
37737
37738- atomic_inc(&qps_created);
37739+ atomic_inc_unchecked(&qps_created);
37740 switch (init_attr->qp_type) {
37741 case IB_QPT_RC:
37742 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
37743@@ -1465,7 +1465,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
37744 struct iw_cm_event cm_event;
37745 int ret = 0;
37746
37747- atomic_inc(&sw_qps_destroyed);
37748+ atomic_inc_unchecked(&sw_qps_destroyed);
37749 nesqp->destroyed = 1;
37750
37751 /* Blow away the connection if it exists. */
37752diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
37753index 4d11575..3e890e5 100644
37754--- a/drivers/infiniband/hw/qib/qib.h
37755+++ b/drivers/infiniband/hw/qib/qib.h
37756@@ -51,6 +51,7 @@
37757 #include <linux/completion.h>
37758 #include <linux/kref.h>
37759 #include <linux/sched.h>
37760+#include <linux/slab.h>
37761
37762 #include "qib_common.h"
37763 #include "qib_verbs.h"
37764diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
37765index da739d9..da1c7f4 100644
37766--- a/drivers/input/gameport/gameport.c
37767+++ b/drivers/input/gameport/gameport.c
37768@@ -487,14 +487,14 @@ EXPORT_SYMBOL(gameport_set_phys);
37769 */
37770 static void gameport_init_port(struct gameport *gameport)
37771 {
37772- static atomic_t gameport_no = ATOMIC_INIT(0);
37773+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
37774
37775 __module_get(THIS_MODULE);
37776
37777 mutex_init(&gameport->drv_mutex);
37778 device_initialize(&gameport->dev);
37779 dev_set_name(&gameport->dev, "gameport%lu",
37780- (unsigned long)atomic_inc_return(&gameport_no) - 1);
37781+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
37782 gameport->dev.bus = &gameport_bus;
37783 gameport->dev.release = gameport_release_port;
37784 if (gameport->parent)
37785diff --git a/drivers/input/input.c b/drivers/input/input.c
37786index c044699..174d71a 100644
37787--- a/drivers/input/input.c
37788+++ b/drivers/input/input.c
37789@@ -2019,7 +2019,7 @@ static void devm_input_device_unregister(struct device *dev, void *res)
37790 */
37791 int input_register_device(struct input_dev *dev)
37792 {
37793- static atomic_t input_no = ATOMIC_INIT(0);
37794+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
37795 struct input_devres *devres = NULL;
37796 struct input_handler *handler;
37797 unsigned int packet_size;
37798@@ -2074,7 +2074,7 @@ int input_register_device(struct input_dev *dev)
37799 dev->setkeycode = input_default_setkeycode;
37800
37801 dev_set_name(&dev->dev, "input%ld",
37802- (unsigned long) atomic_inc_return(&input_no) - 1);
37803+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
37804
37805 error = device_add(&dev->dev);
37806 if (error)
37807diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
37808index 04c69af..5f92d00 100644
37809--- a/drivers/input/joystick/sidewinder.c
37810+++ b/drivers/input/joystick/sidewinder.c
37811@@ -30,6 +30,7 @@
37812 #include <linux/kernel.h>
37813 #include <linux/module.h>
37814 #include <linux/slab.h>
37815+#include <linux/sched.h>
37816 #include <linux/init.h>
37817 #include <linux/input.h>
37818 #include <linux/gameport.h>
37819diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
37820index fa061d4..4a6957c 100644
37821--- a/drivers/input/joystick/xpad.c
37822+++ b/drivers/input/joystick/xpad.c
37823@@ -735,7 +735,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
37824
37825 static int xpad_led_probe(struct usb_xpad *xpad)
37826 {
37827- static atomic_t led_seq = ATOMIC_INIT(0);
37828+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
37829 long led_no;
37830 struct xpad_led *led;
37831 struct led_classdev *led_cdev;
37832@@ -748,7 +748,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
37833 if (!led)
37834 return -ENOMEM;
37835
37836- led_no = (long)atomic_inc_return(&led_seq) - 1;
37837+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
37838
37839 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
37840 led->xpad = xpad;
37841diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
37842index 2f0b39d..7370f13 100644
37843--- a/drivers/input/mouse/psmouse.h
37844+++ b/drivers/input/mouse/psmouse.h
37845@@ -116,7 +116,7 @@ struct psmouse_attribute {
37846 ssize_t (*set)(struct psmouse *psmouse, void *data,
37847 const char *buf, size_t count);
37848 bool protect;
37849-};
37850+} __do_const;
37851 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
37852
37853 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
37854diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
37855index 4c842c3..590b0bf 100644
37856--- a/drivers/input/mousedev.c
37857+++ b/drivers/input/mousedev.c
37858@@ -738,7 +738,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
37859
37860 spin_unlock_irq(&client->packet_lock);
37861
37862- if (copy_to_user(buffer, data, count))
37863+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
37864 return -EFAULT;
37865
37866 return count;
37867diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
37868index 25fc597..558bf3b3 100644
37869--- a/drivers/input/serio/serio.c
37870+++ b/drivers/input/serio/serio.c
37871@@ -496,7 +496,7 @@ static void serio_release_port(struct device *dev)
37872 */
37873 static void serio_init_port(struct serio *serio)
37874 {
37875- static atomic_t serio_no = ATOMIC_INIT(0);
37876+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
37877
37878 __module_get(THIS_MODULE);
37879
37880@@ -507,7 +507,7 @@ static void serio_init_port(struct serio *serio)
37881 mutex_init(&serio->drv_mutex);
37882 device_initialize(&serio->dev);
37883 dev_set_name(&serio->dev, "serio%ld",
37884- (long)atomic_inc_return(&serio_no) - 1);
37885+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
37886 serio->dev.bus = &serio_bus;
37887 serio->dev.release = serio_release_port;
37888 serio->dev.groups = serio_device_attr_groups;
37889diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
37890index d8f98b1..f62a640 100644
37891--- a/drivers/iommu/iommu.c
37892+++ b/drivers/iommu/iommu.c
37893@@ -583,7 +583,7 @@ static struct notifier_block iommu_bus_nb = {
37894 static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
37895 {
37896 bus_register_notifier(bus, &iommu_bus_nb);
37897- bus_for_each_dev(bus, NULL, ops, add_iommu_group);
37898+ bus_for_each_dev(bus, NULL, (void *)ops, add_iommu_group);
37899 }
37900
37901 /**
37902diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
37903index dcfea4e..f4226b2 100644
37904--- a/drivers/iommu/irq_remapping.c
37905+++ b/drivers/iommu/irq_remapping.c
37906@@ -354,7 +354,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
37907 void panic_if_irq_remap(const char *msg)
37908 {
37909 if (irq_remapping_enabled)
37910- panic(msg);
37911+ panic("%s", msg);
37912 }
37913
37914 static void ir_ack_apic_edge(struct irq_data *data)
37915@@ -375,10 +375,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
37916
37917 void irq_remap_modify_chip_defaults(struct irq_chip *chip)
37918 {
37919- chip->irq_print_chip = ir_print_prefix;
37920- chip->irq_ack = ir_ack_apic_edge;
37921- chip->irq_eoi = ir_ack_apic_level;
37922- chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
37923+ pax_open_kernel();
37924+ *(void **)&chip->irq_print_chip = ir_print_prefix;
37925+ *(void **)&chip->irq_ack = ir_ack_apic_edge;
37926+ *(void **)&chip->irq_eoi = ir_ack_apic_level;
37927+ *(void **)&chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
37928+ pax_close_kernel();
37929 }
37930
37931 bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
37932diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
37933index 19ceaa6..3625818 100644
37934--- a/drivers/irqchip/irq-gic.c
37935+++ b/drivers/irqchip/irq-gic.c
37936@@ -84,7 +84,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
37937 * Supported arch specific GIC irq extension.
37938 * Default make them NULL.
37939 */
37940-struct irq_chip gic_arch_extn = {
37941+irq_chip_no_const gic_arch_extn = {
37942 .irq_eoi = NULL,
37943 .irq_mask = NULL,
37944 .irq_unmask = NULL,
37945@@ -333,7 +333,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
37946 chained_irq_exit(chip, desc);
37947 }
37948
37949-static struct irq_chip gic_chip = {
37950+static irq_chip_no_const gic_chip __read_only = {
37951 .name = "GIC",
37952 .irq_mask = gic_mask_irq,
37953 .irq_unmask = gic_unmask_irq,
37954diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
37955index ac6f72b..81150f2 100644
37956--- a/drivers/isdn/capi/capi.c
37957+++ b/drivers/isdn/capi/capi.c
37958@@ -81,8 +81,8 @@ struct capiminor {
37959
37960 struct capi20_appl *ap;
37961 u32 ncci;
37962- atomic_t datahandle;
37963- atomic_t msgid;
37964+ atomic_unchecked_t datahandle;
37965+ atomic_unchecked_t msgid;
37966
37967 struct tty_port port;
37968 int ttyinstop;
37969@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
37970 capimsg_setu16(s, 2, mp->ap->applid);
37971 capimsg_setu8 (s, 4, CAPI_DATA_B3);
37972 capimsg_setu8 (s, 5, CAPI_RESP);
37973- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
37974+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
37975 capimsg_setu32(s, 8, mp->ncci);
37976 capimsg_setu16(s, 12, datahandle);
37977 }
37978@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
37979 mp->outbytes -= len;
37980 spin_unlock_bh(&mp->outlock);
37981
37982- datahandle = atomic_inc_return(&mp->datahandle);
37983+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
37984 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
37985 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
37986 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
37987 capimsg_setu16(skb->data, 2, mp->ap->applid);
37988 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
37989 capimsg_setu8 (skb->data, 5, CAPI_REQ);
37990- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
37991+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
37992 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
37993 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
37994 capimsg_setu16(skb->data, 16, len); /* Data length */
37995diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
37996index 600c79b..3752bab 100644
37997--- a/drivers/isdn/gigaset/interface.c
37998+++ b/drivers/isdn/gigaset/interface.c
37999@@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
38000 }
38001 tty->driver_data = cs;
38002
38003- ++cs->port.count;
38004+ atomic_inc(&cs->port.count);
38005
38006- if (cs->port.count == 1) {
38007+ if (atomic_read(&cs->port.count) == 1) {
38008 tty_port_tty_set(&cs->port, tty);
38009 cs->port.low_latency = 1;
38010 }
38011@@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
38012
38013 if (!cs->connected)
38014 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
38015- else if (!cs->port.count)
38016+ else if (!atomic_read(&cs->port.count))
38017 dev_warn(cs->dev, "%s: device not opened\n", __func__);
38018- else if (!--cs->port.count)
38019+ else if (!atomic_dec_return(&cs->port.count))
38020 tty_port_tty_set(&cs->port, NULL);
38021
38022 mutex_unlock(&cs->mutex);
38023diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
38024index 4d9b195..455075c 100644
38025--- a/drivers/isdn/hardware/avm/b1.c
38026+++ b/drivers/isdn/hardware/avm/b1.c
38027@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
38028 }
38029 if (left) {
38030 if (t4file->user) {
38031- if (copy_from_user(buf, dp, left))
38032+ if (left > sizeof buf || copy_from_user(buf, dp, left))
38033 return -EFAULT;
38034 } else {
38035 memcpy(buf, dp, left);
38036@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
38037 }
38038 if (left) {
38039 if (config->user) {
38040- if (copy_from_user(buf, dp, left))
38041+ if (left > sizeof buf || copy_from_user(buf, dp, left))
38042 return -EFAULT;
38043 } else {
38044 memcpy(buf, dp, left);
38045diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
38046index 3c5f249..5fac4d0 100644
38047--- a/drivers/isdn/i4l/isdn_tty.c
38048+++ b/drivers/isdn/i4l/isdn_tty.c
38049@@ -1508,9 +1508,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
38050
38051 #ifdef ISDN_DEBUG_MODEM_OPEN
38052 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
38053- port->count);
38054+ atomic_read(&port->count));
38055 #endif
38056- port->count++;
38057+ atomic_inc(&port->count);
38058 port->tty = tty;
38059 /*
38060 * Start up serial port
38061@@ -1554,7 +1554,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
38062 #endif
38063 return;
38064 }
38065- if ((tty->count == 1) && (port->count != 1)) {
38066+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
38067 /*
38068 * Uh, oh. tty->count is 1, which means that the tty
38069 * structure will be freed. Info->count should always
38070@@ -1563,15 +1563,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
38071 * serial port won't be shutdown.
38072 */
38073 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
38074- "info->count is %d\n", port->count);
38075- port->count = 1;
38076+ "info->count is %d\n", atomic_read(&port->count));
38077+ atomic_set(&port->count, 1);
38078 }
38079- if (--port->count < 0) {
38080+ if (atomic_dec_return(&port->count) < 0) {
38081 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
38082- info->line, port->count);
38083- port->count = 0;
38084+ info->line, atomic_read(&port->count));
38085+ atomic_set(&port->count, 0);
38086 }
38087- if (port->count) {
38088+ if (atomic_read(&port->count)) {
38089 #ifdef ISDN_DEBUG_MODEM_OPEN
38090 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
38091 #endif
38092@@ -1625,7 +1625,7 @@ isdn_tty_hangup(struct tty_struct *tty)
38093 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
38094 return;
38095 isdn_tty_shutdown(info);
38096- port->count = 0;
38097+ atomic_set(&port->count, 0);
38098 port->flags &= ~ASYNC_NORMAL_ACTIVE;
38099 port->tty = NULL;
38100 wake_up_interruptible(&port->open_wait);
38101@@ -1970,7 +1970,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
38102 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
38103 modem_info *info = &dev->mdm.info[i];
38104
38105- if (info->port.count == 0)
38106+ if (atomic_read(&info->port.count) == 0)
38107 continue;
38108 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
38109 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
38110diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
38111index e74df7c..03a03ba 100644
38112--- a/drivers/isdn/icn/icn.c
38113+++ b/drivers/isdn/icn/icn.c
38114@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
38115 if (count > len)
38116 count = len;
38117 if (user) {
38118- if (copy_from_user(msg, buf, count))
38119+ if (count > sizeof msg || copy_from_user(msg, buf, count))
38120 return -EFAULT;
38121 } else
38122 memcpy(msg, buf, count);
38123diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
38124index 6a8405d..0bd1c7e 100644
38125--- a/drivers/leds/leds-clevo-mail.c
38126+++ b/drivers/leds/leds-clevo-mail.c
38127@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
38128 * detected as working, but in reality it is not) as low as
38129 * possible.
38130 */
38131-static struct dmi_system_id __initdata clevo_mail_led_dmi_table[] = {
38132+static const struct dmi_system_id __initconst clevo_mail_led_dmi_table[] = {
38133 {
38134 .callback = clevo_mail_led_dmi_callback,
38135 .ident = "Clevo D410J",
38136diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
38137index 64e204e..c6bf189 100644
38138--- a/drivers/leds/leds-ss4200.c
38139+++ b/drivers/leds/leds-ss4200.c
38140@@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
38141 * detected as working, but in reality it is not) as low as
38142 * possible.
38143 */
38144-static struct dmi_system_id __initdata nas_led_whitelist[] = {
38145+static const struct dmi_system_id __initconst nas_led_whitelist[] = {
38146 {
38147 .callback = ss4200_led_dmi_callback,
38148 .ident = "Intel SS4200-E",
38149diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
38150index 0bf1e4e..b4bf44e 100644
38151--- a/drivers/lguest/core.c
38152+++ b/drivers/lguest/core.c
38153@@ -97,9 +97,17 @@ static __init int map_switcher(void)
38154 * The end address needs +1 because __get_vm_area allocates an
38155 * extra guard page, so we need space for that.
38156 */
38157+
38158+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
38159+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
38160+ VM_ALLOC | VM_KERNEXEC, switcher_addr, switcher_addr
38161+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
38162+#else
38163 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
38164 VM_ALLOC, switcher_addr, switcher_addr
38165 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
38166+#endif
38167+
38168 if (!switcher_vma) {
38169 err = -ENOMEM;
38170 printk("lguest: could not map switcher pages high\n");
38171@@ -124,7 +132,7 @@ static __init int map_switcher(void)
38172 * Now the Switcher is mapped at the right address, we can't fail!
38173 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
38174 */
38175- memcpy(switcher_vma->addr, start_switcher_text,
38176+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
38177 end_switcher_text - start_switcher_text);
38178
38179 printk(KERN_INFO "lguest: mapped switcher at %p\n",
38180diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
38181index 5b9ac32..2ef4f26 100644
38182--- a/drivers/lguest/page_tables.c
38183+++ b/drivers/lguest/page_tables.c
38184@@ -559,7 +559,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
38185 /*:*/
38186
38187 #ifdef CONFIG_X86_PAE
38188-static void release_pmd(pmd_t *spmd)
38189+static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
38190 {
38191 /* If the entry's not present, there's nothing to release. */
38192 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
38193diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
38194index f0a3347..f6608b2 100644
38195--- a/drivers/lguest/x86/core.c
38196+++ b/drivers/lguest/x86/core.c
38197@@ -59,7 +59,7 @@ static struct {
38198 /* Offset from where switcher.S was compiled to where we've copied it */
38199 static unsigned long switcher_offset(void)
38200 {
38201- return switcher_addr - (unsigned long)start_switcher_text;
38202+ return switcher_addr - (unsigned long)ktla_ktva(start_switcher_text);
38203 }
38204
38205 /* This cpu's struct lguest_pages (after the Switcher text page) */
38206@@ -99,7 +99,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
38207 * These copies are pretty cheap, so we do them unconditionally: */
38208 /* Save the current Host top-level page directory.
38209 */
38210+
38211+#ifdef CONFIG_PAX_PER_CPU_PGD
38212+ pages->state.host_cr3 = read_cr3();
38213+#else
38214 pages->state.host_cr3 = __pa(current->mm->pgd);
38215+#endif
38216+
38217 /*
38218 * Set up the Guest's page tables to see this CPU's pages (and no
38219 * other CPU's pages).
38220@@ -475,7 +481,7 @@ void __init lguest_arch_host_init(void)
38221 * compiled-in switcher code and the high-mapped copy we just made.
38222 */
38223 for (i = 0; i < IDT_ENTRIES; i++)
38224- default_idt_entries[i] += switcher_offset();
38225+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
38226
38227 /*
38228 * Set up the Switcher's per-cpu areas.
38229@@ -558,7 +564,7 @@ void __init lguest_arch_host_init(void)
38230 * it will be undisturbed when we switch. To change %cs and jump we
38231 * need this structure to feed to Intel's "lcall" instruction.
38232 */
38233- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
38234+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
38235 lguest_entry.segment = LGUEST_CS;
38236
38237 /*
38238diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
38239index 40634b0..4f5855e 100644
38240--- a/drivers/lguest/x86/switcher_32.S
38241+++ b/drivers/lguest/x86/switcher_32.S
38242@@ -87,6 +87,7 @@
38243 #include <asm/page.h>
38244 #include <asm/segment.h>
38245 #include <asm/lguest.h>
38246+#include <asm/processor-flags.h>
38247
38248 // We mark the start of the code to copy
38249 // It's placed in .text tho it's never run here
38250@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
38251 // Changes type when we load it: damn Intel!
38252 // For after we switch over our page tables
38253 // That entry will be read-only: we'd crash.
38254+
38255+#ifdef CONFIG_PAX_KERNEXEC
38256+ mov %cr0, %edx
38257+ xor $X86_CR0_WP, %edx
38258+ mov %edx, %cr0
38259+#endif
38260+
38261 movl $(GDT_ENTRY_TSS*8), %edx
38262 ltr %dx
38263
38264@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
38265 // Let's clear it again for our return.
38266 // The GDT descriptor of the Host
38267 // Points to the table after two "size" bytes
38268- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
38269+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
38270 // Clear "used" from type field (byte 5, bit 2)
38271- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
38272+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
38273+
38274+#ifdef CONFIG_PAX_KERNEXEC
38275+ mov %cr0, %eax
38276+ xor $X86_CR0_WP, %eax
38277+ mov %eax, %cr0
38278+#endif
38279
38280 // Once our page table's switched, the Guest is live!
38281 // The Host fades as we run this final step.
38282@@ -295,13 +309,12 @@ deliver_to_host:
38283 // I consulted gcc, and it gave
38284 // These instructions, which I gladly credit:
38285 leal (%edx,%ebx,8), %eax
38286- movzwl (%eax),%edx
38287- movl 4(%eax), %eax
38288- xorw %ax, %ax
38289- orl %eax, %edx
38290+ movl 4(%eax), %edx
38291+ movw (%eax), %dx
38292 // Now the address of the handler's in %edx
38293 // We call it now: its "iret" drops us home.
38294- jmp *%edx
38295+ ljmp $__KERNEL_CS, $1f
38296+1: jmp *%edx
38297
38298 // Every interrupt can come to us here
38299 // But we must truly tell each apart.
38300diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
38301index 0003992..854bbce 100644
38302--- a/drivers/md/bcache/closure.h
38303+++ b/drivers/md/bcache/closure.h
38304@@ -622,7 +622,7 @@ static inline void closure_wake_up(struct closure_waitlist *list)
38305 static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
38306 struct workqueue_struct *wq)
38307 {
38308- BUG_ON(object_is_on_stack(cl));
38309+ BUG_ON(object_starts_on_stack(cl));
38310 closure_set_ip(cl);
38311 cl->fn = fn;
38312 cl->wq = wq;
38313diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
38314index 5a2c754..0fa55db 100644
38315--- a/drivers/md/bitmap.c
38316+++ b/drivers/md/bitmap.c
38317@@ -1779,7 +1779,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
38318 chunk_kb ? "KB" : "B");
38319 if (bitmap->storage.file) {
38320 seq_printf(seq, ", file: ");
38321- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
38322+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
38323 }
38324
38325 seq_printf(seq, "\n");
38326diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
38327index aa04f02..2a1309e 100644
38328--- a/drivers/md/dm-ioctl.c
38329+++ b/drivers/md/dm-ioctl.c
38330@@ -1694,7 +1694,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
38331 cmd == DM_LIST_VERSIONS_CMD)
38332 return 0;
38333
38334- if ((cmd == DM_DEV_CREATE_CMD)) {
38335+ if (cmd == DM_DEV_CREATE_CMD) {
38336 if (!*param->name) {
38337 DMWARN("name not supplied when creating device");
38338 return -EINVAL;
38339diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
38340index 699b5be..eac0a15 100644
38341--- a/drivers/md/dm-raid1.c
38342+++ b/drivers/md/dm-raid1.c
38343@@ -40,7 +40,7 @@ enum dm_raid1_error {
38344
38345 struct mirror {
38346 struct mirror_set *ms;
38347- atomic_t error_count;
38348+ atomic_unchecked_t error_count;
38349 unsigned long error_type;
38350 struct dm_dev *dev;
38351 sector_t offset;
38352@@ -186,7 +186,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
38353 struct mirror *m;
38354
38355 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
38356- if (!atomic_read(&m->error_count))
38357+ if (!atomic_read_unchecked(&m->error_count))
38358 return m;
38359
38360 return NULL;
38361@@ -218,7 +218,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
38362 * simple way to tell if a device has encountered
38363 * errors.
38364 */
38365- atomic_inc(&m->error_count);
38366+ atomic_inc_unchecked(&m->error_count);
38367
38368 if (test_and_set_bit(error_type, &m->error_type))
38369 return;
38370@@ -409,7 +409,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
38371 struct mirror *m = get_default_mirror(ms);
38372
38373 do {
38374- if (likely(!atomic_read(&m->error_count)))
38375+ if (likely(!atomic_read_unchecked(&m->error_count)))
38376 return m;
38377
38378 if (m-- == ms->mirror)
38379@@ -423,7 +423,7 @@ static int default_ok(struct mirror *m)
38380 {
38381 struct mirror *default_mirror = get_default_mirror(m->ms);
38382
38383- return !atomic_read(&default_mirror->error_count);
38384+ return !atomic_read_unchecked(&default_mirror->error_count);
38385 }
38386
38387 static int mirror_available(struct mirror_set *ms, struct bio *bio)
38388@@ -560,7 +560,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
38389 */
38390 if (likely(region_in_sync(ms, region, 1)))
38391 m = choose_mirror(ms, bio->bi_sector);
38392- else if (m && atomic_read(&m->error_count))
38393+ else if (m && atomic_read_unchecked(&m->error_count))
38394 m = NULL;
38395
38396 if (likely(m))
38397@@ -927,7 +927,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
38398 }
38399
38400 ms->mirror[mirror].ms = ms;
38401- atomic_set(&(ms->mirror[mirror].error_count), 0);
38402+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
38403 ms->mirror[mirror].error_type = 0;
38404 ms->mirror[mirror].offset = offset;
38405
38406@@ -1340,7 +1340,7 @@ static void mirror_resume(struct dm_target *ti)
38407 */
38408 static char device_status_char(struct mirror *m)
38409 {
38410- if (!atomic_read(&(m->error_count)))
38411+ if (!atomic_read_unchecked(&(m->error_count)))
38412 return 'A';
38413
38414 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
38415diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
38416index d907ca6..cfb8384 100644
38417--- a/drivers/md/dm-stripe.c
38418+++ b/drivers/md/dm-stripe.c
38419@@ -20,7 +20,7 @@ struct stripe {
38420 struct dm_dev *dev;
38421 sector_t physical_start;
38422
38423- atomic_t error_count;
38424+ atomic_unchecked_t error_count;
38425 };
38426
38427 struct stripe_c {
38428@@ -185,7 +185,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
38429 kfree(sc);
38430 return r;
38431 }
38432- atomic_set(&(sc->stripe[i].error_count), 0);
38433+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
38434 }
38435
38436 ti->private = sc;
38437@@ -326,7 +326,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
38438 DMEMIT("%d ", sc->stripes);
38439 for (i = 0; i < sc->stripes; i++) {
38440 DMEMIT("%s ", sc->stripe[i].dev->name);
38441- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
38442+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
38443 'D' : 'A';
38444 }
38445 buffer[i] = '\0';
38446@@ -371,8 +371,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
38447 */
38448 for (i = 0; i < sc->stripes; i++)
38449 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
38450- atomic_inc(&(sc->stripe[i].error_count));
38451- if (atomic_read(&(sc->stripe[i].error_count)) <
38452+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
38453+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
38454 DM_IO_ERROR_THRESHOLD)
38455 schedule_work(&sc->trigger_event);
38456 }
38457diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
38458index 1ff252a..ee384c1 100644
38459--- a/drivers/md/dm-table.c
38460+++ b/drivers/md/dm-table.c
38461@@ -389,7 +389,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
38462 if (!dev_size)
38463 return 0;
38464
38465- if ((start >= dev_size) || (start + len > dev_size)) {
38466+ if ((start >= dev_size) || (len > dev_size - start)) {
38467 DMWARN("%s: %s too small for target: "
38468 "start=%llu, len=%llu, dev_size=%llu",
38469 dm_device_name(ti->table->md), bdevname(bdev, b),
38470diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
38471index 60bce43..9b997d0 100644
38472--- a/drivers/md/dm-thin-metadata.c
38473+++ b/drivers/md/dm-thin-metadata.c
38474@@ -397,7 +397,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
38475 {
38476 pmd->info.tm = pmd->tm;
38477 pmd->info.levels = 2;
38478- pmd->info.value_type.context = pmd->data_sm;
38479+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
38480 pmd->info.value_type.size = sizeof(__le64);
38481 pmd->info.value_type.inc = data_block_inc;
38482 pmd->info.value_type.dec = data_block_dec;
38483@@ -416,7 +416,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
38484
38485 pmd->bl_info.tm = pmd->tm;
38486 pmd->bl_info.levels = 1;
38487- pmd->bl_info.value_type.context = pmd->data_sm;
38488+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
38489 pmd->bl_info.value_type.size = sizeof(__le64);
38490 pmd->bl_info.value_type.inc = data_block_inc;
38491 pmd->bl_info.value_type.dec = data_block_dec;
38492diff --git a/drivers/md/dm.c b/drivers/md/dm.c
38493index d5370a9..8761bbc 100644
38494--- a/drivers/md/dm.c
38495+++ b/drivers/md/dm.c
38496@@ -169,9 +169,9 @@ struct mapped_device {
38497 /*
38498 * Event handling.
38499 */
38500- atomic_t event_nr;
38501+ atomic_unchecked_t event_nr;
38502 wait_queue_head_t eventq;
38503- atomic_t uevent_seq;
38504+ atomic_unchecked_t uevent_seq;
38505 struct list_head uevent_list;
38506 spinlock_t uevent_lock; /* Protect access to uevent_list */
38507
38508@@ -1877,8 +1877,8 @@ static struct mapped_device *alloc_dev(int minor)
38509 rwlock_init(&md->map_lock);
38510 atomic_set(&md->holders, 1);
38511 atomic_set(&md->open_count, 0);
38512- atomic_set(&md->event_nr, 0);
38513- atomic_set(&md->uevent_seq, 0);
38514+ atomic_set_unchecked(&md->event_nr, 0);
38515+ atomic_set_unchecked(&md->uevent_seq, 0);
38516 INIT_LIST_HEAD(&md->uevent_list);
38517 spin_lock_init(&md->uevent_lock);
38518
38519@@ -2026,7 +2026,7 @@ static void event_callback(void *context)
38520
38521 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
38522
38523- atomic_inc(&md->event_nr);
38524+ atomic_inc_unchecked(&md->event_nr);
38525 wake_up(&md->eventq);
38526 }
38527
38528@@ -2683,18 +2683,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
38529
38530 uint32_t dm_next_uevent_seq(struct mapped_device *md)
38531 {
38532- return atomic_add_return(1, &md->uevent_seq);
38533+ return atomic_add_return_unchecked(1, &md->uevent_seq);
38534 }
38535
38536 uint32_t dm_get_event_nr(struct mapped_device *md)
38537 {
38538- return atomic_read(&md->event_nr);
38539+ return atomic_read_unchecked(&md->event_nr);
38540 }
38541
38542 int dm_wait_event(struct mapped_device *md, int event_nr)
38543 {
38544 return wait_event_interruptible(md->eventq,
38545- (event_nr != atomic_read(&md->event_nr)));
38546+ (event_nr != atomic_read_unchecked(&md->event_nr)));
38547 }
38548
38549 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
38550diff --git a/drivers/md/md.c b/drivers/md/md.c
38551index 9b82377..6b6922d 100644
38552--- a/drivers/md/md.c
38553+++ b/drivers/md/md.c
38554@@ -234,10 +234,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
38555 * start build, activate spare
38556 */
38557 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
38558-static atomic_t md_event_count;
38559+static atomic_unchecked_t md_event_count;
38560 void md_new_event(struct mddev *mddev)
38561 {
38562- atomic_inc(&md_event_count);
38563+ atomic_inc_unchecked(&md_event_count);
38564 wake_up(&md_event_waiters);
38565 }
38566 EXPORT_SYMBOL_GPL(md_new_event);
38567@@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
38568 */
38569 static void md_new_event_inintr(struct mddev *mddev)
38570 {
38571- atomic_inc(&md_event_count);
38572+ atomic_inc_unchecked(&md_event_count);
38573 wake_up(&md_event_waiters);
38574 }
38575
38576@@ -1501,7 +1501,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
38577 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
38578 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
38579 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
38580- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
38581+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
38582
38583 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
38584 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
38585@@ -1745,7 +1745,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
38586 else
38587 sb->resync_offset = cpu_to_le64(0);
38588
38589- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
38590+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
38591
38592 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
38593 sb->size = cpu_to_le64(mddev->dev_sectors);
38594@@ -2750,7 +2750,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
38595 static ssize_t
38596 errors_show(struct md_rdev *rdev, char *page)
38597 {
38598- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
38599+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
38600 }
38601
38602 static ssize_t
38603@@ -2759,7 +2759,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
38604 char *e;
38605 unsigned long n = simple_strtoul(buf, &e, 10);
38606 if (*buf && (*e == 0 || *e == '\n')) {
38607- atomic_set(&rdev->corrected_errors, n);
38608+ atomic_set_unchecked(&rdev->corrected_errors, n);
38609 return len;
38610 }
38611 return -EINVAL;
38612@@ -3207,8 +3207,8 @@ int md_rdev_init(struct md_rdev *rdev)
38613 rdev->sb_loaded = 0;
38614 rdev->bb_page = NULL;
38615 atomic_set(&rdev->nr_pending, 0);
38616- atomic_set(&rdev->read_errors, 0);
38617- atomic_set(&rdev->corrected_errors, 0);
38618+ atomic_set_unchecked(&rdev->read_errors, 0);
38619+ atomic_set_unchecked(&rdev->corrected_errors, 0);
38620
38621 INIT_LIST_HEAD(&rdev->same_set);
38622 init_waitqueue_head(&rdev->blocked_wait);
38623@@ -7009,7 +7009,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
38624
38625 spin_unlock(&pers_lock);
38626 seq_printf(seq, "\n");
38627- seq->poll_event = atomic_read(&md_event_count);
38628+ seq->poll_event = atomic_read_unchecked(&md_event_count);
38629 return 0;
38630 }
38631 if (v == (void*)2) {
38632@@ -7112,7 +7112,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
38633 return error;
38634
38635 seq = file->private_data;
38636- seq->poll_event = atomic_read(&md_event_count);
38637+ seq->poll_event = atomic_read_unchecked(&md_event_count);
38638 return error;
38639 }
38640
38641@@ -7126,7 +7126,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
38642 /* always allow read */
38643 mask = POLLIN | POLLRDNORM;
38644
38645- if (seq->poll_event != atomic_read(&md_event_count))
38646+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
38647 mask |= POLLERR | POLLPRI;
38648 return mask;
38649 }
38650@@ -7170,7 +7170,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
38651 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
38652 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
38653 (int)part_stat_read(&disk->part0, sectors[1]) -
38654- atomic_read(&disk->sync_io);
38655+ atomic_read_unchecked(&disk->sync_io);
38656 /* sync IO will cause sync_io to increase before the disk_stats
38657 * as sync_io is counted when a request starts, and
38658 * disk_stats is counted when it completes.
38659diff --git a/drivers/md/md.h b/drivers/md/md.h
38660index 653f992b6..6af6c40 100644
38661--- a/drivers/md/md.h
38662+++ b/drivers/md/md.h
38663@@ -94,13 +94,13 @@ struct md_rdev {
38664 * only maintained for arrays that
38665 * support hot removal
38666 */
38667- atomic_t read_errors; /* number of consecutive read errors that
38668+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
38669 * we have tried to ignore.
38670 */
38671 struct timespec last_read_error; /* monotonic time since our
38672 * last read error
38673 */
38674- atomic_t corrected_errors; /* number of corrected read errors,
38675+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
38676 * for reporting to userspace and storing
38677 * in superblock.
38678 */
38679@@ -434,7 +434,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
38680
38681 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
38682 {
38683- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
38684+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
38685 }
38686
38687 struct md_personality
38688diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
38689index 3e6d115..ffecdeb 100644
38690--- a/drivers/md/persistent-data/dm-space-map.h
38691+++ b/drivers/md/persistent-data/dm-space-map.h
38692@@ -71,6 +71,7 @@ struct dm_space_map {
38693 dm_sm_threshold_fn fn,
38694 void *context);
38695 };
38696+typedef struct dm_space_map __no_const dm_space_map_no_const;
38697
38698 /*----------------------------------------------------------------*/
38699
38700diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
38701index 6e17f81..140f717 100644
38702--- a/drivers/md/raid1.c
38703+++ b/drivers/md/raid1.c
38704@@ -1822,7 +1822,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
38705 if (r1_sync_page_io(rdev, sect, s,
38706 bio->bi_io_vec[idx].bv_page,
38707 READ) != 0)
38708- atomic_add(s, &rdev->corrected_errors);
38709+ atomic_add_unchecked(s, &rdev->corrected_errors);
38710 }
38711 sectors -= s;
38712 sect += s;
38713@@ -2042,7 +2042,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
38714 test_bit(In_sync, &rdev->flags)) {
38715 if (r1_sync_page_io(rdev, sect, s,
38716 conf->tmppage, READ)) {
38717- atomic_add(s, &rdev->corrected_errors);
38718+ atomic_add_unchecked(s, &rdev->corrected_errors);
38719 printk(KERN_INFO
38720 "md/raid1:%s: read error corrected "
38721 "(%d sectors at %llu on %s)\n",
38722diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
38723index 6ddae25..514caa9 100644
38724--- a/drivers/md/raid10.c
38725+++ b/drivers/md/raid10.c
38726@@ -1940,7 +1940,7 @@ static void end_sync_read(struct bio *bio, int error)
38727 /* The write handler will notice the lack of
38728 * R10BIO_Uptodate and record any errors etc
38729 */
38730- atomic_add(r10_bio->sectors,
38731+ atomic_add_unchecked(r10_bio->sectors,
38732 &conf->mirrors[d].rdev->corrected_errors);
38733
38734 /* for reconstruct, we always reschedule after a read.
38735@@ -2286,7 +2286,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
38736 {
38737 struct timespec cur_time_mon;
38738 unsigned long hours_since_last;
38739- unsigned int read_errors = atomic_read(&rdev->read_errors);
38740+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
38741
38742 ktime_get_ts(&cur_time_mon);
38743
38744@@ -2308,9 +2308,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
38745 * overflowing the shift of read_errors by hours_since_last.
38746 */
38747 if (hours_since_last >= 8 * sizeof(read_errors))
38748- atomic_set(&rdev->read_errors, 0);
38749+ atomic_set_unchecked(&rdev->read_errors, 0);
38750 else
38751- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
38752+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
38753 }
38754
38755 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
38756@@ -2364,8 +2364,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
38757 return;
38758
38759 check_decay_read_errors(mddev, rdev);
38760- atomic_inc(&rdev->read_errors);
38761- if (atomic_read(&rdev->read_errors) > max_read_errors) {
38762+ atomic_inc_unchecked(&rdev->read_errors);
38763+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
38764 char b[BDEVNAME_SIZE];
38765 bdevname(rdev->bdev, b);
38766
38767@@ -2373,7 +2373,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
38768 "md/raid10:%s: %s: Raid device exceeded "
38769 "read_error threshold [cur %d:max %d]\n",
38770 mdname(mddev), b,
38771- atomic_read(&rdev->read_errors), max_read_errors);
38772+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
38773 printk(KERN_NOTICE
38774 "md/raid10:%s: %s: Failing raid device\n",
38775 mdname(mddev), b);
38776@@ -2528,7 +2528,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
38777 sect +
38778 choose_data_offset(r10_bio, rdev)),
38779 bdevname(rdev->bdev, b));
38780- atomic_add(s, &rdev->corrected_errors);
38781+ atomic_add_unchecked(s, &rdev->corrected_errors);
38782 }
38783
38784 rdev_dec_pending(rdev, mddev);
38785diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
38786index 05e4a10..48fbe37 100644
38787--- a/drivers/md/raid5.c
38788+++ b/drivers/md/raid5.c
38789@@ -1764,21 +1764,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
38790 mdname(conf->mddev), STRIPE_SECTORS,
38791 (unsigned long long)s,
38792 bdevname(rdev->bdev, b));
38793- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
38794+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
38795 clear_bit(R5_ReadError, &sh->dev[i].flags);
38796 clear_bit(R5_ReWrite, &sh->dev[i].flags);
38797 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
38798 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
38799
38800- if (atomic_read(&rdev->read_errors))
38801- atomic_set(&rdev->read_errors, 0);
38802+ if (atomic_read_unchecked(&rdev->read_errors))
38803+ atomic_set_unchecked(&rdev->read_errors, 0);
38804 } else {
38805 const char *bdn = bdevname(rdev->bdev, b);
38806 int retry = 0;
38807 int set_bad = 0;
38808
38809 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
38810- atomic_inc(&rdev->read_errors);
38811+ atomic_inc_unchecked(&rdev->read_errors);
38812 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
38813 printk_ratelimited(
38814 KERN_WARNING
38815@@ -1806,7 +1806,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
38816 mdname(conf->mddev),
38817 (unsigned long long)s,
38818 bdn);
38819- } else if (atomic_read(&rdev->read_errors)
38820+ } else if (atomic_read_unchecked(&rdev->read_errors)
38821 > conf->max_nr_stripes)
38822 printk(KERN_WARNING
38823 "md/raid:%s: Too many read errors, failing device %s.\n",
38824diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
38825index 401ef64..836e563 100644
38826--- a/drivers/media/dvb-core/dvbdev.c
38827+++ b/drivers/media/dvb-core/dvbdev.c
38828@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
38829 const struct dvb_device *template, void *priv, int type)
38830 {
38831 struct dvb_device *dvbdev;
38832- struct file_operations *dvbdevfops;
38833+ file_operations_no_const *dvbdevfops;
38834 struct device *clsdev;
38835 int minor;
38836 int id;
38837diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
38838index 9b6c3bb..baeb5c7 100644
38839--- a/drivers/media/dvb-frontends/dib3000.h
38840+++ b/drivers/media/dvb-frontends/dib3000.h
38841@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
38842 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
38843 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
38844 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
38845-};
38846+} __no_const;
38847
38848 #if IS_ENABLED(CONFIG_DVB_DIB3000MB)
38849 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
38850diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
38851index c7a9be1..683f6f8 100644
38852--- a/drivers/media/pci/cx88/cx88-video.c
38853+++ b/drivers/media/pci/cx88/cx88-video.c
38854@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
38855
38856 /* ------------------------------------------------------------------ */
38857
38858-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
38859-static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
38860-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
38861+static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
38862+static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
38863+static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
38864
38865 module_param_array(video_nr, int, NULL, 0444);
38866 module_param_array(vbi_nr, int, NULL, 0444);
38867diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
38868index d338b19..aae4f9e 100644
38869--- a/drivers/media/platform/omap/omap_vout.c
38870+++ b/drivers/media/platform/omap/omap_vout.c
38871@@ -63,7 +63,6 @@ enum omap_vout_channels {
38872 OMAP_VIDEO2,
38873 };
38874
38875-static struct videobuf_queue_ops video_vbq_ops;
38876 /* Variables configurable through module params*/
38877 static u32 video1_numbuffers = 3;
38878 static u32 video2_numbuffers = 3;
38879@@ -1015,6 +1014,12 @@ static int omap_vout_open(struct file *file)
38880 {
38881 struct videobuf_queue *q;
38882 struct omap_vout_device *vout = NULL;
38883+ static struct videobuf_queue_ops video_vbq_ops = {
38884+ .buf_setup = omap_vout_buffer_setup,
38885+ .buf_prepare = omap_vout_buffer_prepare,
38886+ .buf_release = omap_vout_buffer_release,
38887+ .buf_queue = omap_vout_buffer_queue,
38888+ };
38889
38890 vout = video_drvdata(file);
38891 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
38892@@ -1032,10 +1037,6 @@ static int omap_vout_open(struct file *file)
38893 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
38894
38895 q = &vout->vbq;
38896- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
38897- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
38898- video_vbq_ops.buf_release = omap_vout_buffer_release;
38899- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
38900 spin_lock_init(&vout->vbq_lock);
38901
38902 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
38903diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
38904index 04e6490..2df65bf 100644
38905--- a/drivers/media/platform/s5p-tv/mixer.h
38906+++ b/drivers/media/platform/s5p-tv/mixer.h
38907@@ -156,7 +156,7 @@ struct mxr_layer {
38908 /** layer index (unique identifier) */
38909 int idx;
38910 /** callbacks for layer methods */
38911- struct mxr_layer_ops ops;
38912+ struct mxr_layer_ops *ops;
38913 /** format array */
38914 const struct mxr_format **fmt_array;
38915 /** size of format array */
38916diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
38917index b93a21f..2535195 100644
38918--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
38919+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
38920@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
38921 {
38922 struct mxr_layer *layer;
38923 int ret;
38924- struct mxr_layer_ops ops = {
38925+ static struct mxr_layer_ops ops = {
38926 .release = mxr_graph_layer_release,
38927 .buffer_set = mxr_graph_buffer_set,
38928 .stream_set = mxr_graph_stream_set,
38929diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
38930index b713403..53cb5ad 100644
38931--- a/drivers/media/platform/s5p-tv/mixer_reg.c
38932+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
38933@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
38934 layer->update_buf = next;
38935 }
38936
38937- layer->ops.buffer_set(layer, layer->update_buf);
38938+ layer->ops->buffer_set(layer, layer->update_buf);
38939
38940 if (done && done != layer->shadow_buf)
38941 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
38942diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
38943index ef0efdf..8c78eb6 100644
38944--- a/drivers/media/platform/s5p-tv/mixer_video.c
38945+++ b/drivers/media/platform/s5p-tv/mixer_video.c
38946@@ -209,7 +209,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
38947 layer->geo.src.height = layer->geo.src.full_height;
38948
38949 mxr_geometry_dump(mdev, &layer->geo);
38950- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
38951+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
38952 mxr_geometry_dump(mdev, &layer->geo);
38953 }
38954
38955@@ -227,7 +227,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
38956 layer->geo.dst.full_width = mbus_fmt.width;
38957 layer->geo.dst.full_height = mbus_fmt.height;
38958 layer->geo.dst.field = mbus_fmt.field;
38959- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
38960+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
38961
38962 mxr_geometry_dump(mdev, &layer->geo);
38963 }
38964@@ -333,7 +333,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
38965 /* set source size to highest accepted value */
38966 geo->src.full_width = max(geo->dst.full_width, pix->width);
38967 geo->src.full_height = max(geo->dst.full_height, pix->height);
38968- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
38969+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
38970 mxr_geometry_dump(mdev, &layer->geo);
38971 /* set cropping to total visible screen */
38972 geo->src.width = pix->width;
38973@@ -341,12 +341,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
38974 geo->src.x_offset = 0;
38975 geo->src.y_offset = 0;
38976 /* assure consistency of geometry */
38977- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
38978+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
38979 mxr_geometry_dump(mdev, &layer->geo);
38980 /* set full size to lowest possible value */
38981 geo->src.full_width = 0;
38982 geo->src.full_height = 0;
38983- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
38984+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
38985 mxr_geometry_dump(mdev, &layer->geo);
38986
38987 /* returning results */
38988@@ -473,7 +473,7 @@ static int mxr_s_selection(struct file *file, void *fh,
38989 target->width = s->r.width;
38990 target->height = s->r.height;
38991
38992- layer->ops.fix_geometry(layer, stage, s->flags);
38993+ layer->ops->fix_geometry(layer, stage, s->flags);
38994
38995 /* retrieve update selection rectangle */
38996 res.left = target->x_offset;
38997@@ -954,13 +954,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
38998 mxr_output_get(mdev);
38999
39000 mxr_layer_update_output(layer);
39001- layer->ops.format_set(layer);
39002+ layer->ops->format_set(layer);
39003 /* enabling layer in hardware */
39004 spin_lock_irqsave(&layer->enq_slock, flags);
39005 layer->state = MXR_LAYER_STREAMING;
39006 spin_unlock_irqrestore(&layer->enq_slock, flags);
39007
39008- layer->ops.stream_set(layer, MXR_ENABLE);
39009+ layer->ops->stream_set(layer, MXR_ENABLE);
39010 mxr_streamer_get(mdev);
39011
39012 return 0;
39013@@ -1030,7 +1030,7 @@ static int stop_streaming(struct vb2_queue *vq)
39014 spin_unlock_irqrestore(&layer->enq_slock, flags);
39015
39016 /* disabling layer in hardware */
39017- layer->ops.stream_set(layer, MXR_DISABLE);
39018+ layer->ops->stream_set(layer, MXR_DISABLE);
39019 /* remove one streamer */
39020 mxr_streamer_put(mdev);
39021 /* allow changes in output configuration */
39022@@ -1069,8 +1069,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
39023
39024 void mxr_layer_release(struct mxr_layer *layer)
39025 {
39026- if (layer->ops.release)
39027- layer->ops.release(layer);
39028+ if (layer->ops->release)
39029+ layer->ops->release(layer);
39030 }
39031
39032 void mxr_base_layer_release(struct mxr_layer *layer)
39033@@ -1096,7 +1096,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
39034
39035 layer->mdev = mdev;
39036 layer->idx = idx;
39037- layer->ops = *ops;
39038+ layer->ops = ops;
39039
39040 spin_lock_init(&layer->enq_slock);
39041 INIT_LIST_HEAD(&layer->enq_list);
39042diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
39043index 3d13a63..da31bf1 100644
39044--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
39045+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
39046@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
39047 {
39048 struct mxr_layer *layer;
39049 int ret;
39050- struct mxr_layer_ops ops = {
39051+ static struct mxr_layer_ops ops = {
39052 .release = mxr_vp_layer_release,
39053 .buffer_set = mxr_vp_buffer_set,
39054 .stream_set = mxr_vp_stream_set,
39055diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
39056index 545c04c..a14bded 100644
39057--- a/drivers/media/radio/radio-cadet.c
39058+++ b/drivers/media/radio/radio-cadet.c
39059@@ -324,6 +324,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
39060 unsigned char readbuf[RDS_BUFFER];
39061 int i = 0;
39062
39063+ if (count > RDS_BUFFER)
39064+ return -EFAULT;
39065 mutex_lock(&dev->lock);
39066 if (dev->rdsstat == 0)
39067 cadet_start_rds(dev);
39068@@ -339,7 +341,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
39069 while (i < count && dev->rdsin != dev->rdsout)
39070 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
39071
39072- if (i && copy_to_user(data, readbuf, i))
39073+ if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
39074 i = -EFAULT;
39075 unlock:
39076 mutex_unlock(&dev->lock);
39077diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
39078index 3940bb0..fb3952a 100644
39079--- a/drivers/media/usb/dvb-usb/cxusb.c
39080+++ b/drivers/media/usb/dvb-usb/cxusb.c
39081@@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
39082
39083 struct dib0700_adapter_state {
39084 int (*set_param_save) (struct dvb_frontend *);
39085-};
39086+} __no_const;
39087
39088 static int dib7070_set_param_override(struct dvb_frontend *fe)
39089 {
39090diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
39091index 6e237b6..dc25556 100644
39092--- a/drivers/media/usb/dvb-usb/dw2102.c
39093+++ b/drivers/media/usb/dvb-usb/dw2102.c
39094@@ -118,7 +118,7 @@ struct su3000_state {
39095
39096 struct s6x0_state {
39097 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
39098-};
39099+} __no_const;
39100
39101 /* debug */
39102 static int dvb_usb_dw2102_debug;
39103diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
39104index f129551..ecf6514 100644
39105--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
39106+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
39107@@ -326,7 +326,7 @@ struct v4l2_buffer32 {
39108 __u32 reserved;
39109 };
39110
39111-static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
39112+static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
39113 enum v4l2_memory memory)
39114 {
39115 void __user *up_pln;
39116@@ -355,7 +355,7 @@ static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
39117 return 0;
39118 }
39119
39120-static int put_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
39121+static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
39122 enum v4l2_memory memory)
39123 {
39124 if (copy_in_user(up32, up, 2 * sizeof(__u32)) ||
39125@@ -772,7 +772,7 @@ static int put_v4l2_subdev_edid32(struct v4l2_subdev_edid *kp, struct v4l2_subde
39126 put_user(kp->start_block, &up->start_block) ||
39127 put_user(kp->blocks, &up->blocks) ||
39128 put_user(tmp, &up->edid) ||
39129- copy_to_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
39130+ copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved)))
39131 return -EFAULT;
39132 return 0;
39133 }
39134diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
39135index 7658586..1079260 100644
39136--- a/drivers/media/v4l2-core/v4l2-ioctl.c
39137+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
39138@@ -1995,7 +1995,8 @@ struct v4l2_ioctl_info {
39139 struct file *file, void *fh, void *p);
39140 } u;
39141 void (*debug)(const void *arg, bool write_only);
39142-};
39143+} __do_const;
39144+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
39145
39146 /* This control needs a priority check */
39147 #define INFO_FL_PRIO (1 << 0)
39148@@ -2177,7 +2178,7 @@ static long __video_do_ioctl(struct file *file,
39149 struct video_device *vfd = video_devdata(file);
39150 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
39151 bool write_only = false;
39152- struct v4l2_ioctl_info default_info;
39153+ v4l2_ioctl_info_no_const default_info;
39154 const struct v4l2_ioctl_info *info;
39155 void *fh = file->private_data;
39156 struct v4l2_fh *vfh = NULL;
39157@@ -2251,7 +2252,7 @@ done:
39158 }
39159
39160 static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
39161- void * __user *user_ptr, void ***kernel_ptr)
39162+ void __user **user_ptr, void ***kernel_ptr)
39163 {
39164 int ret = 0;
39165
39166@@ -2267,7 +2268,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
39167 ret = -EINVAL;
39168 break;
39169 }
39170- *user_ptr = (void __user *)buf->m.planes;
39171+ *user_ptr = (void __force_user *)buf->m.planes;
39172 *kernel_ptr = (void *)&buf->m.planes;
39173 *array_size = sizeof(struct v4l2_plane) * buf->length;
39174 ret = 1;
39175@@ -2302,7 +2303,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
39176 ret = -EINVAL;
39177 break;
39178 }
39179- *user_ptr = (void __user *)ctrls->controls;
39180+ *user_ptr = (void __force_user *)ctrls->controls;
39181 *kernel_ptr = (void *)&ctrls->controls;
39182 *array_size = sizeof(struct v4l2_ext_control)
39183 * ctrls->count;
39184diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
39185index 767ff4d..c69d259 100644
39186--- a/drivers/message/fusion/mptbase.c
39187+++ b/drivers/message/fusion/mptbase.c
39188@@ -6755,8 +6755,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
39189 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
39190 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
39191
39192+#ifdef CONFIG_GRKERNSEC_HIDESYM
39193+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
39194+#else
39195 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
39196 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
39197+#endif
39198+
39199 /*
39200 * Rounding UP to nearest 4-kB boundary here...
39201 */
39202@@ -6769,7 +6774,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
39203 ioc->facts.GlobalCredits);
39204
39205 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
39206+#ifdef CONFIG_GRKERNSEC_HIDESYM
39207+ NULL, NULL);
39208+#else
39209 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
39210+#endif
39211 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
39212 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
39213 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
39214diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
39215index dd239bd..689c4f7 100644
39216--- a/drivers/message/fusion/mptsas.c
39217+++ b/drivers/message/fusion/mptsas.c
39218@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
39219 return 0;
39220 }
39221
39222+static inline void
39223+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
39224+{
39225+ if (phy_info->port_details) {
39226+ phy_info->port_details->rphy = rphy;
39227+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
39228+ ioc->name, rphy));
39229+ }
39230+
39231+ if (rphy) {
39232+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
39233+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
39234+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
39235+ ioc->name, rphy, rphy->dev.release));
39236+ }
39237+}
39238+
39239 /* no mutex */
39240 static void
39241 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
39242@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
39243 return NULL;
39244 }
39245
39246-static inline void
39247-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
39248-{
39249- if (phy_info->port_details) {
39250- phy_info->port_details->rphy = rphy;
39251- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
39252- ioc->name, rphy));
39253- }
39254-
39255- if (rphy) {
39256- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
39257- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
39258- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
39259- ioc->name, rphy, rphy->dev.release));
39260- }
39261-}
39262-
39263 static inline struct sas_port *
39264 mptsas_get_port(struct mptsas_phyinfo *phy_info)
39265 {
39266diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
39267index 727819c..ad74694 100644
39268--- a/drivers/message/fusion/mptscsih.c
39269+++ b/drivers/message/fusion/mptscsih.c
39270@@ -1271,15 +1271,16 @@ mptscsih_info(struct Scsi_Host *SChost)
39271
39272 h = shost_priv(SChost);
39273
39274- if (h) {
39275- if (h->info_kbuf == NULL)
39276- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
39277- return h->info_kbuf;
39278- h->info_kbuf[0] = '\0';
39279+ if (!h)
39280+ return NULL;
39281
39282- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
39283- h->info_kbuf[size-1] = '\0';
39284- }
39285+ if (h->info_kbuf == NULL)
39286+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
39287+ return h->info_kbuf;
39288+ h->info_kbuf[0] = '\0';
39289+
39290+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
39291+ h->info_kbuf[size-1] = '\0';
39292
39293 return h->info_kbuf;
39294 }
39295diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
39296index b7d87cd..9890039 100644
39297--- a/drivers/message/i2o/i2o_proc.c
39298+++ b/drivers/message/i2o/i2o_proc.c
39299@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
39300 "Array Controller Device"
39301 };
39302
39303-static char *chtostr(char *tmp, u8 *chars, int n)
39304-{
39305- tmp[0] = 0;
39306- return strncat(tmp, (char *)chars, n);
39307-}
39308-
39309 static int i2o_report_query_status(struct seq_file *seq, int block_status,
39310 char *group)
39311 {
39312@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
39313 } *result;
39314
39315 i2o_exec_execute_ddm_table ddm_table;
39316- char tmp[28 + 1];
39317
39318 result = kmalloc(sizeof(*result), GFP_KERNEL);
39319 if (!result)
39320@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
39321
39322 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
39323 seq_printf(seq, "%-#8x", ddm_table.module_id);
39324- seq_printf(seq, "%-29s",
39325- chtostr(tmp, ddm_table.module_name_version, 28));
39326+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
39327 seq_printf(seq, "%9d ", ddm_table.data_size);
39328 seq_printf(seq, "%8d", ddm_table.code_size);
39329
39330@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
39331
39332 i2o_driver_result_table *result;
39333 i2o_driver_store_table *dst;
39334- char tmp[28 + 1];
39335
39336 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
39337 if (result == NULL)
39338@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
39339
39340 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
39341 seq_printf(seq, "%-#8x", dst->module_id);
39342- seq_printf(seq, "%-29s",
39343- chtostr(tmp, dst->module_name_version, 28));
39344- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
39345+ seq_printf(seq, "%-.28s", dst->module_name_version);
39346+ seq_printf(seq, "%-.8s", dst->date);
39347 seq_printf(seq, "%8d ", dst->module_size);
39348 seq_printf(seq, "%8d ", dst->mpb_size);
39349 seq_printf(seq, "0x%04x", dst->module_flags);
39350@@ -1250,7 +1240,6 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
39351 // == (allow) 512d bytes (max)
39352 static u16 *work16 = (u16 *) work32;
39353 int token;
39354- char tmp[16 + 1];
39355
39356 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
39357
39358@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
39359 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
39360 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
39361 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
39362- seq_printf(seq, "Vendor info : %s\n",
39363- chtostr(tmp, (u8 *) (work32 + 2), 16));
39364- seq_printf(seq, "Product info : %s\n",
39365- chtostr(tmp, (u8 *) (work32 + 6), 16));
39366- seq_printf(seq, "Description : %s\n",
39367- chtostr(tmp, (u8 *) (work32 + 10), 16));
39368- seq_printf(seq, "Product rev. : %s\n",
39369- chtostr(tmp, (u8 *) (work32 + 14), 8));
39370+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
39371+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
39372+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
39373+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
39374
39375 seq_printf(seq, "Serial number : ");
39376 print_serial_number(seq, (u8 *) (work32 + 16),
39377@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
39378 u8 pad[256]; // allow up to 256 byte (max) serial number
39379 } result;
39380
39381- char tmp[24 + 1];
39382-
39383 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
39384
39385 if (token < 0) {
39386@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
39387 }
39388
39389 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
39390- seq_printf(seq, "Module name : %s\n",
39391- chtostr(tmp, result.module_name, 24));
39392- seq_printf(seq, "Module revision : %s\n",
39393- chtostr(tmp, result.module_rev, 8));
39394+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
39395+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
39396
39397 seq_printf(seq, "Serial number : ");
39398 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
39399@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
39400 u8 instance_number[4];
39401 } result;
39402
39403- char tmp[64 + 1];
39404-
39405 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
39406
39407 if (token < 0) {
39408@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
39409 return 0;
39410 }
39411
39412- seq_printf(seq, "Device name : %s\n",
39413- chtostr(tmp, result.device_name, 64));
39414- seq_printf(seq, "Service name : %s\n",
39415- chtostr(tmp, result.service_name, 64));
39416- seq_printf(seq, "Physical name : %s\n",
39417- chtostr(tmp, result.physical_location, 64));
39418- seq_printf(seq, "Instance number : %s\n",
39419- chtostr(tmp, result.instance_number, 4));
39420+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
39421+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
39422+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
39423+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
39424
39425 return 0;
39426 }
39427diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
39428index a8c08f3..155fe3d 100644
39429--- a/drivers/message/i2o/iop.c
39430+++ b/drivers/message/i2o/iop.c
39431@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
39432
39433 spin_lock_irqsave(&c->context_list_lock, flags);
39434
39435- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
39436- atomic_inc(&c->context_list_counter);
39437+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
39438+ atomic_inc_unchecked(&c->context_list_counter);
39439
39440- entry->context = atomic_read(&c->context_list_counter);
39441+ entry->context = atomic_read_unchecked(&c->context_list_counter);
39442
39443 list_add(&entry->list, &c->context_list);
39444
39445@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
39446
39447 #if BITS_PER_LONG == 64
39448 spin_lock_init(&c->context_list_lock);
39449- atomic_set(&c->context_list_counter, 0);
39450+ atomic_set_unchecked(&c->context_list_counter, 0);
39451 INIT_LIST_HEAD(&c->context_list);
39452 #endif
39453
39454diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
39455index 45ece11..8efa218 100644
39456--- a/drivers/mfd/janz-cmodio.c
39457+++ b/drivers/mfd/janz-cmodio.c
39458@@ -13,6 +13,7 @@
39459
39460 #include <linux/kernel.h>
39461 #include <linux/module.h>
39462+#include <linux/slab.h>
39463 #include <linux/init.h>
39464 #include <linux/pci.h>
39465 #include <linux/interrupt.h>
39466diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
39467index a5f9888..1c0ed56 100644
39468--- a/drivers/mfd/twl4030-irq.c
39469+++ b/drivers/mfd/twl4030-irq.c
39470@@ -35,6 +35,7 @@
39471 #include <linux/of.h>
39472 #include <linux/irqdomain.h>
39473 #include <linux/i2c/twl.h>
39474+#include <asm/pgtable.h>
39475
39476 #include "twl-core.h"
39477
39478@@ -728,10 +729,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
39479 * Install an irq handler for each of the SIH modules;
39480 * clone dummy irq_chip since PIH can't *do* anything
39481 */
39482- twl4030_irq_chip = dummy_irq_chip;
39483- twl4030_irq_chip.name = "twl4030";
39484+ pax_open_kernel();
39485+ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
39486+ *(const char **)&twl4030_irq_chip.name = "twl4030";
39487
39488- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
39489+ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
39490+ pax_close_kernel();
39491
39492 for (i = irq_base; i < irq_end; i++) {
39493 irq_set_chip_and_handler(i, &twl4030_irq_chip,
39494diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
39495index 277a8db..0e0b754 100644
39496--- a/drivers/mfd/twl6030-irq.c
39497+++ b/drivers/mfd/twl6030-irq.c
39498@@ -387,10 +387,12 @@ int twl6030_init_irq(struct device *dev, int irq_num)
39499 * install an irq handler for each of the modules;
39500 * clone dummy irq_chip since PIH can't *do* anything
39501 */
39502- twl6030_irq_chip = dummy_irq_chip;
39503- twl6030_irq_chip.name = "twl6030";
39504- twl6030_irq_chip.irq_set_type = NULL;
39505- twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;
39506+ pax_open_kernel();
39507+ memcpy((void *)&twl6030_irq_chip, &dummy_irq_chip, sizeof twl6030_irq_chip);
39508+ *(const char **)&twl6030_irq_chip.name = "twl6030";
39509+ *(void **)&twl6030_irq_chip.irq_set_type = NULL;
39510+ *(void **)&twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;
39511+ pax_close_kernel();
39512
39513 for (i = irq_base; i < irq_end; i++) {
39514 irq_set_chip_and_handler(i, &twl6030_irq_chip,
39515diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
39516index f32550a..e3e52a2 100644
39517--- a/drivers/misc/c2port/core.c
39518+++ b/drivers/misc/c2port/core.c
39519@@ -920,7 +920,9 @@ struct c2port_device *c2port_device_register(char *name,
39520 mutex_init(&c2dev->mutex);
39521
39522 /* Create binary file */
39523- c2port_bin_attrs.size = ops->blocks_num * ops->block_size;
39524+ pax_open_kernel();
39525+ *(size_t *)&c2port_bin_attrs.size = ops->blocks_num * ops->block_size;
39526+ pax_close_kernel();
39527 ret = device_create_bin_file(c2dev->dev, &c2port_bin_attrs);
39528 if (unlikely(ret))
39529 goto error_device_create_bin_file;
39530diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
39531index 36f5d52..32311c3 100644
39532--- a/drivers/misc/kgdbts.c
39533+++ b/drivers/misc/kgdbts.c
39534@@ -834,7 +834,7 @@ static void run_plant_and_detach_test(int is_early)
39535 char before[BREAK_INSTR_SIZE];
39536 char after[BREAK_INSTR_SIZE];
39537
39538- probe_kernel_read(before, (char *)kgdbts_break_test,
39539+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
39540 BREAK_INSTR_SIZE);
39541 init_simple_test();
39542 ts.tst = plant_and_detach_test;
39543@@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early)
39544 /* Activate test with initial breakpoint */
39545 if (!is_early)
39546 kgdb_breakpoint();
39547- probe_kernel_read(after, (char *)kgdbts_break_test,
39548+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
39549 BREAK_INSTR_SIZE);
39550 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
39551 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
39552diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
39553index 4cd4a3d..b48cbc7 100644
39554--- a/drivers/misc/lis3lv02d/lis3lv02d.c
39555+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
39556@@ -498,7 +498,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
39557 * the lid is closed. This leads to interrupts as soon as a little move
39558 * is done.
39559 */
39560- atomic_inc(&lis3->count);
39561+ atomic_inc_unchecked(&lis3->count);
39562
39563 wake_up_interruptible(&lis3->misc_wait);
39564 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
39565@@ -584,7 +584,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
39566 if (lis3->pm_dev)
39567 pm_runtime_get_sync(lis3->pm_dev);
39568
39569- atomic_set(&lis3->count, 0);
39570+ atomic_set_unchecked(&lis3->count, 0);
39571 return 0;
39572 }
39573
39574@@ -616,7 +616,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
39575 add_wait_queue(&lis3->misc_wait, &wait);
39576 while (true) {
39577 set_current_state(TASK_INTERRUPTIBLE);
39578- data = atomic_xchg(&lis3->count, 0);
39579+ data = atomic_xchg_unchecked(&lis3->count, 0);
39580 if (data)
39581 break;
39582
39583@@ -657,7 +657,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
39584 struct lis3lv02d, miscdev);
39585
39586 poll_wait(file, &lis3->misc_wait, wait);
39587- if (atomic_read(&lis3->count))
39588+ if (atomic_read_unchecked(&lis3->count))
39589 return POLLIN | POLLRDNORM;
39590 return 0;
39591 }
39592diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
39593index c439c82..1f20f57 100644
39594--- a/drivers/misc/lis3lv02d/lis3lv02d.h
39595+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
39596@@ -297,7 +297,7 @@ struct lis3lv02d {
39597 struct input_polled_dev *idev; /* input device */
39598 struct platform_device *pdev; /* platform device */
39599 struct regulator_bulk_data regulators[2];
39600- atomic_t count; /* interrupt count after last read */
39601+ atomic_unchecked_t count; /* interrupt count after last read */
39602 union axis_conversion ac; /* hw -> logical axis */
39603 int mapped_btns[3];
39604
39605diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
39606index 2f30bad..c4c13d0 100644
39607--- a/drivers/misc/sgi-gru/gruhandles.c
39608+++ b/drivers/misc/sgi-gru/gruhandles.c
39609@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
39610 unsigned long nsec;
39611
39612 nsec = CLKS2NSEC(clks);
39613- atomic_long_inc(&mcs_op_statistics[op].count);
39614- atomic_long_add(nsec, &mcs_op_statistics[op].total);
39615+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
39616+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
39617 if (mcs_op_statistics[op].max < nsec)
39618 mcs_op_statistics[op].max = nsec;
39619 }
39620diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
39621index 797d796..ae8f01e 100644
39622--- a/drivers/misc/sgi-gru/gruprocfs.c
39623+++ b/drivers/misc/sgi-gru/gruprocfs.c
39624@@ -32,9 +32,9 @@
39625
39626 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
39627
39628-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
39629+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
39630 {
39631- unsigned long val = atomic_long_read(v);
39632+ unsigned long val = atomic_long_read_unchecked(v);
39633
39634 seq_printf(s, "%16lu %s\n", val, id);
39635 }
39636@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
39637
39638 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
39639 for (op = 0; op < mcsop_last; op++) {
39640- count = atomic_long_read(&mcs_op_statistics[op].count);
39641- total = atomic_long_read(&mcs_op_statistics[op].total);
39642+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
39643+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
39644 max = mcs_op_statistics[op].max;
39645 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
39646 count ? total / count : 0, max);
39647diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
39648index 5c3ce24..4915ccb 100644
39649--- a/drivers/misc/sgi-gru/grutables.h
39650+++ b/drivers/misc/sgi-gru/grutables.h
39651@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
39652 * GRU statistics.
39653 */
39654 struct gru_stats_s {
39655- atomic_long_t vdata_alloc;
39656- atomic_long_t vdata_free;
39657- atomic_long_t gts_alloc;
39658- atomic_long_t gts_free;
39659- atomic_long_t gms_alloc;
39660- atomic_long_t gms_free;
39661- atomic_long_t gts_double_allocate;
39662- atomic_long_t assign_context;
39663- atomic_long_t assign_context_failed;
39664- atomic_long_t free_context;
39665- atomic_long_t load_user_context;
39666- atomic_long_t load_kernel_context;
39667- atomic_long_t lock_kernel_context;
39668- atomic_long_t unlock_kernel_context;
39669- atomic_long_t steal_user_context;
39670- atomic_long_t steal_kernel_context;
39671- atomic_long_t steal_context_failed;
39672- atomic_long_t nopfn;
39673- atomic_long_t asid_new;
39674- atomic_long_t asid_next;
39675- atomic_long_t asid_wrap;
39676- atomic_long_t asid_reuse;
39677- atomic_long_t intr;
39678- atomic_long_t intr_cbr;
39679- atomic_long_t intr_tfh;
39680- atomic_long_t intr_spurious;
39681- atomic_long_t intr_mm_lock_failed;
39682- atomic_long_t call_os;
39683- atomic_long_t call_os_wait_queue;
39684- atomic_long_t user_flush_tlb;
39685- atomic_long_t user_unload_context;
39686- atomic_long_t user_exception;
39687- atomic_long_t set_context_option;
39688- atomic_long_t check_context_retarget_intr;
39689- atomic_long_t check_context_unload;
39690- atomic_long_t tlb_dropin;
39691- atomic_long_t tlb_preload_page;
39692- atomic_long_t tlb_dropin_fail_no_asid;
39693- atomic_long_t tlb_dropin_fail_upm;
39694- atomic_long_t tlb_dropin_fail_invalid;
39695- atomic_long_t tlb_dropin_fail_range_active;
39696- atomic_long_t tlb_dropin_fail_idle;
39697- atomic_long_t tlb_dropin_fail_fmm;
39698- atomic_long_t tlb_dropin_fail_no_exception;
39699- atomic_long_t tfh_stale_on_fault;
39700- atomic_long_t mmu_invalidate_range;
39701- atomic_long_t mmu_invalidate_page;
39702- atomic_long_t flush_tlb;
39703- atomic_long_t flush_tlb_gru;
39704- atomic_long_t flush_tlb_gru_tgh;
39705- atomic_long_t flush_tlb_gru_zero_asid;
39706+ atomic_long_unchecked_t vdata_alloc;
39707+ atomic_long_unchecked_t vdata_free;
39708+ atomic_long_unchecked_t gts_alloc;
39709+ atomic_long_unchecked_t gts_free;
39710+ atomic_long_unchecked_t gms_alloc;
39711+ atomic_long_unchecked_t gms_free;
39712+ atomic_long_unchecked_t gts_double_allocate;
39713+ atomic_long_unchecked_t assign_context;
39714+ atomic_long_unchecked_t assign_context_failed;
39715+ atomic_long_unchecked_t free_context;
39716+ atomic_long_unchecked_t load_user_context;
39717+ atomic_long_unchecked_t load_kernel_context;
39718+ atomic_long_unchecked_t lock_kernel_context;
39719+ atomic_long_unchecked_t unlock_kernel_context;
39720+ atomic_long_unchecked_t steal_user_context;
39721+ atomic_long_unchecked_t steal_kernel_context;
39722+ atomic_long_unchecked_t steal_context_failed;
39723+ atomic_long_unchecked_t nopfn;
39724+ atomic_long_unchecked_t asid_new;
39725+ atomic_long_unchecked_t asid_next;
39726+ atomic_long_unchecked_t asid_wrap;
39727+ atomic_long_unchecked_t asid_reuse;
39728+ atomic_long_unchecked_t intr;
39729+ atomic_long_unchecked_t intr_cbr;
39730+ atomic_long_unchecked_t intr_tfh;
39731+ atomic_long_unchecked_t intr_spurious;
39732+ atomic_long_unchecked_t intr_mm_lock_failed;
39733+ atomic_long_unchecked_t call_os;
39734+ atomic_long_unchecked_t call_os_wait_queue;
39735+ atomic_long_unchecked_t user_flush_tlb;
39736+ atomic_long_unchecked_t user_unload_context;
39737+ atomic_long_unchecked_t user_exception;
39738+ atomic_long_unchecked_t set_context_option;
39739+ atomic_long_unchecked_t check_context_retarget_intr;
39740+ atomic_long_unchecked_t check_context_unload;
39741+ atomic_long_unchecked_t tlb_dropin;
39742+ atomic_long_unchecked_t tlb_preload_page;
39743+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
39744+ atomic_long_unchecked_t tlb_dropin_fail_upm;
39745+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
39746+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
39747+ atomic_long_unchecked_t tlb_dropin_fail_idle;
39748+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
39749+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
39750+ atomic_long_unchecked_t tfh_stale_on_fault;
39751+ atomic_long_unchecked_t mmu_invalidate_range;
39752+ atomic_long_unchecked_t mmu_invalidate_page;
39753+ atomic_long_unchecked_t flush_tlb;
39754+ atomic_long_unchecked_t flush_tlb_gru;
39755+ atomic_long_unchecked_t flush_tlb_gru_tgh;
39756+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
39757
39758- atomic_long_t copy_gpa;
39759- atomic_long_t read_gpa;
39760+ atomic_long_unchecked_t copy_gpa;
39761+ atomic_long_unchecked_t read_gpa;
39762
39763- atomic_long_t mesq_receive;
39764- atomic_long_t mesq_receive_none;
39765- atomic_long_t mesq_send;
39766- atomic_long_t mesq_send_failed;
39767- atomic_long_t mesq_noop;
39768- atomic_long_t mesq_send_unexpected_error;
39769- atomic_long_t mesq_send_lb_overflow;
39770- atomic_long_t mesq_send_qlimit_reached;
39771- atomic_long_t mesq_send_amo_nacked;
39772- atomic_long_t mesq_send_put_nacked;
39773- atomic_long_t mesq_page_overflow;
39774- atomic_long_t mesq_qf_locked;
39775- atomic_long_t mesq_qf_noop_not_full;
39776- atomic_long_t mesq_qf_switch_head_failed;
39777- atomic_long_t mesq_qf_unexpected_error;
39778- atomic_long_t mesq_noop_unexpected_error;
39779- atomic_long_t mesq_noop_lb_overflow;
39780- atomic_long_t mesq_noop_qlimit_reached;
39781- atomic_long_t mesq_noop_amo_nacked;
39782- atomic_long_t mesq_noop_put_nacked;
39783- atomic_long_t mesq_noop_page_overflow;
39784+ atomic_long_unchecked_t mesq_receive;
39785+ atomic_long_unchecked_t mesq_receive_none;
39786+ atomic_long_unchecked_t mesq_send;
39787+ atomic_long_unchecked_t mesq_send_failed;
39788+ atomic_long_unchecked_t mesq_noop;
39789+ atomic_long_unchecked_t mesq_send_unexpected_error;
39790+ atomic_long_unchecked_t mesq_send_lb_overflow;
39791+ atomic_long_unchecked_t mesq_send_qlimit_reached;
39792+ atomic_long_unchecked_t mesq_send_amo_nacked;
39793+ atomic_long_unchecked_t mesq_send_put_nacked;
39794+ atomic_long_unchecked_t mesq_page_overflow;
39795+ atomic_long_unchecked_t mesq_qf_locked;
39796+ atomic_long_unchecked_t mesq_qf_noop_not_full;
39797+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
39798+ atomic_long_unchecked_t mesq_qf_unexpected_error;
39799+ atomic_long_unchecked_t mesq_noop_unexpected_error;
39800+ atomic_long_unchecked_t mesq_noop_lb_overflow;
39801+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
39802+ atomic_long_unchecked_t mesq_noop_amo_nacked;
39803+ atomic_long_unchecked_t mesq_noop_put_nacked;
39804+ atomic_long_unchecked_t mesq_noop_page_overflow;
39805
39806 };
39807
39808@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
39809 tghop_invalidate, mcsop_last};
39810
39811 struct mcs_op_statistic {
39812- atomic_long_t count;
39813- atomic_long_t total;
39814+ atomic_long_unchecked_t count;
39815+ atomic_long_unchecked_t total;
39816 unsigned long max;
39817 };
39818
39819@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
39820
39821 #define STAT(id) do { \
39822 if (gru_options & OPT_STATS) \
39823- atomic_long_inc(&gru_stats.id); \
39824+ atomic_long_inc_unchecked(&gru_stats.id); \
39825 } while (0)
39826
39827 #ifdef CONFIG_SGI_GRU_DEBUG
39828diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
39829index c862cd4..0d176fe 100644
39830--- a/drivers/misc/sgi-xp/xp.h
39831+++ b/drivers/misc/sgi-xp/xp.h
39832@@ -288,7 +288,7 @@ struct xpc_interface {
39833 xpc_notify_func, void *);
39834 void (*received) (short, int, void *);
39835 enum xp_retval (*partid_to_nasids) (short, void *);
39836-};
39837+} __no_const;
39838
39839 extern struct xpc_interface xpc_interface;
39840
39841diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
39842index b94d5f7..7f494c5 100644
39843--- a/drivers/misc/sgi-xp/xpc.h
39844+++ b/drivers/misc/sgi-xp/xpc.h
39845@@ -835,6 +835,7 @@ struct xpc_arch_operations {
39846 void (*received_payload) (struct xpc_channel *, void *);
39847 void (*notify_senders_of_disconnect) (struct xpc_channel *);
39848 };
39849+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
39850
39851 /* struct xpc_partition act_state values (for XPC HB) */
39852
39853@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
39854 /* found in xpc_main.c */
39855 extern struct device *xpc_part;
39856 extern struct device *xpc_chan;
39857-extern struct xpc_arch_operations xpc_arch_ops;
39858+extern xpc_arch_operations_no_const xpc_arch_ops;
39859 extern int xpc_disengage_timelimit;
39860 extern int xpc_disengage_timedout;
39861 extern int xpc_activate_IRQ_rcvd;
39862diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
39863index d971817..33bdca5 100644
39864--- a/drivers/misc/sgi-xp/xpc_main.c
39865+++ b/drivers/misc/sgi-xp/xpc_main.c
39866@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
39867 .notifier_call = xpc_system_die,
39868 };
39869
39870-struct xpc_arch_operations xpc_arch_ops;
39871+xpc_arch_operations_no_const xpc_arch_ops;
39872
39873 /*
39874 * Timer function to enforce the timelimit on the partition disengage.
39875@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
39876
39877 if (((die_args->trapnr == X86_TRAP_MF) ||
39878 (die_args->trapnr == X86_TRAP_XF)) &&
39879- !user_mode_vm(die_args->regs))
39880+ !user_mode(die_args->regs))
39881 xpc_die_deactivate();
39882
39883 break;
39884diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
39885index 49f04bc..65660c2 100644
39886--- a/drivers/mmc/core/mmc_ops.c
39887+++ b/drivers/mmc/core/mmc_ops.c
39888@@ -247,7 +247,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
39889 void *data_buf;
39890 int is_on_stack;
39891
39892- is_on_stack = object_is_on_stack(buf);
39893+ is_on_stack = object_starts_on_stack(buf);
39894 if (is_on_stack) {
39895 /*
39896 * dma onto stack is unsafe/nonportable, but callers to this
39897diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
39898index 0b74189..818358f 100644
39899--- a/drivers/mmc/host/dw_mmc.h
39900+++ b/drivers/mmc/host/dw_mmc.h
39901@@ -202,5 +202,5 @@ struct dw_mci_drv_data {
39902 void (*prepare_command)(struct dw_mci *host, u32 *cmdr);
39903 void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
39904 int (*parse_dt)(struct dw_mci *host);
39905-};
39906+} __do_const;
39907 #endif /* _DW_MMC_H_ */
39908diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
39909index c6f6246..60760a8 100644
39910--- a/drivers/mmc/host/sdhci-s3c.c
39911+++ b/drivers/mmc/host/sdhci-s3c.c
39912@@ -664,9 +664,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
39913 * we can use overriding functions instead of default.
39914 */
39915 if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
39916- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
39917- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
39918- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
39919+ pax_open_kernel();
39920+ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
39921+ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
39922+ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
39923+ pax_close_kernel();
39924 }
39925
39926 /* It supports additional host capabilities if needed */
39927diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
39928index 0c8bb6b..6f35deb 100644
39929--- a/drivers/mtd/nand/denali.c
39930+++ b/drivers/mtd/nand/denali.c
39931@@ -24,6 +24,7 @@
39932 #include <linux/slab.h>
39933 #include <linux/mtd/mtd.h>
39934 #include <linux/module.h>
39935+#include <linux/slab.h>
39936
39937 #include "denali.h"
39938
39939diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
39940index 51b9d6a..52af9a7 100644
39941--- a/drivers/mtd/nftlmount.c
39942+++ b/drivers/mtd/nftlmount.c
39943@@ -24,6 +24,7 @@
39944 #include <asm/errno.h>
39945 #include <linux/delay.h>
39946 #include <linux/slab.h>
39947+#include <linux/sched.h>
39948 #include <linux/mtd/mtd.h>
39949 #include <linux/mtd/nand.h>
39950 #include <linux/mtd/nftl.h>
39951diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
39952index f9d5615..99dd95f 100644
39953--- a/drivers/mtd/sm_ftl.c
39954+++ b/drivers/mtd/sm_ftl.c
39955@@ -56,7 +56,7 @@ ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
39956 #define SM_CIS_VENDOR_OFFSET 0x59
39957 struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
39958 {
39959- struct attribute_group *attr_group;
39960+ attribute_group_no_const *attr_group;
39961 struct attribute **attributes;
39962 struct sm_sysfs_attribute *vendor_attribute;
39963
39964diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
39965index f975696..4597e21 100644
39966--- a/drivers/net/bonding/bond_main.c
39967+++ b/drivers/net/bonding/bond_main.c
39968@@ -4870,7 +4870,7 @@ static unsigned int bond_get_num_tx_queues(void)
39969 return tx_queues;
39970 }
39971
39972-static struct rtnl_link_ops bond_link_ops __read_mostly = {
39973+static struct rtnl_link_ops bond_link_ops = {
39974 .kind = "bond",
39975 .priv_size = sizeof(struct bonding),
39976 .setup = bond_setup,
39977@@ -4995,8 +4995,8 @@ static void __exit bonding_exit(void)
39978
39979 bond_destroy_debugfs();
39980
39981- rtnl_link_unregister(&bond_link_ops);
39982 unregister_pernet_subsys(&bond_net_ops);
39983+ rtnl_link_unregister(&bond_link_ops);
39984
39985 #ifdef CONFIG_NET_POLL_CONTROLLER
39986 /*
39987diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
39988index e1d2643..7f4133b 100644
39989--- a/drivers/net/ethernet/8390/ax88796.c
39990+++ b/drivers/net/ethernet/8390/ax88796.c
39991@@ -872,9 +872,11 @@ static int ax_probe(struct platform_device *pdev)
39992 if (ax->plat->reg_offsets)
39993 ei_local->reg_offset = ax->plat->reg_offsets;
39994 else {
39995+ resource_size_t _mem_size = mem_size;
39996+ do_div(_mem_size, 0x18);
39997 ei_local->reg_offset = ax->reg_offsets;
39998 for (ret = 0; ret < 0x18; ret++)
39999- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
40000+ ax->reg_offsets[ret] = _mem_size * ret;
40001 }
40002
40003 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
40004diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
40005index 151675d..0139a9d 100644
40006--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
40007+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
40008@@ -1112,7 +1112,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
40009 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
40010 {
40011 /* RX_MODE controlling object */
40012- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
40013+ bnx2x_init_rx_mode_obj(bp);
40014
40015 /* multicast configuration controlling object */
40016 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
40017diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
40018index ce1a916..10b52b0 100644
40019--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
40020+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
40021@@ -960,6 +960,9 @@ static int bnx2x_set_dump(struct net_device *dev, struct ethtool_dump *val)
40022 struct bnx2x *bp = netdev_priv(dev);
40023
40024 /* Use the ethtool_dump "flag" field as the dump preset index */
40025+ if (val->flag < 1 || val->flag > DUMP_MAX_PRESETS)
40026+ return -EINVAL;
40027+
40028 bp->dump_preset_idx = val->flag;
40029 return 0;
40030 }
40031@@ -986,8 +989,6 @@ static int bnx2x_get_dump_data(struct net_device *dev,
40032 struct bnx2x *bp = netdev_priv(dev);
40033 struct dump_header dump_hdr = {0};
40034
40035- memset(p, 0, dump->len);
40036-
40037 /* Disable parity attentions as long as following dump may
40038 * cause false alarms by reading never written registers. We
40039 * will re-enable parity attentions right after the dump.
40040diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
40041index b4c9dea..2a9927f 100644
40042--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
40043+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
40044@@ -11497,6 +11497,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
40045 bp->min_msix_vec_cnt = 2;
40046 BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt);
40047
40048+ bp->dump_preset_idx = 1;
40049+
40050 return rc;
40051 }
40052
40053diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
40054index 32a9609..0b1c53a 100644
40055--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
40056+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
40057@@ -2387,15 +2387,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
40058 return rc;
40059 }
40060
40061-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
40062- struct bnx2x_rx_mode_obj *o)
40063+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
40064 {
40065 if (CHIP_IS_E1x(bp)) {
40066- o->wait_comp = bnx2x_empty_rx_mode_wait;
40067- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
40068+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
40069+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
40070 } else {
40071- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
40072- o->config_rx_mode = bnx2x_set_rx_mode_e2;
40073+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
40074+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
40075 }
40076 }
40077
40078diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
40079index 43c00bc..dd1d03d 100644
40080--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
40081+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
40082@@ -1321,8 +1321,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
40083
40084 /********************* RX MODE ****************/
40085
40086-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
40087- struct bnx2x_rx_mode_obj *o);
40088+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
40089
40090 /**
40091 * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
40092diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
40093index ff6e30e..87e8452 100644
40094--- a/drivers/net/ethernet/broadcom/tg3.h
40095+++ b/drivers/net/ethernet/broadcom/tg3.h
40096@@ -147,6 +147,7 @@
40097 #define CHIPREV_ID_5750_A0 0x4000
40098 #define CHIPREV_ID_5750_A1 0x4001
40099 #define CHIPREV_ID_5750_A3 0x4003
40100+#define CHIPREV_ID_5750_C1 0x4201
40101 #define CHIPREV_ID_5750_C2 0x4202
40102 #define CHIPREV_ID_5752_A0_HW 0x5000
40103 #define CHIPREV_ID_5752_A0 0x6000
40104diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
40105index 71497e8..b650951 100644
40106--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
40107+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
40108@@ -3037,7 +3037,9 @@ static void t3_io_resume(struct pci_dev *pdev)
40109 CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
40110 t3_read_reg(adapter, A_PCIE_PEX_ERR));
40111
40112+ rtnl_lock();
40113 t3_resume_ports(adapter);
40114+ rtnl_unlock();
40115 }
40116
40117 static const struct pci_error_handlers t3_err_handler = {
40118diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
40119index 8cffcdf..aadf043 100644
40120--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
40121+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
40122@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
40123 */
40124 struct l2t_skb_cb {
40125 arp_failure_handler_func arp_failure_handler;
40126-};
40127+} __no_const;
40128
40129 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
40130
40131diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
40132index 4c83003..2a2a5b9 100644
40133--- a/drivers/net/ethernet/dec/tulip/de4x5.c
40134+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
40135@@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
40136 for (i=0; i<ETH_ALEN; i++) {
40137 tmp.addr[i] = dev->dev_addr[i];
40138 }
40139- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
40140+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
40141 break;
40142
40143 case DE4X5_SET_HWADDR: /* Set the hardware address */
40144@@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
40145 spin_lock_irqsave(&lp->lock, flags);
40146 memcpy(&statbuf, &lp->pktStats, ioc->len);
40147 spin_unlock_irqrestore(&lp->lock, flags);
40148- if (copy_to_user(ioc->data, &statbuf, ioc->len))
40149+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
40150 return -EFAULT;
40151 break;
40152 }
40153diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
40154index a0b4be5..d8cb082 100644
40155--- a/drivers/net/ethernet/emulex/benet/be_main.c
40156+++ b/drivers/net/ethernet/emulex/benet/be_main.c
40157@@ -469,7 +469,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
40158
40159 if (wrapped)
40160 newacc += 65536;
40161- ACCESS_ONCE(*acc) = newacc;
40162+ ACCESS_ONCE_RW(*acc) = newacc;
40163 }
40164
40165 void populate_erx_stats(struct be_adapter *adapter,
40166diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
40167index 21b85fb..b49e5fc 100644
40168--- a/drivers/net/ethernet/faraday/ftgmac100.c
40169+++ b/drivers/net/ethernet/faraday/ftgmac100.c
40170@@ -31,6 +31,8 @@
40171 #include <linux/netdevice.h>
40172 #include <linux/phy.h>
40173 #include <linux/platform_device.h>
40174+#include <linux/interrupt.h>
40175+#include <linux/irqreturn.h>
40176 #include <net/ip.h>
40177
40178 #include "ftgmac100.h"
40179diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
40180index a6eda8d..935d273 100644
40181--- a/drivers/net/ethernet/faraday/ftmac100.c
40182+++ b/drivers/net/ethernet/faraday/ftmac100.c
40183@@ -31,6 +31,8 @@
40184 #include <linux/module.h>
40185 #include <linux/netdevice.h>
40186 #include <linux/platform_device.h>
40187+#include <linux/interrupt.h>
40188+#include <linux/irqreturn.h>
40189
40190 #include "ftmac100.h"
40191
40192diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
40193index 331987d..3be1135 100644
40194--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
40195+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
40196@@ -776,7 +776,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
40197 }
40198
40199 /* update the base incval used to calculate frequency adjustment */
40200- ACCESS_ONCE(adapter->base_incval) = incval;
40201+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
40202 smp_mb();
40203
40204 /* need lock to prevent incorrect read while modifying cyclecounter */
40205diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
40206index fbe5363..266b4e3 100644
40207--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
40208+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
40209@@ -3461,7 +3461,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
40210 struct __vxge_hw_fifo *fifo;
40211 struct vxge_hw_fifo_config *config;
40212 u32 txdl_size, txdl_per_memblock;
40213- struct vxge_hw_mempool_cbs fifo_mp_callback;
40214+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
40215+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
40216+ };
40217+
40218 struct __vxge_hw_virtualpath *vpath;
40219
40220 if ((vp == NULL) || (attr == NULL)) {
40221@@ -3544,8 +3547,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
40222 goto exit;
40223 }
40224
40225- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
40226-
40227 fifo->mempool =
40228 __vxge_hw_mempool_create(vpath->hldev,
40229 fifo->config->memblock_size,
40230diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
40231index 5e7fb1d..f8d1810 100644
40232--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
40233+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
40234@@ -1948,7 +1948,9 @@ int qlcnic_83xx_config_default_opmode(struct qlcnic_adapter *adapter)
40235 op_mode = QLC_83XX_DEFAULT_OPMODE;
40236
40237 if (op_mode == QLC_83XX_DEFAULT_OPMODE) {
40238- adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
40239+ pax_open_kernel();
40240+ *(void **)&adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
40241+ pax_close_kernel();
40242 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
40243 } else {
40244 return -EIO;
40245diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
40246index b0c3de9..fc5857e 100644
40247--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
40248+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
40249@@ -200,15 +200,21 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
40250 if (priv_level == QLCNIC_NON_PRIV_FUNC) {
40251 ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
40252 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
40253- nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
40254+ pax_open_kernel();
40255+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
40256+ pax_close_kernel();
40257 } else if (priv_level == QLCNIC_PRIV_FUNC) {
40258 ahw->op_mode = QLCNIC_PRIV_FUNC;
40259 ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
40260- nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
40261+ pax_open_kernel();
40262+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
40263+ pax_close_kernel();
40264 } else if (priv_level == QLCNIC_MGMT_FUNC) {
40265 ahw->op_mode = QLCNIC_MGMT_FUNC;
40266 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
40267- nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
40268+ pax_open_kernel();
40269+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
40270+ pax_close_kernel();
40271 } else {
40272 return -EIO;
40273 }
40274diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
40275index 393f961..d343034 100644
40276--- a/drivers/net/ethernet/realtek/r8169.c
40277+++ b/drivers/net/ethernet/realtek/r8169.c
40278@@ -753,22 +753,22 @@ struct rtl8169_private {
40279 struct mdio_ops {
40280 void (*write)(struct rtl8169_private *, int, int);
40281 int (*read)(struct rtl8169_private *, int);
40282- } mdio_ops;
40283+ } __no_const mdio_ops;
40284
40285 struct pll_power_ops {
40286 void (*down)(struct rtl8169_private *);
40287 void (*up)(struct rtl8169_private *);
40288- } pll_power_ops;
40289+ } __no_const pll_power_ops;
40290
40291 struct jumbo_ops {
40292 void (*enable)(struct rtl8169_private *);
40293 void (*disable)(struct rtl8169_private *);
40294- } jumbo_ops;
40295+ } __no_const jumbo_ops;
40296
40297 struct csi_ops {
40298 void (*write)(struct rtl8169_private *, int, int);
40299 u32 (*read)(struct rtl8169_private *, int);
40300- } csi_ops;
40301+ } __no_const csi_ops;
40302
40303 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
40304 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
40305diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
40306index 9a95abf..36df7f9 100644
40307--- a/drivers/net/ethernet/sfc/ptp.c
40308+++ b/drivers/net/ethernet/sfc/ptp.c
40309@@ -535,7 +535,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
40310 (u32)((u64)ptp->start.dma_addr >> 32));
40311
40312 /* Clear flag that signals MC ready */
40313- ACCESS_ONCE(*start) = 0;
40314+ ACCESS_ONCE_RW(*start) = 0;
40315 efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
40316 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
40317
40318diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
40319index 50617c5..b13724c 100644
40320--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
40321+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
40322@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
40323
40324 writel(value, ioaddr + MMC_CNTRL);
40325
40326- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
40327- MMC_CNTRL, value);
40328+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
40329+// MMC_CNTRL, value);
40330 }
40331
40332 /* To mask all all interrupts.*/
40333diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
40334index e6fe0d8..2b7d752 100644
40335--- a/drivers/net/hyperv/hyperv_net.h
40336+++ b/drivers/net/hyperv/hyperv_net.h
40337@@ -101,7 +101,7 @@ struct rndis_device {
40338
40339 enum rndis_device_state state;
40340 bool link_state;
40341- atomic_t new_req_id;
40342+ atomic_unchecked_t new_req_id;
40343
40344 spinlock_t request_lock;
40345 struct list_head req_list;
40346diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
40347index 0775f0a..d4fb316 100644
40348--- a/drivers/net/hyperv/rndis_filter.c
40349+++ b/drivers/net/hyperv/rndis_filter.c
40350@@ -104,7 +104,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
40351 * template
40352 */
40353 set = &rndis_msg->msg.set_req;
40354- set->req_id = atomic_inc_return(&dev->new_req_id);
40355+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
40356
40357 /* Add to the request list */
40358 spin_lock_irqsave(&dev->request_lock, flags);
40359@@ -752,7 +752,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
40360
40361 /* Setup the rndis set */
40362 halt = &request->request_msg.msg.halt_req;
40363- halt->req_id = atomic_inc_return(&dev->new_req_id);
40364+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
40365
40366 /* Ignore return since this msg is optional. */
40367 rndis_filter_send_request(dev, request);
40368diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
40369index bf0d55e..82bcfbd1 100644
40370--- a/drivers/net/ieee802154/fakehard.c
40371+++ b/drivers/net/ieee802154/fakehard.c
40372@@ -364,7 +364,7 @@ static int ieee802154fake_probe(struct platform_device *pdev)
40373 phy->transmit_power = 0xbf;
40374
40375 dev->netdev_ops = &fake_ops;
40376- dev->ml_priv = &fake_mlme;
40377+ dev->ml_priv = (void *)&fake_mlme;
40378
40379 priv = netdev_priv(dev);
40380 priv->phy = phy;
40381diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
40382index 6e91931..2b0ebe7 100644
40383--- a/drivers/net/macvlan.c
40384+++ b/drivers/net/macvlan.c
40385@@ -905,13 +905,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
40386 int macvlan_link_register(struct rtnl_link_ops *ops)
40387 {
40388 /* common fields */
40389- ops->priv_size = sizeof(struct macvlan_dev);
40390- ops->validate = macvlan_validate;
40391- ops->maxtype = IFLA_MACVLAN_MAX;
40392- ops->policy = macvlan_policy;
40393- ops->changelink = macvlan_changelink;
40394- ops->get_size = macvlan_get_size;
40395- ops->fill_info = macvlan_fill_info;
40396+ pax_open_kernel();
40397+ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
40398+ *(void **)&ops->validate = macvlan_validate;
40399+ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
40400+ *(const void **)&ops->policy = macvlan_policy;
40401+ *(void **)&ops->changelink = macvlan_changelink;
40402+ *(void **)&ops->get_size = macvlan_get_size;
40403+ *(void **)&ops->fill_info = macvlan_fill_info;
40404+ pax_close_kernel();
40405
40406 return rtnl_link_register(ops);
40407 };
40408@@ -967,7 +969,7 @@ static int macvlan_device_event(struct notifier_block *unused,
40409 return NOTIFY_DONE;
40410 }
40411
40412-static struct notifier_block macvlan_notifier_block __read_mostly = {
40413+static struct notifier_block macvlan_notifier_block = {
40414 .notifier_call = macvlan_device_event,
40415 };
40416
40417diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
40418index b6dd6a7..5c38a02 100644
40419--- a/drivers/net/macvtap.c
40420+++ b/drivers/net/macvtap.c
40421@@ -1094,7 +1094,7 @@ static int macvtap_device_event(struct notifier_block *unused,
40422 return NOTIFY_DONE;
40423 }
40424
40425-static struct notifier_block macvtap_notifier_block __read_mostly = {
40426+static struct notifier_block macvtap_notifier_block = {
40427 .notifier_call = macvtap_device_event,
40428 };
40429
40430diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
40431index daec9b0..6428fcb 100644
40432--- a/drivers/net/phy/mdio-bitbang.c
40433+++ b/drivers/net/phy/mdio-bitbang.c
40434@@ -234,6 +234,7 @@ void free_mdio_bitbang(struct mii_bus *bus)
40435 struct mdiobb_ctrl *ctrl = bus->priv;
40436
40437 module_put(ctrl->ops->owner);
40438+ mdiobus_unregister(bus);
40439 mdiobus_free(bus);
40440 }
40441 EXPORT_SYMBOL(free_mdio_bitbang);
40442diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
40443index 72ff14b..11d442d 100644
40444--- a/drivers/net/ppp/ppp_generic.c
40445+++ b/drivers/net/ppp/ppp_generic.c
40446@@ -999,7 +999,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
40447 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
40448 struct ppp_stats stats;
40449 struct ppp_comp_stats cstats;
40450- char *vers;
40451
40452 switch (cmd) {
40453 case SIOCGPPPSTATS:
40454@@ -1021,8 +1020,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
40455 break;
40456
40457 case SIOCGPPPVER:
40458- vers = PPP_VERSION;
40459- if (copy_to_user(addr, vers, strlen(vers) + 1))
40460+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
40461 break;
40462 err = 0;
40463 break;
40464diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
40465index 1252d9c..80e660b 100644
40466--- a/drivers/net/slip/slhc.c
40467+++ b/drivers/net/slip/slhc.c
40468@@ -488,7 +488,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
40469 register struct tcphdr *thp;
40470 register struct iphdr *ip;
40471 register struct cstate *cs;
40472- int len, hdrlen;
40473+ long len, hdrlen;
40474 unsigned char *cp = icp;
40475
40476 /* We've got a compressed packet; read the change byte */
40477diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
40478index b305105..8ead6df 100644
40479--- a/drivers/net/team/team.c
40480+++ b/drivers/net/team/team.c
40481@@ -2682,7 +2682,7 @@ static int team_device_event(struct notifier_block *unused,
40482 return NOTIFY_DONE;
40483 }
40484
40485-static struct notifier_block team_notifier_block __read_mostly = {
40486+static struct notifier_block team_notifier_block = {
40487 .notifier_call = team_device_event,
40488 };
40489
40490diff --git a/drivers/net/tun.c b/drivers/net/tun.c
40491index 9c61f87..213609e 100644
40492--- a/drivers/net/tun.c
40493+++ b/drivers/net/tun.c
40494@@ -1852,7 +1852,7 @@ unlock:
40495 }
40496
40497 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
40498- unsigned long arg, int ifreq_len)
40499+ unsigned long arg, size_t ifreq_len)
40500 {
40501 struct tun_file *tfile = file->private_data;
40502 struct tun_struct *tun;
40503@@ -1864,6 +1864,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
40504 int vnet_hdr_sz;
40505 int ret;
40506
40507+ if (ifreq_len > sizeof ifr)
40508+ return -EFAULT;
40509+
40510 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
40511 if (copy_from_user(&ifr, argp, ifreq_len))
40512 return -EFAULT;
40513diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
40514index cba1d46..f703766 100644
40515--- a/drivers/net/usb/hso.c
40516+++ b/drivers/net/usb/hso.c
40517@@ -71,7 +71,7 @@
40518 #include <asm/byteorder.h>
40519 #include <linux/serial_core.h>
40520 #include <linux/serial.h>
40521-
40522+#include <asm/local.h>
40523
40524 #define MOD_AUTHOR "Option Wireless"
40525 #define MOD_DESCRIPTION "USB High Speed Option driver"
40526@@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
40527 struct urb *urb;
40528
40529 urb = serial->rx_urb[0];
40530- if (serial->port.count > 0) {
40531+ if (atomic_read(&serial->port.count) > 0) {
40532 count = put_rxbuf_data(urb, serial);
40533 if (count == -1)
40534 return;
40535@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
40536 DUMP1(urb->transfer_buffer, urb->actual_length);
40537
40538 /* Anyone listening? */
40539- if (serial->port.count == 0)
40540+ if (atomic_read(&serial->port.count) == 0)
40541 return;
40542
40543 if (status == 0) {
40544@@ -1298,8 +1298,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
40545 tty_port_tty_set(&serial->port, tty);
40546
40547 /* check for port already opened, if not set the termios */
40548- serial->port.count++;
40549- if (serial->port.count == 1) {
40550+ if (atomic_inc_return(&serial->port.count) == 1) {
40551 serial->rx_state = RX_IDLE;
40552 /* Force default termio settings */
40553 _hso_serial_set_termios(tty, NULL);
40554@@ -1311,7 +1310,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
40555 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
40556 if (result) {
40557 hso_stop_serial_device(serial->parent);
40558- serial->port.count--;
40559+ atomic_dec(&serial->port.count);
40560 kref_put(&serial->parent->ref, hso_serial_ref_free);
40561 }
40562 } else {
40563@@ -1348,10 +1347,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
40564
40565 /* reset the rts and dtr */
40566 /* do the actual close */
40567- serial->port.count--;
40568+ atomic_dec(&serial->port.count);
40569
40570- if (serial->port.count <= 0) {
40571- serial->port.count = 0;
40572+ if (atomic_read(&serial->port.count) <= 0) {
40573+ atomic_set(&serial->port.count, 0);
40574 tty_port_tty_set(&serial->port, NULL);
40575 if (!usb_gone)
40576 hso_stop_serial_device(serial->parent);
40577@@ -1427,7 +1426,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
40578
40579 /* the actual setup */
40580 spin_lock_irqsave(&serial->serial_lock, flags);
40581- if (serial->port.count)
40582+ if (atomic_read(&serial->port.count))
40583 _hso_serial_set_termios(tty, old);
40584 else
40585 tty->termios = *old;
40586@@ -1886,7 +1885,7 @@ static void intr_callback(struct urb *urb)
40587 D1("Pending read interrupt on port %d\n", i);
40588 spin_lock(&serial->serial_lock);
40589 if (serial->rx_state == RX_IDLE &&
40590- serial->port.count > 0) {
40591+ atomic_read(&serial->port.count) > 0) {
40592 /* Setup and send a ctrl req read on
40593 * port i */
40594 if (!serial->rx_urb_filled[0]) {
40595@@ -3057,7 +3056,7 @@ static int hso_resume(struct usb_interface *iface)
40596 /* Start all serial ports */
40597 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
40598 if (serial_table[i] && (serial_table[i]->interface == iface)) {
40599- if (dev2ser(serial_table[i])->port.count) {
40600+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
40601 result =
40602 hso_start_serial_device(serial_table[i], GFP_NOIO);
40603 hso_kick_transmit(dev2ser(serial_table[i]));
40604diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
40605index 57325f3..36b181f 100644
40606--- a/drivers/net/vxlan.c
40607+++ b/drivers/net/vxlan.c
40608@@ -1579,7 +1579,7 @@ nla_put_failure:
40609 return -EMSGSIZE;
40610 }
40611
40612-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
40613+static struct rtnl_link_ops vxlan_link_ops = {
40614 .kind = "vxlan",
40615 .maxtype = IFLA_VXLAN_MAX,
40616 .policy = vxlan_policy,
40617diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
40618index 34c8a33..3261fdc 100644
40619--- a/drivers/net/wireless/at76c50x-usb.c
40620+++ b/drivers/net/wireless/at76c50x-usb.c
40621@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
40622 }
40623
40624 /* Convert timeout from the DFU status to jiffies */
40625-static inline unsigned long at76_get_timeout(struct dfu_status *s)
40626+static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
40627 {
40628 return msecs_to_jiffies((s->poll_timeout[2] << 16)
40629 | (s->poll_timeout[1] << 8)
40630diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
40631index 8d78253..bebbb68 100644
40632--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
40633+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
40634@@ -184,8 +184,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
40635 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
40636 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
40637
40638- ACCESS_ONCE(ads->ds_link) = i->link;
40639- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
40640+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
40641+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
40642
40643 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
40644 ctl6 = SM(i->keytype, AR_EncrType);
40645@@ -199,26 +199,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
40646
40647 if ((i->is_first || i->is_last) &&
40648 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
40649- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
40650+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
40651 | set11nTries(i->rates, 1)
40652 | set11nTries(i->rates, 2)
40653 | set11nTries(i->rates, 3)
40654 | (i->dur_update ? AR_DurUpdateEna : 0)
40655 | SM(0, AR_BurstDur);
40656
40657- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
40658+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
40659 | set11nRate(i->rates, 1)
40660 | set11nRate(i->rates, 2)
40661 | set11nRate(i->rates, 3);
40662 } else {
40663- ACCESS_ONCE(ads->ds_ctl2) = 0;
40664- ACCESS_ONCE(ads->ds_ctl3) = 0;
40665+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
40666+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
40667 }
40668
40669 if (!i->is_first) {
40670- ACCESS_ONCE(ads->ds_ctl0) = 0;
40671- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
40672- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
40673+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
40674+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
40675+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
40676 return;
40677 }
40678
40679@@ -243,7 +243,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
40680 break;
40681 }
40682
40683- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
40684+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
40685 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
40686 | SM(i->txpower, AR_XmitPower)
40687 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
40688@@ -253,19 +253,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
40689 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
40690 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
40691
40692- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
40693- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
40694+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
40695+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
40696
40697 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
40698 return;
40699
40700- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
40701+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
40702 | set11nPktDurRTSCTS(i->rates, 1);
40703
40704- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
40705+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
40706 | set11nPktDurRTSCTS(i->rates, 3);
40707
40708- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
40709+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
40710 | set11nRateFlags(i->rates, 1)
40711 | set11nRateFlags(i->rates, 2)
40712 | set11nRateFlags(i->rates, 3)
40713diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
40714index 301bf72..3f5654f 100644
40715--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
40716+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
40717@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
40718 (i->qcu << AR_TxQcuNum_S) | desc_len;
40719
40720 checksum += val;
40721- ACCESS_ONCE(ads->info) = val;
40722+ ACCESS_ONCE_RW(ads->info) = val;
40723
40724 checksum += i->link;
40725- ACCESS_ONCE(ads->link) = i->link;
40726+ ACCESS_ONCE_RW(ads->link) = i->link;
40727
40728 checksum += i->buf_addr[0];
40729- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
40730+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
40731 checksum += i->buf_addr[1];
40732- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
40733+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
40734 checksum += i->buf_addr[2];
40735- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
40736+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
40737 checksum += i->buf_addr[3];
40738- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
40739+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
40740
40741 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
40742- ACCESS_ONCE(ads->ctl3) = val;
40743+ ACCESS_ONCE_RW(ads->ctl3) = val;
40744 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
40745- ACCESS_ONCE(ads->ctl5) = val;
40746+ ACCESS_ONCE_RW(ads->ctl5) = val;
40747 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
40748- ACCESS_ONCE(ads->ctl7) = val;
40749+ ACCESS_ONCE_RW(ads->ctl7) = val;
40750 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
40751- ACCESS_ONCE(ads->ctl9) = val;
40752+ ACCESS_ONCE_RW(ads->ctl9) = val;
40753
40754 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
40755- ACCESS_ONCE(ads->ctl10) = checksum;
40756+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
40757
40758 if (i->is_first || i->is_last) {
40759- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
40760+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
40761 | set11nTries(i->rates, 1)
40762 | set11nTries(i->rates, 2)
40763 | set11nTries(i->rates, 3)
40764 | (i->dur_update ? AR_DurUpdateEna : 0)
40765 | SM(0, AR_BurstDur);
40766
40767- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
40768+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
40769 | set11nRate(i->rates, 1)
40770 | set11nRate(i->rates, 2)
40771 | set11nRate(i->rates, 3);
40772 } else {
40773- ACCESS_ONCE(ads->ctl13) = 0;
40774- ACCESS_ONCE(ads->ctl14) = 0;
40775+ ACCESS_ONCE_RW(ads->ctl13) = 0;
40776+ ACCESS_ONCE_RW(ads->ctl14) = 0;
40777 }
40778
40779 ads->ctl20 = 0;
40780@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
40781
40782 ctl17 = SM(i->keytype, AR_EncrType);
40783 if (!i->is_first) {
40784- ACCESS_ONCE(ads->ctl11) = 0;
40785- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
40786- ACCESS_ONCE(ads->ctl15) = 0;
40787- ACCESS_ONCE(ads->ctl16) = 0;
40788- ACCESS_ONCE(ads->ctl17) = ctl17;
40789- ACCESS_ONCE(ads->ctl18) = 0;
40790- ACCESS_ONCE(ads->ctl19) = 0;
40791+ ACCESS_ONCE_RW(ads->ctl11) = 0;
40792+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
40793+ ACCESS_ONCE_RW(ads->ctl15) = 0;
40794+ ACCESS_ONCE_RW(ads->ctl16) = 0;
40795+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
40796+ ACCESS_ONCE_RW(ads->ctl18) = 0;
40797+ ACCESS_ONCE_RW(ads->ctl19) = 0;
40798 return;
40799 }
40800
40801- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
40802+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
40803 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
40804 | SM(i->txpower, AR_XmitPower)
40805 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
40806@@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
40807 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
40808 ctl12 |= SM(val, AR_PAPRDChainMask);
40809
40810- ACCESS_ONCE(ads->ctl12) = ctl12;
40811- ACCESS_ONCE(ads->ctl17) = ctl17;
40812+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
40813+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
40814
40815- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
40816+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
40817 | set11nPktDurRTSCTS(i->rates, 1);
40818
40819- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
40820+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
40821 | set11nPktDurRTSCTS(i->rates, 3);
40822
40823- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
40824+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
40825 | set11nRateFlags(i->rates, 1)
40826 | set11nRateFlags(i->rates, 2)
40827 | set11nRateFlags(i->rates, 3)
40828 | SM(i->rtscts_rate, AR_RTSCTSRate);
40829
40830- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
40831+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
40832 }
40833
40834 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
40835diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
40836index ae30343..a117806 100644
40837--- a/drivers/net/wireless/ath/ath9k/hw.h
40838+++ b/drivers/net/wireless/ath/ath9k/hw.h
40839@@ -652,7 +652,7 @@ struct ath_hw_private_ops {
40840
40841 /* ANI */
40842 void (*ani_cache_ini_regs)(struct ath_hw *ah);
40843-};
40844+} __no_const;
40845
40846 /**
40847 * struct ath_spec_scan - parameters for Atheros spectral scan
40848@@ -721,7 +721,7 @@ struct ath_hw_ops {
40849 struct ath_spec_scan *param);
40850 void (*spectral_scan_trigger)(struct ath_hw *ah);
40851 void (*spectral_scan_wait)(struct ath_hw *ah);
40852-};
40853+} __no_const;
40854
40855 struct ath_nf_limits {
40856 s16 max;
40857diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
40858index b37a582..680835d 100644
40859--- a/drivers/net/wireless/iwlegacy/3945-mac.c
40860+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
40861@@ -3639,7 +3639,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
40862 */
40863 if (il3945_mod_params.disable_hw_scan) {
40864 D_INFO("Disabling hw_scan\n");
40865- il3945_mac_ops.hw_scan = NULL;
40866+ pax_open_kernel();
40867+ *(void **)&il3945_mac_ops.hw_scan = NULL;
40868+ pax_close_kernel();
40869 }
40870
40871 D_INFO("*** LOAD DRIVER ***\n");
40872diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
40873index d532948..a1cb592 100644
40874--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
40875+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
40876@@ -203,7 +203,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
40877 {
40878 struct iwl_priv *priv = file->private_data;
40879 char buf[64];
40880- int buf_size;
40881+ size_t buf_size;
40882 u32 offset, len;
40883
40884 memset(buf, 0, sizeof(buf));
40885@@ -473,7 +473,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
40886 struct iwl_priv *priv = file->private_data;
40887
40888 char buf[8];
40889- int buf_size;
40890+ size_t buf_size;
40891 u32 reset_flag;
40892
40893 memset(buf, 0, sizeof(buf));
40894@@ -554,7 +554,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
40895 {
40896 struct iwl_priv *priv = file->private_data;
40897 char buf[8];
40898- int buf_size;
40899+ size_t buf_size;
40900 int ht40;
40901
40902 memset(buf, 0, sizeof(buf));
40903@@ -606,7 +606,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
40904 {
40905 struct iwl_priv *priv = file->private_data;
40906 char buf[8];
40907- int buf_size;
40908+ size_t buf_size;
40909 int value;
40910
40911 memset(buf, 0, sizeof(buf));
40912@@ -1871,7 +1871,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
40913 {
40914 struct iwl_priv *priv = file->private_data;
40915 char buf[8];
40916- int buf_size;
40917+ size_t buf_size;
40918 int clear;
40919
40920 memset(buf, 0, sizeof(buf));
40921@@ -1916,7 +1916,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
40922 {
40923 struct iwl_priv *priv = file->private_data;
40924 char buf[8];
40925- int buf_size;
40926+ size_t buf_size;
40927 int trace;
40928
40929 memset(buf, 0, sizeof(buf));
40930@@ -1987,7 +1987,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
40931 {
40932 struct iwl_priv *priv = file->private_data;
40933 char buf[8];
40934- int buf_size;
40935+ size_t buf_size;
40936 int missed;
40937
40938 memset(buf, 0, sizeof(buf));
40939@@ -2028,7 +2028,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
40940
40941 struct iwl_priv *priv = file->private_data;
40942 char buf[8];
40943- int buf_size;
40944+ size_t buf_size;
40945 int plcp;
40946
40947 memset(buf, 0, sizeof(buf));
40948@@ -2088,7 +2088,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
40949
40950 struct iwl_priv *priv = file->private_data;
40951 char buf[8];
40952- int buf_size;
40953+ size_t buf_size;
40954 int flush;
40955
40956 memset(buf, 0, sizeof(buf));
40957@@ -2178,7 +2178,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
40958
40959 struct iwl_priv *priv = file->private_data;
40960 char buf[8];
40961- int buf_size;
40962+ size_t buf_size;
40963 int rts;
40964
40965 if (!priv->cfg->ht_params)
40966@@ -2220,7 +2220,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
40967 {
40968 struct iwl_priv *priv = file->private_data;
40969 char buf[8];
40970- int buf_size;
40971+ size_t buf_size;
40972
40973 memset(buf, 0, sizeof(buf));
40974 buf_size = min(count, sizeof(buf) - 1);
40975@@ -2254,7 +2254,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
40976 struct iwl_priv *priv = file->private_data;
40977 u32 event_log_flag;
40978 char buf[8];
40979- int buf_size;
40980+ size_t buf_size;
40981
40982 /* check that the interface is up */
40983 if (!iwl_is_ready(priv))
40984@@ -2308,7 +2308,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
40985 struct iwl_priv *priv = file->private_data;
40986 char buf[8];
40987 u32 calib_disabled;
40988- int buf_size;
40989+ size_t buf_size;
40990
40991 memset(buf, 0, sizeof(buf));
40992 buf_size = min(count, sizeof(buf) - 1);
40993diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
40994index 50ba0a4..29424e7 100644
40995--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
40996+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
40997@@ -1329,7 +1329,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
40998 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
40999
41000 char buf[8];
41001- int buf_size;
41002+ size_t buf_size;
41003 u32 reset_flag;
41004
41005 memset(buf, 0, sizeof(buf));
41006@@ -1350,7 +1350,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
41007 {
41008 struct iwl_trans *trans = file->private_data;
41009 char buf[8];
41010- int buf_size;
41011+ size_t buf_size;
41012 int csr;
41013
41014 memset(buf, 0, sizeof(buf));
41015diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
41016index cb34c78..9fec0dc 100644
41017--- a/drivers/net/wireless/mac80211_hwsim.c
41018+++ b/drivers/net/wireless/mac80211_hwsim.c
41019@@ -2195,25 +2195,19 @@ static int __init init_mac80211_hwsim(void)
41020
41021 if (channels > 1) {
41022 hwsim_if_comb.num_different_channels = channels;
41023- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
41024- mac80211_hwsim_ops.cancel_hw_scan =
41025- mac80211_hwsim_cancel_hw_scan;
41026- mac80211_hwsim_ops.sw_scan_start = NULL;
41027- mac80211_hwsim_ops.sw_scan_complete = NULL;
41028- mac80211_hwsim_ops.remain_on_channel =
41029- mac80211_hwsim_roc;
41030- mac80211_hwsim_ops.cancel_remain_on_channel =
41031- mac80211_hwsim_croc;
41032- mac80211_hwsim_ops.add_chanctx =
41033- mac80211_hwsim_add_chanctx;
41034- mac80211_hwsim_ops.remove_chanctx =
41035- mac80211_hwsim_remove_chanctx;
41036- mac80211_hwsim_ops.change_chanctx =
41037- mac80211_hwsim_change_chanctx;
41038- mac80211_hwsim_ops.assign_vif_chanctx =
41039- mac80211_hwsim_assign_vif_chanctx;
41040- mac80211_hwsim_ops.unassign_vif_chanctx =
41041- mac80211_hwsim_unassign_vif_chanctx;
41042+ pax_open_kernel();
41043+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
41044+ *(void **)&mac80211_hwsim_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
41045+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
41046+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
41047+ *(void **)&mac80211_hwsim_ops.remain_on_channel = mac80211_hwsim_roc;
41048+ *(void **)&mac80211_hwsim_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
41049+ *(void **)&mac80211_hwsim_ops.add_chanctx = mac80211_hwsim_add_chanctx;
41050+ *(void **)&mac80211_hwsim_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
41051+ *(void **)&mac80211_hwsim_ops.change_chanctx = mac80211_hwsim_change_chanctx;
41052+ *(void **)&mac80211_hwsim_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
41053+ *(void **)&mac80211_hwsim_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
41054+ pax_close_kernel();
41055 }
41056
41057 spin_lock_init(&hwsim_radio_lock);
41058diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
41059index 8169a85..7fa3b47 100644
41060--- a/drivers/net/wireless/rndis_wlan.c
41061+++ b/drivers/net/wireless/rndis_wlan.c
41062@@ -1238,7 +1238,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
41063
41064 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
41065
41066- if (rts_threshold < 0 || rts_threshold > 2347)
41067+ if (rts_threshold > 2347)
41068 rts_threshold = 2347;
41069
41070 tmp = cpu_to_le32(rts_threshold);
41071diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
41072index 7510723..5ba37f5 100644
41073--- a/drivers/net/wireless/rt2x00/rt2x00.h
41074+++ b/drivers/net/wireless/rt2x00/rt2x00.h
41075@@ -386,7 +386,7 @@ struct rt2x00_intf {
41076 * for hardware which doesn't support hardware
41077 * sequence counting.
41078 */
41079- atomic_t seqno;
41080+ atomic_unchecked_t seqno;
41081 };
41082
41083 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
41084diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
41085index 2c12311..7b77f24 100644
41086--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
41087+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
41088@@ -252,9 +252,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
41089 * sequence counter given by mac80211.
41090 */
41091 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
41092- seqno = atomic_add_return(0x10, &intf->seqno);
41093+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
41094 else
41095- seqno = atomic_read(&intf->seqno);
41096+ seqno = atomic_read_unchecked(&intf->seqno);
41097
41098 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
41099 hdr->seq_ctrl |= cpu_to_le16(seqno);
41100diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
41101index e2b3d9c..67a5184 100644
41102--- a/drivers/net/wireless/ti/wl1251/sdio.c
41103+++ b/drivers/net/wireless/ti/wl1251/sdio.c
41104@@ -271,13 +271,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
41105
41106 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
41107
41108- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
41109- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
41110+ pax_open_kernel();
41111+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
41112+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
41113+ pax_close_kernel();
41114
41115 wl1251_info("using dedicated interrupt line");
41116 } else {
41117- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
41118- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
41119+ pax_open_kernel();
41120+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
41121+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
41122+ pax_close_kernel();
41123
41124 wl1251_info("using SDIO interrupt");
41125 }
41126diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
41127index 1c627da..69f7d17 100644
41128--- a/drivers/net/wireless/ti/wl12xx/main.c
41129+++ b/drivers/net/wireless/ti/wl12xx/main.c
41130@@ -656,7 +656,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
41131 sizeof(wl->conf.mem));
41132
41133 /* read data preparation is only needed by wl127x */
41134- wl->ops->prepare_read = wl127x_prepare_read;
41135+ pax_open_kernel();
41136+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
41137+ pax_close_kernel();
41138
41139 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
41140 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
41141@@ -681,7 +683,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
41142 sizeof(wl->conf.mem));
41143
41144 /* read data preparation is only needed by wl127x */
41145- wl->ops->prepare_read = wl127x_prepare_read;
41146+ pax_open_kernel();
41147+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
41148+ pax_close_kernel();
41149
41150 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
41151 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
41152diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
41153index 9fa692d..b31fee0 100644
41154--- a/drivers/net/wireless/ti/wl18xx/main.c
41155+++ b/drivers/net/wireless/ti/wl18xx/main.c
41156@@ -1687,8 +1687,10 @@ static int wl18xx_setup(struct wl1271 *wl)
41157 }
41158
41159 if (!checksum_param) {
41160- wl18xx_ops.set_rx_csum = NULL;
41161- wl18xx_ops.init_vif = NULL;
41162+ pax_open_kernel();
41163+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
41164+ *(void **)&wl18xx_ops.init_vif = NULL;
41165+ pax_close_kernel();
41166 }
41167
41168 /* Enable 11a Band only if we have 5G antennas */
41169diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
41170index 7ef0b4a..ff65c28 100644
41171--- a/drivers/net/wireless/zd1211rw/zd_usb.c
41172+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
41173@@ -386,7 +386,7 @@ static inline void handle_regs_int(struct urb *urb)
41174 {
41175 struct zd_usb *usb = urb->context;
41176 struct zd_usb_interrupt *intr = &usb->intr;
41177- int len;
41178+ unsigned int len;
41179 u16 int_num;
41180
41181 ZD_ASSERT(in_interrupt());
41182diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
41183index d93b2b6..ae50401 100644
41184--- a/drivers/oprofile/buffer_sync.c
41185+++ b/drivers/oprofile/buffer_sync.c
41186@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
41187 if (cookie == NO_COOKIE)
41188 offset = pc;
41189 if (cookie == INVALID_COOKIE) {
41190- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
41191+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
41192 offset = pc;
41193 }
41194 if (cookie != last_cookie) {
41195@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
41196 /* add userspace sample */
41197
41198 if (!mm) {
41199- atomic_inc(&oprofile_stats.sample_lost_no_mm);
41200+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
41201 return 0;
41202 }
41203
41204 cookie = lookup_dcookie(mm, s->eip, &offset);
41205
41206 if (cookie == INVALID_COOKIE) {
41207- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
41208+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
41209 return 0;
41210 }
41211
41212@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
41213 /* ignore backtraces if failed to add a sample */
41214 if (state == sb_bt_start) {
41215 state = sb_bt_ignore;
41216- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
41217+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
41218 }
41219 }
41220 release_mm(mm);
41221diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
41222index c0cc4e7..44d4e54 100644
41223--- a/drivers/oprofile/event_buffer.c
41224+++ b/drivers/oprofile/event_buffer.c
41225@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
41226 }
41227
41228 if (buffer_pos == buffer_size) {
41229- atomic_inc(&oprofile_stats.event_lost_overflow);
41230+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
41231 return;
41232 }
41233
41234diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
41235index ed2c3ec..deda85a 100644
41236--- a/drivers/oprofile/oprof.c
41237+++ b/drivers/oprofile/oprof.c
41238@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
41239 if (oprofile_ops.switch_events())
41240 return;
41241
41242- atomic_inc(&oprofile_stats.multiplex_counter);
41243+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
41244 start_switch_worker();
41245 }
41246
41247diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
41248index 84a208d..d61b0a1 100644
41249--- a/drivers/oprofile/oprofile_files.c
41250+++ b/drivers/oprofile/oprofile_files.c
41251@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
41252
41253 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
41254
41255-static ssize_t timeout_read(struct file *file, char __user *buf,
41256+static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
41257 size_t count, loff_t *offset)
41258 {
41259 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
41260diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
41261index 917d28e..d62d981 100644
41262--- a/drivers/oprofile/oprofile_stats.c
41263+++ b/drivers/oprofile/oprofile_stats.c
41264@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
41265 cpu_buf->sample_invalid_eip = 0;
41266 }
41267
41268- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
41269- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
41270- atomic_set(&oprofile_stats.event_lost_overflow, 0);
41271- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
41272- atomic_set(&oprofile_stats.multiplex_counter, 0);
41273+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
41274+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
41275+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
41276+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
41277+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
41278 }
41279
41280
41281diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
41282index 38b6fc0..b5cbfce 100644
41283--- a/drivers/oprofile/oprofile_stats.h
41284+++ b/drivers/oprofile/oprofile_stats.h
41285@@ -13,11 +13,11 @@
41286 #include <linux/atomic.h>
41287
41288 struct oprofile_stat_struct {
41289- atomic_t sample_lost_no_mm;
41290- atomic_t sample_lost_no_mapping;
41291- atomic_t bt_lost_no_mapping;
41292- atomic_t event_lost_overflow;
41293- atomic_t multiplex_counter;
41294+ atomic_unchecked_t sample_lost_no_mm;
41295+ atomic_unchecked_t sample_lost_no_mapping;
41296+ atomic_unchecked_t bt_lost_no_mapping;
41297+ atomic_unchecked_t event_lost_overflow;
41298+ atomic_unchecked_t multiplex_counter;
41299 };
41300
41301 extern struct oprofile_stat_struct oprofile_stats;
41302diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
41303index 7c12d9c..558bf3bb 100644
41304--- a/drivers/oprofile/oprofilefs.c
41305+++ b/drivers/oprofile/oprofilefs.c
41306@@ -190,7 +190,7 @@ static const struct file_operations atomic_ro_fops = {
41307
41308
41309 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
41310- char const *name, atomic_t *val)
41311+ char const *name, atomic_unchecked_t *val)
41312 {
41313 return __oprofilefs_create_file(sb, root, name,
41314 &atomic_ro_fops, 0444, val);
41315diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
41316index 93404f7..4a313d8 100644
41317--- a/drivers/oprofile/timer_int.c
41318+++ b/drivers/oprofile/timer_int.c
41319@@ -93,7 +93,7 @@ static int __cpuinit oprofile_cpu_notify(struct notifier_block *self,
41320 return NOTIFY_OK;
41321 }
41322
41323-static struct notifier_block __refdata oprofile_cpu_notifier = {
41324+static struct notifier_block oprofile_cpu_notifier = {
41325 .notifier_call = oprofile_cpu_notify,
41326 };
41327
41328diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
41329index 92ed045..62d39bd7 100644
41330--- a/drivers/parport/procfs.c
41331+++ b/drivers/parport/procfs.c
41332@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
41333
41334 *ppos += len;
41335
41336- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
41337+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
41338 }
41339
41340 #ifdef CONFIG_PARPORT_1284
41341@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
41342
41343 *ppos += len;
41344
41345- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
41346+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
41347 }
41348 #endif /* IEEE1284.3 support. */
41349
41350diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
41351index c35e8ad..fc33beb 100644
41352--- a/drivers/pci/hotplug/acpiphp_ibm.c
41353+++ b/drivers/pci/hotplug/acpiphp_ibm.c
41354@@ -464,7 +464,9 @@ static int __init ibm_acpiphp_init(void)
41355 goto init_cleanup;
41356 }
41357
41358- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
41359+ pax_open_kernel();
41360+ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
41361+ pax_close_kernel();
41362 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
41363
41364 return retval;
41365diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
41366index a6a71c4..c91097b 100644
41367--- a/drivers/pci/hotplug/cpcihp_generic.c
41368+++ b/drivers/pci/hotplug/cpcihp_generic.c
41369@@ -73,7 +73,6 @@ static u16 port;
41370 static unsigned int enum_bit;
41371 static u8 enum_mask;
41372
41373-static struct cpci_hp_controller_ops generic_hpc_ops;
41374 static struct cpci_hp_controller generic_hpc;
41375
41376 static int __init validate_parameters(void)
41377@@ -139,6 +138,10 @@ static int query_enum(void)
41378 return ((value & enum_mask) == enum_mask);
41379 }
41380
41381+static struct cpci_hp_controller_ops generic_hpc_ops = {
41382+ .query_enum = query_enum,
41383+};
41384+
41385 static int __init cpcihp_generic_init(void)
41386 {
41387 int status;
41388@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
41389 pci_dev_put(dev);
41390
41391 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
41392- generic_hpc_ops.query_enum = query_enum;
41393 generic_hpc.ops = &generic_hpc_ops;
41394
41395 status = cpci_hp_register_controller(&generic_hpc);
41396diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
41397index 449b4bb..257e2e8 100644
41398--- a/drivers/pci/hotplug/cpcihp_zt5550.c
41399+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
41400@@ -59,7 +59,6 @@
41401 /* local variables */
41402 static bool debug;
41403 static bool poll;
41404-static struct cpci_hp_controller_ops zt5550_hpc_ops;
41405 static struct cpci_hp_controller zt5550_hpc;
41406
41407 /* Primary cPCI bus bridge device */
41408@@ -205,6 +204,10 @@ static int zt5550_hc_disable_irq(void)
41409 return 0;
41410 }
41411
41412+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
41413+ .query_enum = zt5550_hc_query_enum,
41414+};
41415+
41416 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
41417 {
41418 int status;
41419@@ -216,16 +219,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
41420 dbg("returned from zt5550_hc_config");
41421
41422 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
41423- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
41424 zt5550_hpc.ops = &zt5550_hpc_ops;
41425 if(!poll) {
41426 zt5550_hpc.irq = hc_dev->irq;
41427 zt5550_hpc.irq_flags = IRQF_SHARED;
41428 zt5550_hpc.dev_id = hc_dev;
41429
41430- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
41431- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
41432- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
41433+ pax_open_kernel();
41434+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
41435+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
41436+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
41437+ pax_close_kernel();
41438 } else {
41439 info("using ENUM# polling mode");
41440 }
41441diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
41442index 76ba8a1..20ca857 100644
41443--- a/drivers/pci/hotplug/cpqphp_nvram.c
41444+++ b/drivers/pci/hotplug/cpqphp_nvram.c
41445@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
41446
41447 void compaq_nvram_init (void __iomem *rom_start)
41448 {
41449+
41450+#ifndef CONFIG_PAX_KERNEXEC
41451 if (rom_start) {
41452 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
41453 }
41454+#endif
41455+
41456 dbg("int15 entry = %p\n", compaq_int15_entry_point);
41457
41458 /* initialize our int15 lock */
41459diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
41460index ec20f74..c1d961e 100644
41461--- a/drivers/pci/hotplug/pci_hotplug_core.c
41462+++ b/drivers/pci/hotplug/pci_hotplug_core.c
41463@@ -441,8 +441,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
41464 return -EINVAL;
41465 }
41466
41467- slot->ops->owner = owner;
41468- slot->ops->mod_name = mod_name;
41469+ pax_open_kernel();
41470+ *(struct module **)&slot->ops->owner = owner;
41471+ *(const char **)&slot->ops->mod_name = mod_name;
41472+ pax_close_kernel();
41473
41474 mutex_lock(&pci_hp_mutex);
41475 /*
41476diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
41477index 7d72c5e..edce02c 100644
41478--- a/drivers/pci/hotplug/pciehp_core.c
41479+++ b/drivers/pci/hotplug/pciehp_core.c
41480@@ -91,7 +91,7 @@ static int init_slot(struct controller *ctrl)
41481 struct slot *slot = ctrl->slot;
41482 struct hotplug_slot *hotplug = NULL;
41483 struct hotplug_slot_info *info = NULL;
41484- struct hotplug_slot_ops *ops = NULL;
41485+ hotplug_slot_ops_no_const *ops = NULL;
41486 char name[SLOT_NAME_SIZE];
41487 int retval = -ENOMEM;
41488
41489diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
41490index 5b4a9d9..cd5ac1f 100644
41491--- a/drivers/pci/pci-sysfs.c
41492+++ b/drivers/pci/pci-sysfs.c
41493@@ -1071,7 +1071,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
41494 {
41495 /* allocate attribute structure, piggyback attribute name */
41496 int name_len = write_combine ? 13 : 10;
41497- struct bin_attribute *res_attr;
41498+ bin_attribute_no_const *res_attr;
41499 int retval;
41500
41501 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
41502@@ -1256,7 +1256,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
41503 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
41504 {
41505 int retval;
41506- struct bin_attribute *attr;
41507+ bin_attribute_no_const *attr;
41508
41509 /* If the device has VPD, try to expose it in sysfs. */
41510 if (dev->vpd) {
41511@@ -1303,7 +1303,7 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
41512 {
41513 int retval;
41514 int rom_size = 0;
41515- struct bin_attribute *attr;
41516+ bin_attribute_no_const *attr;
41517
41518 if (!sysfs_initialized)
41519 return -EACCES;
41520diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
41521index d1182c4..2a138ec 100644
41522--- a/drivers/pci/pci.h
41523+++ b/drivers/pci/pci.h
41524@@ -92,7 +92,7 @@ struct pci_vpd_ops {
41525 struct pci_vpd {
41526 unsigned int len;
41527 const struct pci_vpd_ops *ops;
41528- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
41529+ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
41530 };
41531
41532 int pci_vpd_pci22_init(struct pci_dev *dev);
41533diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
41534index d320df6..ca9a8f6 100644
41535--- a/drivers/pci/pcie/aspm.c
41536+++ b/drivers/pci/pcie/aspm.c
41537@@ -27,9 +27,9 @@
41538 #define MODULE_PARAM_PREFIX "pcie_aspm."
41539
41540 /* Note: those are not register definitions */
41541-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
41542-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
41543-#define ASPM_STATE_L1 (4) /* L1 state */
41544+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
41545+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
41546+#define ASPM_STATE_L1 (4U) /* L1 state */
41547 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
41548 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
41549
41550diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
41551index 70f10fa..cb5e917 100644
41552--- a/drivers/pci/probe.c
41553+++ b/drivers/pci/probe.c
41554@@ -173,7 +173,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
41555 struct pci_bus_region region;
41556 bool bar_too_big = false, bar_disabled = false;
41557
41558- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
41559+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
41560
41561 /* No printks while decoding is disabled! */
41562 if (!dev->mmio_always_on) {
41563diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
41564index 0812608..b04018c4 100644
41565--- a/drivers/pci/proc.c
41566+++ b/drivers/pci/proc.c
41567@@ -453,7 +453,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
41568 static int __init pci_proc_init(void)
41569 {
41570 struct pci_dev *dev = NULL;
41571+
41572+#ifdef CONFIG_GRKERNSEC_PROC_ADD
41573+#ifdef CONFIG_GRKERNSEC_PROC_USER
41574+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
41575+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
41576+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
41577+#endif
41578+#else
41579 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
41580+#endif
41581 proc_create("devices", 0, proc_bus_pci_dir,
41582 &proc_bus_pci_dev_operations);
41583 proc_initialized = 1;
41584diff --git a/drivers/platform/x86/chromeos_laptop.c b/drivers/platform/x86/chromeos_laptop.c
41585index 3e5b4497..dcdfb70 100644
41586--- a/drivers/platform/x86/chromeos_laptop.c
41587+++ b/drivers/platform/x86/chromeos_laptop.c
41588@@ -301,7 +301,7 @@ static int __init setup_tsl2563_als(const struct dmi_system_id *id)
41589 return 0;
41590 }
41591
41592-static struct dmi_system_id __initdata chromeos_laptop_dmi_table[] = {
41593+static struct dmi_system_id __initconst chromeos_laptop_dmi_table[] = {
41594 {
41595 .ident = "Samsung Series 5 550 - Touchpad",
41596 .matches = {
41597diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
41598index 6b22938..bc9700e 100644
41599--- a/drivers/platform/x86/msi-laptop.c
41600+++ b/drivers/platform/x86/msi-laptop.c
41601@@ -1000,12 +1000,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
41602
41603 if (!quirks->ec_read_only) {
41604 /* allow userland write sysfs file */
41605- dev_attr_bluetooth.store = store_bluetooth;
41606- dev_attr_wlan.store = store_wlan;
41607- dev_attr_threeg.store = store_threeg;
41608- dev_attr_bluetooth.attr.mode |= S_IWUSR;
41609- dev_attr_wlan.attr.mode |= S_IWUSR;
41610- dev_attr_threeg.attr.mode |= S_IWUSR;
41611+ pax_open_kernel();
41612+ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
41613+ *(void **)&dev_attr_wlan.store = store_wlan;
41614+ *(void **)&dev_attr_threeg.store = store_threeg;
41615+ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
41616+ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
41617+ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
41618+ pax_close_kernel();
41619 }
41620
41621 /* disable hardware control by fn key */
41622diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
41623index 2ac045f..39c443d 100644
41624--- a/drivers/platform/x86/sony-laptop.c
41625+++ b/drivers/platform/x86/sony-laptop.c
41626@@ -2483,7 +2483,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
41627 }
41628
41629 /* High speed charging function */
41630-static struct device_attribute *hsc_handle;
41631+static device_attribute_no_const *hsc_handle;
41632
41633 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
41634 struct device_attribute *attr,
41635diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
41636index 54d31c0..3f896d3 100644
41637--- a/drivers/platform/x86/thinkpad_acpi.c
41638+++ b/drivers/platform/x86/thinkpad_acpi.c
41639@@ -2093,7 +2093,7 @@ static int hotkey_mask_get(void)
41640 return 0;
41641 }
41642
41643-void static hotkey_mask_warn_incomplete_mask(void)
41644+static void hotkey_mask_warn_incomplete_mask(void)
41645 {
41646 /* log only what the user can fix... */
41647 const u32 wantedmask = hotkey_driver_mask &
41648@@ -2324,11 +2324,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
41649 }
41650 }
41651
41652-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
41653- struct tp_nvram_state *newn,
41654- const u32 event_mask)
41655-{
41656-
41657 #define TPACPI_COMPARE_KEY(__scancode, __member) \
41658 do { \
41659 if ((event_mask & (1 << __scancode)) && \
41660@@ -2342,36 +2337,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
41661 tpacpi_hotkey_send_key(__scancode); \
41662 } while (0)
41663
41664- void issue_volchange(const unsigned int oldvol,
41665- const unsigned int newvol)
41666- {
41667- unsigned int i = oldvol;
41668+static void issue_volchange(const unsigned int oldvol,
41669+ const unsigned int newvol,
41670+ const u32 event_mask)
41671+{
41672+ unsigned int i = oldvol;
41673
41674- while (i > newvol) {
41675- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
41676- i--;
41677- }
41678- while (i < newvol) {
41679- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
41680- i++;
41681- }
41682+ while (i > newvol) {
41683+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
41684+ i--;
41685 }
41686+ while (i < newvol) {
41687+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
41688+ i++;
41689+ }
41690+}
41691
41692- void issue_brightnesschange(const unsigned int oldbrt,
41693- const unsigned int newbrt)
41694- {
41695- unsigned int i = oldbrt;
41696+static void issue_brightnesschange(const unsigned int oldbrt,
41697+ const unsigned int newbrt,
41698+ const u32 event_mask)
41699+{
41700+ unsigned int i = oldbrt;
41701
41702- while (i > newbrt) {
41703- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
41704- i--;
41705- }
41706- while (i < newbrt) {
41707- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
41708- i++;
41709- }
41710+ while (i > newbrt) {
41711+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
41712+ i--;
41713+ }
41714+ while (i < newbrt) {
41715+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
41716+ i++;
41717 }
41718+}
41719
41720+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
41721+ struct tp_nvram_state *newn,
41722+ const u32 event_mask)
41723+{
41724 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
41725 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
41726 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
41727@@ -2405,7 +2406,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
41728 oldn->volume_level != newn->volume_level) {
41729 /* recently muted, or repeated mute keypress, or
41730 * multiple presses ending in mute */
41731- issue_volchange(oldn->volume_level, newn->volume_level);
41732+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
41733 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
41734 }
41735 } else {
41736@@ -2415,7 +2416,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
41737 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
41738 }
41739 if (oldn->volume_level != newn->volume_level) {
41740- issue_volchange(oldn->volume_level, newn->volume_level);
41741+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
41742 } else if (oldn->volume_toggle != newn->volume_toggle) {
41743 /* repeated vol up/down keypress at end of scale ? */
41744 if (newn->volume_level == 0)
41745@@ -2428,7 +2429,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
41746 /* handle brightness */
41747 if (oldn->brightness_level != newn->brightness_level) {
41748 issue_brightnesschange(oldn->brightness_level,
41749- newn->brightness_level);
41750+ newn->brightness_level,
41751+ event_mask);
41752 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
41753 /* repeated key presses that didn't change state */
41754 if (newn->brightness_level == 0)
41755@@ -2437,10 +2439,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
41756 && !tp_features.bright_unkfw)
41757 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
41758 }
41759+}
41760
41761 #undef TPACPI_COMPARE_KEY
41762 #undef TPACPI_MAY_SEND_KEY
41763-}
41764
41765 /*
41766 * Polling driver
41767diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
41768index 769d265..a3a05ca 100644
41769--- a/drivers/pnp/pnpbios/bioscalls.c
41770+++ b/drivers/pnp/pnpbios/bioscalls.c
41771@@ -58,7 +58,7 @@ do { \
41772 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
41773 } while(0)
41774
41775-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
41776+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
41777 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
41778
41779 /*
41780@@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
41781
41782 cpu = get_cpu();
41783 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
41784+
41785+ pax_open_kernel();
41786 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
41787+ pax_close_kernel();
41788
41789 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
41790 spin_lock_irqsave(&pnp_bios_lock, flags);
41791@@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
41792 :"memory");
41793 spin_unlock_irqrestore(&pnp_bios_lock, flags);
41794
41795+ pax_open_kernel();
41796 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
41797+ pax_close_kernel();
41798+
41799 put_cpu();
41800
41801 /* If we get here and this is set then the PnP BIOS faulted on us. */
41802@@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
41803 return status;
41804 }
41805
41806-void pnpbios_calls_init(union pnp_bios_install_struct *header)
41807+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
41808 {
41809 int i;
41810
41811@@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
41812 pnp_bios_callpoint.offset = header->fields.pm16offset;
41813 pnp_bios_callpoint.segment = PNP_CS16;
41814
41815+ pax_open_kernel();
41816+
41817 for_each_possible_cpu(i) {
41818 struct desc_struct *gdt = get_cpu_gdt_table(i);
41819 if (!gdt)
41820@@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
41821 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
41822 (unsigned long)__va(header->fields.pm16dseg));
41823 }
41824+
41825+ pax_close_kernel();
41826 }
41827diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
41828index 3e6db1c..1fbbdae 100644
41829--- a/drivers/pnp/resource.c
41830+++ b/drivers/pnp/resource.c
41831@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
41832 return 1;
41833
41834 /* check if the resource is valid */
41835- if (*irq < 0 || *irq > 15)
41836+ if (*irq > 15)
41837 return 0;
41838
41839 /* check if the resource is reserved */
41840@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
41841 return 1;
41842
41843 /* check if the resource is valid */
41844- if (*dma < 0 || *dma == 4 || *dma > 7)
41845+ if (*dma == 4 || *dma > 7)
41846 return 0;
41847
41848 /* check if the resource is reserved */
41849diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
41850index 0c52e2a..3421ab7 100644
41851--- a/drivers/power/pda_power.c
41852+++ b/drivers/power/pda_power.c
41853@@ -37,7 +37,11 @@ static int polling;
41854
41855 #if IS_ENABLED(CONFIG_USB_PHY)
41856 static struct usb_phy *transceiver;
41857-static struct notifier_block otg_nb;
41858+static int otg_handle_notification(struct notifier_block *nb,
41859+ unsigned long event, void *unused);
41860+static struct notifier_block otg_nb = {
41861+ .notifier_call = otg_handle_notification
41862+};
41863 #endif
41864
41865 static struct regulator *ac_draw;
41866@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
41867
41868 #if IS_ENABLED(CONFIG_USB_PHY)
41869 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
41870- otg_nb.notifier_call = otg_handle_notification;
41871 ret = usb_register_notifier(transceiver, &otg_nb);
41872 if (ret) {
41873 dev_err(dev, "failure to register otg notifier\n");
41874diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
41875index cc439fd..8fa30df 100644
41876--- a/drivers/power/power_supply.h
41877+++ b/drivers/power/power_supply.h
41878@@ -16,12 +16,12 @@ struct power_supply;
41879
41880 #ifdef CONFIG_SYSFS
41881
41882-extern void power_supply_init_attrs(struct device_type *dev_type);
41883+extern void power_supply_init_attrs(void);
41884 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
41885
41886 #else
41887
41888-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
41889+static inline void power_supply_init_attrs(void) {}
41890 #define power_supply_uevent NULL
41891
41892 #endif /* CONFIG_SYSFS */
41893diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
41894index 1c517c3..ffa2f17 100644
41895--- a/drivers/power/power_supply_core.c
41896+++ b/drivers/power/power_supply_core.c
41897@@ -24,7 +24,10 @@
41898 struct class *power_supply_class;
41899 EXPORT_SYMBOL_GPL(power_supply_class);
41900
41901-static struct device_type power_supply_dev_type;
41902+extern const struct attribute_group *power_supply_attr_groups[];
41903+static struct device_type power_supply_dev_type = {
41904+ .groups = power_supply_attr_groups,
41905+};
41906
41907 static bool __power_supply_is_supplied_by(struct power_supply *supplier,
41908 struct power_supply *supply)
41909@@ -554,7 +557,7 @@ static int __init power_supply_class_init(void)
41910 return PTR_ERR(power_supply_class);
41911
41912 power_supply_class->dev_uevent = power_supply_uevent;
41913- power_supply_init_attrs(&power_supply_dev_type);
41914+ power_supply_init_attrs();
41915
41916 return 0;
41917 }
41918diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
41919index 29178f7..c65f324 100644
41920--- a/drivers/power/power_supply_sysfs.c
41921+++ b/drivers/power/power_supply_sysfs.c
41922@@ -230,17 +230,15 @@ static struct attribute_group power_supply_attr_group = {
41923 .is_visible = power_supply_attr_is_visible,
41924 };
41925
41926-static const struct attribute_group *power_supply_attr_groups[] = {
41927+const struct attribute_group *power_supply_attr_groups[] = {
41928 &power_supply_attr_group,
41929 NULL,
41930 };
41931
41932-void power_supply_init_attrs(struct device_type *dev_type)
41933+void power_supply_init_attrs(void)
41934 {
41935 int i;
41936
41937- dev_type->groups = power_supply_attr_groups;
41938-
41939 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
41940 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
41941 }
41942diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
41943index d428ef9..fdc0357 100644
41944--- a/drivers/regulator/max8660.c
41945+++ b/drivers/regulator/max8660.c
41946@@ -333,8 +333,10 @@ static int max8660_probe(struct i2c_client *client,
41947 max8660->shadow_regs[MAX8660_OVER1] = 5;
41948 } else {
41949 /* Otherwise devices can be toggled via software */
41950- max8660_dcdc_ops.enable = max8660_dcdc_enable;
41951- max8660_dcdc_ops.disable = max8660_dcdc_disable;
41952+ pax_open_kernel();
41953+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
41954+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
41955+ pax_close_kernel();
41956 }
41957
41958 /*
41959diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
41960index adb1414..c13e0ce 100644
41961--- a/drivers/regulator/max8973-regulator.c
41962+++ b/drivers/regulator/max8973-regulator.c
41963@@ -401,9 +401,11 @@ static int max8973_probe(struct i2c_client *client,
41964 if (!pdata->enable_ext_control) {
41965 max->desc.enable_reg = MAX8973_VOUT;
41966 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
41967- max8973_dcdc_ops.enable = regulator_enable_regmap;
41968- max8973_dcdc_ops.disable = regulator_disable_regmap;
41969- max8973_dcdc_ops.is_enabled = regulator_is_enabled_regmap;
41970+ pax_open_kernel();
41971+ *(void **)&max8973_dcdc_ops.enable = regulator_enable_regmap;
41972+ *(void **)&max8973_dcdc_ops.disable = regulator_disable_regmap;
41973+ *(void **)&max8973_dcdc_ops.is_enabled = regulator_is_enabled_regmap;
41974+ pax_close_kernel();
41975 }
41976
41977 max->enable_external_control = pdata->enable_ext_control;
41978diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
41979index b716283..3cc4349 100644
41980--- a/drivers/regulator/mc13892-regulator.c
41981+++ b/drivers/regulator/mc13892-regulator.c
41982@@ -582,10 +582,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
41983 }
41984 mc13xxx_unlock(mc13892);
41985
41986- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
41987+ pax_open_kernel();
41988+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
41989 = mc13892_vcam_set_mode;
41990- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
41991+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
41992 = mc13892_vcam_get_mode;
41993+ pax_close_kernel();
41994
41995 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
41996 ARRAY_SIZE(mc13892_regulators));
41997diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
41998index f1cb706..4c7832a 100644
41999--- a/drivers/rtc/rtc-cmos.c
42000+++ b/drivers/rtc/rtc-cmos.c
42001@@ -724,7 +724,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
42002 hpet_rtc_timer_init();
42003
42004 /* export at least the first block of NVRAM */
42005- nvram.size = address_space - NVRAM_OFFSET;
42006+ pax_open_kernel();
42007+ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
42008+ pax_close_kernel();
42009 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
42010 if (retval < 0) {
42011 dev_dbg(dev, "can't create nvram file? %d\n", retval);
42012diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
42013index d049393..bb20be0 100644
42014--- a/drivers/rtc/rtc-dev.c
42015+++ b/drivers/rtc/rtc-dev.c
42016@@ -16,6 +16,7 @@
42017 #include <linux/module.h>
42018 #include <linux/rtc.h>
42019 #include <linux/sched.h>
42020+#include <linux/grsecurity.h>
42021 #include "rtc-core.h"
42022
42023 static dev_t rtc_devt;
42024@@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file,
42025 if (copy_from_user(&tm, uarg, sizeof(tm)))
42026 return -EFAULT;
42027
42028+ gr_log_timechange();
42029+
42030 return rtc_set_time(rtc, &tm);
42031
42032 case RTC_PIE_ON:
42033diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
42034index b53992a..776df84 100644
42035--- a/drivers/rtc/rtc-ds1307.c
42036+++ b/drivers/rtc/rtc-ds1307.c
42037@@ -107,7 +107,7 @@ struct ds1307 {
42038 u8 offset; /* register's offset */
42039 u8 regs[11];
42040 u16 nvram_offset;
42041- struct bin_attribute *nvram;
42042+ bin_attribute_no_const *nvram;
42043 enum ds_type type;
42044 unsigned long flags;
42045 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
42046diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
42047index 130f29a..6179d03 100644
42048--- a/drivers/rtc/rtc-m48t59.c
42049+++ b/drivers/rtc/rtc-m48t59.c
42050@@ -482,7 +482,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
42051 goto out;
42052 }
42053
42054- m48t59_nvram_attr.size = pdata->offset;
42055+ pax_open_kernel();
42056+ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
42057+ pax_close_kernel();
42058
42059 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
42060 if (ret) {
42061diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
42062index e693af6..2e525b6 100644
42063--- a/drivers/scsi/bfa/bfa_fcpim.h
42064+++ b/drivers/scsi/bfa/bfa_fcpim.h
42065@@ -36,7 +36,7 @@ struct bfa_iotag_s {
42066
42067 struct bfa_itn_s {
42068 bfa_isr_func_t isr;
42069-};
42070+} __no_const;
42071
42072 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
42073 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
42074diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
42075index 23a90e7..9cf04ee 100644
42076--- a/drivers/scsi/bfa/bfa_ioc.h
42077+++ b/drivers/scsi/bfa/bfa_ioc.h
42078@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
42079 bfa_ioc_disable_cbfn_t disable_cbfn;
42080 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
42081 bfa_ioc_reset_cbfn_t reset_cbfn;
42082-};
42083+} __no_const;
42084
42085 /*
42086 * IOC event notification mechanism.
42087@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
42088 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
42089 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
42090 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
42091-};
42092+} __no_const;
42093
42094 /*
42095 * Queue element to wait for room in request queue. FIFO order is
42096diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
42097index df0c3c7..b00e1d0 100644
42098--- a/drivers/scsi/hosts.c
42099+++ b/drivers/scsi/hosts.c
42100@@ -42,7 +42,7 @@
42101 #include "scsi_logging.h"
42102
42103
42104-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
42105+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
42106
42107
42108 static void scsi_host_cls_release(struct device *dev)
42109@@ -361,7 +361,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
42110 * subtract one because we increment first then return, but we need to
42111 * know what the next host number was before increment
42112 */
42113- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
42114+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
42115 shost->dma_channel = 0xff;
42116
42117 /* These three are default values which can be overridden */
42118diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
42119index 7f4f790..b75b92a 100644
42120--- a/drivers/scsi/hpsa.c
42121+++ b/drivers/scsi/hpsa.c
42122@@ -554,7 +554,7 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
42123 unsigned long flags;
42124
42125 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
42126- return h->access.command_completed(h, q);
42127+ return h->access->command_completed(h, q);
42128
42129 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
42130 a = rq->head[rq->current_entry];
42131@@ -3422,7 +3422,7 @@ static void start_io(struct ctlr_info *h)
42132 while (!list_empty(&h->reqQ)) {
42133 c = list_entry(h->reqQ.next, struct CommandList, list);
42134 /* can't do anything if fifo is full */
42135- if ((h->access.fifo_full(h))) {
42136+ if ((h->access->fifo_full(h))) {
42137 dev_warn(&h->pdev->dev, "fifo full\n");
42138 break;
42139 }
42140@@ -3444,7 +3444,7 @@ static void start_io(struct ctlr_info *h)
42141
42142 /* Tell the controller execute command */
42143 spin_unlock_irqrestore(&h->lock, flags);
42144- h->access.submit_command(h, c);
42145+ h->access->submit_command(h, c);
42146 spin_lock_irqsave(&h->lock, flags);
42147 }
42148 spin_unlock_irqrestore(&h->lock, flags);
42149@@ -3452,17 +3452,17 @@ static void start_io(struct ctlr_info *h)
42150
42151 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
42152 {
42153- return h->access.command_completed(h, q);
42154+ return h->access->command_completed(h, q);
42155 }
42156
42157 static inline bool interrupt_pending(struct ctlr_info *h)
42158 {
42159- return h->access.intr_pending(h);
42160+ return h->access->intr_pending(h);
42161 }
42162
42163 static inline long interrupt_not_for_us(struct ctlr_info *h)
42164 {
42165- return (h->access.intr_pending(h) == 0) ||
42166+ return (h->access->intr_pending(h) == 0) ||
42167 (h->interrupts_enabled == 0);
42168 }
42169
42170@@ -4364,7 +4364,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
42171 if (prod_index < 0)
42172 return -ENODEV;
42173 h->product_name = products[prod_index].product_name;
42174- h->access = *(products[prod_index].access);
42175+ h->access = products[prod_index].access;
42176
42177 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
42178 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
42179@@ -4646,7 +4646,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
42180
42181 assert_spin_locked(&lockup_detector_lock);
42182 remove_ctlr_from_lockup_detector_list(h);
42183- h->access.set_intr_mask(h, HPSA_INTR_OFF);
42184+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
42185 spin_lock_irqsave(&h->lock, flags);
42186 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
42187 spin_unlock_irqrestore(&h->lock, flags);
42188@@ -4823,7 +4823,7 @@ reinit_after_soft_reset:
42189 }
42190
42191 /* make sure the board interrupts are off */
42192- h->access.set_intr_mask(h, HPSA_INTR_OFF);
42193+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
42194
42195 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
42196 goto clean2;
42197@@ -4857,7 +4857,7 @@ reinit_after_soft_reset:
42198 * fake ones to scoop up any residual completions.
42199 */
42200 spin_lock_irqsave(&h->lock, flags);
42201- h->access.set_intr_mask(h, HPSA_INTR_OFF);
42202+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
42203 spin_unlock_irqrestore(&h->lock, flags);
42204 free_irqs(h);
42205 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
42206@@ -4876,9 +4876,9 @@ reinit_after_soft_reset:
42207 dev_info(&h->pdev->dev, "Board READY.\n");
42208 dev_info(&h->pdev->dev,
42209 "Waiting for stale completions to drain.\n");
42210- h->access.set_intr_mask(h, HPSA_INTR_ON);
42211+ h->access->set_intr_mask(h, HPSA_INTR_ON);
42212 msleep(10000);
42213- h->access.set_intr_mask(h, HPSA_INTR_OFF);
42214+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
42215
42216 rc = controller_reset_failed(h->cfgtable);
42217 if (rc)
42218@@ -4899,7 +4899,7 @@ reinit_after_soft_reset:
42219 }
42220
42221 /* Turn the interrupts on so we can service requests */
42222- h->access.set_intr_mask(h, HPSA_INTR_ON);
42223+ h->access->set_intr_mask(h, HPSA_INTR_ON);
42224
42225 hpsa_hba_inquiry(h);
42226 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
42227@@ -4954,7 +4954,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
42228 * To write all data in the battery backed cache to disks
42229 */
42230 hpsa_flush_cache(h);
42231- h->access.set_intr_mask(h, HPSA_INTR_OFF);
42232+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
42233 hpsa_free_irqs_and_disable_msix(h);
42234 }
42235
42236@@ -5122,7 +5122,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
42237 return;
42238 }
42239 /* Change the access methods to the performant access methods */
42240- h->access = SA5_performant_access;
42241+ h->access = &SA5_performant_access;
42242 h->transMethod = CFGTBL_Trans_Performant;
42243 }
42244
42245diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
42246index 9816479..c5d4e97 100644
42247--- a/drivers/scsi/hpsa.h
42248+++ b/drivers/scsi/hpsa.h
42249@@ -79,7 +79,7 @@ struct ctlr_info {
42250 unsigned int msix_vector;
42251 unsigned int msi_vector;
42252 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
42253- struct access_method access;
42254+ struct access_method *access;
42255
42256 /* queue and queue Info */
42257 struct list_head reqQ;
42258diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
42259index 8b928c6..9c76300 100644
42260--- a/drivers/scsi/libfc/fc_exch.c
42261+++ b/drivers/scsi/libfc/fc_exch.c
42262@@ -100,12 +100,12 @@ struct fc_exch_mgr {
42263 u16 pool_max_index;
42264
42265 struct {
42266- atomic_t no_free_exch;
42267- atomic_t no_free_exch_xid;
42268- atomic_t xid_not_found;
42269- atomic_t xid_busy;
42270- atomic_t seq_not_found;
42271- atomic_t non_bls_resp;
42272+ atomic_unchecked_t no_free_exch;
42273+ atomic_unchecked_t no_free_exch_xid;
42274+ atomic_unchecked_t xid_not_found;
42275+ atomic_unchecked_t xid_busy;
42276+ atomic_unchecked_t seq_not_found;
42277+ atomic_unchecked_t non_bls_resp;
42278 } stats;
42279 };
42280
42281@@ -736,7 +736,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
42282 /* allocate memory for exchange */
42283 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
42284 if (!ep) {
42285- atomic_inc(&mp->stats.no_free_exch);
42286+ atomic_inc_unchecked(&mp->stats.no_free_exch);
42287 goto out;
42288 }
42289 memset(ep, 0, sizeof(*ep));
42290@@ -797,7 +797,7 @@ out:
42291 return ep;
42292 err:
42293 spin_unlock_bh(&pool->lock);
42294- atomic_inc(&mp->stats.no_free_exch_xid);
42295+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
42296 mempool_free(ep, mp->ep_pool);
42297 return NULL;
42298 }
42299@@ -940,7 +940,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
42300 xid = ntohs(fh->fh_ox_id); /* we originated exch */
42301 ep = fc_exch_find(mp, xid);
42302 if (!ep) {
42303- atomic_inc(&mp->stats.xid_not_found);
42304+ atomic_inc_unchecked(&mp->stats.xid_not_found);
42305 reject = FC_RJT_OX_ID;
42306 goto out;
42307 }
42308@@ -970,7 +970,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
42309 ep = fc_exch_find(mp, xid);
42310 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
42311 if (ep) {
42312- atomic_inc(&mp->stats.xid_busy);
42313+ atomic_inc_unchecked(&mp->stats.xid_busy);
42314 reject = FC_RJT_RX_ID;
42315 goto rel;
42316 }
42317@@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
42318 }
42319 xid = ep->xid; /* get our XID */
42320 } else if (!ep) {
42321- atomic_inc(&mp->stats.xid_not_found);
42322+ atomic_inc_unchecked(&mp->stats.xid_not_found);
42323 reject = FC_RJT_RX_ID; /* XID not found */
42324 goto out;
42325 }
42326@@ -998,7 +998,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
42327 } else {
42328 sp = &ep->seq;
42329 if (sp->id != fh->fh_seq_id) {
42330- atomic_inc(&mp->stats.seq_not_found);
42331+ atomic_inc_unchecked(&mp->stats.seq_not_found);
42332 if (f_ctl & FC_FC_END_SEQ) {
42333 /*
42334 * Update sequence_id based on incoming last
42335@@ -1448,22 +1448,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
42336
42337 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
42338 if (!ep) {
42339- atomic_inc(&mp->stats.xid_not_found);
42340+ atomic_inc_unchecked(&mp->stats.xid_not_found);
42341 goto out;
42342 }
42343 if (ep->esb_stat & ESB_ST_COMPLETE) {
42344- atomic_inc(&mp->stats.xid_not_found);
42345+ atomic_inc_unchecked(&mp->stats.xid_not_found);
42346 goto rel;
42347 }
42348 if (ep->rxid == FC_XID_UNKNOWN)
42349 ep->rxid = ntohs(fh->fh_rx_id);
42350 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
42351- atomic_inc(&mp->stats.xid_not_found);
42352+ atomic_inc_unchecked(&mp->stats.xid_not_found);
42353 goto rel;
42354 }
42355 if (ep->did != ntoh24(fh->fh_s_id) &&
42356 ep->did != FC_FID_FLOGI) {
42357- atomic_inc(&mp->stats.xid_not_found);
42358+ atomic_inc_unchecked(&mp->stats.xid_not_found);
42359 goto rel;
42360 }
42361 sof = fr_sof(fp);
42362@@ -1472,7 +1472,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
42363 sp->ssb_stat |= SSB_ST_RESP;
42364 sp->id = fh->fh_seq_id;
42365 } else if (sp->id != fh->fh_seq_id) {
42366- atomic_inc(&mp->stats.seq_not_found);
42367+ atomic_inc_unchecked(&mp->stats.seq_not_found);
42368 goto rel;
42369 }
42370
42371@@ -1536,9 +1536,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
42372 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
42373
42374 if (!sp)
42375- atomic_inc(&mp->stats.xid_not_found);
42376+ atomic_inc_unchecked(&mp->stats.xid_not_found);
42377 else
42378- atomic_inc(&mp->stats.non_bls_resp);
42379+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
42380
42381 fc_frame_free(fp);
42382 }
42383@@ -2185,13 +2185,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
42384
42385 list_for_each_entry(ema, &lport->ema_list, ema_list) {
42386 mp = ema->mp;
42387- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
42388+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
42389 st->fc_no_free_exch_xid +=
42390- atomic_read(&mp->stats.no_free_exch_xid);
42391- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
42392- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
42393- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
42394- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
42395+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
42396+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
42397+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
42398+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
42399+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
42400 }
42401 }
42402 EXPORT_SYMBOL(fc_exch_update_stats);
42403diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
42404index 161c98e..6d563b3 100644
42405--- a/drivers/scsi/libsas/sas_ata.c
42406+++ b/drivers/scsi/libsas/sas_ata.c
42407@@ -554,7 +554,7 @@ static struct ata_port_operations sas_sata_ops = {
42408 .postreset = ata_std_postreset,
42409 .error_handler = ata_std_error_handler,
42410 .post_internal_cmd = sas_ata_post_internal,
42411- .qc_defer = ata_std_qc_defer,
42412+ .qc_defer = ata_std_qc_defer,
42413 .qc_prep = ata_noop_qc_prep,
42414 .qc_issue = sas_ata_qc_issue,
42415 .qc_fill_rtf = sas_ata_qc_fill_rtf,
42416diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
42417index bcc56ca..6f4174a 100644
42418--- a/drivers/scsi/lpfc/lpfc.h
42419+++ b/drivers/scsi/lpfc/lpfc.h
42420@@ -431,7 +431,7 @@ struct lpfc_vport {
42421 struct dentry *debug_nodelist;
42422 struct dentry *vport_debugfs_root;
42423 struct lpfc_debugfs_trc *disc_trc;
42424- atomic_t disc_trc_cnt;
42425+ atomic_unchecked_t disc_trc_cnt;
42426 #endif
42427 uint8_t stat_data_enabled;
42428 uint8_t stat_data_blocked;
42429@@ -865,8 +865,8 @@ struct lpfc_hba {
42430 struct timer_list fabric_block_timer;
42431 unsigned long bit_flags;
42432 #define FABRIC_COMANDS_BLOCKED 0
42433- atomic_t num_rsrc_err;
42434- atomic_t num_cmd_success;
42435+ atomic_unchecked_t num_rsrc_err;
42436+ atomic_unchecked_t num_cmd_success;
42437 unsigned long last_rsrc_error_time;
42438 unsigned long last_ramp_down_time;
42439 unsigned long last_ramp_up_time;
42440@@ -902,7 +902,7 @@ struct lpfc_hba {
42441
42442 struct dentry *debug_slow_ring_trc;
42443 struct lpfc_debugfs_trc *slow_ring_trc;
42444- atomic_t slow_ring_trc_cnt;
42445+ atomic_unchecked_t slow_ring_trc_cnt;
42446 /* iDiag debugfs sub-directory */
42447 struct dentry *idiag_root;
42448 struct dentry *idiag_pci_cfg;
42449diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
42450index f525ecb..32549a4 100644
42451--- a/drivers/scsi/lpfc/lpfc_debugfs.c
42452+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
42453@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
42454
42455 #include <linux/debugfs.h>
42456
42457-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
42458+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
42459 static unsigned long lpfc_debugfs_start_time = 0L;
42460
42461 /* iDiag */
42462@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
42463 lpfc_debugfs_enable = 0;
42464
42465 len = 0;
42466- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
42467+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
42468 (lpfc_debugfs_max_disc_trc - 1);
42469 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
42470 dtp = vport->disc_trc + i;
42471@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
42472 lpfc_debugfs_enable = 0;
42473
42474 len = 0;
42475- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
42476+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
42477 (lpfc_debugfs_max_slow_ring_trc - 1);
42478 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
42479 dtp = phba->slow_ring_trc + i;
42480@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
42481 !vport || !vport->disc_trc)
42482 return;
42483
42484- index = atomic_inc_return(&vport->disc_trc_cnt) &
42485+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
42486 (lpfc_debugfs_max_disc_trc - 1);
42487 dtp = vport->disc_trc + index;
42488 dtp->fmt = fmt;
42489 dtp->data1 = data1;
42490 dtp->data2 = data2;
42491 dtp->data3 = data3;
42492- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
42493+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
42494 dtp->jif = jiffies;
42495 #endif
42496 return;
42497@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
42498 !phba || !phba->slow_ring_trc)
42499 return;
42500
42501- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
42502+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
42503 (lpfc_debugfs_max_slow_ring_trc - 1);
42504 dtp = phba->slow_ring_trc + index;
42505 dtp->fmt = fmt;
42506 dtp->data1 = data1;
42507 dtp->data2 = data2;
42508 dtp->data3 = data3;
42509- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
42510+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
42511 dtp->jif = jiffies;
42512 #endif
42513 return;
42514@@ -4182,7 +4182,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
42515 "slow_ring buffer\n");
42516 goto debug_failed;
42517 }
42518- atomic_set(&phba->slow_ring_trc_cnt, 0);
42519+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
42520 memset(phba->slow_ring_trc, 0,
42521 (sizeof(struct lpfc_debugfs_trc) *
42522 lpfc_debugfs_max_slow_ring_trc));
42523@@ -4228,7 +4228,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
42524 "buffer\n");
42525 goto debug_failed;
42526 }
42527- atomic_set(&vport->disc_trc_cnt, 0);
42528+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
42529
42530 snprintf(name, sizeof(name), "discovery_trace");
42531 vport->debug_disc_trc =
42532diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
42533index cb465b2..2e7b25f 100644
42534--- a/drivers/scsi/lpfc/lpfc_init.c
42535+++ b/drivers/scsi/lpfc/lpfc_init.c
42536@@ -10950,8 +10950,10 @@ lpfc_init(void)
42537 "misc_register returned with status %d", error);
42538
42539 if (lpfc_enable_npiv) {
42540- lpfc_transport_functions.vport_create = lpfc_vport_create;
42541- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
42542+ pax_open_kernel();
42543+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
42544+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
42545+ pax_close_kernel();
42546 }
42547 lpfc_transport_template =
42548 fc_attach_transport(&lpfc_transport_functions);
42549diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
42550index 8523b278e..ce1d812 100644
42551--- a/drivers/scsi/lpfc/lpfc_scsi.c
42552+++ b/drivers/scsi/lpfc/lpfc_scsi.c
42553@@ -331,7 +331,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
42554 uint32_t evt_posted;
42555
42556 spin_lock_irqsave(&phba->hbalock, flags);
42557- atomic_inc(&phba->num_rsrc_err);
42558+ atomic_inc_unchecked(&phba->num_rsrc_err);
42559 phba->last_rsrc_error_time = jiffies;
42560
42561 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
42562@@ -372,7 +372,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
42563 unsigned long flags;
42564 struct lpfc_hba *phba = vport->phba;
42565 uint32_t evt_posted;
42566- atomic_inc(&phba->num_cmd_success);
42567+ atomic_inc_unchecked(&phba->num_cmd_success);
42568
42569 if (vport->cfg_lun_queue_depth <= queue_depth)
42570 return;
42571@@ -416,8 +416,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
42572 unsigned long num_rsrc_err, num_cmd_success;
42573 int i;
42574
42575- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
42576- num_cmd_success = atomic_read(&phba->num_cmd_success);
42577+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
42578+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
42579
42580 /*
42581 * The error and success command counters are global per
42582@@ -445,8 +445,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
42583 }
42584 }
42585 lpfc_destroy_vport_work_array(phba, vports);
42586- atomic_set(&phba->num_rsrc_err, 0);
42587- atomic_set(&phba->num_cmd_success, 0);
42588+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
42589+ atomic_set_unchecked(&phba->num_cmd_success, 0);
42590 }
42591
42592 /**
42593@@ -480,8 +480,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
42594 }
42595 }
42596 lpfc_destroy_vport_work_array(phba, vports);
42597- atomic_set(&phba->num_rsrc_err, 0);
42598- atomic_set(&phba->num_cmd_success, 0);
42599+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
42600+ atomic_set_unchecked(&phba->num_cmd_success, 0);
42601 }
42602
42603 /**
42604diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
42605index 8e1b737..50ff510 100644
42606--- a/drivers/scsi/pmcraid.c
42607+++ b/drivers/scsi/pmcraid.c
42608@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
42609 res->scsi_dev = scsi_dev;
42610 scsi_dev->hostdata = res;
42611 res->change_detected = 0;
42612- atomic_set(&res->read_failures, 0);
42613- atomic_set(&res->write_failures, 0);
42614+ atomic_set_unchecked(&res->read_failures, 0);
42615+ atomic_set_unchecked(&res->write_failures, 0);
42616 rc = 0;
42617 }
42618 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
42619@@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
42620
42621 /* If this was a SCSI read/write command keep count of errors */
42622 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
42623- atomic_inc(&res->read_failures);
42624+ atomic_inc_unchecked(&res->read_failures);
42625 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
42626- atomic_inc(&res->write_failures);
42627+ atomic_inc_unchecked(&res->write_failures);
42628
42629 if (!RES_IS_GSCSI(res->cfg_entry) &&
42630 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
42631@@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
42632 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
42633 * hrrq_id assigned here in queuecommand
42634 */
42635- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
42636+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
42637 pinstance->num_hrrq;
42638 cmd->cmd_done = pmcraid_io_done;
42639
42640@@ -3846,7 +3846,7 @@ static long pmcraid_ioctl_passthrough(
42641 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
42642 * hrrq_id assigned here in queuecommand
42643 */
42644- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
42645+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
42646 pinstance->num_hrrq;
42647
42648 if (request_size) {
42649@@ -4483,7 +4483,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
42650
42651 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
42652 /* add resources only after host is added into system */
42653- if (!atomic_read(&pinstance->expose_resources))
42654+ if (!atomic_read_unchecked(&pinstance->expose_resources))
42655 return;
42656
42657 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
42658@@ -5310,8 +5310,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
42659 init_waitqueue_head(&pinstance->reset_wait_q);
42660
42661 atomic_set(&pinstance->outstanding_cmds, 0);
42662- atomic_set(&pinstance->last_message_id, 0);
42663- atomic_set(&pinstance->expose_resources, 0);
42664+ atomic_set_unchecked(&pinstance->last_message_id, 0);
42665+ atomic_set_unchecked(&pinstance->expose_resources, 0);
42666
42667 INIT_LIST_HEAD(&pinstance->free_res_q);
42668 INIT_LIST_HEAD(&pinstance->used_res_q);
42669@@ -6024,7 +6024,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
42670 /* Schedule worker thread to handle CCN and take care of adding and
42671 * removing devices to OS
42672 */
42673- atomic_set(&pinstance->expose_resources, 1);
42674+ atomic_set_unchecked(&pinstance->expose_resources, 1);
42675 schedule_work(&pinstance->worker_q);
42676 return rc;
42677
42678diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
42679index e1d150f..6c6df44 100644
42680--- a/drivers/scsi/pmcraid.h
42681+++ b/drivers/scsi/pmcraid.h
42682@@ -748,7 +748,7 @@ struct pmcraid_instance {
42683 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
42684
42685 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
42686- atomic_t last_message_id;
42687+ atomic_unchecked_t last_message_id;
42688
42689 /* configuration table */
42690 struct pmcraid_config_table *cfg_table;
42691@@ -777,7 +777,7 @@ struct pmcraid_instance {
42692 atomic_t outstanding_cmds;
42693
42694 /* should add/delete resources to mid-layer now ?*/
42695- atomic_t expose_resources;
42696+ atomic_unchecked_t expose_resources;
42697
42698
42699
42700@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
42701 struct pmcraid_config_table_entry_ext cfg_entry_ext;
42702 };
42703 struct scsi_device *scsi_dev; /* Link scsi_device structure */
42704- atomic_t read_failures; /* count of failed READ commands */
42705- atomic_t write_failures; /* count of failed WRITE commands */
42706+ atomic_unchecked_t read_failures; /* count of failed READ commands */
42707+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
42708
42709 /* To indicate add/delete/modify during CCN */
42710 u8 change_detected;
42711diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
42712index bf60c63..74d4dce 100644
42713--- a/drivers/scsi/qla2xxx/qla_attr.c
42714+++ b/drivers/scsi/qla2xxx/qla_attr.c
42715@@ -2001,7 +2001,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
42716 return 0;
42717 }
42718
42719-struct fc_function_template qla2xxx_transport_functions = {
42720+fc_function_template_no_const qla2xxx_transport_functions = {
42721
42722 .show_host_node_name = 1,
42723 .show_host_port_name = 1,
42724@@ -2048,7 +2048,7 @@ struct fc_function_template qla2xxx_transport_functions = {
42725 .bsg_timeout = qla24xx_bsg_timeout,
42726 };
42727
42728-struct fc_function_template qla2xxx_transport_vport_functions = {
42729+fc_function_template_no_const qla2xxx_transport_vport_functions = {
42730
42731 .show_host_node_name = 1,
42732 .show_host_port_name = 1,
42733diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
42734index 026bfde..90c4018 100644
42735--- a/drivers/scsi/qla2xxx/qla_gbl.h
42736+++ b/drivers/scsi/qla2xxx/qla_gbl.h
42737@@ -528,8 +528,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
42738 struct device_attribute;
42739 extern struct device_attribute *qla2x00_host_attrs[];
42740 struct fc_function_template;
42741-extern struct fc_function_template qla2xxx_transport_functions;
42742-extern struct fc_function_template qla2xxx_transport_vport_functions;
42743+extern fc_function_template_no_const qla2xxx_transport_functions;
42744+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
42745 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
42746 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
42747 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
42748diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
42749index ad72c1d..afc9a98 100644
42750--- a/drivers/scsi/qla2xxx/qla_os.c
42751+++ b/drivers/scsi/qla2xxx/qla_os.c
42752@@ -1571,8 +1571,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
42753 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
42754 /* Ok, a 64bit DMA mask is applicable. */
42755 ha->flags.enable_64bit_addressing = 1;
42756- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
42757- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
42758+ pax_open_kernel();
42759+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
42760+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
42761+ pax_close_kernel();
42762 return;
42763 }
42764 }
42765diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
42766index ddf16a8..80f4dd0 100644
42767--- a/drivers/scsi/qla4xxx/ql4_def.h
42768+++ b/drivers/scsi/qla4xxx/ql4_def.h
42769@@ -291,7 +291,7 @@ struct ddb_entry {
42770 * (4000 only) */
42771 atomic_t relogin_timer; /* Max Time to wait for
42772 * relogin to complete */
42773- atomic_t relogin_retry_count; /* Num of times relogin has been
42774+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
42775 * retried */
42776 uint32_t default_time2wait; /* Default Min time between
42777 * relogins (+aens) */
42778diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
42779index 4d231c1..2892c37 100644
42780--- a/drivers/scsi/qla4xxx/ql4_os.c
42781+++ b/drivers/scsi/qla4xxx/ql4_os.c
42782@@ -2971,12 +2971,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
42783 */
42784 if (!iscsi_is_session_online(cls_sess)) {
42785 /* Reset retry relogin timer */
42786- atomic_inc(&ddb_entry->relogin_retry_count);
42787+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
42788 DEBUG2(ql4_printk(KERN_INFO, ha,
42789 "%s: index[%d] relogin timed out-retrying"
42790 " relogin (%d), retry (%d)\n", __func__,
42791 ddb_entry->fw_ddb_index,
42792- atomic_read(&ddb_entry->relogin_retry_count),
42793+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
42794 ddb_entry->default_time2wait + 4));
42795 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
42796 atomic_set(&ddb_entry->retry_relogin_timer,
42797@@ -5081,7 +5081,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
42798
42799 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
42800 atomic_set(&ddb_entry->relogin_timer, 0);
42801- atomic_set(&ddb_entry->relogin_retry_count, 0);
42802+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
42803 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
42804 ddb_entry->default_relogin_timeout =
42805 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
42806diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
42807index 2c0d0ec..4e8681a 100644
42808--- a/drivers/scsi/scsi.c
42809+++ b/drivers/scsi/scsi.c
42810@@ -661,7 +661,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
42811 unsigned long timeout;
42812 int rtn = 0;
42813
42814- atomic_inc(&cmd->device->iorequest_cnt);
42815+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
42816
42817 /* check if the device is still usable */
42818 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
42819diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
42820index 86d5220..f22c51a 100644
42821--- a/drivers/scsi/scsi_lib.c
42822+++ b/drivers/scsi/scsi_lib.c
42823@@ -1458,7 +1458,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
42824 shost = sdev->host;
42825 scsi_init_cmd_errh(cmd);
42826 cmd->result = DID_NO_CONNECT << 16;
42827- atomic_inc(&cmd->device->iorequest_cnt);
42828+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
42829
42830 /*
42831 * SCSI request completion path will do scsi_device_unbusy(),
42832@@ -1484,9 +1484,9 @@ static void scsi_softirq_done(struct request *rq)
42833
42834 INIT_LIST_HEAD(&cmd->eh_entry);
42835
42836- atomic_inc(&cmd->device->iodone_cnt);
42837+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
42838 if (cmd->result)
42839- atomic_inc(&cmd->device->ioerr_cnt);
42840+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
42841
42842 disposition = scsi_decide_disposition(cmd);
42843 if (disposition != SUCCESS &&
42844diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
42845index 931a7d9..0c2a754 100644
42846--- a/drivers/scsi/scsi_sysfs.c
42847+++ b/drivers/scsi/scsi_sysfs.c
42848@@ -658,7 +658,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
42849 char *buf) \
42850 { \
42851 struct scsi_device *sdev = to_scsi_device(dev); \
42852- unsigned long long count = atomic_read(&sdev->field); \
42853+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
42854 return snprintf(buf, 20, "0x%llx\n", count); \
42855 } \
42856 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
42857diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
42858index 84a1fdf..693b0d6 100644
42859--- a/drivers/scsi/scsi_tgt_lib.c
42860+++ b/drivers/scsi/scsi_tgt_lib.c
42861@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
42862 int err;
42863
42864 dprintk("%lx %u\n", uaddr, len);
42865- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
42866+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
42867 if (err) {
42868 /*
42869 * TODO: need to fixup sg_tablesize, max_segment_size,
42870diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
42871index e106c27..11a380e 100644
42872--- a/drivers/scsi/scsi_transport_fc.c
42873+++ b/drivers/scsi/scsi_transport_fc.c
42874@@ -497,7 +497,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
42875 * Netlink Infrastructure
42876 */
42877
42878-static atomic_t fc_event_seq;
42879+static atomic_unchecked_t fc_event_seq;
42880
42881 /**
42882 * fc_get_event_number - Obtain the next sequential FC event number
42883@@ -510,7 +510,7 @@ static atomic_t fc_event_seq;
42884 u32
42885 fc_get_event_number(void)
42886 {
42887- return atomic_add_return(1, &fc_event_seq);
42888+ return atomic_add_return_unchecked(1, &fc_event_seq);
42889 }
42890 EXPORT_SYMBOL(fc_get_event_number);
42891
42892@@ -654,7 +654,7 @@ static __init int fc_transport_init(void)
42893 {
42894 int error;
42895
42896- atomic_set(&fc_event_seq, 0);
42897+ atomic_set_unchecked(&fc_event_seq, 0);
42898
42899 error = transport_class_register(&fc_host_class);
42900 if (error)
42901@@ -844,7 +844,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
42902 char *cp;
42903
42904 *val = simple_strtoul(buf, &cp, 0);
42905- if ((*cp && (*cp != '\n')) || (*val < 0))
42906+ if (*cp && (*cp != '\n'))
42907 return -EINVAL;
42908 /*
42909 * Check for overflow; dev_loss_tmo is u32
42910diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
42911index 133926b..903000d 100644
42912--- a/drivers/scsi/scsi_transport_iscsi.c
42913+++ b/drivers/scsi/scsi_transport_iscsi.c
42914@@ -80,7 +80,7 @@ struct iscsi_internal {
42915 struct transport_container session_cont;
42916 };
42917
42918-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
42919+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
42920 static struct workqueue_struct *iscsi_eh_timer_workq;
42921
42922 static DEFINE_IDA(iscsi_sess_ida);
42923@@ -1738,7 +1738,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
42924 int err;
42925
42926 ihost = shost->shost_data;
42927- session->sid = atomic_add_return(1, &iscsi_session_nr);
42928+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
42929
42930 if (target_id == ISCSI_MAX_TARGET) {
42931 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
42932@@ -3944,7 +3944,7 @@ static __init int iscsi_transport_init(void)
42933 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
42934 ISCSI_TRANSPORT_VERSION);
42935
42936- atomic_set(&iscsi_session_nr, 0);
42937+ atomic_set_unchecked(&iscsi_session_nr, 0);
42938
42939 err = class_register(&iscsi_transport_class);
42940 if (err)
42941diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
42942index f379c7f..e8fc69c 100644
42943--- a/drivers/scsi/scsi_transport_srp.c
42944+++ b/drivers/scsi/scsi_transport_srp.c
42945@@ -33,7 +33,7 @@
42946 #include "scsi_transport_srp_internal.h"
42947
42948 struct srp_host_attrs {
42949- atomic_t next_port_id;
42950+ atomic_unchecked_t next_port_id;
42951 };
42952 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
42953
42954@@ -61,7 +61,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
42955 struct Scsi_Host *shost = dev_to_shost(dev);
42956 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
42957
42958- atomic_set(&srp_host->next_port_id, 0);
42959+ atomic_set_unchecked(&srp_host->next_port_id, 0);
42960 return 0;
42961 }
42962
42963@@ -210,7 +210,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
42964 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
42965 rport->roles = ids->roles;
42966
42967- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
42968+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
42969 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
42970
42971 transport_setup_device(&rport->dev);
42972diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
42973index 6f6a1b4..80704a9 100644
42974--- a/drivers/scsi/sd.c
42975+++ b/drivers/scsi/sd.c
42976@@ -2918,7 +2918,7 @@ static int sd_probe(struct device *dev)
42977 sdkp->disk = gd;
42978 sdkp->index = index;
42979 atomic_set(&sdkp->openers, 0);
42980- atomic_set(&sdkp->device->ioerr_cnt, 0);
42981+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
42982
42983 if (!sdp->request_queue->rq_timeout) {
42984 if (sdp->type != TYPE_MOD)
42985diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
42986index df5e961..df6b97f 100644
42987--- a/drivers/scsi/sg.c
42988+++ b/drivers/scsi/sg.c
42989@@ -1102,7 +1102,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
42990 sdp->disk->disk_name,
42991 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
42992 NULL,
42993- (char *)arg);
42994+ (char __user *)arg);
42995 case BLKTRACESTART:
42996 return blk_trace_startstop(sdp->device->request_queue, 1);
42997 case BLKTRACESTOP:
42998diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
42999index 32b7bb1..2f1c4bd 100644
43000--- a/drivers/spi/spi.c
43001+++ b/drivers/spi/spi.c
43002@@ -1631,7 +1631,7 @@ int spi_bus_unlock(struct spi_master *master)
43003 EXPORT_SYMBOL_GPL(spi_bus_unlock);
43004
43005 /* portable code must never pass more than 32 bytes */
43006-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
43007+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
43008
43009 static u8 *buf;
43010
43011diff --git a/drivers/staging/media/solo6x10/solo6x10-core.c b/drivers/staging/media/solo6x10/solo6x10-core.c
43012index 3675020..e80d92c 100644
43013--- a/drivers/staging/media/solo6x10/solo6x10-core.c
43014+++ b/drivers/staging/media/solo6x10/solo6x10-core.c
43015@@ -434,7 +434,7 @@ static void solo_device_release(struct device *dev)
43016
43017 static int solo_sysfs_init(struct solo_dev *solo_dev)
43018 {
43019- struct bin_attribute *sdram_attr = &solo_dev->sdram_attr;
43020+ bin_attribute_no_const *sdram_attr = &solo_dev->sdram_attr;
43021 struct device *dev = &solo_dev->dev;
43022 const char *driver;
43023 int i;
43024diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
43025index 34afc16..ffe44dd 100644
43026--- a/drivers/staging/octeon/ethernet-rx.c
43027+++ b/drivers/staging/octeon/ethernet-rx.c
43028@@ -421,11 +421,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
43029 /* Increment RX stats for virtual ports */
43030 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
43031 #ifdef CONFIG_64BIT
43032- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
43033- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
43034+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
43035+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
43036 #else
43037- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
43038- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
43039+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
43040+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
43041 #endif
43042 }
43043 netif_receive_skb(skb);
43044@@ -437,9 +437,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
43045 dev->name);
43046 */
43047 #ifdef CONFIG_64BIT
43048- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
43049+ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
43050 #else
43051- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
43052+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
43053 #endif
43054 dev_kfree_skb_irq(skb);
43055 }
43056diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
43057index c3a90e7..023619a 100644
43058--- a/drivers/staging/octeon/ethernet.c
43059+++ b/drivers/staging/octeon/ethernet.c
43060@@ -252,11 +252,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
43061 * since the RX tasklet also increments it.
43062 */
43063 #ifdef CONFIG_64BIT
43064- atomic64_add(rx_status.dropped_packets,
43065- (atomic64_t *)&priv->stats.rx_dropped);
43066+ atomic64_add_unchecked(rx_status.dropped_packets,
43067+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
43068 #else
43069- atomic_add(rx_status.dropped_packets,
43070- (atomic_t *)&priv->stats.rx_dropped);
43071+ atomic_add_unchecked(rx_status.dropped_packets,
43072+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
43073 #endif
43074 }
43075
43076diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
43077index dc23395..cf7e9b1 100644
43078--- a/drivers/staging/rtl8712/rtl871x_io.h
43079+++ b/drivers/staging/rtl8712/rtl871x_io.h
43080@@ -108,7 +108,7 @@ struct _io_ops {
43081 u8 *pmem);
43082 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
43083 u8 *pmem);
43084-};
43085+} __no_const;
43086
43087 struct io_req {
43088 struct list_head list;
43089diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
43090index 1f5088b..0e59820 100644
43091--- a/drivers/staging/sbe-2t3e3/netdev.c
43092+++ b/drivers/staging/sbe-2t3e3/netdev.c
43093@@ -51,7 +51,7 @@ static int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
43094 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
43095
43096 if (rlen)
43097- if (copy_to_user(data, &resp, rlen))
43098+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
43099 return -EFAULT;
43100
43101 return 0;
43102diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
43103index a863a98..d272795 100644
43104--- a/drivers/staging/usbip/vhci.h
43105+++ b/drivers/staging/usbip/vhci.h
43106@@ -83,7 +83,7 @@ struct vhci_hcd {
43107 unsigned resuming:1;
43108 unsigned long re_timeout;
43109
43110- atomic_t seqnum;
43111+ atomic_unchecked_t seqnum;
43112
43113 /*
43114 * NOTE:
43115diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
43116index d7974cb..d78076b 100644
43117--- a/drivers/staging/usbip/vhci_hcd.c
43118+++ b/drivers/staging/usbip/vhci_hcd.c
43119@@ -441,7 +441,7 @@ static void vhci_tx_urb(struct urb *urb)
43120
43121 spin_lock(&vdev->priv_lock);
43122
43123- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
43124+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
43125 if (priv->seqnum == 0xffff)
43126 dev_info(&urb->dev->dev, "seqnum max\n");
43127
43128@@ -687,7 +687,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
43129 return -ENOMEM;
43130 }
43131
43132- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
43133+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
43134 if (unlink->seqnum == 0xffff)
43135 pr_info("seqnum max\n");
43136
43137@@ -891,7 +891,7 @@ static int vhci_start(struct usb_hcd *hcd)
43138 vdev->rhport = rhport;
43139 }
43140
43141- atomic_set(&vhci->seqnum, 0);
43142+ atomic_set_unchecked(&vhci->seqnum, 0);
43143 spin_lock_init(&vhci->lock);
43144
43145 hcd->power_budget = 0; /* no limit */
43146diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
43147index d07fcb5..358e1e1 100644
43148--- a/drivers/staging/usbip/vhci_rx.c
43149+++ b/drivers/staging/usbip/vhci_rx.c
43150@@ -80,7 +80,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
43151 if (!urb) {
43152 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
43153 pr_info("max seqnum %d\n",
43154- atomic_read(&the_controller->seqnum));
43155+ atomic_read_unchecked(&the_controller->seqnum));
43156 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
43157 return;
43158 }
43159diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
43160index 8417c2f..ef5ebd6 100644
43161--- a/drivers/staging/vt6655/hostap.c
43162+++ b/drivers/staging/vt6655/hostap.c
43163@@ -69,14 +69,13 @@ static int msglevel = MSG_LEVEL_INFO;
43164 *
43165 */
43166
43167+static net_device_ops_no_const apdev_netdev_ops;
43168+
43169 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
43170 {
43171 PSDevice apdev_priv;
43172 struct net_device *dev = pDevice->dev;
43173 int ret;
43174- const struct net_device_ops apdev_netdev_ops = {
43175- .ndo_start_xmit = pDevice->tx_80211,
43176- };
43177
43178 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
43179
43180@@ -88,6 +87,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
43181 *apdev_priv = *pDevice;
43182 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
43183
43184+ /* only half broken now */
43185+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
43186 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
43187
43188 pDevice->apdev->type = ARPHRD_IEEE80211;
43189diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
43190index c699a30..b90a5fd 100644
43191--- a/drivers/staging/vt6656/hostap.c
43192+++ b/drivers/staging/vt6656/hostap.c
43193@@ -60,14 +60,13 @@ static int msglevel =MSG_LEVEL_INFO;
43194 *
43195 */
43196
43197+static net_device_ops_no_const apdev_netdev_ops;
43198+
43199 static int hostap_enable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
43200 {
43201 struct vnt_private *apdev_priv;
43202 struct net_device *dev = pDevice->dev;
43203 int ret;
43204- const struct net_device_ops apdev_netdev_ops = {
43205- .ndo_start_xmit = pDevice->tx_80211,
43206- };
43207
43208 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
43209
43210@@ -79,6 +78,8 @@ static int hostap_enable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
43211 *apdev_priv = *pDevice;
43212 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
43213
43214+ /* only half broken now */
43215+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
43216 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
43217
43218 pDevice->apdev->type = ARPHRD_IEEE80211;
43219diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
43220index d7e51e4..d07eaab 100644
43221--- a/drivers/staging/zcache/tmem.c
43222+++ b/drivers/staging/zcache/tmem.c
43223@@ -51,7 +51,7 @@
43224 * A tmem host implementation must use this function to register callbacks
43225 * for memory allocation.
43226 */
43227-static struct tmem_hostops tmem_hostops;
43228+static tmem_hostops_no_const tmem_hostops;
43229
43230 static void tmem_objnode_tree_init(void);
43231
43232@@ -65,7 +65,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
43233 * A tmem host implementation must use this function to register
43234 * callbacks for a page-accessible memory (PAM) implementation.
43235 */
43236-static struct tmem_pamops tmem_pamops;
43237+static tmem_pamops_no_const tmem_pamops;
43238
43239 void tmem_register_pamops(struct tmem_pamops *m)
43240 {
43241diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
43242index d128ce2..a43980c 100644
43243--- a/drivers/staging/zcache/tmem.h
43244+++ b/drivers/staging/zcache/tmem.h
43245@@ -226,6 +226,7 @@ struct tmem_pamops {
43246 int (*replace_in_obj)(void *, struct tmem_obj *);
43247 #endif
43248 };
43249+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
43250 extern void tmem_register_pamops(struct tmem_pamops *m);
43251
43252 /* memory allocation methods provided by the host implementation */
43253@@ -235,6 +236,7 @@ struct tmem_hostops {
43254 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
43255 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
43256 };
43257+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
43258 extern void tmem_register_hostops(struct tmem_hostops *m);
43259
43260 /* core tmem accessor functions */
43261diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
43262index 4630481..c26782a 100644
43263--- a/drivers/target/target_core_device.c
43264+++ b/drivers/target/target_core_device.c
43265@@ -1400,7 +1400,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
43266 spin_lock_init(&dev->se_port_lock);
43267 spin_lock_init(&dev->se_tmr_lock);
43268 spin_lock_init(&dev->qf_cmd_lock);
43269- atomic_set(&dev->dev_ordered_id, 0);
43270+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
43271 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
43272 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
43273 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
43274diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
43275index 21e3158..43c6004 100644
43276--- a/drivers/target/target_core_transport.c
43277+++ b/drivers/target/target_core_transport.c
43278@@ -1080,7 +1080,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
43279 * Used to determine when ORDERED commands should go from
43280 * Dormant to Active status.
43281 */
43282- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
43283+ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
43284 smp_mb__after_atomic_inc();
43285 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
43286 cmd->se_ordered_id, cmd->sam_task_attr,
43287diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
43288index 33f83fe..d80f8e1 100644
43289--- a/drivers/tty/cyclades.c
43290+++ b/drivers/tty/cyclades.c
43291@@ -1570,10 +1570,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
43292 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
43293 info->port.count);
43294 #endif
43295- info->port.count++;
43296+ atomic_inc(&info->port.count);
43297 #ifdef CY_DEBUG_COUNT
43298 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
43299- current->pid, info->port.count);
43300+ current->pid, atomic_read(&info->port.count));
43301 #endif
43302
43303 /*
43304@@ -3972,7 +3972,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
43305 for (j = 0; j < cy_card[i].nports; j++) {
43306 info = &cy_card[i].ports[j];
43307
43308- if (info->port.count) {
43309+ if (atomic_read(&info->port.count)) {
43310 /* XXX is the ldisc num worth this? */
43311 struct tty_struct *tty;
43312 struct tty_ldisc *ld;
43313diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
43314index eb255e8..f637a57 100644
43315--- a/drivers/tty/hvc/hvc_console.c
43316+++ b/drivers/tty/hvc/hvc_console.c
43317@@ -338,7 +338,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
43318
43319 spin_lock_irqsave(&hp->port.lock, flags);
43320 /* Check and then increment for fast path open. */
43321- if (hp->port.count++ > 0) {
43322+ if (atomic_inc_return(&hp->port.count) > 1) {
43323 spin_unlock_irqrestore(&hp->port.lock, flags);
43324 hvc_kick();
43325 return 0;
43326@@ -388,7 +388,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
43327
43328 spin_lock_irqsave(&hp->port.lock, flags);
43329
43330- if (--hp->port.count == 0) {
43331+ if (atomic_dec_return(&hp->port.count) == 0) {
43332 spin_unlock_irqrestore(&hp->port.lock, flags);
43333 /* We are done with the tty pointer now. */
43334 tty_port_tty_set(&hp->port, NULL);
43335@@ -406,9 +406,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
43336 */
43337 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
43338 } else {
43339- if (hp->port.count < 0)
43340+ if (atomic_read(&hp->port.count) < 0)
43341 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
43342- hp->vtermno, hp->port.count);
43343+ hp->vtermno, atomic_read(&hp->port.count));
43344 spin_unlock_irqrestore(&hp->port.lock, flags);
43345 }
43346 }
43347@@ -438,12 +438,12 @@ static void hvc_hangup(struct tty_struct *tty)
43348 * open->hangup case this can be called after the final close so prevent
43349 * that from happening for now.
43350 */
43351- if (hp->port.count <= 0) {
43352+ if (atomic_read(&hp->port.count) <= 0) {
43353 spin_unlock_irqrestore(&hp->port.lock, flags);
43354 return;
43355 }
43356
43357- hp->port.count = 0;
43358+ atomic_set(&hp->port.count, 0);
43359 spin_unlock_irqrestore(&hp->port.lock, flags);
43360 tty_port_tty_set(&hp->port, NULL);
43361
43362@@ -491,7 +491,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
43363 return -EPIPE;
43364
43365 /* FIXME what's this (unprotected) check for? */
43366- if (hp->port.count <= 0)
43367+ if (atomic_read(&hp->port.count) <= 0)
43368 return -EIO;
43369
43370 spin_lock_irqsave(&hp->lock, flags);
43371diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
43372index 81e939e..95ead10 100644
43373--- a/drivers/tty/hvc/hvcs.c
43374+++ b/drivers/tty/hvc/hvcs.c
43375@@ -83,6 +83,7 @@
43376 #include <asm/hvcserver.h>
43377 #include <asm/uaccess.h>
43378 #include <asm/vio.h>
43379+#include <asm/local.h>
43380
43381 /*
43382 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
43383@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
43384
43385 spin_lock_irqsave(&hvcsd->lock, flags);
43386
43387- if (hvcsd->port.count > 0) {
43388+ if (atomic_read(&hvcsd->port.count) > 0) {
43389 spin_unlock_irqrestore(&hvcsd->lock, flags);
43390 printk(KERN_INFO "HVCS: vterm state unchanged. "
43391 "The hvcs device node is still in use.\n");
43392@@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
43393 }
43394 }
43395
43396- hvcsd->port.count = 0;
43397+ atomic_set(&hvcsd->port.count, 0);
43398 hvcsd->port.tty = tty;
43399 tty->driver_data = hvcsd;
43400
43401@@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
43402 unsigned long flags;
43403
43404 spin_lock_irqsave(&hvcsd->lock, flags);
43405- hvcsd->port.count++;
43406+ atomic_inc(&hvcsd->port.count);
43407 hvcsd->todo_mask |= HVCS_SCHED_READ;
43408 spin_unlock_irqrestore(&hvcsd->lock, flags);
43409
43410@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
43411 hvcsd = tty->driver_data;
43412
43413 spin_lock_irqsave(&hvcsd->lock, flags);
43414- if (--hvcsd->port.count == 0) {
43415+ if (atomic_dec_and_test(&hvcsd->port.count)) {
43416
43417 vio_disable_interrupts(hvcsd->vdev);
43418
43419@@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
43420
43421 free_irq(irq, hvcsd);
43422 return;
43423- } else if (hvcsd->port.count < 0) {
43424+ } else if (atomic_read(&hvcsd->port.count) < 0) {
43425 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
43426 " is missmanaged.\n",
43427- hvcsd->vdev->unit_address, hvcsd->port.count);
43428+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
43429 }
43430
43431 spin_unlock_irqrestore(&hvcsd->lock, flags);
43432@@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty)
43433
43434 spin_lock_irqsave(&hvcsd->lock, flags);
43435 /* Preserve this so that we know how many kref refs to put */
43436- temp_open_count = hvcsd->port.count;
43437+ temp_open_count = atomic_read(&hvcsd->port.count);
43438
43439 /*
43440 * Don't kref put inside the spinlock because the destruction
43441@@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty)
43442 tty->driver_data = NULL;
43443 hvcsd->port.tty = NULL;
43444
43445- hvcsd->port.count = 0;
43446+ atomic_set(&hvcsd->port.count, 0);
43447
43448 /* This will drop any buffered data on the floor which is OK in a hangup
43449 * scenario. */
43450@@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty,
43451 * the middle of a write operation? This is a crummy place to do this
43452 * but we want to keep it all in the spinlock.
43453 */
43454- if (hvcsd->port.count <= 0) {
43455+ if (atomic_read(&hvcsd->port.count) <= 0) {
43456 spin_unlock_irqrestore(&hvcsd->lock, flags);
43457 return -ENODEV;
43458 }
43459@@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty)
43460 {
43461 struct hvcs_struct *hvcsd = tty->driver_data;
43462
43463- if (!hvcsd || hvcsd->port.count <= 0)
43464+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
43465 return 0;
43466
43467 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
43468diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
43469index 8fd72ff..34a0bed 100644
43470--- a/drivers/tty/ipwireless/tty.c
43471+++ b/drivers/tty/ipwireless/tty.c
43472@@ -29,6 +29,7 @@
43473 #include <linux/tty_driver.h>
43474 #include <linux/tty_flip.h>
43475 #include <linux/uaccess.h>
43476+#include <asm/local.h>
43477
43478 #include "tty.h"
43479 #include "network.h"
43480@@ -99,10 +100,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
43481 mutex_unlock(&tty->ipw_tty_mutex);
43482 return -ENODEV;
43483 }
43484- if (tty->port.count == 0)
43485+ if (atomic_read(&tty->port.count) == 0)
43486 tty->tx_bytes_queued = 0;
43487
43488- tty->port.count++;
43489+ atomic_inc(&tty->port.count);
43490
43491 tty->port.tty = linux_tty;
43492 linux_tty->driver_data = tty;
43493@@ -118,9 +119,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
43494
43495 static void do_ipw_close(struct ipw_tty *tty)
43496 {
43497- tty->port.count--;
43498-
43499- if (tty->port.count == 0) {
43500+ if (atomic_dec_return(&tty->port.count) == 0) {
43501 struct tty_struct *linux_tty = tty->port.tty;
43502
43503 if (linux_tty != NULL) {
43504@@ -141,7 +140,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
43505 return;
43506
43507 mutex_lock(&tty->ipw_tty_mutex);
43508- if (tty->port.count == 0) {
43509+ if (atomic_read(&tty->port.count) == 0) {
43510 mutex_unlock(&tty->ipw_tty_mutex);
43511 return;
43512 }
43513@@ -164,7 +163,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
43514
43515 mutex_lock(&tty->ipw_tty_mutex);
43516
43517- if (!tty->port.count) {
43518+ if (!atomic_read(&tty->port.count)) {
43519 mutex_unlock(&tty->ipw_tty_mutex);
43520 return;
43521 }
43522@@ -206,7 +205,7 @@ static int ipw_write(struct tty_struct *linux_tty,
43523 return -ENODEV;
43524
43525 mutex_lock(&tty->ipw_tty_mutex);
43526- if (!tty->port.count) {
43527+ if (!atomic_read(&tty->port.count)) {
43528 mutex_unlock(&tty->ipw_tty_mutex);
43529 return -EINVAL;
43530 }
43531@@ -246,7 +245,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
43532 if (!tty)
43533 return -ENODEV;
43534
43535- if (!tty->port.count)
43536+ if (!atomic_read(&tty->port.count))
43537 return -EINVAL;
43538
43539 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
43540@@ -288,7 +287,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
43541 if (!tty)
43542 return 0;
43543
43544- if (!tty->port.count)
43545+ if (!atomic_read(&tty->port.count))
43546 return 0;
43547
43548 return tty->tx_bytes_queued;
43549@@ -369,7 +368,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
43550 if (!tty)
43551 return -ENODEV;
43552
43553- if (!tty->port.count)
43554+ if (!atomic_read(&tty->port.count))
43555 return -EINVAL;
43556
43557 return get_control_lines(tty);
43558@@ -385,7 +384,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
43559 if (!tty)
43560 return -ENODEV;
43561
43562- if (!tty->port.count)
43563+ if (!atomic_read(&tty->port.count))
43564 return -EINVAL;
43565
43566 return set_control_lines(tty, set, clear);
43567@@ -399,7 +398,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
43568 if (!tty)
43569 return -ENODEV;
43570
43571- if (!tty->port.count)
43572+ if (!atomic_read(&tty->port.count))
43573 return -EINVAL;
43574
43575 /* FIXME: Exactly how is the tty object locked here .. */
43576@@ -555,7 +554,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
43577 * are gone */
43578 mutex_lock(&ttyj->ipw_tty_mutex);
43579 }
43580- while (ttyj->port.count)
43581+ while (atomic_read(&ttyj->port.count))
43582 do_ipw_close(ttyj);
43583 ipwireless_disassociate_network_ttys(network,
43584 ttyj->channel_idx);
43585diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
43586index 1deaca4..c8582d4 100644
43587--- a/drivers/tty/moxa.c
43588+++ b/drivers/tty/moxa.c
43589@@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
43590 }
43591
43592 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
43593- ch->port.count++;
43594+ atomic_inc(&ch->port.count);
43595 tty->driver_data = ch;
43596 tty_port_tty_set(&ch->port, tty);
43597 mutex_lock(&ch->port.mutex);
43598diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
43599index 6422390..49003ac8 100644
43600--- a/drivers/tty/n_gsm.c
43601+++ b/drivers/tty/n_gsm.c
43602@@ -1632,7 +1632,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
43603 spin_lock_init(&dlci->lock);
43604 mutex_init(&dlci->mutex);
43605 dlci->fifo = &dlci->_fifo;
43606- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
43607+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
43608 kfree(dlci);
43609 return NULL;
43610 }
43611@@ -2932,7 +2932,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
43612 struct gsm_dlci *dlci = tty->driver_data;
43613 struct tty_port *port = &dlci->port;
43614
43615- port->count++;
43616+ atomic_inc(&port->count);
43617 dlci_get(dlci);
43618 dlci_get(dlci->gsm->dlci[0]);
43619 mux_get(dlci->gsm);
43620diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
43621index 6c7fe90..9241dab 100644
43622--- a/drivers/tty/n_tty.c
43623+++ b/drivers/tty/n_tty.c
43624@@ -2203,6 +2203,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
43625 {
43626 *ops = tty_ldisc_N_TTY;
43627 ops->owner = NULL;
43628- ops->refcount = ops->flags = 0;
43629+ atomic_set(&ops->refcount, 0);
43630+ ops->flags = 0;
43631 }
43632 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
43633diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
43634index abfd990..5ab5da9 100644
43635--- a/drivers/tty/pty.c
43636+++ b/drivers/tty/pty.c
43637@@ -796,8 +796,10 @@ static void __init unix98_pty_init(void)
43638 panic("Couldn't register Unix98 pts driver");
43639
43640 /* Now create the /dev/ptmx special device */
43641+ pax_open_kernel();
43642 tty_default_fops(&ptmx_fops);
43643- ptmx_fops.open = ptmx_open;
43644+ *(void **)&ptmx_fops.open = ptmx_open;
43645+ pax_close_kernel();
43646
43647 cdev_init(&ptmx_cdev, &ptmx_fops);
43648 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
43649diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
43650index 354564e..fe50d9a 100644
43651--- a/drivers/tty/rocket.c
43652+++ b/drivers/tty/rocket.c
43653@@ -914,7 +914,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
43654 tty->driver_data = info;
43655 tty_port_tty_set(port, tty);
43656
43657- if (port->count++ == 0) {
43658+ if (atomic_inc_return(&port->count) == 1) {
43659 atomic_inc(&rp_num_ports_open);
43660
43661 #ifdef ROCKET_DEBUG_OPEN
43662@@ -923,7 +923,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
43663 #endif
43664 }
43665 #ifdef ROCKET_DEBUG_OPEN
43666- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
43667+ printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));
43668 #endif
43669
43670 /*
43671@@ -1515,7 +1515,7 @@ static void rp_hangup(struct tty_struct *tty)
43672 spin_unlock_irqrestore(&info->port.lock, flags);
43673 return;
43674 }
43675- if (info->port.count)
43676+ if (atomic_read(&info->port.count))
43677 atomic_dec(&rp_num_ports_open);
43678 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
43679 spin_unlock_irqrestore(&info->port.lock, flags);
43680diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
43681index 1002054..dd644a8 100644
43682--- a/drivers/tty/serial/kgdboc.c
43683+++ b/drivers/tty/serial/kgdboc.c
43684@@ -24,8 +24,9 @@
43685 #define MAX_CONFIG_LEN 40
43686
43687 static struct kgdb_io kgdboc_io_ops;
43688+static struct kgdb_io kgdboc_io_ops_console;
43689
43690-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
43691+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
43692 static int configured = -1;
43693
43694 static char config[MAX_CONFIG_LEN];
43695@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
43696 kgdboc_unregister_kbd();
43697 if (configured == 1)
43698 kgdb_unregister_io_module(&kgdboc_io_ops);
43699+ else if (configured == 2)
43700+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
43701 }
43702
43703 static int configure_kgdboc(void)
43704@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
43705 int err;
43706 char *cptr = config;
43707 struct console *cons;
43708+ int is_console = 0;
43709
43710 err = kgdboc_option_setup(config);
43711 if (err || !strlen(config) || isspace(config[0]))
43712 goto noconfig;
43713
43714 err = -ENODEV;
43715- kgdboc_io_ops.is_console = 0;
43716 kgdb_tty_driver = NULL;
43717
43718 kgdboc_use_kms = 0;
43719@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
43720 int idx;
43721 if (cons->device && cons->device(cons, &idx) == p &&
43722 idx == tty_line) {
43723- kgdboc_io_ops.is_console = 1;
43724+ is_console = 1;
43725 break;
43726 }
43727 cons = cons->next;
43728@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
43729 kgdb_tty_line = tty_line;
43730
43731 do_register:
43732- err = kgdb_register_io_module(&kgdboc_io_ops);
43733+ if (is_console) {
43734+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
43735+ configured = 2;
43736+ } else {
43737+ err = kgdb_register_io_module(&kgdboc_io_ops);
43738+ configured = 1;
43739+ }
43740 if (err)
43741 goto noconfig;
43742
43743@@ -205,8 +214,6 @@ do_register:
43744 if (err)
43745 goto nmi_con_failed;
43746
43747- configured = 1;
43748-
43749 return 0;
43750
43751 nmi_con_failed:
43752@@ -223,7 +230,7 @@ noconfig:
43753 static int __init init_kgdboc(void)
43754 {
43755 /* Already configured? */
43756- if (configured == 1)
43757+ if (configured >= 1)
43758 return 0;
43759
43760 return configure_kgdboc();
43761@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
43762 if (config[len - 1] == '\n')
43763 config[len - 1] = '\0';
43764
43765- if (configured == 1)
43766+ if (configured >= 1)
43767 cleanup_kgdboc();
43768
43769 /* Go and configure with the new params. */
43770@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
43771 .post_exception = kgdboc_post_exp_handler,
43772 };
43773
43774+static struct kgdb_io kgdboc_io_ops_console = {
43775+ .name = "kgdboc",
43776+ .read_char = kgdboc_get_char,
43777+ .write_char = kgdboc_put_char,
43778+ .pre_exception = kgdboc_pre_exp_handler,
43779+ .post_exception = kgdboc_post_exp_handler,
43780+ .is_console = 1
43781+};
43782+
43783 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
43784 /* This is only available if kgdboc is a built in for early debugging */
43785 static int __init kgdboc_early_init(char *opt)
43786diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
43787index 0c8a9fa..234a95f 100644
43788--- a/drivers/tty/serial/samsung.c
43789+++ b/drivers/tty/serial/samsung.c
43790@@ -453,11 +453,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
43791 }
43792 }
43793
43794+static int s3c64xx_serial_startup(struct uart_port *port);
43795 static int s3c24xx_serial_startup(struct uart_port *port)
43796 {
43797 struct s3c24xx_uart_port *ourport = to_ourport(port);
43798 int ret;
43799
43800+ /* Startup sequence is different for s3c64xx and higher SoC's */
43801+ if (s3c24xx_serial_has_interrupt_mask(port))
43802+ return s3c64xx_serial_startup(port);
43803+
43804 dbg("s3c24xx_serial_startup: port=%p (%08lx,%p)\n",
43805 port->mapbase, port->membase);
43806
43807@@ -1124,10 +1129,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
43808 /* setup info for port */
43809 port->dev = &platdev->dev;
43810
43811- /* Startup sequence is different for s3c64xx and higher SoC's */
43812- if (s3c24xx_serial_has_interrupt_mask(port))
43813- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
43814-
43815 port->uartclk = 1;
43816
43817 if (cfg->uart_flags & UPF_CONS_FLOW) {
43818diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
43819index f87dbfd..42ad4b1 100644
43820--- a/drivers/tty/serial/serial_core.c
43821+++ b/drivers/tty/serial/serial_core.c
43822@@ -1454,7 +1454,7 @@ static void uart_hangup(struct tty_struct *tty)
43823 uart_flush_buffer(tty);
43824 uart_shutdown(tty, state);
43825 spin_lock_irqsave(&port->lock, flags);
43826- port->count = 0;
43827+ atomic_set(&port->count, 0);
43828 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
43829 spin_unlock_irqrestore(&port->lock, flags);
43830 tty_port_tty_set(port, NULL);
43831@@ -1550,7 +1550,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
43832 goto end;
43833 }
43834
43835- port->count++;
43836+ atomic_inc(&port->count);
43837 if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
43838 retval = -ENXIO;
43839 goto err_dec_count;
43840@@ -1578,7 +1578,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
43841 /*
43842 * Make sure the device is in D0 state.
43843 */
43844- if (port->count == 1)
43845+ if (atomic_read(&port->count) == 1)
43846 uart_change_pm(state, UART_PM_STATE_ON);
43847
43848 /*
43849@@ -1596,7 +1596,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
43850 end:
43851 return retval;
43852 err_dec_count:
43853- port->count--;
43854+ atomic_dec(&port->count);
43855 mutex_unlock(&port->mutex);
43856 goto end;
43857 }
43858diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
43859index 8eaf1ab..85c030d 100644
43860--- a/drivers/tty/synclink.c
43861+++ b/drivers/tty/synclink.c
43862@@ -3090,7 +3090,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
43863
43864 if (debug_level >= DEBUG_LEVEL_INFO)
43865 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
43866- __FILE__,__LINE__, info->device_name, info->port.count);
43867+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
43868
43869 if (tty_port_close_start(&info->port, tty, filp) == 0)
43870 goto cleanup;
43871@@ -3108,7 +3108,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
43872 cleanup:
43873 if (debug_level >= DEBUG_LEVEL_INFO)
43874 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
43875- tty->driver->name, info->port.count);
43876+ tty->driver->name, atomic_read(&info->port.count));
43877
43878 } /* end of mgsl_close() */
43879
43880@@ -3207,8 +3207,8 @@ static void mgsl_hangup(struct tty_struct *tty)
43881
43882 mgsl_flush_buffer(tty);
43883 shutdown(info);
43884-
43885- info->port.count = 0;
43886+
43887+ atomic_set(&info->port.count, 0);
43888 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
43889 info->port.tty = NULL;
43890
43891@@ -3297,12 +3297,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
43892
43893 if (debug_level >= DEBUG_LEVEL_INFO)
43894 printk("%s(%d):block_til_ready before block on %s count=%d\n",
43895- __FILE__,__LINE__, tty->driver->name, port->count );
43896+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
43897
43898 spin_lock_irqsave(&info->irq_spinlock, flags);
43899 if (!tty_hung_up_p(filp)) {
43900 extra_count = true;
43901- port->count--;
43902+ atomic_dec(&port->count);
43903 }
43904 spin_unlock_irqrestore(&info->irq_spinlock, flags);
43905 port->blocked_open++;
43906@@ -3331,7 +3331,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
43907
43908 if (debug_level >= DEBUG_LEVEL_INFO)
43909 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
43910- __FILE__,__LINE__, tty->driver->name, port->count );
43911+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
43912
43913 tty_unlock(tty);
43914 schedule();
43915@@ -3343,12 +3343,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
43916
43917 /* FIXME: Racy on hangup during close wait */
43918 if (extra_count)
43919- port->count++;
43920+ atomic_inc(&port->count);
43921 port->blocked_open--;
43922
43923 if (debug_level >= DEBUG_LEVEL_INFO)
43924 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
43925- __FILE__,__LINE__, tty->driver->name, port->count );
43926+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
43927
43928 if (!retval)
43929 port->flags |= ASYNC_NORMAL_ACTIVE;
43930@@ -3400,7 +3400,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
43931
43932 if (debug_level >= DEBUG_LEVEL_INFO)
43933 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
43934- __FILE__,__LINE__,tty->driver->name, info->port.count);
43935+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
43936
43937 /* If port is closing, signal caller to try again */
43938 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
43939@@ -3419,10 +3419,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
43940 spin_unlock_irqrestore(&info->netlock, flags);
43941 goto cleanup;
43942 }
43943- info->port.count++;
43944+ atomic_inc(&info->port.count);
43945 spin_unlock_irqrestore(&info->netlock, flags);
43946
43947- if (info->port.count == 1) {
43948+ if (atomic_read(&info->port.count) == 1) {
43949 /* 1st open on this device, init hardware */
43950 retval = startup(info);
43951 if (retval < 0)
43952@@ -3446,8 +3446,8 @@ cleanup:
43953 if (retval) {
43954 if (tty->count == 1)
43955 info->port.tty = NULL; /* tty layer will release tty struct */
43956- if(info->port.count)
43957- info->port.count--;
43958+ if (atomic_read(&info->port.count))
43959+ atomic_dec(&info->port.count);
43960 }
43961
43962 return retval;
43963@@ -7665,7 +7665,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
43964 unsigned short new_crctype;
43965
43966 /* return error if TTY interface open */
43967- if (info->port.count)
43968+ if (atomic_read(&info->port.count))
43969 return -EBUSY;
43970
43971 switch (encoding)
43972@@ -7760,7 +7760,7 @@ static int hdlcdev_open(struct net_device *dev)
43973
43974 /* arbitrate between network and tty opens */
43975 spin_lock_irqsave(&info->netlock, flags);
43976- if (info->port.count != 0 || info->netcount != 0) {
43977+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
43978 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
43979 spin_unlock_irqrestore(&info->netlock, flags);
43980 return -EBUSY;
43981@@ -7846,7 +7846,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
43982 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
43983
43984 /* return error if TTY interface open */
43985- if (info->port.count)
43986+ if (atomic_read(&info->port.count))
43987 return -EBUSY;
43988
43989 if (cmd != SIOCWANDEV)
43990diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
43991index 1abf946..1ee34fc 100644
43992--- a/drivers/tty/synclink_gt.c
43993+++ b/drivers/tty/synclink_gt.c
43994@@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp)
43995 tty->driver_data = info;
43996 info->port.tty = tty;
43997
43998- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
43999+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
44000
44001 /* If port is closing, signal caller to try again */
44002 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
44003@@ -691,10 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
44004 mutex_unlock(&info->port.mutex);
44005 goto cleanup;
44006 }
44007- info->port.count++;
44008+ atomic_inc(&info->port.count);
44009 spin_unlock_irqrestore(&info->netlock, flags);
44010
44011- if (info->port.count == 1) {
44012+ if (atomic_read(&info->port.count) == 1) {
44013 /* 1st open on this device, init hardware */
44014 retval = startup(info);
44015 if (retval < 0) {
44016@@ -715,8 +715,8 @@ cleanup:
44017 if (retval) {
44018 if (tty->count == 1)
44019 info->port.tty = NULL; /* tty layer will release tty struct */
44020- if(info->port.count)
44021- info->port.count--;
44022+ if(atomic_read(&info->port.count))
44023+ atomic_dec(&info->port.count);
44024 }
44025
44026 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
44027@@ -729,7 +729,7 @@ static void close(struct tty_struct *tty, struct file *filp)
44028
44029 if (sanity_check(info, tty->name, "close"))
44030 return;
44031- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
44032+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
44033
44034 if (tty_port_close_start(&info->port, tty, filp) == 0)
44035 goto cleanup;
44036@@ -746,7 +746,7 @@ static void close(struct tty_struct *tty, struct file *filp)
44037 tty_port_close_end(&info->port, tty);
44038 info->port.tty = NULL;
44039 cleanup:
44040- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
44041+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
44042 }
44043
44044 static void hangup(struct tty_struct *tty)
44045@@ -764,7 +764,7 @@ static void hangup(struct tty_struct *tty)
44046 shutdown(info);
44047
44048 spin_lock_irqsave(&info->port.lock, flags);
44049- info->port.count = 0;
44050+ atomic_set(&info->port.count, 0);
44051 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
44052 info->port.tty = NULL;
44053 spin_unlock_irqrestore(&info->port.lock, flags);
44054@@ -1449,7 +1449,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
44055 unsigned short new_crctype;
44056
44057 /* return error if TTY interface open */
44058- if (info->port.count)
44059+ if (atomic_read(&info->port.count))
44060 return -EBUSY;
44061
44062 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
44063@@ -1544,7 +1544,7 @@ static int hdlcdev_open(struct net_device *dev)
44064
44065 /* arbitrate between network and tty opens */
44066 spin_lock_irqsave(&info->netlock, flags);
44067- if (info->port.count != 0 || info->netcount != 0) {
44068+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
44069 DBGINFO(("%s hdlc_open busy\n", dev->name));
44070 spin_unlock_irqrestore(&info->netlock, flags);
44071 return -EBUSY;
44072@@ -1629,7 +1629,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
44073 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
44074
44075 /* return error if TTY interface open */
44076- if (info->port.count)
44077+ if (atomic_read(&info->port.count))
44078 return -EBUSY;
44079
44080 if (cmd != SIOCWANDEV)
44081@@ -2413,7 +2413,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
44082 if (port == NULL)
44083 continue;
44084 spin_lock(&port->lock);
44085- if ((port->port.count || port->netcount) &&
44086+ if ((atomic_read(&port->port.count) || port->netcount) &&
44087 port->pending_bh && !port->bh_running &&
44088 !port->bh_requested) {
44089 DBGISR(("%s bh queued\n", port->device_name));
44090@@ -3302,7 +3302,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
44091 spin_lock_irqsave(&info->lock, flags);
44092 if (!tty_hung_up_p(filp)) {
44093 extra_count = true;
44094- port->count--;
44095+ atomic_dec(&port->count);
44096 }
44097 spin_unlock_irqrestore(&info->lock, flags);
44098 port->blocked_open++;
44099@@ -3339,7 +3339,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
44100 remove_wait_queue(&port->open_wait, &wait);
44101
44102 if (extra_count)
44103- port->count++;
44104+ atomic_inc(&port->count);
44105 port->blocked_open--;
44106
44107 if (!retval)
44108diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
44109index ff17138..e38b41e 100644
44110--- a/drivers/tty/synclinkmp.c
44111+++ b/drivers/tty/synclinkmp.c
44112@@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp)
44113
44114 if (debug_level >= DEBUG_LEVEL_INFO)
44115 printk("%s(%d):%s open(), old ref count = %d\n",
44116- __FILE__,__LINE__,tty->driver->name, info->port.count);
44117+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
44118
44119 /* If port is closing, signal caller to try again */
44120 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
44121@@ -769,10 +769,10 @@ static int open(struct tty_struct *tty, struct file *filp)
44122 spin_unlock_irqrestore(&info->netlock, flags);
44123 goto cleanup;
44124 }
44125- info->port.count++;
44126+ atomic_inc(&info->port.count);
44127 spin_unlock_irqrestore(&info->netlock, flags);
44128
44129- if (info->port.count == 1) {
44130+ if (atomic_read(&info->port.count) == 1) {
44131 /* 1st open on this device, init hardware */
44132 retval = startup(info);
44133 if (retval < 0)
44134@@ -796,8 +796,8 @@ cleanup:
44135 if (retval) {
44136 if (tty->count == 1)
44137 info->port.tty = NULL; /* tty layer will release tty struct */
44138- if(info->port.count)
44139- info->port.count--;
44140+ if(atomic_read(&info->port.count))
44141+ atomic_dec(&info->port.count);
44142 }
44143
44144 return retval;
44145@@ -815,7 +815,7 @@ static void close(struct tty_struct *tty, struct file *filp)
44146
44147 if (debug_level >= DEBUG_LEVEL_INFO)
44148 printk("%s(%d):%s close() entry, count=%d\n",
44149- __FILE__,__LINE__, info->device_name, info->port.count);
44150+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
44151
44152 if (tty_port_close_start(&info->port, tty, filp) == 0)
44153 goto cleanup;
44154@@ -834,7 +834,7 @@ static void close(struct tty_struct *tty, struct file *filp)
44155 cleanup:
44156 if (debug_level >= DEBUG_LEVEL_INFO)
44157 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
44158- tty->driver->name, info->port.count);
44159+ tty->driver->name, atomic_read(&info->port.count));
44160 }
44161
44162 /* Called by tty_hangup() when a hangup is signaled.
44163@@ -857,7 +857,7 @@ static void hangup(struct tty_struct *tty)
44164 shutdown(info);
44165
44166 spin_lock_irqsave(&info->port.lock, flags);
44167- info->port.count = 0;
44168+ atomic_set(&info->port.count, 0);
44169 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
44170 info->port.tty = NULL;
44171 spin_unlock_irqrestore(&info->port.lock, flags);
44172@@ -1565,7 +1565,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
44173 unsigned short new_crctype;
44174
44175 /* return error if TTY interface open */
44176- if (info->port.count)
44177+ if (atomic_read(&info->port.count))
44178 return -EBUSY;
44179
44180 switch (encoding)
44181@@ -1660,7 +1660,7 @@ static int hdlcdev_open(struct net_device *dev)
44182
44183 /* arbitrate between network and tty opens */
44184 spin_lock_irqsave(&info->netlock, flags);
44185- if (info->port.count != 0 || info->netcount != 0) {
44186+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
44187 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
44188 spin_unlock_irqrestore(&info->netlock, flags);
44189 return -EBUSY;
44190@@ -1746,7 +1746,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
44191 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
44192
44193 /* return error if TTY interface open */
44194- if (info->port.count)
44195+ if (atomic_read(&info->port.count))
44196 return -EBUSY;
44197
44198 if (cmd != SIOCWANDEV)
44199@@ -2620,7 +2620,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
44200 * do not request bottom half processing if the
44201 * device is not open in a normal mode.
44202 */
44203- if ( port && (port->port.count || port->netcount) &&
44204+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
44205 port->pending_bh && !port->bh_running &&
44206 !port->bh_requested ) {
44207 if ( debug_level >= DEBUG_LEVEL_ISR )
44208@@ -3318,12 +3318,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
44209
44210 if (debug_level >= DEBUG_LEVEL_INFO)
44211 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
44212- __FILE__,__LINE__, tty->driver->name, port->count );
44213+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
44214
44215 spin_lock_irqsave(&info->lock, flags);
44216 if (!tty_hung_up_p(filp)) {
44217 extra_count = true;
44218- port->count--;
44219+ atomic_dec(&port->count);
44220 }
44221 spin_unlock_irqrestore(&info->lock, flags);
44222 port->blocked_open++;
44223@@ -3352,7 +3352,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
44224
44225 if (debug_level >= DEBUG_LEVEL_INFO)
44226 printk("%s(%d):%s block_til_ready() count=%d\n",
44227- __FILE__,__LINE__, tty->driver->name, port->count );
44228+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
44229
44230 tty_unlock(tty);
44231 schedule();
44232@@ -3363,12 +3363,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
44233 remove_wait_queue(&port->open_wait, &wait);
44234
44235 if (extra_count)
44236- port->count++;
44237+ atomic_inc(&port->count);
44238 port->blocked_open--;
44239
44240 if (debug_level >= DEBUG_LEVEL_INFO)
44241 printk("%s(%d):%s block_til_ready() after, count=%d\n",
44242- __FILE__,__LINE__, tty->driver->name, port->count );
44243+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
44244
44245 if (!retval)
44246 port->flags |= ASYNC_NORMAL_ACTIVE;
44247diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
44248index b51c154..17d55d1 100644
44249--- a/drivers/tty/sysrq.c
44250+++ b/drivers/tty/sysrq.c
44251@@ -1022,7 +1022,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
44252 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
44253 size_t count, loff_t *ppos)
44254 {
44255- if (count) {
44256+ if (count && capable(CAP_SYS_ADMIN)) {
44257 char c;
44258
44259 if (get_user(c, buf))
44260diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
44261index 4476682..d77e748 100644
44262--- a/drivers/tty/tty_io.c
44263+++ b/drivers/tty/tty_io.c
44264@@ -3466,7 +3466,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
44265
44266 void tty_default_fops(struct file_operations *fops)
44267 {
44268- *fops = tty_fops;
44269+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
44270 }
44271
44272 /*
44273diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
44274index 1afe192..73d2c20 100644
44275--- a/drivers/tty/tty_ldisc.c
44276+++ b/drivers/tty/tty_ldisc.c
44277@@ -66,7 +66,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
44278 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
44279 tty_ldiscs[disc] = new_ldisc;
44280 new_ldisc->num = disc;
44281- new_ldisc->refcount = 0;
44282+ atomic_set(&new_ldisc->refcount, 0);
44283 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
44284
44285 return ret;
44286@@ -94,7 +94,7 @@ int tty_unregister_ldisc(int disc)
44287 return -EINVAL;
44288
44289 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
44290- if (tty_ldiscs[disc]->refcount)
44291+ if (atomic_read(&tty_ldiscs[disc]->refcount))
44292 ret = -EBUSY;
44293 else
44294 tty_ldiscs[disc] = NULL;
44295@@ -115,7 +115,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
44296 if (ldops) {
44297 ret = ERR_PTR(-EAGAIN);
44298 if (try_module_get(ldops->owner)) {
44299- ldops->refcount++;
44300+ atomic_inc(&ldops->refcount);
44301 ret = ldops;
44302 }
44303 }
44304@@ -128,7 +128,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
44305 unsigned long flags;
44306
44307 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
44308- ldops->refcount--;
44309+ atomic_dec(&ldops->refcount);
44310 module_put(ldops->owner);
44311 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
44312 }
44313@@ -196,7 +196,7 @@ static inline void tty_ldisc_put(struct tty_ldisc *ld)
44314 /* unreleased reader reference(s) will cause this WARN */
44315 WARN_ON(!atomic_dec_and_test(&ld->users));
44316
44317- ld->ops->refcount--;
44318+ atomic_dec(&ld->ops->refcount);
44319 module_put(ld->ops->owner);
44320 kfree(ld);
44321 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
44322diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
44323index 121aeb9..0d2c4b9 100644
44324--- a/drivers/tty/tty_port.c
44325+++ b/drivers/tty/tty_port.c
44326@@ -232,7 +232,7 @@ void tty_port_hangup(struct tty_port *port)
44327 unsigned long flags;
44328
44329 spin_lock_irqsave(&port->lock, flags);
44330- port->count = 0;
44331+ atomic_set(&port->count, 0);
44332 port->flags &= ~ASYNC_NORMAL_ACTIVE;
44333 tty = port->tty;
44334 if (tty)
44335@@ -391,7 +391,7 @@ int tty_port_block_til_ready(struct tty_port *port,
44336 /* The port lock protects the port counts */
44337 spin_lock_irqsave(&port->lock, flags);
44338 if (!tty_hung_up_p(filp))
44339- port->count--;
44340+ atomic_dec(&port->count);
44341 port->blocked_open++;
44342 spin_unlock_irqrestore(&port->lock, flags);
44343
44344@@ -433,7 +433,7 @@ int tty_port_block_til_ready(struct tty_port *port,
44345 we must not mess that up further */
44346 spin_lock_irqsave(&port->lock, flags);
44347 if (!tty_hung_up_p(filp))
44348- port->count++;
44349+ atomic_inc(&port->count);
44350 port->blocked_open--;
44351 if (retval == 0)
44352 port->flags |= ASYNC_NORMAL_ACTIVE;
44353@@ -467,19 +467,19 @@ int tty_port_close_start(struct tty_port *port,
44354 return 0;
44355 }
44356
44357- if (tty->count == 1 && port->count != 1) {
44358+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
44359 printk(KERN_WARNING
44360 "tty_port_close_start: tty->count = 1 port count = %d.\n",
44361- port->count);
44362- port->count = 1;
44363+ atomic_read(&port->count));
44364+ atomic_set(&port->count, 1);
44365 }
44366- if (--port->count < 0) {
44367+ if (atomic_dec_return(&port->count) < 0) {
44368 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
44369- port->count);
44370- port->count = 0;
44371+ atomic_read(&port->count));
44372+ atomic_set(&port->count, 0);
44373 }
44374
44375- if (port->count) {
44376+ if (atomic_read(&port->count)) {
44377 spin_unlock_irqrestore(&port->lock, flags);
44378 if (port->ops->drop)
44379 port->ops->drop(port);
44380@@ -565,7 +565,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
44381 {
44382 spin_lock_irq(&port->lock);
44383 if (!tty_hung_up_p(filp))
44384- ++port->count;
44385+ atomic_inc(&port->count);
44386 spin_unlock_irq(&port->lock);
44387 tty_port_tty_set(port, tty);
44388
44389diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
44390index a9af1b9a..1e08e7f 100644
44391--- a/drivers/tty/vt/keyboard.c
44392+++ b/drivers/tty/vt/keyboard.c
44393@@ -647,6 +647,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
44394 kbd->kbdmode == VC_OFF) &&
44395 value != KVAL(K_SAK))
44396 return; /* SAK is allowed even in raw mode */
44397+
44398+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
44399+ {
44400+ void *func = fn_handler[value];
44401+ if (func == fn_show_state || func == fn_show_ptregs ||
44402+ func == fn_show_mem)
44403+ return;
44404+ }
44405+#endif
44406+
44407 fn_handler[value](vc);
44408 }
44409
44410@@ -1795,9 +1805,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
44411 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
44412 return -EFAULT;
44413
44414- if (!capable(CAP_SYS_TTY_CONFIG))
44415- perm = 0;
44416-
44417 switch (cmd) {
44418 case KDGKBENT:
44419 /* Ensure another thread doesn't free it under us */
44420@@ -1812,6 +1819,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
44421 spin_unlock_irqrestore(&kbd_event_lock, flags);
44422 return put_user(val, &user_kbe->kb_value);
44423 case KDSKBENT:
44424+ if (!capable(CAP_SYS_TTY_CONFIG))
44425+ perm = 0;
44426+
44427 if (!perm)
44428 return -EPERM;
44429 if (!i && v == K_NOSUCHMAP) {
44430@@ -1902,9 +1912,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
44431 int i, j, k;
44432 int ret;
44433
44434- if (!capable(CAP_SYS_TTY_CONFIG))
44435- perm = 0;
44436-
44437 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
44438 if (!kbs) {
44439 ret = -ENOMEM;
44440@@ -1938,6 +1945,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
44441 kfree(kbs);
44442 return ((p && *p) ? -EOVERFLOW : 0);
44443 case KDSKBSENT:
44444+ if (!capable(CAP_SYS_TTY_CONFIG))
44445+ perm = 0;
44446+
44447 if (!perm) {
44448 ret = -EPERM;
44449 goto reterr;
44450diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
44451index b645c47..a55c182 100644
44452--- a/drivers/uio/uio.c
44453+++ b/drivers/uio/uio.c
44454@@ -25,6 +25,7 @@
44455 #include <linux/kobject.h>
44456 #include <linux/cdev.h>
44457 #include <linux/uio_driver.h>
44458+#include <asm/local.h>
44459
44460 #define UIO_MAX_DEVICES (1U << MINORBITS)
44461
44462@@ -32,10 +33,10 @@ struct uio_device {
44463 struct module *owner;
44464 struct device *dev;
44465 int minor;
44466- atomic_t event;
44467+ atomic_unchecked_t event;
44468 struct fasync_struct *async_queue;
44469 wait_queue_head_t wait;
44470- int vma_count;
44471+ local_t vma_count;
44472 struct uio_info *info;
44473 struct kobject *map_dir;
44474 struct kobject *portio_dir;
44475@@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
44476 struct device_attribute *attr, char *buf)
44477 {
44478 struct uio_device *idev = dev_get_drvdata(dev);
44479- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
44480+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
44481 }
44482
44483 static struct device_attribute uio_class_attributes[] = {
44484@@ -398,7 +399,7 @@ void uio_event_notify(struct uio_info *info)
44485 {
44486 struct uio_device *idev = info->uio_dev;
44487
44488- atomic_inc(&idev->event);
44489+ atomic_inc_unchecked(&idev->event);
44490 wake_up_interruptible(&idev->wait);
44491 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
44492 }
44493@@ -451,7 +452,7 @@ static int uio_open(struct inode *inode, struct file *filep)
44494 }
44495
44496 listener->dev = idev;
44497- listener->event_count = atomic_read(&idev->event);
44498+ listener->event_count = atomic_read_unchecked(&idev->event);
44499 filep->private_data = listener;
44500
44501 if (idev->info->open) {
44502@@ -502,7 +503,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
44503 return -EIO;
44504
44505 poll_wait(filep, &idev->wait, wait);
44506- if (listener->event_count != atomic_read(&idev->event))
44507+ if (listener->event_count != atomic_read_unchecked(&idev->event))
44508 return POLLIN | POLLRDNORM;
44509 return 0;
44510 }
44511@@ -527,7 +528,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
44512 do {
44513 set_current_state(TASK_INTERRUPTIBLE);
44514
44515- event_count = atomic_read(&idev->event);
44516+ event_count = atomic_read_unchecked(&idev->event);
44517 if (event_count != listener->event_count) {
44518 if (copy_to_user(buf, &event_count, count))
44519 retval = -EFAULT;
44520@@ -596,13 +597,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
44521 static void uio_vma_open(struct vm_area_struct *vma)
44522 {
44523 struct uio_device *idev = vma->vm_private_data;
44524- idev->vma_count++;
44525+ local_inc(&idev->vma_count);
44526 }
44527
44528 static void uio_vma_close(struct vm_area_struct *vma)
44529 {
44530 struct uio_device *idev = vma->vm_private_data;
44531- idev->vma_count--;
44532+ local_dec(&idev->vma_count);
44533 }
44534
44535 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
44536@@ -809,7 +810,7 @@ int __uio_register_device(struct module *owner,
44537 idev->owner = owner;
44538 idev->info = info;
44539 init_waitqueue_head(&idev->wait);
44540- atomic_set(&idev->event, 0);
44541+ atomic_set_unchecked(&idev->event, 0);
44542
44543 ret = uio_get_minor(idev);
44544 if (ret)
44545diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
44546index 8a7eb77..c00402f 100644
44547--- a/drivers/usb/atm/cxacru.c
44548+++ b/drivers/usb/atm/cxacru.c
44549@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
44550 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
44551 if (ret < 2)
44552 return -EINVAL;
44553- if (index < 0 || index > 0x7f)
44554+ if (index > 0x7f)
44555 return -EINVAL;
44556 pos += tmp;
44557
44558diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
44559index d3527dd..26effa2 100644
44560--- a/drivers/usb/atm/usbatm.c
44561+++ b/drivers/usb/atm/usbatm.c
44562@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
44563 if (printk_ratelimit())
44564 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
44565 __func__, vpi, vci);
44566- atomic_inc(&vcc->stats->rx_err);
44567+ atomic_inc_unchecked(&vcc->stats->rx_err);
44568 return;
44569 }
44570
44571@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
44572 if (length > ATM_MAX_AAL5_PDU) {
44573 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
44574 __func__, length, vcc);
44575- atomic_inc(&vcc->stats->rx_err);
44576+ atomic_inc_unchecked(&vcc->stats->rx_err);
44577 goto out;
44578 }
44579
44580@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
44581 if (sarb->len < pdu_length) {
44582 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
44583 __func__, pdu_length, sarb->len, vcc);
44584- atomic_inc(&vcc->stats->rx_err);
44585+ atomic_inc_unchecked(&vcc->stats->rx_err);
44586 goto out;
44587 }
44588
44589 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
44590 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
44591 __func__, vcc);
44592- atomic_inc(&vcc->stats->rx_err);
44593+ atomic_inc_unchecked(&vcc->stats->rx_err);
44594 goto out;
44595 }
44596
44597@@ -389,7 +389,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
44598 if (printk_ratelimit())
44599 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
44600 __func__, length);
44601- atomic_inc(&vcc->stats->rx_drop);
44602+ atomic_inc_unchecked(&vcc->stats->rx_drop);
44603 goto out;
44604 }
44605
44606@@ -417,7 +417,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
44607
44608 vcc->push(vcc, skb);
44609
44610- atomic_inc(&vcc->stats->rx);
44611+ atomic_inc_unchecked(&vcc->stats->rx);
44612 out:
44613 skb_trim(sarb, 0);
44614 }
44615@@ -623,7 +623,7 @@ static void usbatm_tx_process(unsigned long data)
44616 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
44617
44618 usbatm_pop(vcc, skb);
44619- atomic_inc(&vcc->stats->tx);
44620+ atomic_inc_unchecked(&vcc->stats->tx);
44621
44622 skb = skb_dequeue(&instance->sndqueue);
44623 }
44624@@ -779,11 +779,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
44625 if (!left--)
44626 return sprintf(page,
44627 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
44628- atomic_read(&atm_dev->stats.aal5.tx),
44629- atomic_read(&atm_dev->stats.aal5.tx_err),
44630- atomic_read(&atm_dev->stats.aal5.rx),
44631- atomic_read(&atm_dev->stats.aal5.rx_err),
44632- atomic_read(&atm_dev->stats.aal5.rx_drop));
44633+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
44634+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
44635+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
44636+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
44637+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
44638
44639 if (!left--) {
44640 if (instance->disconnected)
44641diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
44642index 2a3bbdf..91d72cf 100644
44643--- a/drivers/usb/core/devices.c
44644+++ b/drivers/usb/core/devices.c
44645@@ -126,7 +126,7 @@ static const char format_endpt[] =
44646 * time it gets called.
44647 */
44648 static struct device_connect_event {
44649- atomic_t count;
44650+ atomic_unchecked_t count;
44651 wait_queue_head_t wait;
44652 } device_event = {
44653 .count = ATOMIC_INIT(1),
44654@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
44655
44656 void usbfs_conn_disc_event(void)
44657 {
44658- atomic_add(2, &device_event.count);
44659+ atomic_add_unchecked(2, &device_event.count);
44660 wake_up(&device_event.wait);
44661 }
44662
44663@@ -652,7 +652,7 @@ static unsigned int usb_device_poll(struct file *file,
44664
44665 poll_wait(file, &device_event.wait, wait);
44666
44667- event_count = atomic_read(&device_event.count);
44668+ event_count = atomic_read_unchecked(&device_event.count);
44669 if (file->f_version != event_count) {
44670 file->f_version = event_count;
44671 return POLLIN | POLLRDNORM;
44672diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
44673index d53547d..6a22d02 100644
44674--- a/drivers/usb/core/hcd.c
44675+++ b/drivers/usb/core/hcd.c
44676@@ -1526,7 +1526,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
44677 */
44678 usb_get_urb(urb);
44679 atomic_inc(&urb->use_count);
44680- atomic_inc(&urb->dev->urbnum);
44681+ atomic_inc_unchecked(&urb->dev->urbnum);
44682 usbmon_urb_submit(&hcd->self, urb);
44683
44684 /* NOTE requirements on root-hub callers (usbfs and the hub
44685@@ -1553,7 +1553,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
44686 urb->hcpriv = NULL;
44687 INIT_LIST_HEAD(&urb->urb_list);
44688 atomic_dec(&urb->use_count);
44689- atomic_dec(&urb->dev->urbnum);
44690+ atomic_dec_unchecked(&urb->dev->urbnum);
44691 if (atomic_read(&urb->reject))
44692 wake_up(&usb_kill_urb_queue);
44693 usb_put_urb(urb);
44694diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
44695index 444d30e..f15c850 100644
44696--- a/drivers/usb/core/message.c
44697+++ b/drivers/usb/core/message.c
44698@@ -129,7 +129,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
44699 * method can wait for it to complete. Since you don't have a handle on the
44700 * URB used, you can't cancel the request.
44701 */
44702-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
44703+int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
44704 __u8 requesttype, __u16 value, __u16 index, void *data,
44705 __u16 size, int timeout)
44706 {
44707diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
44708index aa38db4..0a08682 100644
44709--- a/drivers/usb/core/sysfs.c
44710+++ b/drivers/usb/core/sysfs.c
44711@@ -239,7 +239,7 @@ show_urbnum(struct device *dev, struct device_attribute *attr, char *buf)
44712 struct usb_device *udev;
44713
44714 udev = to_usb_device(dev);
44715- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
44716+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
44717 }
44718 static DEVICE_ATTR(urbnum, S_IRUGO, show_urbnum, NULL);
44719
44720diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
44721index b10da72..43aa0b2 100644
44722--- a/drivers/usb/core/usb.c
44723+++ b/drivers/usb/core/usb.c
44724@@ -389,7 +389,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
44725 set_dev_node(&dev->dev, dev_to_node(bus->controller));
44726 dev->state = USB_STATE_ATTACHED;
44727 dev->lpm_disable_count = 1;
44728- atomic_set(&dev->urbnum, 0);
44729+ atomic_set_unchecked(&dev->urbnum, 0);
44730
44731 INIT_LIST_HEAD(&dev->ep0.urb_list);
44732 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
44733diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
44734index 5e29dde..eca992f 100644
44735--- a/drivers/usb/early/ehci-dbgp.c
44736+++ b/drivers/usb/early/ehci-dbgp.c
44737@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
44738
44739 #ifdef CONFIG_KGDB
44740 static struct kgdb_io kgdbdbgp_io_ops;
44741-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
44742+static struct kgdb_io kgdbdbgp_io_ops_console;
44743+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
44744 #else
44745 #define dbgp_kgdb_mode (0)
44746 #endif
44747@@ -1047,6 +1048,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
44748 .write_char = kgdbdbgp_write_char,
44749 };
44750
44751+static struct kgdb_io kgdbdbgp_io_ops_console = {
44752+ .name = "kgdbdbgp",
44753+ .read_char = kgdbdbgp_read_char,
44754+ .write_char = kgdbdbgp_write_char,
44755+ .is_console = 1
44756+};
44757+
44758 static int kgdbdbgp_wait_time;
44759
44760 static int __init kgdbdbgp_parse_config(char *str)
44761@@ -1062,8 +1070,10 @@ static int __init kgdbdbgp_parse_config(char *str)
44762 ptr++;
44763 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
44764 }
44765- kgdb_register_io_module(&kgdbdbgp_io_ops);
44766- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
44767+ if (early_dbgp_console.index != -1)
44768+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
44769+ else
44770+ kgdb_register_io_module(&kgdbdbgp_io_ops);
44771
44772 return 0;
44773 }
44774diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
44775index b369292..9f3ba40 100644
44776--- a/drivers/usb/gadget/u_serial.c
44777+++ b/drivers/usb/gadget/u_serial.c
44778@@ -733,9 +733,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
44779 spin_lock_irq(&port->port_lock);
44780
44781 /* already open? Great. */
44782- if (port->port.count) {
44783+ if (atomic_read(&port->port.count)) {
44784 status = 0;
44785- port->port.count++;
44786+ atomic_inc(&port->port.count);
44787
44788 /* currently opening/closing? wait ... */
44789 } else if (port->openclose) {
44790@@ -794,7 +794,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
44791 tty->driver_data = port;
44792 port->port.tty = tty;
44793
44794- port->port.count = 1;
44795+ atomic_set(&port->port.count, 1);
44796 port->openclose = false;
44797
44798 /* if connected, start the I/O stream */
44799@@ -836,11 +836,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
44800
44801 spin_lock_irq(&port->port_lock);
44802
44803- if (port->port.count != 1) {
44804- if (port->port.count == 0)
44805+ if (atomic_read(&port->port.count) != 1) {
44806+ if (atomic_read(&port->port.count) == 0)
44807 WARN_ON(1);
44808 else
44809- --port->port.count;
44810+ atomic_dec(&port->port.count);
44811 goto exit;
44812 }
44813
44814@@ -850,7 +850,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
44815 * and sleep if necessary
44816 */
44817 port->openclose = true;
44818- port->port.count = 0;
44819+ atomic_set(&port->port.count, 0);
44820
44821 gser = port->port_usb;
44822 if (gser && gser->disconnect)
44823@@ -1066,7 +1066,7 @@ static int gs_closed(struct gs_port *port)
44824 int cond;
44825
44826 spin_lock_irq(&port->port_lock);
44827- cond = (port->port.count == 0) && !port->openclose;
44828+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
44829 spin_unlock_irq(&port->port_lock);
44830 return cond;
44831 }
44832@@ -1209,7 +1209,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
44833 /* if it's already open, start I/O ... and notify the serial
44834 * protocol about open/close status (connect/disconnect).
44835 */
44836- if (port->port.count) {
44837+ if (atomic_read(&port->port.count)) {
44838 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
44839 gs_start_io(port);
44840 if (gser->connect)
44841@@ -1256,7 +1256,7 @@ void gserial_disconnect(struct gserial *gser)
44842
44843 port->port_usb = NULL;
44844 gser->ioport = NULL;
44845- if (port->port.count > 0 || port->openclose) {
44846+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
44847 wake_up_interruptible(&port->drain_wait);
44848 if (port->port.tty)
44849 tty_hangup(port->port.tty);
44850@@ -1272,7 +1272,7 @@ void gserial_disconnect(struct gserial *gser)
44851
44852 /* finally, free any unused/unusable I/O buffers */
44853 spin_lock_irqsave(&port->port_lock, flags);
44854- if (port->port.count == 0 && !port->openclose)
44855+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
44856 gs_buf_free(&port->port_write_buf);
44857 gs_free_requests(gser->out, &port->read_pool, NULL);
44858 gs_free_requests(gser->out, &port->read_queue, NULL);
44859diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
44860index 5f3bcd3..bfca43f 100644
44861--- a/drivers/usb/serial/console.c
44862+++ b/drivers/usb/serial/console.c
44863@@ -124,7 +124,7 @@ static int usb_console_setup(struct console *co, char *options)
44864
44865 info->port = port;
44866
44867- ++port->port.count;
44868+ atomic_inc(&port->port.count);
44869 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
44870 if (serial->type->set_termios) {
44871 /*
44872@@ -174,7 +174,7 @@ static int usb_console_setup(struct console *co, char *options)
44873 }
44874 /* Now that any required fake tty operations are completed restore
44875 * the tty port count */
44876- --port->port.count;
44877+ atomic_dec(&port->port.count);
44878 /* The console is special in terms of closing the device so
44879 * indicate this port is now acting as a system console. */
44880 port->port.console = 1;
44881@@ -187,7 +187,7 @@ static int usb_console_setup(struct console *co, char *options)
44882 free_tty:
44883 kfree(tty);
44884 reset_open_count:
44885- port->port.count = 0;
44886+ atomic_set(&port->port.count, 0);
44887 usb_autopm_put_interface(serial->interface);
44888 error_get_interface:
44889 usb_serial_put(serial);
44890diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
44891index 75f70f0..d467e1a 100644
44892--- a/drivers/usb/storage/usb.h
44893+++ b/drivers/usb/storage/usb.h
44894@@ -63,7 +63,7 @@ struct us_unusual_dev {
44895 __u8 useProtocol;
44896 __u8 useTransport;
44897 int (*initFunction)(struct us_data *);
44898-};
44899+} __do_const;
44900
44901
44902 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
44903diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
44904index d6bea3e..60b250e 100644
44905--- a/drivers/usb/wusbcore/wa-hc.h
44906+++ b/drivers/usb/wusbcore/wa-hc.h
44907@@ -192,7 +192,7 @@ struct wahc {
44908 struct list_head xfer_delayed_list;
44909 spinlock_t xfer_list_lock;
44910 struct work_struct xfer_work;
44911- atomic_t xfer_id_count;
44912+ atomic_unchecked_t xfer_id_count;
44913 };
44914
44915
44916@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
44917 INIT_LIST_HEAD(&wa->xfer_delayed_list);
44918 spin_lock_init(&wa->xfer_list_lock);
44919 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
44920- atomic_set(&wa->xfer_id_count, 1);
44921+ atomic_set_unchecked(&wa->xfer_id_count, 1);
44922 }
44923
44924 /**
44925diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
44926index 6ef94bc..1b41265 100644
44927--- a/drivers/usb/wusbcore/wa-xfer.c
44928+++ b/drivers/usb/wusbcore/wa-xfer.c
44929@@ -296,7 +296,7 @@ out:
44930 */
44931 static void wa_xfer_id_init(struct wa_xfer *xfer)
44932 {
44933- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
44934+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
44935 }
44936
44937 /*
44938diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
44939index f80d3dd..8ca5ac7 100644
44940--- a/drivers/vhost/net.c
44941+++ b/drivers/vhost/net.c
44942@@ -150,6 +150,11 @@ static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
44943 {
44944 kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
44945 wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount));
44946+}
44947+
44948+static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
44949+{
44950+ vhost_net_ubuf_put_and_wait(ubufs);
44951 kfree(ubufs);
44952 }
44953
44954@@ -948,7 +953,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
44955 mutex_unlock(&vq->mutex);
44956
44957 if (oldubufs) {
44958- vhost_net_ubuf_put_and_wait(oldubufs);
44959+ vhost_net_ubuf_put_wait_and_free(oldubufs);
44960 mutex_lock(&vq->mutex);
44961 vhost_zerocopy_signal_used(n, vq);
44962 mutex_unlock(&vq->mutex);
44963@@ -966,7 +971,7 @@ err_used:
44964 rcu_assign_pointer(vq->private_data, oldsock);
44965 vhost_net_enable_vq(n, vq);
44966 if (ubufs)
44967- vhost_net_ubuf_put_and_wait(ubufs);
44968+ vhost_net_ubuf_put_wait_and_free(ubufs);
44969 err_ubufs:
44970 fput(sock->file);
44971 err_vq:
44972diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
44973index 5174eba..86e764a 100644
44974--- a/drivers/vhost/vringh.c
44975+++ b/drivers/vhost/vringh.c
44976@@ -800,7 +800,7 @@ static inline int getu16_kern(u16 *val, const u16 *p)
44977
44978 static inline int putu16_kern(u16 *p, u16 val)
44979 {
44980- ACCESS_ONCE(*p) = val;
44981+ ACCESS_ONCE_RW(*p) = val;
44982 return 0;
44983 }
44984
44985diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
44986index 8c55011..eed4ae1a 100644
44987--- a/drivers/video/aty/aty128fb.c
44988+++ b/drivers/video/aty/aty128fb.c
44989@@ -149,7 +149,7 @@ enum {
44990 };
44991
44992 /* Must match above enum */
44993-static char * const r128_family[] = {
44994+static const char * const r128_family[] = {
44995 "AGP",
44996 "PCI",
44997 "PRO AGP",
44998diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
44999index 4f27fdc..d3537e6 100644
45000--- a/drivers/video/aty/atyfb_base.c
45001+++ b/drivers/video/aty/atyfb_base.c
45002@@ -1325,10 +1325,14 @@ static int atyfb_set_par(struct fb_info *info)
45003 par->accel_flags = var->accel_flags; /* hack */
45004
45005 if (var->accel_flags) {
45006- info->fbops->fb_sync = atyfb_sync;
45007+ pax_open_kernel();
45008+ *(void **)&info->fbops->fb_sync = atyfb_sync;
45009+ pax_close_kernel();
45010 info->flags &= ~FBINFO_HWACCEL_DISABLED;
45011 } else {
45012- info->fbops->fb_sync = NULL;
45013+ pax_open_kernel();
45014+ *(void **)&info->fbops->fb_sync = NULL;
45015+ pax_close_kernel();
45016 info->flags |= FBINFO_HWACCEL_DISABLED;
45017 }
45018
45019diff --git a/drivers/video/aty/mach64_cursor.c b/drivers/video/aty/mach64_cursor.c
45020index 95ec042..e6affdd 100644
45021--- a/drivers/video/aty/mach64_cursor.c
45022+++ b/drivers/video/aty/mach64_cursor.c
45023@@ -7,6 +7,7 @@
45024 #include <linux/string.h>
45025
45026 #include <asm/io.h>
45027+#include <asm/pgtable.h>
45028
45029 #ifdef __sparc__
45030 #include <asm/fbio.h>
45031@@ -208,7 +209,9 @@ int aty_init_cursor(struct fb_info *info)
45032 info->sprite.buf_align = 16; /* and 64 lines tall. */
45033 info->sprite.flags = FB_PIXMAP_IO;
45034
45035- info->fbops->fb_cursor = atyfb_cursor;
45036+ pax_open_kernel();
45037+ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
45038+ pax_close_kernel();
45039
45040 return 0;
45041 }
45042diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
45043index bca6ccc..252107e 100644
45044--- a/drivers/video/backlight/kb3886_bl.c
45045+++ b/drivers/video/backlight/kb3886_bl.c
45046@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
45047 static unsigned long kb3886bl_flags;
45048 #define KB3886BL_SUSPENDED 0x01
45049
45050-static struct dmi_system_id __initdata kb3886bl_device_table[] = {
45051+static const struct dmi_system_id __initconst kb3886bl_device_table[] = {
45052 {
45053 .ident = "Sahara Touch-iT",
45054 .matches = {
45055diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
45056index 900aa4e..6d49418 100644
45057--- a/drivers/video/fb_defio.c
45058+++ b/drivers/video/fb_defio.c
45059@@ -206,7 +206,9 @@ void fb_deferred_io_init(struct fb_info *info)
45060
45061 BUG_ON(!fbdefio);
45062 mutex_init(&fbdefio->lock);
45063- info->fbops->fb_mmap = fb_deferred_io_mmap;
45064+ pax_open_kernel();
45065+ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
45066+ pax_close_kernel();
45067 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
45068 INIT_LIST_HEAD(&fbdefio->pagelist);
45069 if (fbdefio->delay == 0) /* set a default of 1 s */
45070@@ -237,7 +239,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
45071 page->mapping = NULL;
45072 }
45073
45074- info->fbops->fb_mmap = NULL;
45075+ *(void **)&info->fbops->fb_mmap = NULL;
45076 mutex_destroy(&fbdefio->lock);
45077 }
45078 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
45079diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
45080index 5c3960d..15cf8fc 100644
45081--- a/drivers/video/fbcmap.c
45082+++ b/drivers/video/fbcmap.c
45083@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
45084 rc = -ENODEV;
45085 goto out;
45086 }
45087- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
45088- !info->fbops->fb_setcmap)) {
45089+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
45090 rc = -EINVAL;
45091 goto out1;
45092 }
45093diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
45094index 098bfc6..796841d 100644
45095--- a/drivers/video/fbmem.c
45096+++ b/drivers/video/fbmem.c
45097@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
45098 image->dx += image->width + 8;
45099 }
45100 } else if (rotate == FB_ROTATE_UD) {
45101- for (x = 0; x < num && image->dx >= 0; x++) {
45102+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
45103 info->fbops->fb_imageblit(info, image);
45104 image->dx -= image->width + 8;
45105 }
45106@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
45107 image->dy += image->height + 8;
45108 }
45109 } else if (rotate == FB_ROTATE_CCW) {
45110- for (x = 0; x < num && image->dy >= 0; x++) {
45111+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
45112 info->fbops->fb_imageblit(info, image);
45113 image->dy -= image->height + 8;
45114 }
45115@@ -1166,7 +1166,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
45116 return -EFAULT;
45117 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
45118 return -EINVAL;
45119- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
45120+ if (con2fb.framebuffer >= FB_MAX)
45121 return -EINVAL;
45122 if (!registered_fb[con2fb.framebuffer])
45123 request_module("fb%d", con2fb.framebuffer);
45124diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
45125index 7672d2e..b56437f 100644
45126--- a/drivers/video/i810/i810_accel.c
45127+++ b/drivers/video/i810/i810_accel.c
45128@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
45129 }
45130 }
45131 printk("ringbuffer lockup!!!\n");
45132+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
45133 i810_report_error(mmio);
45134 par->dev_flags |= LOCKUP;
45135 info->pixmap.scan_align = 1;
45136diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
45137index 3c14e43..eafa544 100644
45138--- a/drivers/video/logo/logo_linux_clut224.ppm
45139+++ b/drivers/video/logo/logo_linux_clut224.ppm
45140@@ -1,1604 +1,1123 @@
45141 P3
45142-# Standard 224-color Linux logo
45143 80 80
45144 255
45145- 0 0 0 0 0 0 0 0 0 0 0 0
45146- 0 0 0 0 0 0 0 0 0 0 0 0
45147- 0 0 0 0 0 0 0 0 0 0 0 0
45148- 0 0 0 0 0 0 0 0 0 0 0 0
45149- 0 0 0 0 0 0 0 0 0 0 0 0
45150- 0 0 0 0 0 0 0 0 0 0 0 0
45151- 0 0 0 0 0 0 0 0 0 0 0 0
45152- 0 0 0 0 0 0 0 0 0 0 0 0
45153- 0 0 0 0 0 0 0 0 0 0 0 0
45154- 6 6 6 6 6 6 10 10 10 10 10 10
45155- 10 10 10 6 6 6 6 6 6 6 6 6
45156- 0 0 0 0 0 0 0 0 0 0 0 0
45157- 0 0 0 0 0 0 0 0 0 0 0 0
45158- 0 0 0 0 0 0 0 0 0 0 0 0
45159- 0 0 0 0 0 0 0 0 0 0 0 0
45160- 0 0 0 0 0 0 0 0 0 0 0 0
45161- 0 0 0 0 0 0 0 0 0 0 0 0
45162- 0 0 0 0 0 0 0 0 0 0 0 0
45163- 0 0 0 0 0 0 0 0 0 0 0 0
45164- 0 0 0 0 0 0 0 0 0 0 0 0
45165- 0 0 0 0 0 0 0 0 0 0 0 0
45166- 0 0 0 0 0 0 0 0 0 0 0 0
45167- 0 0 0 0 0 0 0 0 0 0 0 0
45168- 0 0 0 0 0 0 0 0 0 0 0 0
45169- 0 0 0 0 0 0 0 0 0 0 0 0
45170- 0 0 0 0 0 0 0 0 0 0 0 0
45171- 0 0 0 0 0 0 0 0 0 0 0 0
45172- 0 0 0 0 0 0 0 0 0 0 0 0
45173- 0 0 0 6 6 6 10 10 10 14 14 14
45174- 22 22 22 26 26 26 30 30 30 34 34 34
45175- 30 30 30 30 30 30 26 26 26 18 18 18
45176- 14 14 14 10 10 10 6 6 6 0 0 0
45177- 0 0 0 0 0 0 0 0 0 0 0 0
45178- 0 0 0 0 0 0 0 0 0 0 0 0
45179- 0 0 0 0 0 0 0 0 0 0 0 0
45180- 0 0 0 0 0 0 0 0 0 0 0 0
45181- 0 0 0 0 0 0 0 0 0 0 0 0
45182- 0 0 0 0 0 0 0 0 0 0 0 0
45183- 0 0 0 0 0 0 0 0 0 0 0 0
45184- 0 0 0 0 0 0 0 0 0 0 0 0
45185- 0 0 0 0 0 0 0 0 0 0 0 0
45186- 0 0 0 0 0 1 0 0 1 0 0 0
45187- 0 0 0 0 0 0 0 0 0 0 0 0
45188- 0 0 0 0 0 0 0 0 0 0 0 0
45189- 0 0 0 0 0 0 0 0 0 0 0 0
45190- 0 0 0 0 0 0 0 0 0 0 0 0
45191- 0 0 0 0 0 0 0 0 0 0 0 0
45192- 0 0 0 0 0 0 0 0 0 0 0 0
45193- 6 6 6 14 14 14 26 26 26 42 42 42
45194- 54 54 54 66 66 66 78 78 78 78 78 78
45195- 78 78 78 74 74 74 66 66 66 54 54 54
45196- 42 42 42 26 26 26 18 18 18 10 10 10
45197- 6 6 6 0 0 0 0 0 0 0 0 0
45198- 0 0 0 0 0 0 0 0 0 0 0 0
45199- 0 0 0 0 0 0 0 0 0 0 0 0
45200- 0 0 0 0 0 0 0 0 0 0 0 0
45201- 0 0 0 0 0 0 0 0 0 0 0 0
45202- 0 0 0 0 0 0 0 0 0 0 0 0
45203- 0 0 0 0 0 0 0 0 0 0 0 0
45204- 0 0 0 0 0 0 0 0 0 0 0 0
45205- 0 0 0 0 0 0 0 0 0 0 0 0
45206- 0 0 1 0 0 0 0 0 0 0 0 0
45207- 0 0 0 0 0 0 0 0 0 0 0 0
45208- 0 0 0 0 0 0 0 0 0 0 0 0
45209- 0 0 0 0 0 0 0 0 0 0 0 0
45210- 0 0 0 0 0 0 0 0 0 0 0 0
45211- 0 0 0 0 0 0 0 0 0 0 0 0
45212- 0 0 0 0 0 0 0 0 0 10 10 10
45213- 22 22 22 42 42 42 66 66 66 86 86 86
45214- 66 66 66 38 38 38 38 38 38 22 22 22
45215- 26 26 26 34 34 34 54 54 54 66 66 66
45216- 86 86 86 70 70 70 46 46 46 26 26 26
45217- 14 14 14 6 6 6 0 0 0 0 0 0
45218- 0 0 0 0 0 0 0 0 0 0 0 0
45219- 0 0 0 0 0 0 0 0 0 0 0 0
45220- 0 0 0 0 0 0 0 0 0 0 0 0
45221- 0 0 0 0 0 0 0 0 0 0 0 0
45222- 0 0 0 0 0 0 0 0 0 0 0 0
45223- 0 0 0 0 0 0 0 0 0 0 0 0
45224- 0 0 0 0 0 0 0 0 0 0 0 0
45225- 0 0 0 0 0 0 0 0 0 0 0 0
45226- 0 0 1 0 0 1 0 0 1 0 0 0
45227- 0 0 0 0 0 0 0 0 0 0 0 0
45228- 0 0 0 0 0 0 0 0 0 0 0 0
45229- 0 0 0 0 0 0 0 0 0 0 0 0
45230- 0 0 0 0 0 0 0 0 0 0 0 0
45231- 0 0 0 0 0 0 0 0 0 0 0 0
45232- 0 0 0 0 0 0 10 10 10 26 26 26
45233- 50 50 50 82 82 82 58 58 58 6 6 6
45234- 2 2 6 2 2 6 2 2 6 2 2 6
45235- 2 2 6 2 2 6 2 2 6 2 2 6
45236- 6 6 6 54 54 54 86 86 86 66 66 66
45237- 38 38 38 18 18 18 6 6 6 0 0 0
45238- 0 0 0 0 0 0 0 0 0 0 0 0
45239- 0 0 0 0 0 0 0 0 0 0 0 0
45240- 0 0 0 0 0 0 0 0 0 0 0 0
45241- 0 0 0 0 0 0 0 0 0 0 0 0
45242- 0 0 0 0 0 0 0 0 0 0 0 0
45243- 0 0 0 0 0 0 0 0 0 0 0 0
45244- 0 0 0 0 0 0 0 0 0 0 0 0
45245- 0 0 0 0 0 0 0 0 0 0 0 0
45246- 0 0 0 0 0 0 0 0 0 0 0 0
45247- 0 0 0 0 0 0 0 0 0 0 0 0
45248- 0 0 0 0 0 0 0 0 0 0 0 0
45249- 0 0 0 0 0 0 0 0 0 0 0 0
45250- 0 0 0 0 0 0 0 0 0 0 0 0
45251- 0 0 0 0 0 0 0 0 0 0 0 0
45252- 0 0 0 6 6 6 22 22 22 50 50 50
45253- 78 78 78 34 34 34 2 2 6 2 2 6
45254- 2 2 6 2 2 6 2 2 6 2 2 6
45255- 2 2 6 2 2 6 2 2 6 2 2 6
45256- 2 2 6 2 2 6 6 6 6 70 70 70
45257- 78 78 78 46 46 46 22 22 22 6 6 6
45258- 0 0 0 0 0 0 0 0 0 0 0 0
45259- 0 0 0 0 0 0 0 0 0 0 0 0
45260- 0 0 0 0 0 0 0 0 0 0 0 0
45261- 0 0 0 0 0 0 0 0 0 0 0 0
45262- 0 0 0 0 0 0 0 0 0 0 0 0
45263- 0 0 0 0 0 0 0 0 0 0 0 0
45264- 0 0 0 0 0 0 0 0 0 0 0 0
45265- 0 0 0 0 0 0 0 0 0 0 0 0
45266- 0 0 1 0 0 1 0 0 1 0 0 0
45267- 0 0 0 0 0 0 0 0 0 0 0 0
45268- 0 0 0 0 0 0 0 0 0 0 0 0
45269- 0 0 0 0 0 0 0 0 0 0 0 0
45270- 0 0 0 0 0 0 0 0 0 0 0 0
45271- 0 0 0 0 0 0 0 0 0 0 0 0
45272- 6 6 6 18 18 18 42 42 42 82 82 82
45273- 26 26 26 2 2 6 2 2 6 2 2 6
45274- 2 2 6 2 2 6 2 2 6 2 2 6
45275- 2 2 6 2 2 6 2 2 6 14 14 14
45276- 46 46 46 34 34 34 6 6 6 2 2 6
45277- 42 42 42 78 78 78 42 42 42 18 18 18
45278- 6 6 6 0 0 0 0 0 0 0 0 0
45279- 0 0 0 0 0 0 0 0 0 0 0 0
45280- 0 0 0 0 0 0 0 0 0 0 0 0
45281- 0 0 0 0 0 0 0 0 0 0 0 0
45282- 0 0 0 0 0 0 0 0 0 0 0 0
45283- 0 0 0 0 0 0 0 0 0 0 0 0
45284- 0 0 0 0 0 0 0 0 0 0 0 0
45285- 0 0 0 0 0 0 0 0 0 0 0 0
45286- 0 0 1 0 0 0 0 0 1 0 0 0
45287- 0 0 0 0 0 0 0 0 0 0 0 0
45288- 0 0 0 0 0 0 0 0 0 0 0 0
45289- 0 0 0 0 0 0 0 0 0 0 0 0
45290- 0 0 0 0 0 0 0 0 0 0 0 0
45291- 0 0 0 0 0 0 0 0 0 0 0 0
45292- 10 10 10 30 30 30 66 66 66 58 58 58
45293- 2 2 6 2 2 6 2 2 6 2 2 6
45294- 2 2 6 2 2 6 2 2 6 2 2 6
45295- 2 2 6 2 2 6 2 2 6 26 26 26
45296- 86 86 86 101 101 101 46 46 46 10 10 10
45297- 2 2 6 58 58 58 70 70 70 34 34 34
45298- 10 10 10 0 0 0 0 0 0 0 0 0
45299- 0 0 0 0 0 0 0 0 0 0 0 0
45300- 0 0 0 0 0 0 0 0 0 0 0 0
45301- 0 0 0 0 0 0 0 0 0 0 0 0
45302- 0 0 0 0 0 0 0 0 0 0 0 0
45303- 0 0 0 0 0 0 0 0 0 0 0 0
45304- 0 0 0 0 0 0 0 0 0 0 0 0
45305- 0 0 0 0 0 0 0 0 0 0 0 0
45306- 0 0 1 0 0 1 0 0 1 0 0 0
45307- 0 0 0 0 0 0 0 0 0 0 0 0
45308- 0 0 0 0 0 0 0 0 0 0 0 0
45309- 0 0 0 0 0 0 0 0 0 0 0 0
45310- 0 0 0 0 0 0 0 0 0 0 0 0
45311- 0 0 0 0 0 0 0 0 0 0 0 0
45312- 14 14 14 42 42 42 86 86 86 10 10 10
45313- 2 2 6 2 2 6 2 2 6 2 2 6
45314- 2 2 6 2 2 6 2 2 6 2 2 6
45315- 2 2 6 2 2 6 2 2 6 30 30 30
45316- 94 94 94 94 94 94 58 58 58 26 26 26
45317- 2 2 6 6 6 6 78 78 78 54 54 54
45318- 22 22 22 6 6 6 0 0 0 0 0 0
45319- 0 0 0 0 0 0 0 0 0 0 0 0
45320- 0 0 0 0 0 0 0 0 0 0 0 0
45321- 0 0 0 0 0 0 0 0 0 0 0 0
45322- 0 0 0 0 0 0 0 0 0 0 0 0
45323- 0 0 0 0 0 0 0 0 0 0 0 0
45324- 0 0 0 0 0 0 0 0 0 0 0 0
45325- 0 0 0 0 0 0 0 0 0 0 0 0
45326- 0 0 0 0 0 0 0 0 0 0 0 0
45327- 0 0 0 0 0 0 0 0 0 0 0 0
45328- 0 0 0 0 0 0 0 0 0 0 0 0
45329- 0 0 0 0 0 0 0 0 0 0 0 0
45330- 0 0 0 0 0 0 0 0 0 0 0 0
45331- 0 0 0 0 0 0 0 0 0 6 6 6
45332- 22 22 22 62 62 62 62 62 62 2 2 6
45333- 2 2 6 2 2 6 2 2 6 2 2 6
45334- 2 2 6 2 2 6 2 2 6 2 2 6
45335- 2 2 6 2 2 6 2 2 6 26 26 26
45336- 54 54 54 38 38 38 18 18 18 10 10 10
45337- 2 2 6 2 2 6 34 34 34 82 82 82
45338- 38 38 38 14 14 14 0 0 0 0 0 0
45339- 0 0 0 0 0 0 0 0 0 0 0 0
45340- 0 0 0 0 0 0 0 0 0 0 0 0
45341- 0 0 0 0 0 0 0 0 0 0 0 0
45342- 0 0 0 0 0 0 0 0 0 0 0 0
45343- 0 0 0 0 0 0 0 0 0 0 0 0
45344- 0 0 0 0 0 0 0 0 0 0 0 0
45345- 0 0 0 0 0 0 0 0 0 0 0 0
45346- 0 0 0 0 0 1 0 0 1 0 0 0
45347- 0 0 0 0 0 0 0 0 0 0 0 0
45348- 0 0 0 0 0 0 0 0 0 0 0 0
45349- 0 0 0 0 0 0 0 0 0 0 0 0
45350- 0 0 0 0 0 0 0 0 0 0 0 0
45351- 0 0 0 0 0 0 0 0 0 6 6 6
45352- 30 30 30 78 78 78 30 30 30 2 2 6
45353- 2 2 6 2 2 6 2 2 6 2 2 6
45354- 2 2 6 2 2 6 2 2 6 2 2 6
45355- 2 2 6 2 2 6 2 2 6 10 10 10
45356- 10 10 10 2 2 6 2 2 6 2 2 6
45357- 2 2 6 2 2 6 2 2 6 78 78 78
45358- 50 50 50 18 18 18 6 6 6 0 0 0
45359- 0 0 0 0 0 0 0 0 0 0 0 0
45360- 0 0 0 0 0 0 0 0 0 0 0 0
45361- 0 0 0 0 0 0 0 0 0 0 0 0
45362- 0 0 0 0 0 0 0 0 0 0 0 0
45363- 0 0 0 0 0 0 0 0 0 0 0 0
45364- 0 0 0 0 0 0 0 0 0 0 0 0
45365- 0 0 0 0 0 0 0 0 0 0 0 0
45366- 0 0 1 0 0 0 0 0 0 0 0 0
45367- 0 0 0 0 0 0 0 0 0 0 0 0
45368- 0 0 0 0 0 0 0 0 0 0 0 0
45369- 0 0 0 0 0 0 0 0 0 0 0 0
45370- 0 0 0 0 0 0 0 0 0 0 0 0
45371- 0 0 0 0 0 0 0 0 0 10 10 10
45372- 38 38 38 86 86 86 14 14 14 2 2 6
45373- 2 2 6 2 2 6 2 2 6 2 2 6
45374- 2 2 6 2 2 6 2 2 6 2 2 6
45375- 2 2 6 2 2 6 2 2 6 2 2 6
45376- 2 2 6 2 2 6 2 2 6 2 2 6
45377- 2 2 6 2 2 6 2 2 6 54 54 54
45378- 66 66 66 26 26 26 6 6 6 0 0 0
45379- 0 0 0 0 0 0 0 0 0 0 0 0
45380- 0 0 0 0 0 0 0 0 0 0 0 0
45381- 0 0 0 0 0 0 0 0 0 0 0 0
45382- 0 0 0 0 0 0 0 0 0 0 0 0
45383- 0 0 0 0 0 0 0 0 0 0 0 0
45384- 0 0 0 0 0 0 0 0 0 0 0 0
45385- 0 0 0 0 0 0 0 0 0 0 0 0
45386- 0 0 0 0 0 1 0 0 1 0 0 0
45387- 0 0 0 0 0 0 0 0 0 0 0 0
45388- 0 0 0 0 0 0 0 0 0 0 0 0
45389- 0 0 0 0 0 0 0 0 0 0 0 0
45390- 0 0 0 0 0 0 0 0 0 0 0 0
45391- 0 0 0 0 0 0 0 0 0 14 14 14
45392- 42 42 42 82 82 82 2 2 6 2 2 6
45393- 2 2 6 6 6 6 10 10 10 2 2 6
45394- 2 2 6 2 2 6 2 2 6 2 2 6
45395- 2 2 6 2 2 6 2 2 6 6 6 6
45396- 14 14 14 10 10 10 2 2 6 2 2 6
45397- 2 2 6 2 2 6 2 2 6 18 18 18
45398- 82 82 82 34 34 34 10 10 10 0 0 0
45399- 0 0 0 0 0 0 0 0 0 0 0 0
45400- 0 0 0 0 0 0 0 0 0 0 0 0
45401- 0 0 0 0 0 0 0 0 0 0 0 0
45402- 0 0 0 0 0 0 0 0 0 0 0 0
45403- 0 0 0 0 0 0 0 0 0 0 0 0
45404- 0 0 0 0 0 0 0 0 0 0 0 0
45405- 0 0 0 0 0 0 0 0 0 0 0 0
45406- 0 0 1 0 0 0 0 0 0 0 0 0
45407- 0 0 0 0 0 0 0 0 0 0 0 0
45408- 0 0 0 0 0 0 0 0 0 0 0 0
45409- 0 0 0 0 0 0 0 0 0 0 0 0
45410- 0 0 0 0 0 0 0 0 0 0 0 0
45411- 0 0 0 0 0 0 0 0 0 14 14 14
45412- 46 46 46 86 86 86 2 2 6 2 2 6
45413- 6 6 6 6 6 6 22 22 22 34 34 34
45414- 6 6 6 2 2 6 2 2 6 2 2 6
45415- 2 2 6 2 2 6 18 18 18 34 34 34
45416- 10 10 10 50 50 50 22 22 22 2 2 6
45417- 2 2 6 2 2 6 2 2 6 10 10 10
45418- 86 86 86 42 42 42 14 14 14 0 0 0
45419- 0 0 0 0 0 0 0 0 0 0 0 0
45420- 0 0 0 0 0 0 0 0 0 0 0 0
45421- 0 0 0 0 0 0 0 0 0 0 0 0
45422- 0 0 0 0 0 0 0 0 0 0 0 0
45423- 0 0 0 0 0 0 0 0 0 0 0 0
45424- 0 0 0 0 0 0 0 0 0 0 0 0
45425- 0 0 0 0 0 0 0 0 0 0 0 0
45426- 0 0 1 0 0 1 0 0 1 0 0 0
45427- 0 0 0 0 0 0 0 0 0 0 0 0
45428- 0 0 0 0 0 0 0 0 0 0 0 0
45429- 0 0 0 0 0 0 0 0 0 0 0 0
45430- 0 0 0 0 0 0 0 0 0 0 0 0
45431- 0 0 0 0 0 0 0 0 0 14 14 14
45432- 46 46 46 86 86 86 2 2 6 2 2 6
45433- 38 38 38 116 116 116 94 94 94 22 22 22
45434- 22 22 22 2 2 6 2 2 6 2 2 6
45435- 14 14 14 86 86 86 138 138 138 162 162 162
45436-154 154 154 38 38 38 26 26 26 6 6 6
45437- 2 2 6 2 2 6 2 2 6 2 2 6
45438- 86 86 86 46 46 46 14 14 14 0 0 0
45439- 0 0 0 0 0 0 0 0 0 0 0 0
45440- 0 0 0 0 0 0 0 0 0 0 0 0
45441- 0 0 0 0 0 0 0 0 0 0 0 0
45442- 0 0 0 0 0 0 0 0 0 0 0 0
45443- 0 0 0 0 0 0 0 0 0 0 0 0
45444- 0 0 0 0 0 0 0 0 0 0 0 0
45445- 0 0 0 0 0 0 0 0 0 0 0 0
45446- 0 0 0 0 0 0 0 0 0 0 0 0
45447- 0 0 0 0 0 0 0 0 0 0 0 0
45448- 0 0 0 0 0 0 0 0 0 0 0 0
45449- 0 0 0 0 0 0 0 0 0 0 0 0
45450- 0 0 0 0 0 0 0 0 0 0 0 0
45451- 0 0 0 0 0 0 0 0 0 14 14 14
45452- 46 46 46 86 86 86 2 2 6 14 14 14
45453-134 134 134 198 198 198 195 195 195 116 116 116
45454- 10 10 10 2 2 6 2 2 6 6 6 6
45455-101 98 89 187 187 187 210 210 210 218 218 218
45456-214 214 214 134 134 134 14 14 14 6 6 6
45457- 2 2 6 2 2 6 2 2 6 2 2 6
45458- 86 86 86 50 50 50 18 18 18 6 6 6
45459- 0 0 0 0 0 0 0 0 0 0 0 0
45460- 0 0 0 0 0 0 0 0 0 0 0 0
45461- 0 0 0 0 0 0 0 0 0 0 0 0
45462- 0 0 0 0 0 0 0 0 0 0 0 0
45463- 0 0 0 0 0 0 0 0 0 0 0 0
45464- 0 0 0 0 0 0 0 0 0 0 0 0
45465- 0 0 0 0 0 0 0 0 1 0 0 0
45466- 0 0 1 0 0 1 0 0 1 0 0 0
45467- 0 0 0 0 0 0 0 0 0 0 0 0
45468- 0 0 0 0 0 0 0 0 0 0 0 0
45469- 0 0 0 0 0 0 0 0 0 0 0 0
45470- 0 0 0 0 0 0 0 0 0 0 0 0
45471- 0 0 0 0 0 0 0 0 0 14 14 14
45472- 46 46 46 86 86 86 2 2 6 54 54 54
45473-218 218 218 195 195 195 226 226 226 246 246 246
45474- 58 58 58 2 2 6 2 2 6 30 30 30
45475-210 210 210 253 253 253 174 174 174 123 123 123
45476-221 221 221 234 234 234 74 74 74 2 2 6
45477- 2 2 6 2 2 6 2 2 6 2 2 6
45478- 70 70 70 58 58 58 22 22 22 6 6 6
45479- 0 0 0 0 0 0 0 0 0 0 0 0
45480- 0 0 0 0 0 0 0 0 0 0 0 0
45481- 0 0 0 0 0 0 0 0 0 0 0 0
45482- 0 0 0 0 0 0 0 0 0 0 0 0
45483- 0 0 0 0 0 0 0 0 0 0 0 0
45484- 0 0 0 0 0 0 0 0 0 0 0 0
45485- 0 0 0 0 0 0 0 0 0 0 0 0
45486- 0 0 0 0 0 0 0 0 0 0 0 0
45487- 0 0 0 0 0 0 0 0 0 0 0 0
45488- 0 0 0 0 0 0 0 0 0 0 0 0
45489- 0 0 0 0 0 0 0 0 0 0 0 0
45490- 0 0 0 0 0 0 0 0 0 0 0 0
45491- 0 0 0 0 0 0 0 0 0 14 14 14
45492- 46 46 46 82 82 82 2 2 6 106 106 106
45493-170 170 170 26 26 26 86 86 86 226 226 226
45494-123 123 123 10 10 10 14 14 14 46 46 46
45495-231 231 231 190 190 190 6 6 6 70 70 70
45496- 90 90 90 238 238 238 158 158 158 2 2 6
45497- 2 2 6 2 2 6 2 2 6 2 2 6
45498- 70 70 70 58 58 58 22 22 22 6 6 6
45499- 0 0 0 0 0 0 0 0 0 0 0 0
45500- 0 0 0 0 0 0 0 0 0 0 0 0
45501- 0 0 0 0 0 0 0 0 0 0 0 0
45502- 0 0 0 0 0 0 0 0 0 0 0 0
45503- 0 0 0 0 0 0 0 0 0 0 0 0
45504- 0 0 0 0 0 0 0 0 0 0 0 0
45505- 0 0 0 0 0 0 0 0 1 0 0 0
45506- 0 0 1 0 0 1 0 0 1 0 0 0
45507- 0 0 0 0 0 0 0 0 0 0 0 0
45508- 0 0 0 0 0 0 0 0 0 0 0 0
45509- 0 0 0 0 0 0 0 0 0 0 0 0
45510- 0 0 0 0 0 0 0 0 0 0 0 0
45511- 0 0 0 0 0 0 0 0 0 14 14 14
45512- 42 42 42 86 86 86 6 6 6 116 116 116
45513-106 106 106 6 6 6 70 70 70 149 149 149
45514-128 128 128 18 18 18 38 38 38 54 54 54
45515-221 221 221 106 106 106 2 2 6 14 14 14
45516- 46 46 46 190 190 190 198 198 198 2 2 6
45517- 2 2 6 2 2 6 2 2 6 2 2 6
45518- 74 74 74 62 62 62 22 22 22 6 6 6
45519- 0 0 0 0 0 0 0 0 0 0 0 0
45520- 0 0 0 0 0 0 0 0 0 0 0 0
45521- 0 0 0 0 0 0 0 0 0 0 0 0
45522- 0 0 0 0 0 0 0 0 0 0 0 0
45523- 0 0 0 0 0 0 0 0 0 0 0 0
45524- 0 0 0 0 0 0 0 0 0 0 0 0
45525- 0 0 0 0 0 0 0 0 1 0 0 0
45526- 0 0 1 0 0 0 0 0 1 0 0 0
45527- 0 0 0 0 0 0 0 0 0 0 0 0
45528- 0 0 0 0 0 0 0 0 0 0 0 0
45529- 0 0 0 0 0 0 0 0 0 0 0 0
45530- 0 0 0 0 0 0 0 0 0 0 0 0
45531- 0 0 0 0 0 0 0 0 0 14 14 14
45532- 42 42 42 94 94 94 14 14 14 101 101 101
45533-128 128 128 2 2 6 18 18 18 116 116 116
45534-118 98 46 121 92 8 121 92 8 98 78 10
45535-162 162 162 106 106 106 2 2 6 2 2 6
45536- 2 2 6 195 195 195 195 195 195 6 6 6
45537- 2 2 6 2 2 6 2 2 6 2 2 6
45538- 74 74 74 62 62 62 22 22 22 6 6 6
45539- 0 0 0 0 0 0 0 0 0 0 0 0
45540- 0 0 0 0 0 0 0 0 0 0 0 0
45541- 0 0 0 0 0 0 0 0 0 0 0 0
45542- 0 0 0 0 0 0 0 0 0 0 0 0
45543- 0 0 0 0 0 0 0 0 0 0 0 0
45544- 0 0 0 0 0 0 0 0 0 0 0 0
45545- 0 0 0 0 0 0 0 0 1 0 0 1
45546- 0 0 1 0 0 0 0 0 1 0 0 0
45547- 0 0 0 0 0 0 0 0 0 0 0 0
45548- 0 0 0 0 0 0 0 0 0 0 0 0
45549- 0 0 0 0 0 0 0 0 0 0 0 0
45550- 0 0 0 0 0 0 0 0 0 0 0 0
45551- 0 0 0 0 0 0 0 0 0 10 10 10
45552- 38 38 38 90 90 90 14 14 14 58 58 58
45553-210 210 210 26 26 26 54 38 6 154 114 10
45554-226 170 11 236 186 11 225 175 15 184 144 12
45555-215 174 15 175 146 61 37 26 9 2 2 6
45556- 70 70 70 246 246 246 138 138 138 2 2 6
45557- 2 2 6 2 2 6 2 2 6 2 2 6
45558- 70 70 70 66 66 66 26 26 26 6 6 6
45559- 0 0 0 0 0 0 0 0 0 0 0 0
45560- 0 0 0 0 0 0 0 0 0 0 0 0
45561- 0 0 0 0 0 0 0 0 0 0 0 0
45562- 0 0 0 0 0 0 0 0 0 0 0 0
45563- 0 0 0 0 0 0 0 0 0 0 0 0
45564- 0 0 0 0 0 0 0 0 0 0 0 0
45565- 0 0 0 0 0 0 0 0 0 0 0 0
45566- 0 0 0 0 0 0 0 0 0 0 0 0
45567- 0 0 0 0 0 0 0 0 0 0 0 0
45568- 0 0 0 0 0 0 0 0 0 0 0 0
45569- 0 0 0 0 0 0 0 0 0 0 0 0
45570- 0 0 0 0 0 0 0 0 0 0 0 0
45571- 0 0 0 0 0 0 0 0 0 10 10 10
45572- 38 38 38 86 86 86 14 14 14 10 10 10
45573-195 195 195 188 164 115 192 133 9 225 175 15
45574-239 182 13 234 190 10 232 195 16 232 200 30
45575-245 207 45 241 208 19 232 195 16 184 144 12
45576-218 194 134 211 206 186 42 42 42 2 2 6
45577- 2 2 6 2 2 6 2 2 6 2 2 6
45578- 50 50 50 74 74 74 30 30 30 6 6 6
45579- 0 0 0 0 0 0 0 0 0 0 0 0
45580- 0 0 0 0 0 0 0 0 0 0 0 0
45581- 0 0 0 0 0 0 0 0 0 0 0 0
45582- 0 0 0 0 0 0 0 0 0 0 0 0
45583- 0 0 0 0 0 0 0 0 0 0 0 0
45584- 0 0 0 0 0 0 0 0 0 0 0 0
45585- 0 0 0 0 0 0 0 0 0 0 0 0
45586- 0 0 0 0 0 0 0 0 0 0 0 0
45587- 0 0 0 0 0 0 0 0 0 0 0 0
45588- 0 0 0 0 0 0 0 0 0 0 0 0
45589- 0 0 0 0 0 0 0 0 0 0 0 0
45590- 0 0 0 0 0 0 0 0 0 0 0 0
45591- 0 0 0 0 0 0 0 0 0 10 10 10
45592- 34 34 34 86 86 86 14 14 14 2 2 6
45593-121 87 25 192 133 9 219 162 10 239 182 13
45594-236 186 11 232 195 16 241 208 19 244 214 54
45595-246 218 60 246 218 38 246 215 20 241 208 19
45596-241 208 19 226 184 13 121 87 25 2 2 6
45597- 2 2 6 2 2 6 2 2 6 2 2 6
45598- 50 50 50 82 82 82 34 34 34 10 10 10
45599- 0 0 0 0 0 0 0 0 0 0 0 0
45600- 0 0 0 0 0 0 0 0 0 0 0 0
45601- 0 0 0 0 0 0 0 0 0 0 0 0
45602- 0 0 0 0 0 0 0 0 0 0 0 0
45603- 0 0 0 0 0 0 0 0 0 0 0 0
45604- 0 0 0 0 0 0 0 0 0 0 0 0
45605- 0 0 0 0 0 0 0 0 0 0 0 0
45606- 0 0 0 0 0 0 0 0 0 0 0 0
45607- 0 0 0 0 0 0 0 0 0 0 0 0
45608- 0 0 0 0 0 0 0 0 0 0 0 0
45609- 0 0 0 0 0 0 0 0 0 0 0 0
45610- 0 0 0 0 0 0 0 0 0 0 0 0
45611- 0 0 0 0 0 0 0 0 0 10 10 10
45612- 34 34 34 82 82 82 30 30 30 61 42 6
45613-180 123 7 206 145 10 230 174 11 239 182 13
45614-234 190 10 238 202 15 241 208 19 246 218 74
45615-246 218 38 246 215 20 246 215 20 246 215 20
45616-226 184 13 215 174 15 184 144 12 6 6 6
45617- 2 2 6 2 2 6 2 2 6 2 2 6
45618- 26 26 26 94 94 94 42 42 42 14 14 14
45619- 0 0 0 0 0 0 0 0 0 0 0 0
45620- 0 0 0 0 0 0 0 0 0 0 0 0
45621- 0 0 0 0 0 0 0 0 0 0 0 0
45622- 0 0 0 0 0 0 0 0 0 0 0 0
45623- 0 0 0 0 0 0 0 0 0 0 0 0
45624- 0 0 0 0 0 0 0 0 0 0 0 0
45625- 0 0 0 0 0 0 0 0 0 0 0 0
45626- 0 0 0 0 0 0 0 0 0 0 0 0
45627- 0 0 0 0 0 0 0 0 0 0 0 0
45628- 0 0 0 0 0 0 0 0 0 0 0 0
45629- 0 0 0 0 0 0 0 0 0 0 0 0
45630- 0 0 0 0 0 0 0 0 0 0 0 0
45631- 0 0 0 0 0 0 0 0 0 10 10 10
45632- 30 30 30 78 78 78 50 50 50 104 69 6
45633-192 133 9 216 158 10 236 178 12 236 186 11
45634-232 195 16 241 208 19 244 214 54 245 215 43
45635-246 215 20 246 215 20 241 208 19 198 155 10
45636-200 144 11 216 158 10 156 118 10 2 2 6
45637- 2 2 6 2 2 6 2 2 6 2 2 6
45638- 6 6 6 90 90 90 54 54 54 18 18 18
45639- 6 6 6 0 0 0 0 0 0 0 0 0
45640- 0 0 0 0 0 0 0 0 0 0 0 0
45641- 0 0 0 0 0 0 0 0 0 0 0 0
45642- 0 0 0 0 0 0 0 0 0 0 0 0
45643- 0 0 0 0 0 0 0 0 0 0 0 0
45644- 0 0 0 0 0 0 0 0 0 0 0 0
45645- 0 0 0 0 0 0 0 0 0 0 0 0
45646- 0 0 0 0 0 0 0 0 0 0 0 0
45647- 0 0 0 0 0 0 0 0 0 0 0 0
45648- 0 0 0 0 0 0 0 0 0 0 0 0
45649- 0 0 0 0 0 0 0 0 0 0 0 0
45650- 0 0 0 0 0 0 0 0 0 0 0 0
45651- 0 0 0 0 0 0 0 0 0 10 10 10
45652- 30 30 30 78 78 78 46 46 46 22 22 22
45653-137 92 6 210 162 10 239 182 13 238 190 10
45654-238 202 15 241 208 19 246 215 20 246 215 20
45655-241 208 19 203 166 17 185 133 11 210 150 10
45656-216 158 10 210 150 10 102 78 10 2 2 6
45657- 6 6 6 54 54 54 14 14 14 2 2 6
45658- 2 2 6 62 62 62 74 74 74 30 30 30
45659- 10 10 10 0 0 0 0 0 0 0 0 0
45660- 0 0 0 0 0 0 0 0 0 0 0 0
45661- 0 0 0 0 0 0 0 0 0 0 0 0
45662- 0 0 0 0 0 0 0 0 0 0 0 0
45663- 0 0 0 0 0 0 0 0 0 0 0 0
45664- 0 0 0 0 0 0 0 0 0 0 0 0
45665- 0 0 0 0 0 0 0 0 0 0 0 0
45666- 0 0 0 0 0 0 0 0 0 0 0 0
45667- 0 0 0 0 0 0 0 0 0 0 0 0
45668- 0 0 0 0 0 0 0 0 0 0 0 0
45669- 0 0 0 0 0 0 0 0 0 0 0 0
45670- 0 0 0 0 0 0 0 0 0 0 0 0
45671- 0 0 0 0 0 0 0 0 0 10 10 10
45672- 34 34 34 78 78 78 50 50 50 6 6 6
45673- 94 70 30 139 102 15 190 146 13 226 184 13
45674-232 200 30 232 195 16 215 174 15 190 146 13
45675-168 122 10 192 133 9 210 150 10 213 154 11
45676-202 150 34 182 157 106 101 98 89 2 2 6
45677- 2 2 6 78 78 78 116 116 116 58 58 58
45678- 2 2 6 22 22 22 90 90 90 46 46 46
45679- 18 18 18 6 6 6 0 0 0 0 0 0
45680- 0 0 0 0 0 0 0 0 0 0 0 0
45681- 0 0 0 0 0 0 0 0 0 0 0 0
45682- 0 0 0 0 0 0 0 0 0 0 0 0
45683- 0 0 0 0 0 0 0 0 0 0 0 0
45684- 0 0 0 0 0 0 0 0 0 0 0 0
45685- 0 0 0 0 0 0 0 0 0 0 0 0
45686- 0 0 0 0 0 0 0 0 0 0 0 0
45687- 0 0 0 0 0 0 0 0 0 0 0 0
45688- 0 0 0 0 0 0 0 0 0 0 0 0
45689- 0 0 0 0 0 0 0 0 0 0 0 0
45690- 0 0 0 0 0 0 0 0 0 0 0 0
45691- 0 0 0 0 0 0 0 0 0 10 10 10
45692- 38 38 38 86 86 86 50 50 50 6 6 6
45693-128 128 128 174 154 114 156 107 11 168 122 10
45694-198 155 10 184 144 12 197 138 11 200 144 11
45695-206 145 10 206 145 10 197 138 11 188 164 115
45696-195 195 195 198 198 198 174 174 174 14 14 14
45697- 2 2 6 22 22 22 116 116 116 116 116 116
45698- 22 22 22 2 2 6 74 74 74 70 70 70
45699- 30 30 30 10 10 10 0 0 0 0 0 0
45700- 0 0 0 0 0 0 0 0 0 0 0 0
45701- 0 0 0 0 0 0 0 0 0 0 0 0
45702- 0 0 0 0 0 0 0 0 0 0 0 0
45703- 0 0 0 0 0 0 0 0 0 0 0 0
45704- 0 0 0 0 0 0 0 0 0 0 0 0
45705- 0 0 0 0 0 0 0 0 0 0 0 0
45706- 0 0 0 0 0 0 0 0 0 0 0 0
45707- 0 0 0 0 0 0 0 0 0 0 0 0
45708- 0 0 0 0 0 0 0 0 0 0 0 0
45709- 0 0 0 0 0 0 0 0 0 0 0 0
45710- 0 0 0 0 0 0 0 0 0 0 0 0
45711- 0 0 0 0 0 0 6 6 6 18 18 18
45712- 50 50 50 101 101 101 26 26 26 10 10 10
45713-138 138 138 190 190 190 174 154 114 156 107 11
45714-197 138 11 200 144 11 197 138 11 192 133 9
45715-180 123 7 190 142 34 190 178 144 187 187 187
45716-202 202 202 221 221 221 214 214 214 66 66 66
45717- 2 2 6 2 2 6 50 50 50 62 62 62
45718- 6 6 6 2 2 6 10 10 10 90 90 90
45719- 50 50 50 18 18 18 6 6 6 0 0 0
45720- 0 0 0 0 0 0 0 0 0 0 0 0
45721- 0 0 0 0 0 0 0 0 0 0 0 0
45722- 0 0 0 0 0 0 0 0 0 0 0 0
45723- 0 0 0 0 0 0 0 0 0 0 0 0
45724- 0 0 0 0 0 0 0 0 0 0 0 0
45725- 0 0 0 0 0 0 0 0 0 0 0 0
45726- 0 0 0 0 0 0 0 0 0 0 0 0
45727- 0 0 0 0 0 0 0 0 0 0 0 0
45728- 0 0 0 0 0 0 0 0 0 0 0 0
45729- 0 0 0 0 0 0 0 0 0 0 0 0
45730- 0 0 0 0 0 0 0 0 0 0 0 0
45731- 0 0 0 0 0 0 10 10 10 34 34 34
45732- 74 74 74 74 74 74 2 2 6 6 6 6
45733-144 144 144 198 198 198 190 190 190 178 166 146
45734-154 121 60 156 107 11 156 107 11 168 124 44
45735-174 154 114 187 187 187 190 190 190 210 210 210
45736-246 246 246 253 253 253 253 253 253 182 182 182
45737- 6 6 6 2 2 6 2 2 6 2 2 6
45738- 2 2 6 2 2 6 2 2 6 62 62 62
45739- 74 74 74 34 34 34 14 14 14 0 0 0
45740- 0 0 0 0 0 0 0 0 0 0 0 0
45741- 0 0 0 0 0 0 0 0 0 0 0 0
45742- 0 0 0 0 0 0 0 0 0 0 0 0
45743- 0 0 0 0 0 0 0 0 0 0 0 0
45744- 0 0 0 0 0 0 0 0 0 0 0 0
45745- 0 0 0 0 0 0 0 0 0 0 0 0
45746- 0 0 0 0 0 0 0 0 0 0 0 0
45747- 0 0 0 0 0 0 0 0 0 0 0 0
45748- 0 0 0 0 0 0 0 0 0 0 0 0
45749- 0 0 0 0 0 0 0 0 0 0 0 0
45750- 0 0 0 0 0 0 0 0 0 0 0 0
45751- 0 0 0 10 10 10 22 22 22 54 54 54
45752- 94 94 94 18 18 18 2 2 6 46 46 46
45753-234 234 234 221 221 221 190 190 190 190 190 190
45754-190 190 190 187 187 187 187 187 187 190 190 190
45755-190 190 190 195 195 195 214 214 214 242 242 242
45756-253 253 253 253 253 253 253 253 253 253 253 253
45757- 82 82 82 2 2 6 2 2 6 2 2 6
45758- 2 2 6 2 2 6 2 2 6 14 14 14
45759- 86 86 86 54 54 54 22 22 22 6 6 6
45760- 0 0 0 0 0 0 0 0 0 0 0 0
45761- 0 0 0 0 0 0 0 0 0 0 0 0
45762- 0 0 0 0 0 0 0 0 0 0 0 0
45763- 0 0 0 0 0 0 0 0 0 0 0 0
45764- 0 0 0 0 0 0 0 0 0 0 0 0
45765- 0 0 0 0 0 0 0 0 0 0 0 0
45766- 0 0 0 0 0 0 0 0 0 0 0 0
45767- 0 0 0 0 0 0 0 0 0 0 0 0
45768- 0 0 0 0 0 0 0 0 0 0 0 0
45769- 0 0 0 0 0 0 0 0 0 0 0 0
45770- 0 0 0 0 0 0 0 0 0 0 0 0
45771- 6 6 6 18 18 18 46 46 46 90 90 90
45772- 46 46 46 18 18 18 6 6 6 182 182 182
45773-253 253 253 246 246 246 206 206 206 190 190 190
45774-190 190 190 190 190 190 190 190 190 190 190 190
45775-206 206 206 231 231 231 250 250 250 253 253 253
45776-253 253 253 253 253 253 253 253 253 253 253 253
45777-202 202 202 14 14 14 2 2 6 2 2 6
45778- 2 2 6 2 2 6 2 2 6 2 2 6
45779- 42 42 42 86 86 86 42 42 42 18 18 18
45780- 6 6 6 0 0 0 0 0 0 0 0 0
45781- 0 0 0 0 0 0 0 0 0 0 0 0
45782- 0 0 0 0 0 0 0 0 0 0 0 0
45783- 0 0 0 0 0 0 0 0 0 0 0 0
45784- 0 0 0 0 0 0 0 0 0 0 0 0
45785- 0 0 0 0 0 0 0 0 0 0 0 0
45786- 0 0 0 0 0 0 0 0 0 0 0 0
45787- 0 0 0 0 0 0 0 0 0 0 0 0
45788- 0 0 0 0 0 0 0 0 0 0 0 0
45789- 0 0 0 0 0 0 0 0 0 0 0 0
45790- 0 0 0 0 0 0 0 0 0 6 6 6
45791- 14 14 14 38 38 38 74 74 74 66 66 66
45792- 2 2 6 6 6 6 90 90 90 250 250 250
45793-253 253 253 253 253 253 238 238 238 198 198 198
45794-190 190 190 190 190 190 195 195 195 221 221 221
45795-246 246 246 253 253 253 253 253 253 253 253 253
45796-253 253 253 253 253 253 253 253 253 253 253 253
45797-253 253 253 82 82 82 2 2 6 2 2 6
45798- 2 2 6 2 2 6 2 2 6 2 2 6
45799- 2 2 6 78 78 78 70 70 70 34 34 34
45800- 14 14 14 6 6 6 0 0 0 0 0 0
45801- 0 0 0 0 0 0 0 0 0 0 0 0
45802- 0 0 0 0 0 0 0 0 0 0 0 0
45803- 0 0 0 0 0 0 0 0 0 0 0 0
45804- 0 0 0 0 0 0 0 0 0 0 0 0
45805- 0 0 0 0 0 0 0 0 0 0 0 0
45806- 0 0 0 0 0 0 0 0 0 0 0 0
45807- 0 0 0 0 0 0 0 0 0 0 0 0
45808- 0 0 0 0 0 0 0 0 0 0 0 0
45809- 0 0 0 0 0 0 0 0 0 0 0 0
45810- 0 0 0 0 0 0 0 0 0 14 14 14
45811- 34 34 34 66 66 66 78 78 78 6 6 6
45812- 2 2 6 18 18 18 218 218 218 253 253 253
45813-253 253 253 253 253 253 253 253 253 246 246 246
45814-226 226 226 231 231 231 246 246 246 253 253 253
45815-253 253 253 253 253 253 253 253 253 253 253 253
45816-253 253 253 253 253 253 253 253 253 253 253 253
45817-253 253 253 178 178 178 2 2 6 2 2 6
45818- 2 2 6 2 2 6 2 2 6 2 2 6
45819- 2 2 6 18 18 18 90 90 90 62 62 62
45820- 30 30 30 10 10 10 0 0 0 0 0 0
45821- 0 0 0 0 0 0 0 0 0 0 0 0
45822- 0 0 0 0 0 0 0 0 0 0 0 0
45823- 0 0 0 0 0 0 0 0 0 0 0 0
45824- 0 0 0 0 0 0 0 0 0 0 0 0
45825- 0 0 0 0 0 0 0 0 0 0 0 0
45826- 0 0 0 0 0 0 0 0 0 0 0 0
45827- 0 0 0 0 0 0 0 0 0 0 0 0
45828- 0 0 0 0 0 0 0 0 0 0 0 0
45829- 0 0 0 0 0 0 0 0 0 0 0 0
45830- 0 0 0 0 0 0 10 10 10 26 26 26
45831- 58 58 58 90 90 90 18 18 18 2 2 6
45832- 2 2 6 110 110 110 253 253 253 253 253 253
45833-253 253 253 253 253 253 253 253 253 253 253 253
45834-250 250 250 253 253 253 253 253 253 253 253 253
45835-253 253 253 253 253 253 253 253 253 253 253 253
45836-253 253 253 253 253 253 253 253 253 253 253 253
45837-253 253 253 231 231 231 18 18 18 2 2 6
45838- 2 2 6 2 2 6 2 2 6 2 2 6
45839- 2 2 6 2 2 6 18 18 18 94 94 94
45840- 54 54 54 26 26 26 10 10 10 0 0 0
45841- 0 0 0 0 0 0 0 0 0 0 0 0
45842- 0 0 0 0 0 0 0 0 0 0 0 0
45843- 0 0 0 0 0 0 0 0 0 0 0 0
45844- 0 0 0 0 0 0 0 0 0 0 0 0
45845- 0 0 0 0 0 0 0 0 0 0 0 0
45846- 0 0 0 0 0 0 0 0 0 0 0 0
45847- 0 0 0 0 0 0 0 0 0 0 0 0
45848- 0 0 0 0 0 0 0 0 0 0 0 0
45849- 0 0 0 0 0 0 0 0 0 0 0 0
45850- 0 0 0 6 6 6 22 22 22 50 50 50
45851- 90 90 90 26 26 26 2 2 6 2 2 6
45852- 14 14 14 195 195 195 250 250 250 253 253 253
45853-253 253 253 253 253 253 253 253 253 253 253 253
45854-253 253 253 253 253 253 253 253 253 253 253 253
45855-253 253 253 253 253 253 253 253 253 253 253 253
45856-253 253 253 253 253 253 253 253 253 253 253 253
45857-250 250 250 242 242 242 54 54 54 2 2 6
45858- 2 2 6 2 2 6 2 2 6 2 2 6
45859- 2 2 6 2 2 6 2 2 6 38 38 38
45860- 86 86 86 50 50 50 22 22 22 6 6 6
45861- 0 0 0 0 0 0 0 0 0 0 0 0
45862- 0 0 0 0 0 0 0 0 0 0 0 0
45863- 0 0 0 0 0 0 0 0 0 0 0 0
45864- 0 0 0 0 0 0 0 0 0 0 0 0
45865- 0 0 0 0 0 0 0 0 0 0 0 0
45866- 0 0 0 0 0 0 0 0 0 0 0 0
45867- 0 0 0 0 0 0 0 0 0 0 0 0
45868- 0 0 0 0 0 0 0 0 0 0 0 0
45869- 0 0 0 0 0 0 0 0 0 0 0 0
45870- 6 6 6 14 14 14 38 38 38 82 82 82
45871- 34 34 34 2 2 6 2 2 6 2 2 6
45872- 42 42 42 195 195 195 246 246 246 253 253 253
45873-253 253 253 253 253 253 253 253 253 250 250 250
45874-242 242 242 242 242 242 250 250 250 253 253 253
45875-253 253 253 253 253 253 253 253 253 253 253 253
45876-253 253 253 250 250 250 246 246 246 238 238 238
45877-226 226 226 231 231 231 101 101 101 6 6 6
45878- 2 2 6 2 2 6 2 2 6 2 2 6
45879- 2 2 6 2 2 6 2 2 6 2 2 6
45880- 38 38 38 82 82 82 42 42 42 14 14 14
45881- 6 6 6 0 0 0 0 0 0 0 0 0
45882- 0 0 0 0 0 0 0 0 0 0 0 0
45883- 0 0 0 0 0 0 0 0 0 0 0 0
45884- 0 0 0 0 0 0 0 0 0 0 0 0
45885- 0 0 0 0 0 0 0 0 0 0 0 0
45886- 0 0 0 0 0 0 0 0 0 0 0 0
45887- 0 0 0 0 0 0 0 0 0 0 0 0
45888- 0 0 0 0 0 0 0 0 0 0 0 0
45889- 0 0 0 0 0 0 0 0 0 0 0 0
45890- 10 10 10 26 26 26 62 62 62 66 66 66
45891- 2 2 6 2 2 6 2 2 6 6 6 6
45892- 70 70 70 170 170 170 206 206 206 234 234 234
45893-246 246 246 250 250 250 250 250 250 238 238 238
45894-226 226 226 231 231 231 238 238 238 250 250 250
45895-250 250 250 250 250 250 246 246 246 231 231 231
45896-214 214 214 206 206 206 202 202 202 202 202 202
45897-198 198 198 202 202 202 182 182 182 18 18 18
45898- 2 2 6 2 2 6 2 2 6 2 2 6
45899- 2 2 6 2 2 6 2 2 6 2 2 6
45900- 2 2 6 62 62 62 66 66 66 30 30 30
45901- 10 10 10 0 0 0 0 0 0 0 0 0
45902- 0 0 0 0 0 0 0 0 0 0 0 0
45903- 0 0 0 0 0 0 0 0 0 0 0 0
45904- 0 0 0 0 0 0 0 0 0 0 0 0
45905- 0 0 0 0 0 0 0 0 0 0 0 0
45906- 0 0 0 0 0 0 0 0 0 0 0 0
45907- 0 0 0 0 0 0 0 0 0 0 0 0
45908- 0 0 0 0 0 0 0 0 0 0 0 0
45909- 0 0 0 0 0 0 0 0 0 0 0 0
45910- 14 14 14 42 42 42 82 82 82 18 18 18
45911- 2 2 6 2 2 6 2 2 6 10 10 10
45912- 94 94 94 182 182 182 218 218 218 242 242 242
45913-250 250 250 253 253 253 253 253 253 250 250 250
45914-234 234 234 253 253 253 253 253 253 253 253 253
45915-253 253 253 253 253 253 253 253 253 246 246 246
45916-238 238 238 226 226 226 210 210 210 202 202 202
45917-195 195 195 195 195 195 210 210 210 158 158 158
45918- 6 6 6 14 14 14 50 50 50 14 14 14
45919- 2 2 6 2 2 6 2 2 6 2 2 6
45920- 2 2 6 6 6 6 86 86 86 46 46 46
45921- 18 18 18 6 6 6 0 0 0 0 0 0
45922- 0 0 0 0 0 0 0 0 0 0 0 0
45923- 0 0 0 0 0 0 0 0 0 0 0 0
45924- 0 0 0 0 0 0 0 0 0 0 0 0
45925- 0 0 0 0 0 0 0 0 0 0 0 0
45926- 0 0 0 0 0 0 0 0 0 0 0 0
45927- 0 0 0 0 0 0 0 0 0 0 0 0
45928- 0 0 0 0 0 0 0 0 0 0 0 0
45929- 0 0 0 0 0 0 0 0 0 6 6 6
45930- 22 22 22 54 54 54 70 70 70 2 2 6
45931- 2 2 6 10 10 10 2 2 6 22 22 22
45932-166 166 166 231 231 231 250 250 250 253 253 253
45933-253 253 253 253 253 253 253 253 253 250 250 250
45934-242 242 242 253 253 253 253 253 253 253 253 253
45935-253 253 253 253 253 253 253 253 253 253 253 253
45936-253 253 253 253 253 253 253 253 253 246 246 246
45937-231 231 231 206 206 206 198 198 198 226 226 226
45938- 94 94 94 2 2 6 6 6 6 38 38 38
45939- 30 30 30 2 2 6 2 2 6 2 2 6
45940- 2 2 6 2 2 6 62 62 62 66 66 66
45941- 26 26 26 10 10 10 0 0 0 0 0 0
45942- 0 0 0 0 0 0 0 0 0 0 0 0
45943- 0 0 0 0 0 0 0 0 0 0 0 0
45944- 0 0 0 0 0 0 0 0 0 0 0 0
45945- 0 0 0 0 0 0 0 0 0 0 0 0
45946- 0 0 0 0 0 0 0 0 0 0 0 0
45947- 0 0 0 0 0 0 0 0 0 0 0 0
45948- 0 0 0 0 0 0 0 0 0 0 0 0
45949- 0 0 0 0 0 0 0 0 0 10 10 10
45950- 30 30 30 74 74 74 50 50 50 2 2 6
45951- 26 26 26 26 26 26 2 2 6 106 106 106
45952-238 238 238 253 253 253 253 253 253 253 253 253
45953-253 253 253 253 253 253 253 253 253 253 253 253
45954-253 253 253 253 253 253 253 253 253 253 253 253
45955-253 253 253 253 253 253 253 253 253 253 253 253
45956-253 253 253 253 253 253 253 253 253 253 253 253
45957-253 253 253 246 246 246 218 218 218 202 202 202
45958-210 210 210 14 14 14 2 2 6 2 2 6
45959- 30 30 30 22 22 22 2 2 6 2 2 6
45960- 2 2 6 2 2 6 18 18 18 86 86 86
45961- 42 42 42 14 14 14 0 0 0 0 0 0
45962- 0 0 0 0 0 0 0 0 0 0 0 0
45963- 0 0 0 0 0 0 0 0 0 0 0 0
45964- 0 0 0 0 0 0 0 0 0 0 0 0
45965- 0 0 0 0 0 0 0 0 0 0 0 0
45966- 0 0 0 0 0 0 0 0 0 0 0 0
45967- 0 0 0 0 0 0 0 0 0 0 0 0
45968- 0 0 0 0 0 0 0 0 0 0 0 0
45969- 0 0 0 0 0 0 0 0 0 14 14 14
45970- 42 42 42 90 90 90 22 22 22 2 2 6
45971- 42 42 42 2 2 6 18 18 18 218 218 218
45972-253 253 253 253 253 253 253 253 253 253 253 253
45973-253 253 253 253 253 253 253 253 253 253 253 253
45974-253 253 253 253 253 253 253 253 253 253 253 253
45975-253 253 253 253 253 253 253 253 253 253 253 253
45976-253 253 253 253 253 253 253 253 253 253 253 253
45977-253 253 253 253 253 253 250 250 250 221 221 221
45978-218 218 218 101 101 101 2 2 6 14 14 14
45979- 18 18 18 38 38 38 10 10 10 2 2 6
45980- 2 2 6 2 2 6 2 2 6 78 78 78
45981- 58 58 58 22 22 22 6 6 6 0 0 0
45982- 0 0 0 0 0 0 0 0 0 0 0 0
45983- 0 0 0 0 0 0 0 0 0 0 0 0
45984- 0 0 0 0 0 0 0 0 0 0 0 0
45985- 0 0 0 0 0 0 0 0 0 0 0 0
45986- 0 0 0 0 0 0 0 0 0 0 0 0
45987- 0 0 0 0 0 0 0 0 0 0 0 0
45988- 0 0 0 0 0 0 0 0 0 0 0 0
45989- 0 0 0 0 0 0 6 6 6 18 18 18
45990- 54 54 54 82 82 82 2 2 6 26 26 26
45991- 22 22 22 2 2 6 123 123 123 253 253 253
45992-253 253 253 253 253 253 253 253 253 253 253 253
45993-253 253 253 253 253 253 253 253 253 253 253 253
45994-253 253 253 253 253 253 253 253 253 253 253 253
45995-253 253 253 253 253 253 253 253 253 253 253 253
45996-253 253 253 253 253 253 253 253 253 253 253 253
45997-253 253 253 253 253 253 253 253 253 250 250 250
45998-238 238 238 198 198 198 6 6 6 38 38 38
45999- 58 58 58 26 26 26 38 38 38 2 2 6
46000- 2 2 6 2 2 6 2 2 6 46 46 46
46001- 78 78 78 30 30 30 10 10 10 0 0 0
46002- 0 0 0 0 0 0 0 0 0 0 0 0
46003- 0 0 0 0 0 0 0 0 0 0 0 0
46004- 0 0 0 0 0 0 0 0 0 0 0 0
46005- 0 0 0 0 0 0 0 0 0 0 0 0
46006- 0 0 0 0 0 0 0 0 0 0 0 0
46007- 0 0 0 0 0 0 0 0 0 0 0 0
46008- 0 0 0 0 0 0 0 0 0 0 0 0
46009- 0 0 0 0 0 0 10 10 10 30 30 30
46010- 74 74 74 58 58 58 2 2 6 42 42 42
46011- 2 2 6 22 22 22 231 231 231 253 253 253
46012-253 253 253 253 253 253 253 253 253 253 253 253
46013-253 253 253 253 253 253 253 253 253 250 250 250
46014-253 253 253 253 253 253 253 253 253 253 253 253
46015-253 253 253 253 253 253 253 253 253 253 253 253
46016-253 253 253 253 253 253 253 253 253 253 253 253
46017-253 253 253 253 253 253 253 253 253 253 253 253
46018-253 253 253 246 246 246 46 46 46 38 38 38
46019- 42 42 42 14 14 14 38 38 38 14 14 14
46020- 2 2 6 2 2 6 2 2 6 6 6 6
46021- 86 86 86 46 46 46 14 14 14 0 0 0
46022- 0 0 0 0 0 0 0 0 0 0 0 0
46023- 0 0 0 0 0 0 0 0 0 0 0 0
46024- 0 0 0 0 0 0 0 0 0 0 0 0
46025- 0 0 0 0 0 0 0 0 0 0 0 0
46026- 0 0 0 0 0 0 0 0 0 0 0 0
46027- 0 0 0 0 0 0 0 0 0 0 0 0
46028- 0 0 0 0 0 0 0 0 0 0 0 0
46029- 0 0 0 6 6 6 14 14 14 42 42 42
46030- 90 90 90 18 18 18 18 18 18 26 26 26
46031- 2 2 6 116 116 116 253 253 253 253 253 253
46032-253 253 253 253 253 253 253 253 253 253 253 253
46033-253 253 253 253 253 253 250 250 250 238 238 238
46034-253 253 253 253 253 253 253 253 253 253 253 253
46035-253 253 253 253 253 253 253 253 253 253 253 253
46036-253 253 253 253 253 253 253 253 253 253 253 253
46037-253 253 253 253 253 253 253 253 253 253 253 253
46038-253 253 253 253 253 253 94 94 94 6 6 6
46039- 2 2 6 2 2 6 10 10 10 34 34 34
46040- 2 2 6 2 2 6 2 2 6 2 2 6
46041- 74 74 74 58 58 58 22 22 22 6 6 6
46042- 0 0 0 0 0 0 0 0 0 0 0 0
46043- 0 0 0 0 0 0 0 0 0 0 0 0
46044- 0 0 0 0 0 0 0 0 0 0 0 0
46045- 0 0 0 0 0 0 0 0 0 0 0 0
46046- 0 0 0 0 0 0 0 0 0 0 0 0
46047- 0 0 0 0 0 0 0 0 0 0 0 0
46048- 0 0 0 0 0 0 0 0 0 0 0 0
46049- 0 0 0 10 10 10 26 26 26 66 66 66
46050- 82 82 82 2 2 6 38 38 38 6 6 6
46051- 14 14 14 210 210 210 253 253 253 253 253 253
46052-253 253 253 253 253 253 253 253 253 253 253 253
46053-253 253 253 253 253 253 246 246 246 242 242 242
46054-253 253 253 253 253 253 253 253 253 253 253 253
46055-253 253 253 253 253 253 253 253 253 253 253 253
46056-253 253 253 253 253 253 253 253 253 253 253 253
46057-253 253 253 253 253 253 253 253 253 253 253 253
46058-253 253 253 253 253 253 144 144 144 2 2 6
46059- 2 2 6 2 2 6 2 2 6 46 46 46
46060- 2 2 6 2 2 6 2 2 6 2 2 6
46061- 42 42 42 74 74 74 30 30 30 10 10 10
46062- 0 0 0 0 0 0 0 0 0 0 0 0
46063- 0 0 0 0 0 0 0 0 0 0 0 0
46064- 0 0 0 0 0 0 0 0 0 0 0 0
46065- 0 0 0 0 0 0 0 0 0 0 0 0
46066- 0 0 0 0 0 0 0 0 0 0 0 0
46067- 0 0 0 0 0 0 0 0 0 0 0 0
46068- 0 0 0 0 0 0 0 0 0 0 0 0
46069- 6 6 6 14 14 14 42 42 42 90 90 90
46070- 26 26 26 6 6 6 42 42 42 2 2 6
46071- 74 74 74 250 250 250 253 253 253 253 253 253
46072-253 253 253 253 253 253 253 253 253 253 253 253
46073-253 253 253 253 253 253 242 242 242 242 242 242
46074-253 253 253 253 253 253 253 253 253 253 253 253
46075-253 253 253 253 253 253 253 253 253 253 253 253
46076-253 253 253 253 253 253 253 253 253 253 253 253
46077-253 253 253 253 253 253 253 253 253 253 253 253
46078-253 253 253 253 253 253 182 182 182 2 2 6
46079- 2 2 6 2 2 6 2 2 6 46 46 46
46080- 2 2 6 2 2 6 2 2 6 2 2 6
46081- 10 10 10 86 86 86 38 38 38 10 10 10
46082- 0 0 0 0 0 0 0 0 0 0 0 0
46083- 0 0 0 0 0 0 0 0 0 0 0 0
46084- 0 0 0 0 0 0 0 0 0 0 0 0
46085- 0 0 0 0 0 0 0 0 0 0 0 0
46086- 0 0 0 0 0 0 0 0 0 0 0 0
46087- 0 0 0 0 0 0 0 0 0 0 0 0
46088- 0 0 0 0 0 0 0 0 0 0 0 0
46089- 10 10 10 26 26 26 66 66 66 82 82 82
46090- 2 2 6 22 22 22 18 18 18 2 2 6
46091-149 149 149 253 253 253 253 253 253 253 253 253
46092-253 253 253 253 253 253 253 253 253 253 253 253
46093-253 253 253 253 253 253 234 234 234 242 242 242
46094-253 253 253 253 253 253 253 253 253 253 253 253
46095-253 253 253 253 253 253 253 253 253 253 253 253
46096-253 253 253 253 253 253 253 253 253 253 253 253
46097-253 253 253 253 253 253 253 253 253 253 253 253
46098-253 253 253 253 253 253 206 206 206 2 2 6
46099- 2 2 6 2 2 6 2 2 6 38 38 38
46100- 2 2 6 2 2 6 2 2 6 2 2 6
46101- 6 6 6 86 86 86 46 46 46 14 14 14
46102- 0 0 0 0 0 0 0 0 0 0 0 0
46103- 0 0 0 0 0 0 0 0 0 0 0 0
46104- 0 0 0 0 0 0 0 0 0 0 0 0
46105- 0 0 0 0 0 0 0 0 0 0 0 0
46106- 0 0 0 0 0 0 0 0 0 0 0 0
46107- 0 0 0 0 0 0 0 0 0 0 0 0
46108- 0 0 0 0 0 0 0 0 0 6 6 6
46109- 18 18 18 46 46 46 86 86 86 18 18 18
46110- 2 2 6 34 34 34 10 10 10 6 6 6
46111-210 210 210 253 253 253 253 253 253 253 253 253
46112-253 253 253 253 253 253 253 253 253 253 253 253
46113-253 253 253 253 253 253 234 234 234 242 242 242
46114-253 253 253 253 253 253 253 253 253 253 253 253
46115-253 253 253 253 253 253 253 253 253 253 253 253
46116-253 253 253 253 253 253 253 253 253 253 253 253
46117-253 253 253 253 253 253 253 253 253 253 253 253
46118-253 253 253 253 253 253 221 221 221 6 6 6
46119- 2 2 6 2 2 6 6 6 6 30 30 30
46120- 2 2 6 2 2 6 2 2 6 2 2 6
46121- 2 2 6 82 82 82 54 54 54 18 18 18
46122- 6 6 6 0 0 0 0 0 0 0 0 0
46123- 0 0 0 0 0 0 0 0 0 0 0 0
46124- 0 0 0 0 0 0 0 0 0 0 0 0
46125- 0 0 0 0 0 0 0 0 0 0 0 0
46126- 0 0 0 0 0 0 0 0 0 0 0 0
46127- 0 0 0 0 0 0 0 0 0 0 0 0
46128- 0 0 0 0 0 0 0 0 0 10 10 10
46129- 26 26 26 66 66 66 62 62 62 2 2 6
46130- 2 2 6 38 38 38 10 10 10 26 26 26
46131-238 238 238 253 253 253 253 253 253 253 253 253
46132-253 253 253 253 253 253 253 253 253 253 253 253
46133-253 253 253 253 253 253 231 231 231 238 238 238
46134-253 253 253 253 253 253 253 253 253 253 253 253
46135-253 253 253 253 253 253 253 253 253 253 253 253
46136-253 253 253 253 253 253 253 253 253 253 253 253
46137-253 253 253 253 253 253 253 253 253 253 253 253
46138-253 253 253 253 253 253 231 231 231 6 6 6
46139- 2 2 6 2 2 6 10 10 10 30 30 30
46140- 2 2 6 2 2 6 2 2 6 2 2 6
46141- 2 2 6 66 66 66 58 58 58 22 22 22
46142- 6 6 6 0 0 0 0 0 0 0 0 0
46143- 0 0 0 0 0 0 0 0 0 0 0 0
46144- 0 0 0 0 0 0 0 0 0 0 0 0
46145- 0 0 0 0 0 0 0 0 0 0 0 0
46146- 0 0 0 0 0 0 0 0 0 0 0 0
46147- 0 0 0 0 0 0 0 0 0 0 0 0
46148- 0 0 0 0 0 0 0 0 0 10 10 10
46149- 38 38 38 78 78 78 6 6 6 2 2 6
46150- 2 2 6 46 46 46 14 14 14 42 42 42
46151-246 246 246 253 253 253 253 253 253 253 253 253
46152-253 253 253 253 253 253 253 253 253 253 253 253
46153-253 253 253 253 253 253 231 231 231 242 242 242
46154-253 253 253 253 253 253 253 253 253 253 253 253
46155-253 253 253 253 253 253 253 253 253 253 253 253
46156-253 253 253 253 253 253 253 253 253 253 253 253
46157-253 253 253 253 253 253 253 253 253 253 253 253
46158-253 253 253 253 253 253 234 234 234 10 10 10
46159- 2 2 6 2 2 6 22 22 22 14 14 14
46160- 2 2 6 2 2 6 2 2 6 2 2 6
46161- 2 2 6 66 66 66 62 62 62 22 22 22
46162- 6 6 6 0 0 0 0 0 0 0 0 0
46163- 0 0 0 0 0 0 0 0 0 0 0 0
46164- 0 0 0 0 0 0 0 0 0 0 0 0
46165- 0 0 0 0 0 0 0 0 0 0 0 0
46166- 0 0 0 0 0 0 0 0 0 0 0 0
46167- 0 0 0 0 0 0 0 0 0 0 0 0
46168- 0 0 0 0 0 0 6 6 6 18 18 18
46169- 50 50 50 74 74 74 2 2 6 2 2 6
46170- 14 14 14 70 70 70 34 34 34 62 62 62
46171-250 250 250 253 253 253 253 253 253 253 253 253
46172-253 253 253 253 253 253 253 253 253 253 253 253
46173-253 253 253 253 253 253 231 231 231 246 246 246
46174-253 253 253 253 253 253 253 253 253 253 253 253
46175-253 253 253 253 253 253 253 253 253 253 253 253
46176-253 253 253 253 253 253 253 253 253 253 253 253
46177-253 253 253 253 253 253 253 253 253 253 253 253
46178-253 253 253 253 253 253 234 234 234 14 14 14
46179- 2 2 6 2 2 6 30 30 30 2 2 6
46180- 2 2 6 2 2 6 2 2 6 2 2 6
46181- 2 2 6 66 66 66 62 62 62 22 22 22
46182- 6 6 6 0 0 0 0 0 0 0 0 0
46183- 0 0 0 0 0 0 0 0 0 0 0 0
46184- 0 0 0 0 0 0 0 0 0 0 0 0
46185- 0 0 0 0 0 0 0 0 0 0 0 0
46186- 0 0 0 0 0 0 0 0 0 0 0 0
46187- 0 0 0 0 0 0 0 0 0 0 0 0
46188- 0 0 0 0 0 0 6 6 6 18 18 18
46189- 54 54 54 62 62 62 2 2 6 2 2 6
46190- 2 2 6 30 30 30 46 46 46 70 70 70
46191-250 250 250 253 253 253 253 253 253 253 253 253
46192-253 253 253 253 253 253 253 253 253 253 253 253
46193-253 253 253 253 253 253 231 231 231 246 246 246
46194-253 253 253 253 253 253 253 253 253 253 253 253
46195-253 253 253 253 253 253 253 253 253 253 253 253
46196-253 253 253 253 253 253 253 253 253 253 253 253
46197-253 253 253 253 253 253 253 253 253 253 253 253
46198-253 253 253 253 253 253 226 226 226 10 10 10
46199- 2 2 6 6 6 6 30 30 30 2 2 6
46200- 2 2 6 2 2 6 2 2 6 2 2 6
46201- 2 2 6 66 66 66 58 58 58 22 22 22
46202- 6 6 6 0 0 0 0 0 0 0 0 0
46203- 0 0 0 0 0 0 0 0 0 0 0 0
46204- 0 0 0 0 0 0 0 0 0 0 0 0
46205- 0 0 0 0 0 0 0 0 0 0 0 0
46206- 0 0 0 0 0 0 0 0 0 0 0 0
46207- 0 0 0 0 0 0 0 0 0 0 0 0
46208- 0 0 0 0 0 0 6 6 6 22 22 22
46209- 58 58 58 62 62 62 2 2 6 2 2 6
46210- 2 2 6 2 2 6 30 30 30 78 78 78
46211-250 250 250 253 253 253 253 253 253 253 253 253
46212-253 253 253 253 253 253 253 253 253 253 253 253
46213-253 253 253 253 253 253 231 231 231 246 246 246
46214-253 253 253 253 253 253 253 253 253 253 253 253
46215-253 253 253 253 253 253 253 253 253 253 253 253
46216-253 253 253 253 253 253 253 253 253 253 253 253
46217-253 253 253 253 253 253 253 253 253 253 253 253
46218-253 253 253 253 253 253 206 206 206 2 2 6
46219- 22 22 22 34 34 34 18 14 6 22 22 22
46220- 26 26 26 18 18 18 6 6 6 2 2 6
46221- 2 2 6 82 82 82 54 54 54 18 18 18
46222- 6 6 6 0 0 0 0 0 0 0 0 0
46223- 0 0 0 0 0 0 0 0 0 0 0 0
46224- 0 0 0 0 0 0 0 0 0 0 0 0
46225- 0 0 0 0 0 0 0 0 0 0 0 0
46226- 0 0 0 0 0 0 0 0 0 0 0 0
46227- 0 0 0 0 0 0 0 0 0 0 0 0
46228- 0 0 0 0 0 0 6 6 6 26 26 26
46229- 62 62 62 106 106 106 74 54 14 185 133 11
46230-210 162 10 121 92 8 6 6 6 62 62 62
46231-238 238 238 253 253 253 253 253 253 253 253 253
46232-253 253 253 253 253 253 253 253 253 253 253 253
46233-253 253 253 253 253 253 231 231 231 246 246 246
46234-253 253 253 253 253 253 253 253 253 253 253 253
46235-253 253 253 253 253 253 253 253 253 253 253 253
46236-253 253 253 253 253 253 253 253 253 253 253 253
46237-253 253 253 253 253 253 253 253 253 253 253 253
46238-253 253 253 253 253 253 158 158 158 18 18 18
46239- 14 14 14 2 2 6 2 2 6 2 2 6
46240- 6 6 6 18 18 18 66 66 66 38 38 38
46241- 6 6 6 94 94 94 50 50 50 18 18 18
46242- 6 6 6 0 0 0 0 0 0 0 0 0
46243- 0 0 0 0 0 0 0 0 0 0 0 0
46244- 0 0 0 0 0 0 0 0 0 0 0 0
46245- 0 0 0 0 0 0 0 0 0 0 0 0
46246- 0 0 0 0 0 0 0 0 0 0 0 0
46247- 0 0 0 0 0 0 0 0 0 6 6 6
46248- 10 10 10 10 10 10 18 18 18 38 38 38
46249- 78 78 78 142 134 106 216 158 10 242 186 14
46250-246 190 14 246 190 14 156 118 10 10 10 10
46251- 90 90 90 238 238 238 253 253 253 253 253 253
46252-253 253 253 253 253 253 253 253 253 253 253 253
46253-253 253 253 253 253 253 231 231 231 250 250 250
46254-253 253 253 253 253 253 253 253 253 253 253 253
46255-253 253 253 253 253 253 253 253 253 253 253 253
46256-253 253 253 253 253 253 253 253 253 253 253 253
46257-253 253 253 253 253 253 253 253 253 246 230 190
46258-238 204 91 238 204 91 181 142 44 37 26 9
46259- 2 2 6 2 2 6 2 2 6 2 2 6
46260- 2 2 6 2 2 6 38 38 38 46 46 46
46261- 26 26 26 106 106 106 54 54 54 18 18 18
46262- 6 6 6 0 0 0 0 0 0 0 0 0
46263- 0 0 0 0 0 0 0 0 0 0 0 0
46264- 0 0 0 0 0 0 0 0 0 0 0 0
46265- 0 0 0 0 0 0 0 0 0 0 0 0
46266- 0 0 0 0 0 0 0 0 0 0 0 0
46267- 0 0 0 6 6 6 14 14 14 22 22 22
46268- 30 30 30 38 38 38 50 50 50 70 70 70
46269-106 106 106 190 142 34 226 170 11 242 186 14
46270-246 190 14 246 190 14 246 190 14 154 114 10
46271- 6 6 6 74 74 74 226 226 226 253 253 253
46272-253 253 253 253 253 253 253 253 253 253 253 253
46273-253 253 253 253 253 253 231 231 231 250 250 250
46274-253 253 253 253 253 253 253 253 253 253 253 253
46275-253 253 253 253 253 253 253 253 253 253 253 253
46276-253 253 253 253 253 253 253 253 253 253 253 253
46277-253 253 253 253 253 253 253 253 253 228 184 62
46278-241 196 14 241 208 19 232 195 16 38 30 10
46279- 2 2 6 2 2 6 2 2 6 2 2 6
46280- 2 2 6 6 6 6 30 30 30 26 26 26
46281-203 166 17 154 142 90 66 66 66 26 26 26
46282- 6 6 6 0 0 0 0 0 0 0 0 0
46283- 0 0 0 0 0 0 0 0 0 0 0 0
46284- 0 0 0 0 0 0 0 0 0 0 0 0
46285- 0 0 0 0 0 0 0 0 0 0 0 0
46286- 0 0 0 0 0 0 0 0 0 0 0 0
46287- 6 6 6 18 18 18 38 38 38 58 58 58
46288- 78 78 78 86 86 86 101 101 101 123 123 123
46289-175 146 61 210 150 10 234 174 13 246 186 14
46290-246 190 14 246 190 14 246 190 14 238 190 10
46291-102 78 10 2 2 6 46 46 46 198 198 198
46292-253 253 253 253 253 253 253 253 253 253 253 253
46293-253 253 253 253 253 253 234 234 234 242 242 242
46294-253 253 253 253 253 253 253 253 253 253 253 253
46295-253 253 253 253 253 253 253 253 253 253 253 253
46296-253 253 253 253 253 253 253 253 253 253 253 253
46297-253 253 253 253 253 253 253 253 253 224 178 62
46298-242 186 14 241 196 14 210 166 10 22 18 6
46299- 2 2 6 2 2 6 2 2 6 2 2 6
46300- 2 2 6 2 2 6 6 6 6 121 92 8
46301-238 202 15 232 195 16 82 82 82 34 34 34
46302- 10 10 10 0 0 0 0 0 0 0 0 0
46303- 0 0 0 0 0 0 0 0 0 0 0 0
46304- 0 0 0 0 0 0 0 0 0 0 0 0
46305- 0 0 0 0 0 0 0 0 0 0 0 0
46306- 0 0 0 0 0 0 0 0 0 0 0 0
46307- 14 14 14 38 38 38 70 70 70 154 122 46
46308-190 142 34 200 144 11 197 138 11 197 138 11
46309-213 154 11 226 170 11 242 186 14 246 190 14
46310-246 190 14 246 190 14 246 190 14 246 190 14
46311-225 175 15 46 32 6 2 2 6 22 22 22
46312-158 158 158 250 250 250 253 253 253 253 253 253
46313-253 253 253 253 253 253 253 253 253 253 253 253
46314-253 253 253 253 253 253 253 253 253 253 253 253
46315-253 253 253 253 253 253 253 253 253 253 253 253
46316-253 253 253 253 253 253 253 253 253 253 253 253
46317-253 253 253 250 250 250 242 242 242 224 178 62
46318-239 182 13 236 186 11 213 154 11 46 32 6
46319- 2 2 6 2 2 6 2 2 6 2 2 6
46320- 2 2 6 2 2 6 61 42 6 225 175 15
46321-238 190 10 236 186 11 112 100 78 42 42 42
46322- 14 14 14 0 0 0 0 0 0 0 0 0
46323- 0 0 0 0 0 0 0 0 0 0 0 0
46324- 0 0 0 0 0 0 0 0 0 0 0 0
46325- 0 0 0 0 0 0 0 0 0 0 0 0
46326- 0 0 0 0 0 0 0 0 0 6 6 6
46327- 22 22 22 54 54 54 154 122 46 213 154 11
46328-226 170 11 230 174 11 226 170 11 226 170 11
46329-236 178 12 242 186 14 246 190 14 246 190 14
46330-246 190 14 246 190 14 246 190 14 246 190 14
46331-241 196 14 184 144 12 10 10 10 2 2 6
46332- 6 6 6 116 116 116 242 242 242 253 253 253
46333-253 253 253 253 253 253 253 253 253 253 253 253
46334-253 253 253 253 253 253 253 253 253 253 253 253
46335-253 253 253 253 253 253 253 253 253 253 253 253
46336-253 253 253 253 253 253 253 253 253 253 253 253
46337-253 253 253 231 231 231 198 198 198 214 170 54
46338-236 178 12 236 178 12 210 150 10 137 92 6
46339- 18 14 6 2 2 6 2 2 6 2 2 6
46340- 6 6 6 70 47 6 200 144 11 236 178 12
46341-239 182 13 239 182 13 124 112 88 58 58 58
46342- 22 22 22 6 6 6 0 0 0 0 0 0
46343- 0 0 0 0 0 0 0 0 0 0 0 0
46344- 0 0 0 0 0 0 0 0 0 0 0 0
46345- 0 0 0 0 0 0 0 0 0 0 0 0
46346- 0 0 0 0 0 0 0 0 0 10 10 10
46347- 30 30 30 70 70 70 180 133 36 226 170 11
46348-239 182 13 242 186 14 242 186 14 246 186 14
46349-246 190 14 246 190 14 246 190 14 246 190 14
46350-246 190 14 246 190 14 246 190 14 246 190 14
46351-246 190 14 232 195 16 98 70 6 2 2 6
46352- 2 2 6 2 2 6 66 66 66 221 221 221
46353-253 253 253 253 253 253 253 253 253 253 253 253
46354-253 253 253 253 253 253 253 253 253 253 253 253
46355-253 253 253 253 253 253 253 253 253 253 253 253
46356-253 253 253 253 253 253 253 253 253 253 253 253
46357-253 253 253 206 206 206 198 198 198 214 166 58
46358-230 174 11 230 174 11 216 158 10 192 133 9
46359-163 110 8 116 81 8 102 78 10 116 81 8
46360-167 114 7 197 138 11 226 170 11 239 182 13
46361-242 186 14 242 186 14 162 146 94 78 78 78
46362- 34 34 34 14 14 14 6 6 6 0 0 0
46363- 0 0 0 0 0 0 0 0 0 0 0 0
46364- 0 0 0 0 0 0 0 0 0 0 0 0
46365- 0 0 0 0 0 0 0 0 0 0 0 0
46366- 0 0 0 0 0 0 0 0 0 6 6 6
46367- 30 30 30 78 78 78 190 142 34 226 170 11
46368-239 182 13 246 190 14 246 190 14 246 190 14
46369-246 190 14 246 190 14 246 190 14 246 190 14
46370-246 190 14 246 190 14 246 190 14 246 190 14
46371-246 190 14 241 196 14 203 166 17 22 18 6
46372- 2 2 6 2 2 6 2 2 6 38 38 38
46373-218 218 218 253 253 253 253 253 253 253 253 253
46374-253 253 253 253 253 253 253 253 253 253 253 253
46375-253 253 253 253 253 253 253 253 253 253 253 253
46376-253 253 253 253 253 253 253 253 253 253 253 253
46377-250 250 250 206 206 206 198 198 198 202 162 69
46378-226 170 11 236 178 12 224 166 10 210 150 10
46379-200 144 11 197 138 11 192 133 9 197 138 11
46380-210 150 10 226 170 11 242 186 14 246 190 14
46381-246 190 14 246 186 14 225 175 15 124 112 88
46382- 62 62 62 30 30 30 14 14 14 6 6 6
46383- 0 0 0 0 0 0 0 0 0 0 0 0
46384- 0 0 0 0 0 0 0 0 0 0 0 0
46385- 0 0 0 0 0 0 0 0 0 0 0 0
46386- 0 0 0 0 0 0 0 0 0 10 10 10
46387- 30 30 30 78 78 78 174 135 50 224 166 10
46388-239 182 13 246 190 14 246 190 14 246 190 14
46389-246 190 14 246 190 14 246 190 14 246 190 14
46390-246 190 14 246 190 14 246 190 14 246 190 14
46391-246 190 14 246 190 14 241 196 14 139 102 15
46392- 2 2 6 2 2 6 2 2 6 2 2 6
46393- 78 78 78 250 250 250 253 253 253 253 253 253
46394-253 253 253 253 253 253 253 253 253 253 253 253
46395-253 253 253 253 253 253 253 253 253 253 253 253
46396-253 253 253 253 253 253 253 253 253 253 253 253
46397-250 250 250 214 214 214 198 198 198 190 150 46
46398-219 162 10 236 178 12 234 174 13 224 166 10
46399-216 158 10 213 154 11 213 154 11 216 158 10
46400-226 170 11 239 182 13 246 190 14 246 190 14
46401-246 190 14 246 190 14 242 186 14 206 162 42
46402-101 101 101 58 58 58 30 30 30 14 14 14
46403- 6 6 6 0 0 0 0 0 0 0 0 0
46404- 0 0 0 0 0 0 0 0 0 0 0 0
46405- 0 0 0 0 0 0 0 0 0 0 0 0
46406- 0 0 0 0 0 0 0 0 0 10 10 10
46407- 30 30 30 74 74 74 174 135 50 216 158 10
46408-236 178 12 246 190 14 246 190 14 246 190 14
46409-246 190 14 246 190 14 246 190 14 246 190 14
46410-246 190 14 246 190 14 246 190 14 246 190 14
46411-246 190 14 246 190 14 241 196 14 226 184 13
46412- 61 42 6 2 2 6 2 2 6 2 2 6
46413- 22 22 22 238 238 238 253 253 253 253 253 253
46414-253 253 253 253 253 253 253 253 253 253 253 253
46415-253 253 253 253 253 253 253 253 253 253 253 253
46416-253 253 253 253 253 253 253 253 253 253 253 253
46417-253 253 253 226 226 226 187 187 187 180 133 36
46418-216 158 10 236 178 12 239 182 13 236 178 12
46419-230 174 11 226 170 11 226 170 11 230 174 11
46420-236 178 12 242 186 14 246 190 14 246 190 14
46421-246 190 14 246 190 14 246 186 14 239 182 13
46422-206 162 42 106 106 106 66 66 66 34 34 34
46423- 14 14 14 6 6 6 0 0 0 0 0 0
46424- 0 0 0 0 0 0 0 0 0 0 0 0
46425- 0 0 0 0 0 0 0 0 0 0 0 0
46426- 0 0 0 0 0 0 0 0 0 6 6 6
46427- 26 26 26 70 70 70 163 133 67 213 154 11
46428-236 178 12 246 190 14 246 190 14 246 190 14
46429-246 190 14 246 190 14 246 190 14 246 190 14
46430-246 190 14 246 190 14 246 190 14 246 190 14
46431-246 190 14 246 190 14 246 190 14 241 196 14
46432-190 146 13 18 14 6 2 2 6 2 2 6
46433- 46 46 46 246 246 246 253 253 253 253 253 253
46434-253 253 253 253 253 253 253 253 253 253 253 253
46435-253 253 253 253 253 253 253 253 253 253 253 253
46436-253 253 253 253 253 253 253 253 253 253 253 253
46437-253 253 253 221 221 221 86 86 86 156 107 11
46438-216 158 10 236 178 12 242 186 14 246 186 14
46439-242 186 14 239 182 13 239 182 13 242 186 14
46440-242 186 14 246 186 14 246 190 14 246 190 14
46441-246 190 14 246 190 14 246 190 14 246 190 14
46442-242 186 14 225 175 15 142 122 72 66 66 66
46443- 30 30 30 10 10 10 0 0 0 0 0 0
46444- 0 0 0 0 0 0 0 0 0 0 0 0
46445- 0 0 0 0 0 0 0 0 0 0 0 0
46446- 0 0 0 0 0 0 0 0 0 6 6 6
46447- 26 26 26 70 70 70 163 133 67 210 150 10
46448-236 178 12 246 190 14 246 190 14 246 190 14
46449-246 190 14 246 190 14 246 190 14 246 190 14
46450-246 190 14 246 190 14 246 190 14 246 190 14
46451-246 190 14 246 190 14 246 190 14 246 190 14
46452-232 195 16 121 92 8 34 34 34 106 106 106
46453-221 221 221 253 253 253 253 253 253 253 253 253
46454-253 253 253 253 253 253 253 253 253 253 253 253
46455-253 253 253 253 253 253 253 253 253 253 253 253
46456-253 253 253 253 253 253 253 253 253 253 253 253
46457-242 242 242 82 82 82 18 14 6 163 110 8
46458-216 158 10 236 178 12 242 186 14 246 190 14
46459-246 190 14 246 190 14 246 190 14 246 190 14
46460-246 190 14 246 190 14 246 190 14 246 190 14
46461-246 190 14 246 190 14 246 190 14 246 190 14
46462-246 190 14 246 190 14 242 186 14 163 133 67
46463- 46 46 46 18 18 18 6 6 6 0 0 0
46464- 0 0 0 0 0 0 0 0 0 0 0 0
46465- 0 0 0 0 0 0 0 0 0 0 0 0
46466- 0 0 0 0 0 0 0 0 0 10 10 10
46467- 30 30 30 78 78 78 163 133 67 210 150 10
46468-236 178 12 246 186 14 246 190 14 246 190 14
46469-246 190 14 246 190 14 246 190 14 246 190 14
46470-246 190 14 246 190 14 246 190 14 246 190 14
46471-246 190 14 246 190 14 246 190 14 246 190 14
46472-241 196 14 215 174 15 190 178 144 253 253 253
46473-253 253 253 253 253 253 253 253 253 253 253 253
46474-253 253 253 253 253 253 253 253 253 253 253 253
46475-253 253 253 253 253 253 253 253 253 253 253 253
46476-253 253 253 253 253 253 253 253 253 218 218 218
46477- 58 58 58 2 2 6 22 18 6 167 114 7
46478-216 158 10 236 178 12 246 186 14 246 190 14
46479-246 190 14 246 190 14 246 190 14 246 190 14
46480-246 190 14 246 190 14 246 190 14 246 190 14
46481-246 190 14 246 190 14 246 190 14 246 190 14
46482-246 190 14 246 186 14 242 186 14 190 150 46
46483- 54 54 54 22 22 22 6 6 6 0 0 0
46484- 0 0 0 0 0 0 0 0 0 0 0 0
46485- 0 0 0 0 0 0 0 0 0 0 0 0
46486- 0 0 0 0 0 0 0 0 0 14 14 14
46487- 38 38 38 86 86 86 180 133 36 213 154 11
46488-236 178 12 246 186 14 246 190 14 246 190 14
46489-246 190 14 246 190 14 246 190 14 246 190 14
46490-246 190 14 246 190 14 246 190 14 246 190 14
46491-246 190 14 246 190 14 246 190 14 246 190 14
46492-246 190 14 232 195 16 190 146 13 214 214 214
46493-253 253 253 253 253 253 253 253 253 253 253 253
46494-253 253 253 253 253 253 253 253 253 253 253 253
46495-253 253 253 253 253 253 253 253 253 253 253 253
46496-253 253 253 250 250 250 170 170 170 26 26 26
46497- 2 2 6 2 2 6 37 26 9 163 110 8
46498-219 162 10 239 182 13 246 186 14 246 190 14
46499-246 190 14 246 190 14 246 190 14 246 190 14
46500-246 190 14 246 190 14 246 190 14 246 190 14
46501-246 190 14 246 190 14 246 190 14 246 190 14
46502-246 186 14 236 178 12 224 166 10 142 122 72
46503- 46 46 46 18 18 18 6 6 6 0 0 0
46504- 0 0 0 0 0 0 0 0 0 0 0 0
46505- 0 0 0 0 0 0 0 0 0 0 0 0
46506- 0 0 0 0 0 0 6 6 6 18 18 18
46507- 50 50 50 109 106 95 192 133 9 224 166 10
46508-242 186 14 246 190 14 246 190 14 246 190 14
46509-246 190 14 246 190 14 246 190 14 246 190 14
46510-246 190 14 246 190 14 246 190 14 246 190 14
46511-246 190 14 246 190 14 246 190 14 246 190 14
46512-242 186 14 226 184 13 210 162 10 142 110 46
46513-226 226 226 253 253 253 253 253 253 253 253 253
46514-253 253 253 253 253 253 253 253 253 253 253 253
46515-253 253 253 253 253 253 253 253 253 253 253 253
46516-198 198 198 66 66 66 2 2 6 2 2 6
46517- 2 2 6 2 2 6 50 34 6 156 107 11
46518-219 162 10 239 182 13 246 186 14 246 190 14
46519-246 190 14 246 190 14 246 190 14 246 190 14
46520-246 190 14 246 190 14 246 190 14 246 190 14
46521-246 190 14 246 190 14 246 190 14 242 186 14
46522-234 174 13 213 154 11 154 122 46 66 66 66
46523- 30 30 30 10 10 10 0 0 0 0 0 0
46524- 0 0 0 0 0 0 0 0 0 0 0 0
46525- 0 0 0 0 0 0 0 0 0 0 0 0
46526- 0 0 0 0 0 0 6 6 6 22 22 22
46527- 58 58 58 154 121 60 206 145 10 234 174 13
46528-242 186 14 246 186 14 246 190 14 246 190 14
46529-246 190 14 246 190 14 246 190 14 246 190 14
46530-246 190 14 246 190 14 246 190 14 246 190 14
46531-246 190 14 246 190 14 246 190 14 246 190 14
46532-246 186 14 236 178 12 210 162 10 163 110 8
46533- 61 42 6 138 138 138 218 218 218 250 250 250
46534-253 253 253 253 253 253 253 253 253 250 250 250
46535-242 242 242 210 210 210 144 144 144 66 66 66
46536- 6 6 6 2 2 6 2 2 6 2 2 6
46537- 2 2 6 2 2 6 61 42 6 163 110 8
46538-216 158 10 236 178 12 246 190 14 246 190 14
46539-246 190 14 246 190 14 246 190 14 246 190 14
46540-246 190 14 246 190 14 246 190 14 246 190 14
46541-246 190 14 239 182 13 230 174 11 216 158 10
46542-190 142 34 124 112 88 70 70 70 38 38 38
46543- 18 18 18 6 6 6 0 0 0 0 0 0
46544- 0 0 0 0 0 0 0 0 0 0 0 0
46545- 0 0 0 0 0 0 0 0 0 0 0 0
46546- 0 0 0 0 0 0 6 6 6 22 22 22
46547- 62 62 62 168 124 44 206 145 10 224 166 10
46548-236 178 12 239 182 13 242 186 14 242 186 14
46549-246 186 14 246 190 14 246 190 14 246 190 14
46550-246 190 14 246 190 14 246 190 14 246 190 14
46551-246 190 14 246 190 14 246 190 14 246 190 14
46552-246 190 14 236 178 12 216 158 10 175 118 6
46553- 80 54 7 2 2 6 6 6 6 30 30 30
46554- 54 54 54 62 62 62 50 50 50 38 38 38
46555- 14 14 14 2 2 6 2 2 6 2 2 6
46556- 2 2 6 2 2 6 2 2 6 2 2 6
46557- 2 2 6 6 6 6 80 54 7 167 114 7
46558-213 154 11 236 178 12 246 190 14 246 190 14
46559-246 190 14 246 190 14 246 190 14 246 190 14
46560-246 190 14 242 186 14 239 182 13 239 182 13
46561-230 174 11 210 150 10 174 135 50 124 112 88
46562- 82 82 82 54 54 54 34 34 34 18 18 18
46563- 6 6 6 0 0 0 0 0 0 0 0 0
46564- 0 0 0 0 0 0 0 0 0 0 0 0
46565- 0 0 0 0 0 0 0 0 0 0 0 0
46566- 0 0 0 0 0 0 6 6 6 18 18 18
46567- 50 50 50 158 118 36 192 133 9 200 144 11
46568-216 158 10 219 162 10 224 166 10 226 170 11
46569-230 174 11 236 178 12 239 182 13 239 182 13
46570-242 186 14 246 186 14 246 190 14 246 190 14
46571-246 190 14 246 190 14 246 190 14 246 190 14
46572-246 186 14 230 174 11 210 150 10 163 110 8
46573-104 69 6 10 10 10 2 2 6 2 2 6
46574- 2 2 6 2 2 6 2 2 6 2 2 6
46575- 2 2 6 2 2 6 2 2 6 2 2 6
46576- 2 2 6 2 2 6 2 2 6 2 2 6
46577- 2 2 6 6 6 6 91 60 6 167 114 7
46578-206 145 10 230 174 11 242 186 14 246 190 14
46579-246 190 14 246 190 14 246 186 14 242 186 14
46580-239 182 13 230 174 11 224 166 10 213 154 11
46581-180 133 36 124 112 88 86 86 86 58 58 58
46582- 38 38 38 22 22 22 10 10 10 6 6 6
46583- 0 0 0 0 0 0 0 0 0 0 0 0
46584- 0 0 0 0 0 0 0 0 0 0 0 0
46585- 0 0 0 0 0 0 0 0 0 0 0 0
46586- 0 0 0 0 0 0 0 0 0 14 14 14
46587- 34 34 34 70 70 70 138 110 50 158 118 36
46588-167 114 7 180 123 7 192 133 9 197 138 11
46589-200 144 11 206 145 10 213 154 11 219 162 10
46590-224 166 10 230 174 11 239 182 13 242 186 14
46591-246 186 14 246 186 14 246 186 14 246 186 14
46592-239 182 13 216 158 10 185 133 11 152 99 6
46593-104 69 6 18 14 6 2 2 6 2 2 6
46594- 2 2 6 2 2 6 2 2 6 2 2 6
46595- 2 2 6 2 2 6 2 2 6 2 2 6
46596- 2 2 6 2 2 6 2 2 6 2 2 6
46597- 2 2 6 6 6 6 80 54 7 152 99 6
46598-192 133 9 219 162 10 236 178 12 239 182 13
46599-246 186 14 242 186 14 239 182 13 236 178 12
46600-224 166 10 206 145 10 192 133 9 154 121 60
46601- 94 94 94 62 62 62 42 42 42 22 22 22
46602- 14 14 14 6 6 6 0 0 0 0 0 0
46603- 0 0 0 0 0 0 0 0 0 0 0 0
46604- 0 0 0 0 0 0 0 0 0 0 0 0
46605- 0 0 0 0 0 0 0 0 0 0 0 0
46606- 0 0 0 0 0 0 0 0 0 6 6 6
46607- 18 18 18 34 34 34 58 58 58 78 78 78
46608-101 98 89 124 112 88 142 110 46 156 107 11
46609-163 110 8 167 114 7 175 118 6 180 123 7
46610-185 133 11 197 138 11 210 150 10 219 162 10
46611-226 170 11 236 178 12 236 178 12 234 174 13
46612-219 162 10 197 138 11 163 110 8 130 83 6
46613- 91 60 6 10 10 10 2 2 6 2 2 6
46614- 18 18 18 38 38 38 38 38 38 38 38 38
46615- 38 38 38 38 38 38 38 38 38 38 38 38
46616- 38 38 38 38 38 38 26 26 26 2 2 6
46617- 2 2 6 6 6 6 70 47 6 137 92 6
46618-175 118 6 200 144 11 219 162 10 230 174 11
46619-234 174 13 230 174 11 219 162 10 210 150 10
46620-192 133 9 163 110 8 124 112 88 82 82 82
46621- 50 50 50 30 30 30 14 14 14 6 6 6
46622- 0 0 0 0 0 0 0 0 0 0 0 0
46623- 0 0 0 0 0 0 0 0 0 0 0 0
46624- 0 0 0 0 0 0 0 0 0 0 0 0
46625- 0 0 0 0 0 0 0 0 0 0 0 0
46626- 0 0 0 0 0 0 0 0 0 0 0 0
46627- 6 6 6 14 14 14 22 22 22 34 34 34
46628- 42 42 42 58 58 58 74 74 74 86 86 86
46629-101 98 89 122 102 70 130 98 46 121 87 25
46630-137 92 6 152 99 6 163 110 8 180 123 7
46631-185 133 11 197 138 11 206 145 10 200 144 11
46632-180 123 7 156 107 11 130 83 6 104 69 6
46633- 50 34 6 54 54 54 110 110 110 101 98 89
46634- 86 86 86 82 82 82 78 78 78 78 78 78
46635- 78 78 78 78 78 78 78 78 78 78 78 78
46636- 78 78 78 82 82 82 86 86 86 94 94 94
46637-106 106 106 101 101 101 86 66 34 124 80 6
46638-156 107 11 180 123 7 192 133 9 200 144 11
46639-206 145 10 200 144 11 192 133 9 175 118 6
46640-139 102 15 109 106 95 70 70 70 42 42 42
46641- 22 22 22 10 10 10 0 0 0 0 0 0
46642- 0 0 0 0 0 0 0 0 0 0 0 0
46643- 0 0 0 0 0 0 0 0 0 0 0 0
46644- 0 0 0 0 0 0 0 0 0 0 0 0
46645- 0 0 0 0 0 0 0 0 0 0 0 0
46646- 0 0 0 0 0 0 0 0 0 0 0 0
46647- 0 0 0 0 0 0 6 6 6 10 10 10
46648- 14 14 14 22 22 22 30 30 30 38 38 38
46649- 50 50 50 62 62 62 74 74 74 90 90 90
46650-101 98 89 112 100 78 121 87 25 124 80 6
46651-137 92 6 152 99 6 152 99 6 152 99 6
46652-138 86 6 124 80 6 98 70 6 86 66 30
46653-101 98 89 82 82 82 58 58 58 46 46 46
46654- 38 38 38 34 34 34 34 34 34 34 34 34
46655- 34 34 34 34 34 34 34 34 34 34 34 34
46656- 34 34 34 34 34 34 38 38 38 42 42 42
46657- 54 54 54 82 82 82 94 86 76 91 60 6
46658-134 86 6 156 107 11 167 114 7 175 118 6
46659-175 118 6 167 114 7 152 99 6 121 87 25
46660-101 98 89 62 62 62 34 34 34 18 18 18
46661- 6 6 6 0 0 0 0 0 0 0 0 0
46662- 0 0 0 0 0 0 0 0 0 0 0 0
46663- 0 0 0 0 0 0 0 0 0 0 0 0
46664- 0 0 0 0 0 0 0 0 0 0 0 0
46665- 0 0 0 0 0 0 0 0 0 0 0 0
46666- 0 0 0 0 0 0 0 0 0 0 0 0
46667- 0 0 0 0 0 0 0 0 0 0 0 0
46668- 0 0 0 6 6 6 6 6 6 10 10 10
46669- 18 18 18 22 22 22 30 30 30 42 42 42
46670- 50 50 50 66 66 66 86 86 86 101 98 89
46671-106 86 58 98 70 6 104 69 6 104 69 6
46672-104 69 6 91 60 6 82 62 34 90 90 90
46673- 62 62 62 38 38 38 22 22 22 14 14 14
46674- 10 10 10 10 10 10 10 10 10 10 10 10
46675- 10 10 10 10 10 10 6 6 6 10 10 10
46676- 10 10 10 10 10 10 10 10 10 14 14 14
46677- 22 22 22 42 42 42 70 70 70 89 81 66
46678- 80 54 7 104 69 6 124 80 6 137 92 6
46679-134 86 6 116 81 8 100 82 52 86 86 86
46680- 58 58 58 30 30 30 14 14 14 6 6 6
46681- 0 0 0 0 0 0 0 0 0 0 0 0
46682- 0 0 0 0 0 0 0 0 0 0 0 0
46683- 0 0 0 0 0 0 0 0 0 0 0 0
46684- 0 0 0 0 0 0 0 0 0 0 0 0
46685- 0 0 0 0 0 0 0 0 0 0 0 0
46686- 0 0 0 0 0 0 0 0 0 0 0 0
46687- 0 0 0 0 0 0 0 0 0 0 0 0
46688- 0 0 0 0 0 0 0 0 0 0 0 0
46689- 0 0 0 6 6 6 10 10 10 14 14 14
46690- 18 18 18 26 26 26 38 38 38 54 54 54
46691- 70 70 70 86 86 86 94 86 76 89 81 66
46692- 89 81 66 86 86 86 74 74 74 50 50 50
46693- 30 30 30 14 14 14 6 6 6 0 0 0
46694- 0 0 0 0 0 0 0 0 0 0 0 0
46695- 0 0 0 0 0 0 0 0 0 0 0 0
46696- 0 0 0 0 0 0 0 0 0 0 0 0
46697- 6 6 6 18 18 18 34 34 34 58 58 58
46698- 82 82 82 89 81 66 89 81 66 89 81 66
46699- 94 86 66 94 86 76 74 74 74 50 50 50
46700- 26 26 26 14 14 14 6 6 6 0 0 0
46701- 0 0 0 0 0 0 0 0 0 0 0 0
46702- 0 0 0 0 0 0 0 0 0 0 0 0
46703- 0 0 0 0 0 0 0 0 0 0 0 0
46704- 0 0 0 0 0 0 0 0 0 0 0 0
46705- 0 0 0 0 0 0 0 0 0 0 0 0
46706- 0 0 0 0 0 0 0 0 0 0 0 0
46707- 0 0 0 0 0 0 0 0 0 0 0 0
46708- 0 0 0 0 0 0 0 0 0 0 0 0
46709- 0 0 0 0 0 0 0 0 0 0 0 0
46710- 6 6 6 6 6 6 14 14 14 18 18 18
46711- 30 30 30 38 38 38 46 46 46 54 54 54
46712- 50 50 50 42 42 42 30 30 30 18 18 18
46713- 10 10 10 0 0 0 0 0 0 0 0 0
46714- 0 0 0 0 0 0 0 0 0 0 0 0
46715- 0 0 0 0 0 0 0 0 0 0 0 0
46716- 0 0 0 0 0 0 0 0 0 0 0 0
46717- 0 0 0 6 6 6 14 14 14 26 26 26
46718- 38 38 38 50 50 50 58 58 58 58 58 58
46719- 54 54 54 42 42 42 30 30 30 18 18 18
46720- 10 10 10 0 0 0 0 0 0 0 0 0
46721- 0 0 0 0 0 0 0 0 0 0 0 0
46722- 0 0 0 0 0 0 0 0 0 0 0 0
46723- 0 0 0 0 0 0 0 0 0 0 0 0
46724- 0 0 0 0 0 0 0 0 0 0 0 0
46725- 0 0 0 0 0 0 0 0 0 0 0 0
46726- 0 0 0 0 0 0 0 0 0 0 0 0
46727- 0 0 0 0 0 0 0 0 0 0 0 0
46728- 0 0 0 0 0 0 0 0 0 0 0 0
46729- 0 0 0 0 0 0 0 0 0 0 0 0
46730- 0 0 0 0 0 0 0 0 0 6 6 6
46731- 6 6 6 10 10 10 14 14 14 18 18 18
46732- 18 18 18 14 14 14 10 10 10 6 6 6
46733- 0 0 0 0 0 0 0 0 0 0 0 0
46734- 0 0 0 0 0 0 0 0 0 0 0 0
46735- 0 0 0 0 0 0 0 0 0 0 0 0
46736- 0 0 0 0 0 0 0 0 0 0 0 0
46737- 0 0 0 0 0 0 0 0 0 6 6 6
46738- 14 14 14 18 18 18 22 22 22 22 22 22
46739- 18 18 18 14 14 14 10 10 10 6 6 6
46740- 0 0 0 0 0 0 0 0 0 0 0 0
46741- 0 0 0 0 0 0 0 0 0 0 0 0
46742- 0 0 0 0 0 0 0 0 0 0 0 0
46743- 0 0 0 0 0 0 0 0 0 0 0 0
46744- 0 0 0 0 0 0 0 0 0 0 0 0
46745+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46746+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46747+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46748+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46749+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46750+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46751+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46752+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46753+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46754+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46755+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46756+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46757+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46758+4 4 4 4 4 4
46759+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46760+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46761+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46762+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46763+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46764+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46765+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46766+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46767+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46768+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46769+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46770+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46771+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46772+4 4 4 4 4 4
46773+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46774+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46775+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46776+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46777+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46778+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46779+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46780+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46781+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46782+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46783+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46784+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46785+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46786+4 4 4 4 4 4
46787+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46788+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46789+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46790+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46791+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46792+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46793+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46794+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46795+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46796+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46797+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46798+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46799+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46800+4 4 4 4 4 4
46801+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46802+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46803+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46804+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46805+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46806+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46807+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46808+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46809+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46810+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46811+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46812+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46813+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46814+4 4 4 4 4 4
46815+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46816+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46817+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46818+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46819+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46820+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46821+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46822+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46823+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46824+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46825+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46826+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46827+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46828+4 4 4 4 4 4
46829+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46830+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46831+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46832+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46833+4 4 4 4 4 4 4 4 4 3 3 3 0 0 0 0 0 0
46834+0 0 0 0 0 0 0 0 0 0 0 0 3 3 3 4 4 4
46835+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46836+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46837+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46838+4 4 4 4 4 4 4 4 4 4 4 4 1 1 1 0 0 0
46839+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
46840+4 4 4 4 4 4 4 4 4 2 1 0 2 1 0 3 2 2
46841+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46842+4 4 4 4 4 4
46843+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46844+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46845+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46846+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46847+4 4 4 4 4 4 2 2 2 0 0 0 3 4 3 26 28 28
46848+37 38 37 37 38 37 14 17 19 2 2 2 0 0 0 2 2 2
46849+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46850+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46851+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46852+4 4 4 4 4 4 3 3 3 0 0 0 1 1 1 6 6 6
46853+2 2 2 0 0 0 3 3 3 4 4 4 4 4 4 4 4 4
46854+4 4 5 3 3 3 1 0 0 0 0 0 1 0 0 0 0 0
46855+1 1 1 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46856+4 4 4 4 4 4
46857+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46858+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46859+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46860+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46861+2 2 2 0 0 0 0 0 0 14 17 19 60 74 84 137 136 137
46862+153 152 153 137 136 137 125 124 125 60 73 81 6 6 6 3 1 0
46863+0 0 0 3 3 3 4 4 4 4 4 4 4 4 4 4 4 4
46864+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46865+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46866+4 4 4 4 4 4 0 0 0 4 4 4 41 54 63 125 124 125
46867+60 73 81 6 6 6 4 0 0 3 3 3 4 4 4 4 4 4
46868+4 4 4 0 0 0 6 9 11 41 54 63 41 65 82 22 30 35
46869+2 2 2 2 1 0 4 4 4 4 4 4 4 4 4 4 4 4
46870+4 4 4 4 4 4
46871+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46872+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46873+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46874+4 4 4 4 4 4 5 5 5 5 5 5 2 2 2 0 0 0
46875+4 0 0 6 6 6 41 54 63 137 136 137 174 174 174 167 166 167
46876+165 164 165 165 164 165 163 162 163 163 162 163 125 124 125 41 54 63
46877+1 1 1 0 0 0 0 0 0 3 3 3 5 5 5 4 4 4
46878+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46879+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
46880+3 3 3 2 0 0 4 0 0 60 73 81 156 155 156 167 166 167
46881+163 162 163 85 115 134 5 7 8 0 0 0 4 4 4 5 5 5
46882+0 0 0 2 5 5 55 98 126 90 154 193 90 154 193 72 125 159
46883+37 51 59 2 0 0 1 1 1 4 5 5 4 4 4 4 4 4
46884+4 4 4 4 4 4
46885+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46886+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46887+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46888+4 4 4 5 5 5 4 4 4 1 1 1 0 0 0 3 3 3
46889+37 38 37 125 124 125 163 162 163 174 174 174 158 157 158 158 157 158
46890+156 155 156 156 155 156 158 157 158 165 164 165 174 174 174 166 165 166
46891+125 124 125 16 19 21 1 0 0 0 0 0 0 0 0 4 4 4
46892+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
46893+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 1 1 1
46894+0 0 0 0 0 0 37 38 37 153 152 153 174 174 174 158 157 158
46895+174 174 174 163 162 163 37 38 37 4 3 3 4 0 0 1 1 1
46896+0 0 0 22 40 52 101 161 196 101 161 196 90 154 193 101 161 196
46897+64 123 161 14 17 19 0 0 0 4 4 4 4 4 4 4 4 4
46898+4 4 4 4 4 4
46899+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46900+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46901+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
46902+5 5 5 2 2 2 0 0 0 4 0 0 24 26 27 85 115 134
46903+156 155 156 174 174 174 167 166 167 156 155 156 154 153 154 157 156 157
46904+156 155 156 156 155 156 155 154 155 153 152 153 158 157 158 167 166 167
46905+174 174 174 156 155 156 60 74 84 16 19 21 0 0 0 0 0 0
46906+1 1 1 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
46907+4 4 4 5 5 5 6 6 6 3 3 3 0 0 0 4 0 0
46908+13 16 17 60 73 81 137 136 137 165 164 165 156 155 156 153 152 153
46909+174 174 174 177 184 187 60 73 81 3 1 0 0 0 0 1 1 2
46910+22 30 35 64 123 161 136 185 209 90 154 193 90 154 193 90 154 193
46911+90 154 193 21 29 34 0 0 0 3 2 2 4 4 5 4 4 4
46912+4 4 4 4 4 4
46913+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46914+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46915+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 3 3 3
46916+0 0 0 0 0 0 10 13 16 60 74 84 157 156 157 174 174 174
46917+174 174 174 158 157 158 153 152 153 154 153 154 156 155 156 155 154 155
46918+156 155 156 155 154 155 154 153 154 157 156 157 154 153 154 153 152 153
46919+163 162 163 174 174 174 177 184 187 137 136 137 60 73 81 13 16 17
46920+4 0 0 0 0 0 3 3 3 5 5 5 4 4 4 4 4 4
46921+5 5 5 4 4 4 1 1 1 0 0 0 3 3 3 41 54 63
46922+131 129 131 174 174 174 174 174 174 174 174 174 167 166 167 174 174 174
46923+190 197 201 137 136 137 24 26 27 4 0 0 16 21 25 50 82 103
46924+90 154 193 136 185 209 90 154 193 101 161 196 101 161 196 101 161 196
46925+31 91 132 3 6 7 0 0 0 4 4 4 4 4 4 4 4 4
46926+4 4 4 4 4 4
46927+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46928+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46929+4 4 4 4 4 4 4 4 4 2 2 2 0 0 0 4 0 0
46930+4 0 0 43 57 68 137 136 137 177 184 187 174 174 174 163 162 163
46931+155 154 155 155 154 155 156 155 156 155 154 155 158 157 158 165 164 165
46932+167 166 167 166 165 166 163 162 163 157 156 157 155 154 155 155 154 155
46933+153 152 153 156 155 156 167 166 167 174 174 174 174 174 174 131 129 131
46934+41 54 63 5 5 5 0 0 0 0 0 0 3 3 3 4 4 4
46935+1 1 1 0 0 0 1 0 0 26 28 28 125 124 125 174 174 174
46936+177 184 187 174 174 174 174 174 174 156 155 156 131 129 131 137 136 137
46937+125 124 125 24 26 27 4 0 0 41 65 82 90 154 193 136 185 209
46938+136 185 209 101 161 196 53 118 160 37 112 160 90 154 193 34 86 122
46939+7 12 15 0 0 0 4 4 4 4 4 4 4 4 4 4 4 4
46940+4 4 4 4 4 4
46941+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46942+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46943+4 4 4 3 3 3 0 0 0 0 0 0 5 5 5 37 38 37
46944+125 124 125 167 166 167 174 174 174 167 166 167 158 157 158 155 154 155
46945+156 155 156 156 155 156 156 155 156 163 162 163 167 166 167 155 154 155
46946+137 136 137 153 152 153 156 155 156 165 164 165 163 162 163 156 155 156
46947+156 155 156 156 155 156 155 154 155 158 157 158 166 165 166 174 174 174
46948+167 166 167 125 124 125 37 38 37 1 0 0 0 0 0 0 0 0
46949+0 0 0 24 26 27 60 74 84 158 157 158 174 174 174 174 174 174
46950+166 165 166 158 157 158 125 124 125 41 54 63 13 16 17 6 6 6
46951+6 6 6 37 38 37 80 127 157 136 185 209 101 161 196 101 161 196
46952+90 154 193 28 67 93 6 10 14 13 20 25 13 20 25 6 10 14
46953+1 1 2 4 3 3 4 4 4 4 4 4 4 4 4 4 4 4
46954+4 4 4 4 4 4
46955+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46956+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46957+1 1 1 1 0 0 4 3 3 37 38 37 60 74 84 153 152 153
46958+167 166 167 167 166 167 158 157 158 154 153 154 155 154 155 156 155 156
46959+157 156 157 158 157 158 167 166 167 167 166 167 131 129 131 43 57 68
46960+26 28 28 37 38 37 60 73 81 131 129 131 165 164 165 166 165 166
46961+158 157 158 155 154 155 156 155 156 156 155 156 156 155 156 158 157 158
46962+165 164 165 174 174 174 163 162 163 60 74 84 16 19 21 13 16 17
46963+60 73 81 131 129 131 174 174 174 174 174 174 167 166 167 165 164 165
46964+137 136 137 60 73 81 24 26 27 4 0 0 4 0 0 16 19 21
46965+52 104 138 101 161 196 136 185 209 136 185 209 90 154 193 27 99 146
46966+13 20 25 4 5 7 2 5 5 4 5 7 1 1 2 0 0 0
46967+4 4 4 4 4 4 3 3 3 2 2 2 2 2 2 4 4 4
46968+4 4 4 4 4 4
46969+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46970+4 4 4 4 4 4 4 4 4 4 4 4 3 3 3 0 0 0
46971+0 0 0 13 16 17 60 73 81 137 136 137 174 174 174 166 165 166
46972+158 157 158 156 155 156 157 156 157 156 155 156 155 154 155 158 157 158
46973+167 166 167 174 174 174 153 152 153 60 73 81 16 19 21 4 0 0
46974+4 0 0 4 0 0 6 6 6 26 28 28 60 74 84 158 157 158
46975+174 174 174 166 165 166 157 156 157 155 154 155 156 155 156 156 155 156
46976+155 154 155 158 157 158 167 166 167 167 166 167 131 129 131 125 124 125
46977+137 136 137 167 166 167 167 166 167 174 174 174 158 157 158 125 124 125
46978+16 19 21 4 0 0 4 0 0 10 13 16 49 76 92 107 159 188
46979+136 185 209 136 185 209 90 154 193 26 108 161 22 40 52 6 10 14
46980+2 3 3 1 1 2 1 1 2 4 4 5 4 4 5 4 4 5
46981+4 4 5 2 2 1 0 0 0 0 0 0 0 0 0 2 2 2
46982+4 4 4 4 4 4
46983+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
46984+4 4 4 5 5 5 3 3 3 0 0 0 1 0 0 4 0 0
46985+37 51 59 131 129 131 167 166 167 167 166 167 163 162 163 157 156 157
46986+157 156 157 155 154 155 153 152 153 157 156 157 167 166 167 174 174 174
46987+153 152 153 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
46988+4 3 3 4 3 3 4 0 0 6 6 6 4 0 0 37 38 37
46989+125 124 125 174 174 174 174 174 174 165 164 165 156 155 156 154 153 154
46990+156 155 156 156 155 156 155 154 155 163 162 163 158 157 158 163 162 163
46991+174 174 174 174 174 174 174 174 174 125 124 125 37 38 37 0 0 0
46992+4 0 0 6 9 11 41 54 63 90 154 193 136 185 209 146 190 211
46993+136 185 209 37 112 160 22 40 52 6 10 14 3 6 7 1 1 2
46994+1 1 2 3 3 3 1 1 2 3 3 3 4 4 4 4 4 4
46995+2 2 2 2 0 0 16 19 21 37 38 37 24 26 27 0 0 0
46996+0 0 0 4 4 4
46997+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 5 5 5
46998+4 4 4 0 0 0 0 0 0 0 0 0 26 28 28 120 125 127
46999+158 157 158 174 174 174 165 164 165 157 156 157 155 154 155 156 155 156
47000+153 152 153 153 152 153 167 166 167 174 174 174 174 174 174 125 124 125
47001+37 38 37 4 0 0 0 0 0 4 0 0 4 3 3 4 4 4
47002+4 4 4 4 4 4 5 5 5 4 0 0 4 0 0 4 0 0
47003+4 3 3 43 57 68 137 136 137 174 174 174 174 174 174 165 164 165
47004+154 153 154 153 152 153 153 152 153 153 152 153 163 162 163 174 174 174
47005+174 174 174 153 152 153 60 73 81 6 6 6 4 0 0 4 3 3
47006+32 43 50 80 127 157 136 185 209 146 190 211 146 190 211 90 154 193
47007+28 67 93 28 67 93 40 71 93 3 6 7 1 1 2 2 5 5
47008+50 82 103 79 117 143 26 37 45 0 0 0 3 3 3 1 1 1
47009+0 0 0 41 54 63 137 136 137 174 174 174 153 152 153 60 73 81
47010+2 0 0 0 0 0
47011+4 4 4 4 4 4 4 4 4 4 4 4 6 6 6 2 2 2
47012+0 0 0 2 0 0 24 26 27 60 74 84 153 152 153 174 174 174
47013+174 174 174 157 156 157 154 153 154 156 155 156 154 153 154 153 152 153
47014+165 164 165 174 174 174 177 184 187 137 136 137 43 57 68 6 6 6
47015+4 0 0 2 0 0 3 3 3 5 5 5 5 5 5 4 4 4
47016+4 4 4 4 4 4 4 4 4 5 5 5 6 6 6 4 3 3
47017+4 0 0 4 0 0 24 26 27 60 73 81 153 152 153 174 174 174
47018+174 174 174 158 157 158 158 157 158 174 174 174 174 174 174 158 157 158
47019+60 74 84 24 26 27 4 0 0 4 0 0 17 23 27 59 113 148
47020+136 185 209 191 222 234 146 190 211 136 185 209 31 91 132 7 11 13
47021+22 40 52 101 161 196 90 154 193 6 9 11 3 4 4 43 95 132
47022+136 185 209 172 205 220 55 98 126 0 0 0 0 0 0 2 0 0
47023+26 28 28 153 152 153 177 184 187 167 166 167 177 184 187 165 164 165
47024+37 38 37 0 0 0
47025+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
47026+13 16 17 60 73 81 137 136 137 174 174 174 174 174 174 165 164 165
47027+153 152 153 153 152 153 155 154 155 154 153 154 158 157 158 174 174 174
47028+177 184 187 163 162 163 60 73 81 16 19 21 4 0 0 4 0 0
47029+4 3 3 4 4 4 5 5 5 5 5 5 4 4 4 5 5 5
47030+5 5 5 5 5 5 5 5 5 4 4 4 4 4 4 5 5 5
47031+6 6 6 4 0 0 4 0 0 4 0 0 24 26 27 60 74 84
47032+166 165 166 174 174 174 177 184 187 165 164 165 125 124 125 24 26 27
47033+4 0 0 4 0 0 5 5 5 50 82 103 136 185 209 172 205 220
47034+146 190 211 136 185 209 26 108 161 22 40 52 7 12 15 44 81 103
47035+71 116 144 28 67 93 37 51 59 41 65 82 100 139 164 101 161 196
47036+90 154 193 90 154 193 28 67 93 0 0 0 0 0 0 26 28 28
47037+125 124 125 167 166 167 163 162 163 153 152 153 163 162 163 174 174 174
47038+85 115 134 4 0 0
47039+4 4 4 5 5 5 4 4 4 1 0 0 4 0 0 34 47 55
47040+125 124 125 174 174 174 174 174 174 167 166 167 157 156 157 153 152 153
47041+155 154 155 155 154 155 158 157 158 166 165 166 167 166 167 154 153 154
47042+125 124 125 26 28 28 4 0 0 4 0 0 4 0 0 5 5 5
47043+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 1 1 1
47044+0 0 0 0 0 0 1 1 1 4 4 4 4 4 4 4 4 4
47045+5 5 5 5 5 5 4 3 3 4 0 0 4 0 0 6 6 6
47046+37 38 37 131 129 131 137 136 137 37 38 37 0 0 0 4 0 0
47047+4 5 5 43 61 72 90 154 193 172 205 220 146 190 211 136 185 209
47048+90 154 193 28 67 93 13 20 25 43 61 72 71 116 144 44 81 103
47049+2 5 5 7 11 13 59 113 148 101 161 196 90 154 193 28 67 93
47050+13 20 25 6 10 14 0 0 0 13 16 17 60 73 81 137 136 137
47051+166 165 166 158 157 158 156 155 156 154 153 154 167 166 167 174 174 174
47052+60 73 81 4 0 0
47053+4 4 4 4 4 4 0 0 0 3 3 3 60 74 84 174 174 174
47054+174 174 174 167 166 167 163 162 163 155 154 155 157 156 157 155 154 155
47055+156 155 156 163 162 163 167 166 167 158 157 158 125 124 125 37 38 37
47056+4 3 3 4 0 0 4 0 0 6 6 6 6 6 6 5 5 5
47057+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 2 3 3
47058+10 13 16 7 11 13 1 0 0 0 0 0 2 2 1 4 4 4
47059+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 4 0 0
47060+4 0 0 7 11 13 13 16 17 4 0 0 3 3 3 34 47 55
47061+80 127 157 146 190 211 172 205 220 136 185 209 136 185 209 136 185 209
47062+28 67 93 22 40 52 55 98 126 55 98 126 21 29 34 7 11 13
47063+50 82 103 101 161 196 101 161 196 35 83 115 13 20 25 2 2 1
47064+1 1 2 1 1 2 37 51 59 131 129 131 174 174 174 174 174 174
47065+167 166 167 163 162 163 163 162 163 167 166 167 174 174 174 125 124 125
47066+16 19 21 4 0 0
47067+4 4 4 4 0 0 4 0 0 60 74 84 174 174 174 174 174 174
47068+158 157 158 155 154 155 155 154 155 156 155 156 155 154 155 158 157 158
47069+167 166 167 165 164 165 131 129 131 60 73 81 13 16 17 4 0 0
47070+4 0 0 4 3 3 6 6 6 4 3 3 5 5 5 4 4 4
47071+4 4 4 3 2 2 0 0 0 0 0 0 7 11 13 45 69 86
47072+80 127 157 71 116 144 43 61 72 7 11 13 0 0 0 1 1 1
47073+4 3 3 4 4 4 4 4 4 4 4 4 6 6 6 5 5 5
47074+3 2 2 4 0 0 1 0 0 21 29 34 59 113 148 136 185 209
47075+146 190 211 136 185 209 136 185 209 136 185 209 136 185 209 136 185 209
47076+68 124 159 44 81 103 22 40 52 13 16 17 43 61 72 90 154 193
47077+136 185 209 59 113 148 21 29 34 3 4 3 1 1 1 0 0 0
47078+24 26 27 125 124 125 163 162 163 174 174 174 166 165 166 165 164 165
47079+163 162 163 125 124 125 125 124 125 125 124 125 125 124 125 26 28 28
47080+4 0 0 4 3 3
47081+3 3 3 0 0 0 24 26 27 153 152 153 177 184 187 158 157 158
47082+156 155 156 156 155 156 155 154 155 155 154 155 165 164 165 174 174 174
47083+155 154 155 60 74 84 26 28 28 4 0 0 4 0 0 3 1 0
47084+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 3 3
47085+2 0 0 0 0 0 0 0 0 32 43 50 72 125 159 101 161 196
47086+136 185 209 101 161 196 101 161 196 79 117 143 32 43 50 0 0 0
47087+0 0 0 2 2 2 4 4 4 4 4 4 3 3 3 1 0 0
47088+0 0 0 4 5 5 49 76 92 101 161 196 146 190 211 146 190 211
47089+136 185 209 136 185 209 136 185 209 136 185 209 136 185 209 90 154 193
47090+28 67 93 13 16 17 37 51 59 80 127 157 136 185 209 90 154 193
47091+22 40 52 6 9 11 3 4 3 2 2 1 16 19 21 60 73 81
47092+137 136 137 163 162 163 158 157 158 166 165 166 167 166 167 153 152 153
47093+60 74 84 37 38 37 6 6 6 13 16 17 4 0 0 1 0 0
47094+3 2 2 4 4 4
47095+3 2 2 4 0 0 37 38 37 137 136 137 167 166 167 158 157 158
47096+157 156 157 154 153 154 157 156 157 167 166 167 174 174 174 125 124 125
47097+37 38 37 4 0 0 4 0 0 4 0 0 4 3 3 4 4 4
47098+4 4 4 4 4 4 5 5 5 5 5 5 1 1 1 0 0 0
47099+0 0 0 16 21 25 55 98 126 90 154 193 136 185 209 101 161 196
47100+101 161 196 101 161 196 136 185 209 136 185 209 101 161 196 55 98 126
47101+14 17 19 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0
47102+22 40 52 90 154 193 146 190 211 146 190 211 136 185 209 136 185 209
47103+136 185 209 136 185 209 136 185 209 101 161 196 35 83 115 7 11 13
47104+17 23 27 59 113 148 136 185 209 101 161 196 34 86 122 7 12 15
47105+2 5 5 3 4 3 6 6 6 60 73 81 131 129 131 163 162 163
47106+166 165 166 174 174 174 174 174 174 163 162 163 125 124 125 41 54 63
47107+13 16 17 4 0 0 4 0 0 4 0 0 1 0 0 2 2 2
47108+4 4 4 4 4 4
47109+1 1 1 2 1 0 43 57 68 137 136 137 153 152 153 153 152 153
47110+163 162 163 156 155 156 165 164 165 167 166 167 60 74 84 6 6 6
47111+4 0 0 4 0 0 5 5 5 4 4 4 4 4 4 4 4 4
47112+4 5 5 6 6 6 4 3 3 0 0 0 0 0 0 11 15 18
47113+40 71 93 100 139 164 101 161 196 101 161 196 101 161 196 101 161 196
47114+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 136 185 209
47115+101 161 196 45 69 86 6 6 6 0 0 0 17 23 27 55 98 126
47116+136 185 209 146 190 211 136 185 209 136 185 209 136 185 209 136 185 209
47117+136 185 209 136 185 209 90 154 193 22 40 52 7 11 13 50 82 103
47118+136 185 209 136 185 209 53 118 160 22 40 52 7 11 13 2 5 5
47119+3 4 3 37 38 37 125 124 125 157 156 157 166 165 166 167 166 167
47120+174 174 174 174 174 174 137 136 137 60 73 81 4 0 0 4 0 0
47121+4 0 0 4 0 0 5 5 5 3 3 3 3 3 3 4 4 4
47122+4 4 4 4 4 4
47123+4 0 0 4 0 0 41 54 63 137 136 137 125 124 125 131 129 131
47124+155 154 155 167 166 167 174 174 174 60 74 84 6 6 6 4 0 0
47125+4 3 3 6 6 6 4 4 4 4 4 4 4 4 4 5 5 5
47126+4 4 4 1 1 1 0 0 0 3 6 7 41 65 82 72 125 159
47127+101 161 196 101 161 196 101 161 196 90 154 193 90 154 193 101 161 196
47128+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
47129+136 185 209 136 185 209 80 127 157 55 98 126 101 161 196 146 190 211
47130+136 185 209 136 185 209 136 185 209 101 161 196 136 185 209 101 161 196
47131+136 185 209 101 161 196 35 83 115 22 30 35 101 161 196 172 205 220
47132+90 154 193 28 67 93 7 11 13 2 5 5 3 4 3 13 16 17
47133+85 115 134 167 166 167 174 174 174 174 174 174 174 174 174 174 174 174
47134+167 166 167 60 74 84 13 16 17 4 0 0 4 0 0 4 3 3
47135+6 6 6 5 5 5 4 4 4 5 5 5 4 4 4 5 5 5
47136+5 5 5 5 5 5
47137+1 1 1 4 0 0 41 54 63 137 136 137 137 136 137 125 124 125
47138+131 129 131 167 166 167 157 156 157 37 38 37 6 6 6 4 0 0
47139+6 6 6 5 5 5 4 4 4 4 4 4 4 5 5 2 2 1
47140+0 0 0 0 0 0 26 37 45 58 111 146 101 161 196 101 161 196
47141+101 161 196 90 154 193 90 154 193 90 154 193 101 161 196 101 161 196
47142+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
47143+101 161 196 136 185 209 136 185 209 136 185 209 146 190 211 136 185 209
47144+136 185 209 101 161 196 136 185 209 136 185 209 101 161 196 136 185 209
47145+101 161 196 136 185 209 136 185 209 136 185 209 136 185 209 16 89 141
47146+7 11 13 2 5 5 2 5 5 13 16 17 60 73 81 154 154 154
47147+174 174 174 174 174 174 174 174 174 174 174 174 163 162 163 125 124 125
47148+24 26 27 4 0 0 4 0 0 4 0 0 5 5 5 5 5 5
47149+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
47150+5 5 5 4 4 4
47151+4 0 0 6 6 6 37 38 37 137 136 137 137 136 137 131 129 131
47152+131 129 131 153 152 153 131 129 131 26 28 28 4 0 0 4 3 3
47153+6 6 6 4 4 4 4 4 4 4 4 4 0 0 0 0 0 0
47154+13 20 25 51 88 114 90 154 193 101 161 196 101 161 196 90 154 193
47155+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
47156+101 161 196 101 161 196 101 161 196 101 161 196 136 185 209 101 161 196
47157+101 161 196 136 185 209 101 161 196 136 185 209 136 185 209 101 161 196
47158+136 185 209 101 161 196 136 185 209 101 161 196 101 161 196 101 161 196
47159+136 185 209 136 185 209 136 185 209 37 112 160 21 29 34 5 7 8
47160+2 5 5 13 16 17 43 57 68 131 129 131 174 174 174 174 174 174
47161+174 174 174 167 166 167 157 156 157 125 124 125 37 38 37 4 0 0
47162+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
47163+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47164+4 4 4 4 4 4
47165+1 1 1 4 0 0 41 54 63 153 152 153 137 136 137 137 136 137
47166+137 136 137 153 152 153 125 124 125 24 26 27 4 0 0 3 2 2
47167+4 4 4 4 4 4 4 3 3 4 0 0 3 6 7 43 61 72
47168+64 123 161 101 161 196 90 154 193 90 154 193 90 154 193 90 154 193
47169+90 154 193 90 154 193 90 154 193 90 154 193 101 161 196 90 154 193
47170+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
47171+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
47172+136 185 209 101 161 196 101 161 196 136 185 209 136 185 209 101 161 196
47173+101 161 196 90 154 193 28 67 93 13 16 17 7 11 13 3 6 7
47174+37 51 59 125 124 125 163 162 163 174 174 174 167 166 167 166 165 166
47175+167 166 167 131 129 131 60 73 81 4 0 0 4 0 0 4 0 0
47176+3 3 3 5 5 5 6 6 6 4 4 4 4 4 4 4 4 4
47177+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47178+4 4 4 4 4 4
47179+4 0 0 4 0 0 41 54 63 137 136 137 153 152 153 137 136 137
47180+153 152 153 157 156 157 125 124 125 24 26 27 0 0 0 2 2 2
47181+4 4 4 4 4 4 2 0 0 0 0 0 28 67 93 90 154 193
47182+90 154 193 90 154 193 90 154 193 90 154 193 64 123 161 90 154 193
47183+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
47184+90 154 193 101 161 196 101 161 196 101 161 196 90 154 193 136 185 209
47185+101 161 196 101 161 196 136 185 209 101 161 196 136 185 209 101 161 196
47186+101 161 196 101 161 196 136 185 209 101 161 196 101 161 196 90 154 193
47187+35 83 115 13 16 17 3 6 7 2 5 5 13 16 17 60 74 84
47188+154 154 154 166 165 166 165 164 165 158 157 158 163 162 163 157 156 157
47189+60 74 84 13 16 17 4 0 0 4 0 0 3 2 2 4 4 4
47190+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47191+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47192+4 4 4 4 4 4
47193+1 1 1 4 0 0 41 54 63 157 156 157 155 154 155 137 136 137
47194+153 152 153 158 157 158 137 136 137 26 28 28 2 0 0 2 2 2
47195+4 4 4 4 4 4 1 0 0 6 10 14 34 86 122 90 154 193
47196+64 123 161 90 154 193 64 123 161 90 154 193 90 154 193 90 154 193
47197+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
47198+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
47199+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 101 161 196
47200+136 185 209 101 161 196 136 185 209 90 154 193 26 108 161 22 40 52
47201+13 16 17 5 7 8 2 5 5 2 5 5 37 38 37 165 164 165
47202+174 174 174 163 162 163 154 154 154 165 164 165 167 166 167 60 73 81
47203+6 6 6 4 0 0 4 0 0 4 4 4 4 4 4 4 4 4
47204+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47205+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47206+4 4 4 4 4 4
47207+4 0 0 6 6 6 41 54 63 156 155 156 158 157 158 153 152 153
47208+156 155 156 165 164 165 137 136 137 26 28 28 0 0 0 2 2 2
47209+4 4 5 4 4 4 2 0 0 7 12 15 31 96 139 64 123 161
47210+90 154 193 64 123 161 90 154 193 90 154 193 64 123 161 90 154 193
47211+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
47212+90 154 193 90 154 193 90 154 193 101 161 196 101 161 196 101 161 196
47213+101 161 196 101 161 196 101 161 196 101 161 196 101 161 196 136 185 209
47214+101 161 196 136 185 209 26 108 161 22 40 52 7 11 13 5 7 8
47215+2 5 5 2 5 5 2 5 5 2 2 1 37 38 37 158 157 158
47216+174 174 174 154 154 154 156 155 156 167 166 167 165 164 165 37 38 37
47217+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47218+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47219+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47220+4 4 4 4 4 4
47221+3 1 0 4 0 0 60 73 81 157 156 157 163 162 163 153 152 153
47222+158 157 158 167 166 167 137 136 137 26 28 28 2 0 0 2 2 2
47223+4 5 5 4 4 4 4 0 0 7 12 15 24 86 132 26 108 161
47224+37 112 160 64 123 161 90 154 193 64 123 161 90 154 193 90 154 193
47225+90 154 193 90 154 193 90 154 193 90 154 193 90 154 193 90 154 193
47226+90 154 193 101 161 196 90 154 193 101 161 196 101 161 196 101 161 196
47227+101 161 196 101 161 196 101 161 196 136 185 209 101 161 196 136 185 209
47228+90 154 193 35 83 115 13 16 17 13 16 17 7 11 13 3 6 7
47229+5 7 8 6 6 6 3 4 3 2 2 1 30 32 34 154 154 154
47230+167 166 167 154 154 154 154 154 154 174 174 174 165 164 165 37 38 37
47231+6 6 6 4 0 0 6 6 6 4 4 4 4 4 4 4 4 4
47232+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47233+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47234+4 4 4 4 4 4
47235+4 0 0 4 0 0 41 54 63 163 162 163 166 165 166 154 154 154
47236+163 162 163 174 174 174 137 136 137 26 28 28 0 0 0 2 2 2
47237+4 5 5 4 4 5 1 1 2 6 10 14 28 67 93 18 97 151
47238+18 97 151 18 97 151 26 108 161 37 112 160 37 112 160 90 154 193
47239+64 123 161 90 154 193 90 154 193 90 154 193 90 154 193 101 161 196
47240+90 154 193 101 161 196 101 161 196 90 154 193 101 161 196 101 161 196
47241+101 161 196 101 161 196 101 161 196 136 185 209 90 154 193 16 89 141
47242+13 20 25 7 11 13 5 7 8 5 7 8 2 5 5 4 5 5
47243+3 4 3 4 5 5 3 4 3 0 0 0 37 38 37 158 157 158
47244+174 174 174 158 157 158 158 157 158 167 166 167 174 174 174 41 54 63
47245+4 0 0 3 2 2 5 5 5 4 4 4 4 4 4 4 4 4
47246+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47247+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47248+4 4 4 4 4 4
47249+1 1 1 4 0 0 60 73 81 165 164 165 174 174 174 158 157 158
47250+167 166 167 174 174 174 153 152 153 26 28 28 2 0 0 2 2 2
47251+4 5 5 4 4 4 4 0 0 7 12 15 10 87 144 10 87 144
47252+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
47253+26 108 161 37 112 160 53 118 160 90 154 193 90 154 193 90 154 193
47254+90 154 193 90 154 193 101 161 196 101 161 196 101 161 196 101 161 196
47255+101 161 196 136 185 209 90 154 193 26 108 161 22 40 52 13 16 17
47256+7 11 13 3 6 7 5 7 8 5 7 8 2 5 5 4 5 5
47257+4 5 5 6 6 6 3 4 3 0 0 0 30 32 34 158 157 158
47258+174 174 174 156 155 156 155 154 155 165 164 165 154 153 154 37 38 37
47259+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47260+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47261+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47262+4 4 4 4 4 4
47263+4 0 0 4 0 0 60 73 81 167 166 167 174 174 174 163 162 163
47264+174 174 174 174 174 174 153 152 153 26 28 28 0 0 0 3 3 3
47265+5 5 5 4 4 4 1 1 2 7 12 15 28 67 93 18 97 151
47266+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
47267+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
47268+90 154 193 26 108 161 90 154 193 90 154 193 90 154 193 101 161 196
47269+101 161 196 26 108 161 22 40 52 13 16 17 7 11 13 2 5 5
47270+2 5 5 6 6 6 2 5 5 4 5 5 4 5 5 4 5 5
47271+3 4 3 5 5 5 3 4 3 2 0 0 30 32 34 137 136 137
47272+153 152 153 137 136 137 131 129 131 137 136 137 131 129 131 37 38 37
47273+4 0 0 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47274+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47275+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47276+4 4 4 4 4 4
47277+1 1 1 4 0 0 60 73 81 167 166 167 174 174 174 166 165 166
47278+174 174 174 177 184 187 153 152 153 30 32 34 1 0 0 3 3 3
47279+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
47280+18 97 151 18 97 151 18 97 151 26 108 161 26 108 161 26 108 161
47281+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
47282+26 108 161 26 108 161 26 108 161 90 154 193 90 154 193 26 108 161
47283+35 83 115 13 16 17 7 11 13 5 7 8 3 6 7 5 7 8
47284+2 5 5 6 6 6 4 5 5 4 5 5 3 4 3 4 5 5
47285+3 4 3 6 6 6 3 4 3 0 0 0 26 28 28 125 124 125
47286+131 129 131 125 124 125 125 124 125 131 129 131 131 129 131 37 38 37
47287+4 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47288+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47289+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47290+4 4 4 4 4 4
47291+3 1 0 4 0 0 60 73 81 174 174 174 177 184 187 167 166 167
47292+174 174 174 177 184 187 153 152 153 30 32 34 0 0 0 3 3 3
47293+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
47294+18 97 151 18 97 151 18 97 151 18 97 151 18 97 151 26 108 161
47295+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
47296+26 108 161 90 154 193 26 108 161 26 108 161 24 86 132 13 20 25
47297+7 11 13 13 20 25 22 40 52 5 7 8 3 4 3 3 4 3
47298+4 5 5 3 4 3 4 5 5 3 4 3 4 5 5 3 4 3
47299+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
47300+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47301+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47302+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47303+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47304+4 4 4 4 4 4
47305+1 1 1 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
47306+174 174 174 190 197 201 157 156 157 30 32 34 1 0 0 3 3 3
47307+5 5 5 4 3 3 4 0 0 7 12 15 10 87 144 10 87 144
47308+18 97 151 19 95 150 19 95 150 18 97 151 18 97 151 26 108 161
47309+18 97 151 26 108 161 26 108 161 26 108 161 26 108 161 90 154 193
47310+26 108 161 26 108 161 26 108 161 22 40 52 2 5 5 3 4 3
47311+28 67 93 37 112 160 34 86 122 2 5 5 3 4 3 3 4 3
47312+3 4 3 3 4 3 3 4 3 2 2 1 3 4 3 4 4 4
47313+4 5 5 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
47314+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47315+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47316+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47317+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47318+4 4 4 4 4 4
47319+4 0 0 4 0 0 60 73 81 174 174 174 177 184 187 174 174 174
47320+174 174 174 190 197 201 158 157 158 30 32 34 0 0 0 2 2 2
47321+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 18 97 151
47322+10 87 144 19 95 150 19 95 150 18 97 151 18 97 151 18 97 151
47323+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
47324+18 97 151 22 40 52 2 5 5 2 2 1 22 40 52 26 108 161
47325+90 154 193 37 112 160 22 40 52 3 4 3 13 20 25 22 30 35
47326+3 6 7 1 1 1 2 2 2 6 9 11 5 5 5 4 3 3
47327+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
47328+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47329+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47330+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47331+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47332+4 4 4 4 4 4
47333+1 1 1 4 0 0 60 73 81 177 184 187 193 200 203 174 174 174
47334+177 184 187 193 200 203 163 162 163 30 32 34 4 0 0 2 2 2
47335+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
47336+10 87 144 10 87 144 19 95 150 19 95 150 19 95 150 18 97 151
47337+26 108 161 26 108 161 26 108 161 90 154 193 26 108 161 28 67 93
47338+6 10 14 2 5 5 13 20 25 24 86 132 37 112 160 90 154 193
47339+10 87 144 7 12 15 2 5 5 28 67 93 37 112 160 28 67 93
47340+2 2 1 7 12 15 35 83 115 28 67 93 3 6 7 1 0 0
47341+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
47342+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47343+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47344+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47345+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47346+4 4 4 4 4 4
47347+4 0 0 4 0 0 60 73 81 174 174 174 190 197 201 174 174 174
47348+177 184 187 193 200 203 163 162 163 30 32 34 0 0 0 2 2 2
47349+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
47350+10 87 144 16 89 141 19 95 150 10 87 144 26 108 161 26 108 161
47351+26 108 161 26 108 161 26 108 161 28 67 93 6 10 14 1 1 2
47352+7 12 15 28 67 93 26 108 161 16 89 141 24 86 132 21 29 34
47353+3 4 3 21 29 34 37 112 160 37 112 160 27 99 146 21 29 34
47354+21 29 34 26 108 161 90 154 193 35 83 115 1 1 2 2 0 0
47355+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 125 124 125
47356+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47357+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47358+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47359+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47360+4 4 4 4 4 4
47361+3 1 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
47362+190 197 201 193 200 203 165 164 165 37 38 37 4 0 0 2 2 2
47363+5 5 5 4 3 3 4 0 0 6 10 14 24 86 132 10 87 144
47364+10 87 144 10 87 144 16 89 141 18 97 151 18 97 151 10 87 144
47365+24 86 132 24 86 132 13 20 25 4 5 7 4 5 7 22 40 52
47366+18 97 151 37 112 160 26 108 161 7 12 15 1 1 1 0 0 0
47367+28 67 93 37 112 160 26 108 161 28 67 93 22 40 52 28 67 93
47368+26 108 161 90 154 193 26 108 161 10 87 144 0 0 0 2 0 0
47369+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
47370+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47371+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47372+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47373+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47374+4 4 4 4 4 4
47375+4 0 0 6 6 6 60 73 81 174 174 174 193 200 203 174 174 174
47376+190 197 201 193 200 203 165 164 165 30 32 34 0 0 0 2 2 2
47377+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
47378+10 87 144 10 87 144 10 87 144 18 97 151 28 67 93 6 10 14
47379+0 0 0 1 1 2 4 5 7 13 20 25 16 89 141 26 108 161
47380+26 108 161 26 108 161 24 86 132 6 9 11 2 3 3 22 40 52
47381+37 112 160 16 89 141 22 40 52 28 67 93 26 108 161 26 108 161
47382+90 154 193 26 108 161 26 108 161 28 67 93 1 1 1 4 0 0
47383+4 4 4 5 5 5 3 3 3 4 0 0 26 28 28 124 126 130
47384+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47385+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47386+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47387+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47388+4 4 4 4 4 4
47389+4 0 0 4 0 0 60 73 81 193 200 203 193 200 203 174 174 174
47390+193 200 203 193 200 203 167 166 167 37 38 37 4 0 0 2 2 2
47391+5 5 5 4 4 4 4 0 0 6 10 14 28 67 93 10 87 144
47392+10 87 144 10 87 144 18 97 151 10 87 144 13 20 25 4 5 7
47393+1 1 2 1 1 1 22 40 52 26 108 161 26 108 161 26 108 161
47394+26 108 161 26 108 161 26 108 161 24 86 132 22 40 52 22 40 52
47395+22 40 52 22 40 52 10 87 144 26 108 161 26 108 161 26 108 161
47396+26 108 161 26 108 161 90 154 193 10 87 144 0 0 0 4 0 0
47397+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
47398+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47399+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47400+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47401+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47402+4 4 4 4 4 4
47403+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
47404+190 197 201 205 212 215 167 166 167 30 32 34 0 0 0 2 2 2
47405+5 5 5 4 4 4 1 1 2 6 10 14 28 67 93 10 87 144
47406+10 87 144 10 87 144 10 87 144 10 87 144 22 40 52 1 1 2
47407+2 0 0 1 1 2 24 86 132 26 108 161 26 108 161 26 108 161
47408+26 108 161 19 95 150 16 89 141 10 87 144 22 40 52 22 40 52
47409+10 87 144 26 108 161 37 112 160 26 108 161 26 108 161 26 108 161
47410+26 108 161 26 108 161 26 108 161 28 67 93 2 0 0 3 1 0
47411+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
47412+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47413+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47414+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47415+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47416+4 4 4 4 4 4
47417+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
47418+193 200 203 193 200 203 174 174 174 37 38 37 4 0 0 2 2 2
47419+5 5 5 4 4 4 3 2 2 1 1 2 13 20 25 10 87 144
47420+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 13 20 25
47421+13 20 25 22 40 52 10 87 144 18 97 151 18 97 151 26 108 161
47422+10 87 144 13 20 25 6 10 14 21 29 34 24 86 132 18 97 151
47423+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
47424+26 108 161 90 154 193 18 97 151 13 20 25 0 0 0 4 3 3
47425+4 4 4 5 5 5 3 3 3 0 0 0 26 28 28 131 129 131
47426+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47427+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47428+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47429+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47430+4 4 4 4 4 4
47431+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
47432+190 197 201 220 221 221 167 166 167 30 32 34 1 0 0 2 2 2
47433+5 5 5 4 4 4 4 4 5 2 5 5 4 5 7 13 20 25
47434+28 67 93 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
47435+10 87 144 10 87 144 18 97 151 10 87 144 18 97 151 18 97 151
47436+28 67 93 2 3 3 0 0 0 28 67 93 26 108 161 26 108 161
47437+26 108 161 26 108 161 26 108 161 26 108 161 26 108 161 26 108 161
47438+26 108 161 10 87 144 13 20 25 1 1 2 3 2 2 4 4 4
47439+4 4 4 5 5 5 3 3 3 2 0 0 26 28 28 131 129 131
47440+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47441+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47442+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47443+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47444+4 4 4 4 4 4
47445+4 0 0 4 0 0 60 73 81 220 221 221 190 197 201 174 174 174
47446+193 200 203 193 200 203 174 174 174 26 28 28 4 0 0 4 3 3
47447+5 5 5 4 4 4 4 4 4 4 4 5 1 1 2 2 5 5
47448+4 5 7 22 40 52 10 87 144 10 87 144 18 97 151 10 87 144
47449+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 18 97 151
47450+10 87 144 28 67 93 22 40 52 10 87 144 26 108 161 18 97 151
47451+18 97 151 18 97 151 26 108 161 26 108 161 26 108 161 26 108 161
47452+22 40 52 1 1 2 0 0 0 2 3 3 4 4 4 4 4 4
47453+4 4 4 5 5 5 4 4 4 0 0 0 26 28 28 131 129 131
47454+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47455+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47456+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47457+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47458+4 4 4 4 4 4
47459+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
47460+190 197 201 220 221 221 190 197 201 41 54 63 4 0 0 2 2 2
47461+6 6 6 4 4 4 4 4 4 4 4 5 4 4 5 3 3 3
47462+1 1 2 1 1 2 6 10 14 22 40 52 10 87 144 18 97 151
47463+18 97 151 10 87 144 10 87 144 10 87 144 18 97 151 10 87 144
47464+10 87 144 18 97 151 26 108 161 18 97 151 18 97 151 10 87 144
47465+26 108 161 26 108 161 26 108 161 10 87 144 28 67 93 6 10 14
47466+1 1 2 1 1 2 4 3 3 4 4 5 4 4 4 4 4 4
47467+5 5 5 5 5 5 1 1 1 4 0 0 37 51 59 137 136 137
47468+137 136 137 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47469+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47470+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47471+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47472+4 4 4 4 4 4
47473+4 0 0 4 0 0 60 73 81 220 221 221 193 200 203 174 174 174
47474+193 200 203 193 200 203 220 221 221 137 136 137 13 16 17 4 0 0
47475+2 2 2 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5
47476+4 4 5 4 3 3 1 1 2 4 5 7 13 20 25 28 67 93
47477+10 87 144 10 87 144 10 87 144 10 87 144 10 87 144 10 87 144
47478+10 87 144 18 97 151 18 97 151 10 87 144 18 97 151 26 108 161
47479+26 108 161 18 97 151 28 67 93 6 10 14 0 0 0 0 0 0
47480+2 3 3 4 5 5 4 4 5 4 4 4 4 4 4 5 5 5
47481+3 3 3 1 1 1 0 0 0 16 19 21 125 124 125 137 136 137
47482+131 129 131 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47483+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47484+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47485+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47486+4 4 4 4 4 4
47487+4 0 0 6 6 6 60 73 81 174 174 174 220 221 221 174 174 174
47488+193 200 203 190 197 201 220 221 221 220 221 221 153 152 153 30 32 34
47489+0 0 0 0 0 0 2 2 2 4 4 4 4 4 4 4 4 4
47490+4 4 4 4 5 5 4 5 7 1 1 2 1 1 2 4 5 7
47491+13 20 25 28 67 93 10 87 144 18 97 151 10 87 144 10 87 144
47492+10 87 144 10 87 144 10 87 144 18 97 151 26 108 161 18 97 151
47493+28 67 93 7 12 15 0 0 0 0 0 0 2 2 1 4 4 4
47494+4 5 5 4 5 5 4 4 4 4 4 4 3 3 3 0 0 0
47495+0 0 0 0 0 0 37 38 37 125 124 125 158 157 158 131 129 131
47496+125 124 125 125 124 125 125 124 125 137 136 137 131 129 131 37 38 37
47497+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47498+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47499+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47500+4 4 4 4 4 4
47501+4 3 3 4 0 0 41 54 63 193 200 203 220 221 221 174 174 174
47502+193 200 203 193 200 203 193 200 203 220 221 221 244 246 246 193 200 203
47503+120 125 127 5 5 5 1 0 0 0 0 0 1 1 1 4 4 4
47504+4 4 4 4 4 4 4 5 5 4 5 5 4 4 5 1 1 2
47505+4 5 7 4 5 7 22 40 52 10 87 144 10 87 144 10 87 144
47506+10 87 144 10 87 144 18 97 151 10 87 144 10 87 144 13 20 25
47507+4 5 7 2 3 3 1 1 2 4 4 4 4 5 5 4 4 4
47508+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 1 2
47509+24 26 27 60 74 84 153 152 153 163 162 163 137 136 137 125 124 125
47510+125 124 125 125 124 125 125 124 125 137 136 137 125 124 125 26 28 28
47511+0 0 0 3 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47512+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47513+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47514+4 4 4 4 4 4
47515+4 0 0 6 6 6 26 28 28 156 155 156 220 221 221 220 221 221
47516+174 174 174 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
47517+220 221 221 167 166 167 60 73 81 7 11 13 0 0 0 0 0 0
47518+3 3 3 4 4 4 4 4 4 4 4 4 4 4 5 4 4 5
47519+4 4 5 1 1 2 1 1 2 4 5 7 22 40 52 10 87 144
47520+10 87 144 10 87 144 10 87 144 22 40 52 4 5 7 1 1 2
47521+1 1 2 4 4 5 4 4 4 4 4 4 4 4 4 4 4 4
47522+5 5 5 2 2 2 0 0 0 4 0 0 16 19 21 60 73 81
47523+137 136 137 167 166 167 158 157 158 137 136 137 131 129 131 131 129 131
47524+125 124 125 125 124 125 131 129 131 155 154 155 60 74 84 5 7 8
47525+0 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47526+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47527+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47528+4 4 4 4 4 4
47529+5 5 5 4 0 0 4 0 0 60 73 81 193 200 203 220 221 221
47530+193 200 203 193 200 203 193 200 203 193 200 203 205 212 215 220 221 221
47531+220 221 221 220 221 221 220 221 221 137 136 137 43 57 68 6 6 6
47532+4 0 0 1 1 1 4 4 4 4 4 4 4 4 4 4 4 4
47533+4 4 5 4 4 5 3 2 2 1 1 2 2 5 5 13 20 25
47534+22 40 52 22 40 52 13 20 25 2 3 3 1 1 2 3 3 3
47535+4 5 7 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47536+1 1 1 0 0 0 2 3 3 41 54 63 131 129 131 166 165 166
47537+166 165 166 155 154 155 153 152 153 137 136 137 137 136 137 125 124 125
47538+125 124 125 137 136 137 137 136 137 125 124 125 37 38 37 4 3 3
47539+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
47540+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47541+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47542+4 4 4 4 4 4
47543+4 3 3 6 6 6 6 6 6 13 16 17 60 73 81 167 166 167
47544+220 221 221 220 221 221 220 221 221 193 200 203 193 200 203 193 200 203
47545+205 212 215 220 221 221 220 221 221 244 246 246 205 212 215 125 124 125
47546+24 26 27 0 0 0 0 0 0 2 2 2 5 5 5 5 5 5
47547+4 4 4 4 4 4 4 4 4 4 4 5 1 1 2 4 5 7
47548+4 5 7 4 5 7 1 1 2 3 2 2 4 4 5 4 4 4
47549+4 4 4 4 4 4 5 5 5 4 4 4 0 0 0 0 0 0
47550+2 0 0 26 28 28 125 124 125 174 174 174 174 174 174 166 165 166
47551+156 155 156 153 152 153 137 136 137 137 136 137 131 129 131 137 136 137
47552+137 136 137 137 136 137 60 74 84 30 32 34 4 0 0 4 0 0
47553+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47554+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47555+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47556+4 4 4 4 4 4
47557+5 5 5 6 6 6 4 0 0 4 0 0 6 6 6 26 28 28
47558+125 124 125 174 174 174 220 221 221 220 221 221 220 221 221 193 200 203
47559+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
47560+193 200 203 60 74 84 13 16 17 4 0 0 0 0 0 3 3 3
47561+5 5 5 5 5 5 4 4 4 4 4 4 4 4 5 3 3 3
47562+1 1 2 3 3 3 4 4 5 4 4 5 4 4 4 4 4 4
47563+5 5 5 5 5 5 2 2 2 0 0 0 0 0 0 13 16 17
47564+60 74 84 174 174 174 193 200 203 174 174 174 167 166 167 163 162 163
47565+153 152 153 153 152 153 137 136 137 137 136 137 153 152 153 137 136 137
47566+125 124 125 41 54 63 24 26 27 4 0 0 4 0 0 5 5 5
47567+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47568+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47569+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47570+4 4 4 4 4 4
47571+4 3 3 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
47572+6 6 6 37 38 37 131 129 131 220 221 221 220 221 221 220 221 221
47573+193 200 203 193 200 203 220 221 221 205 212 215 220 221 221 244 246 246
47574+244 246 246 244 246 246 174 174 174 41 54 63 0 0 0 0 0 0
47575+0 0 0 4 4 4 5 5 5 5 5 5 4 4 4 4 4 5
47576+4 4 5 4 4 5 4 4 4 4 4 4 6 6 6 6 6 6
47577+3 3 3 0 0 0 2 0 0 13 16 17 60 73 81 156 155 156
47578+220 221 221 193 200 203 174 174 174 165 164 165 163 162 163 154 153 154
47579+153 152 153 153 152 153 158 157 158 163 162 163 137 136 137 60 73 81
47580+13 16 17 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
47581+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47582+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47583+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47584+4 4 4 4 4 4
47585+5 5 5 4 3 3 4 3 3 6 6 6 6 6 6 6 6 6
47586+6 6 6 6 6 6 6 6 6 37 38 37 167 166 167 244 246 246
47587+244 246 246 220 221 221 205 212 215 205 212 215 220 221 221 193 200 203
47588+220 221 221 244 246 246 244 246 246 244 246 246 137 136 137 37 38 37
47589+3 2 2 0 0 0 1 1 1 5 5 5 5 5 5 4 4 4
47590+4 4 4 4 4 4 4 4 4 5 5 5 4 4 4 1 1 1
47591+0 0 0 5 5 5 43 57 68 153 152 153 193 200 203 220 221 221
47592+177 184 187 174 174 174 167 166 167 166 165 166 158 157 158 157 156 157
47593+158 157 158 166 165 166 156 155 156 85 115 134 13 16 17 4 0 0
47594+4 0 0 4 0 0 5 5 5 5 5 5 4 4 4 4 4 4
47595+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47596+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47597+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47598+4 4 4 4 4 4
47599+5 5 5 4 3 3 6 6 6 6 6 6 4 0 0 6 6 6
47600+6 6 6 6 6 6 6 6 6 6 6 6 13 16 17 60 73 81
47601+177 184 187 220 221 221 220 221 221 220 221 221 205 212 215 220 221 221
47602+220 221 221 205 212 215 220 221 221 244 246 246 244 246 246 205 212 215
47603+125 124 125 30 32 34 0 0 0 0 0 0 2 2 2 5 5 5
47604+4 4 4 4 4 4 4 4 4 1 1 1 0 0 0 1 0 0
47605+37 38 37 131 129 131 205 212 215 220 221 221 193 200 203 174 174 174
47606+174 174 174 174 174 174 167 166 167 165 164 165 166 165 166 167 166 167
47607+158 157 158 125 124 125 37 38 37 4 0 0 4 0 0 4 0 0
47608+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
47609+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47610+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47611+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47612+4 4 4 4 4 4
47613+4 4 4 5 5 5 4 3 3 4 3 3 6 6 6 6 6 6
47614+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
47615+26 28 28 125 124 125 205 212 215 220 221 221 220 221 221 220 221 221
47616+205 212 215 220 221 221 205 212 215 220 221 221 220 221 221 244 246 246
47617+244 246 246 190 197 201 60 74 84 16 19 21 4 0 0 0 0 0
47618+0 0 0 0 0 0 0 0 0 0 0 0 16 19 21 120 125 127
47619+177 184 187 220 221 221 205 212 215 177 184 187 174 174 174 177 184 187
47620+174 174 174 174 174 174 167 166 167 174 174 174 166 165 166 137 136 137
47621+60 73 81 13 16 17 4 0 0 4 0 0 4 3 3 6 6 6
47622+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47623+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47624+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47625+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47626+4 4 4 4 4 4
47627+5 5 5 4 3 3 5 5 5 4 3 3 6 6 6 4 0 0
47628+6 6 6 6 6 6 4 0 0 6 6 6 4 0 0 6 6 6
47629+6 6 6 6 6 6 37 38 37 137 136 137 193 200 203 220 221 221
47630+220 221 221 205 212 215 220 221 221 205 212 215 205 212 215 220 221 221
47631+220 221 221 220 221 221 244 246 246 166 165 166 43 57 68 2 2 2
47632+0 0 0 4 0 0 16 19 21 60 73 81 157 156 157 202 210 214
47633+220 221 221 193 200 203 177 184 187 177 184 187 177 184 187 174 174 174
47634+174 174 174 174 174 174 174 174 174 157 156 157 60 74 84 24 26 27
47635+4 0 0 4 0 0 4 0 0 6 6 6 4 4 4 4 4 4
47636+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47637+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47638+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47639+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47640+4 4 4 4 4 4
47641+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
47642+6 6 6 4 0 0 6 6 6 6 6 6 6 6 6 4 0 0
47643+4 0 0 4 0 0 6 6 6 24 26 27 60 73 81 167 166 167
47644+220 221 221 220 221 221 220 221 221 205 212 215 205 212 215 205 212 215
47645+205 212 215 220 221 221 220 221 221 220 221 221 205 212 215 137 136 137
47646+60 74 84 125 124 125 137 136 137 190 197 201 220 221 221 193 200 203
47647+177 184 187 177 184 187 177 184 187 174 174 174 174 174 174 177 184 187
47648+190 197 201 174 174 174 125 124 125 37 38 37 6 6 6 4 0 0
47649+4 0 0 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47650+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47651+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47652+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47653+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47654+4 4 4 4 4 4
47655+4 4 4 4 4 4 5 5 5 5 5 5 4 3 3 6 6 6
47656+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 6 6 6
47657+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
47658+125 124 125 193 200 203 244 246 246 220 221 221 205 212 215 205 212 215
47659+205 212 215 193 200 203 205 212 215 205 212 215 220 221 221 220 221 221
47660+193 200 203 193 200 203 205 212 215 193 200 203 193 200 203 177 184 187
47661+190 197 201 190 197 201 174 174 174 190 197 201 193 200 203 190 197 201
47662+153 152 153 60 73 81 4 0 0 4 0 0 4 0 0 3 2 2
47663+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47664+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47665+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47666+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47667+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47668+4 4 4 4 4 4
47669+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
47670+6 6 6 4 3 3 4 3 3 4 3 3 6 6 6 6 6 6
47671+4 0 0 6 6 6 6 6 6 6 6 6 4 0 0 4 0 0
47672+4 0 0 26 28 28 131 129 131 220 221 221 244 246 246 220 221 221
47673+205 212 215 193 200 203 205 212 215 193 200 203 193 200 203 205 212 215
47674+220 221 221 193 200 203 193 200 203 193 200 203 190 197 201 174 174 174
47675+174 174 174 190 197 201 193 200 203 193 200 203 167 166 167 125 124 125
47676+6 6 6 4 0 0 4 0 0 4 3 3 4 4 4 4 4 4
47677+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47678+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47679+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47680+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47681+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47682+4 4 4 4 4 4
47683+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
47684+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
47685+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
47686+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
47687+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
47688+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
47689+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
47690+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
47691+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47692+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47693+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47694+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47695+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47696+4 4 4 4 4 4
47697+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47698+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
47699+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
47700+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
47701+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
47702+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
47703+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
47704+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
47705+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47706+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47707+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47708+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47709+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47710+4 4 4 4 4 4
47711+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47712+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
47713+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
47714+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
47715+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
47716+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
47717+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
47718+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
47719+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47720+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47721+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47722+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47723+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47724+4 4 4 4 4 4
47725+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47726+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47727+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
47728+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
47729+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
47730+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
47731+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
47732+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
47733+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47734+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47735+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47736+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47737+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47738+4 4 4 4 4 4
47739+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47740+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47741+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
47742+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
47743+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
47744+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
47745+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
47746+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47747+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47748+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47749+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47750+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47751+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47752+4 4 4 4 4 4
47753+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47754+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47755+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47756+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
47757+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
47758+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
47759+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
47760+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47761+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47762+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47763+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47764+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47765+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47766+4 4 4 4 4 4
47767+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47768+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47769+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47770+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
47771+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
47772+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
47773+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
47774+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47775+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47776+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47777+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47778+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47779+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47780+4 4 4 4 4 4
47781+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47782+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47783+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47784+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
47785+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
47786+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
47787+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
47788+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47789+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47790+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47791+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47792+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47793+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47794+4 4 4 4 4 4
47795+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47796+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47797+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47798+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
47799+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
47800+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
47801+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47802+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47803+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47804+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47805+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47806+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47807+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47808+4 4 4 4 4 4
47809+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47810+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47811+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47812+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47813+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
47814+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
47815+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
47816+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47817+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47818+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47819+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47820+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47821+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47822+4 4 4 4 4 4
47823+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47824+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47825+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47826+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47827+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
47828+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
47829+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47830+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47831+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47832+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47833+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47834+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47835+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47836+4 4 4 4 4 4
47837+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47838+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47839+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47840+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47841+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
47842+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
47843+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47844+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47845+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47846+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47847+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47848+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47849+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47850+4 4 4 4 4 4
47851+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47852+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47853+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47854+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47855+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
47856+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
47857+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47858+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47859+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47860+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47861+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47862+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47863+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47864+4 4 4 4 4 4
47865diff --git a/drivers/video/mb862xx/mb862xxfb_accel.c b/drivers/video/mb862xx/mb862xxfb_accel.c
47866index fe92eed..106e085 100644
47867--- a/drivers/video/mb862xx/mb862xxfb_accel.c
47868+++ b/drivers/video/mb862xx/mb862xxfb_accel.c
47869@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
47870 struct mb862xxfb_par *par = info->par;
47871
47872 if (info->var.bits_per_pixel == 32) {
47873- info->fbops->fb_fillrect = cfb_fillrect;
47874- info->fbops->fb_copyarea = cfb_copyarea;
47875- info->fbops->fb_imageblit = cfb_imageblit;
47876+ pax_open_kernel();
47877+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
47878+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
47879+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
47880+ pax_close_kernel();
47881 } else {
47882 outreg(disp, GC_L0EM, 3);
47883- info->fbops->fb_fillrect = mb86290fb_fillrect;
47884- info->fbops->fb_copyarea = mb86290fb_copyarea;
47885- info->fbops->fb_imageblit = mb86290fb_imageblit;
47886+ pax_open_kernel();
47887+ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
47888+ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
47889+ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
47890+ pax_close_kernel();
47891 }
47892 outreg(draw, GDC_REG_DRAW_BASE, 0);
47893 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
47894diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
47895index ff22871..b129bed 100644
47896--- a/drivers/video/nvidia/nvidia.c
47897+++ b/drivers/video/nvidia/nvidia.c
47898@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
47899 info->fix.line_length = (info->var.xres_virtual *
47900 info->var.bits_per_pixel) >> 3;
47901 if (info->var.accel_flags) {
47902- info->fbops->fb_imageblit = nvidiafb_imageblit;
47903- info->fbops->fb_fillrect = nvidiafb_fillrect;
47904- info->fbops->fb_copyarea = nvidiafb_copyarea;
47905- info->fbops->fb_sync = nvidiafb_sync;
47906+ pax_open_kernel();
47907+ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
47908+ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
47909+ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
47910+ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
47911+ pax_close_kernel();
47912 info->pixmap.scan_align = 4;
47913 info->flags &= ~FBINFO_HWACCEL_DISABLED;
47914 info->flags |= FBINFO_READS_FAST;
47915 NVResetGraphics(info);
47916 } else {
47917- info->fbops->fb_imageblit = cfb_imageblit;
47918- info->fbops->fb_fillrect = cfb_fillrect;
47919- info->fbops->fb_copyarea = cfb_copyarea;
47920- info->fbops->fb_sync = NULL;
47921+ pax_open_kernel();
47922+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
47923+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
47924+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
47925+ *(void **)&info->fbops->fb_sync = NULL;
47926+ pax_close_kernel();
47927 info->pixmap.scan_align = 1;
47928 info->flags |= FBINFO_HWACCEL_DISABLED;
47929 info->flags &= ~FBINFO_READS_FAST;
47930@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
47931 info->pixmap.size = 8 * 1024;
47932 info->pixmap.flags = FB_PIXMAP_SYSTEM;
47933
47934- if (!hwcur)
47935- info->fbops->fb_cursor = NULL;
47936+ if (!hwcur) {
47937+ pax_open_kernel();
47938+ *(void **)&info->fbops->fb_cursor = NULL;
47939+ pax_close_kernel();
47940+ }
47941
47942 info->var.accel_flags = (!noaccel);
47943
47944diff --git a/drivers/video/output.c b/drivers/video/output.c
47945index 0d6f2cd..6285b97 100644
47946--- a/drivers/video/output.c
47947+++ b/drivers/video/output.c
47948@@ -97,7 +97,7 @@ struct output_device *video_output_register(const char *name,
47949 new_dev->props = op;
47950 new_dev->dev.class = &video_output_class;
47951 new_dev->dev.parent = dev;
47952- dev_set_name(&new_dev->dev, name);
47953+ dev_set_name(&new_dev->dev, "%s", name);
47954 dev_set_drvdata(&new_dev->dev, devdata);
47955 ret_code = device_register(&new_dev->dev);
47956 if (ret_code) {
47957diff --git a/drivers/video/s1d13xxxfb.c b/drivers/video/s1d13xxxfb.c
47958index 05c2dc3..ea1f391 100644
47959--- a/drivers/video/s1d13xxxfb.c
47960+++ b/drivers/video/s1d13xxxfb.c
47961@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
47962
47963 switch(prod_id) {
47964 case S1D13506_PROD_ID: /* activate acceleration */
47965- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
47966- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
47967+ pax_open_kernel();
47968+ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
47969+ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
47970+ pax_close_kernel();
47971 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
47972 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
47973 break;
47974diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
47975index b2b33fc..f9f4658 100644
47976--- a/drivers/video/smscufx.c
47977+++ b/drivers/video/smscufx.c
47978@@ -1175,7 +1175,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
47979 fb_deferred_io_cleanup(info);
47980 kfree(info->fbdefio);
47981 info->fbdefio = NULL;
47982- info->fbops->fb_mmap = ufx_ops_mmap;
47983+ pax_open_kernel();
47984+ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
47985+ pax_close_kernel();
47986 }
47987
47988 pr_debug("released /dev/fb%d user=%d count=%d",
47989diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
47990index ec03e72..f578436 100644
47991--- a/drivers/video/udlfb.c
47992+++ b/drivers/video/udlfb.c
47993@@ -623,11 +623,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
47994 dlfb_urb_completion(urb);
47995
47996 error:
47997- atomic_add(bytes_sent, &dev->bytes_sent);
47998- atomic_add(bytes_identical, &dev->bytes_identical);
47999- atomic_add(width*height*2, &dev->bytes_rendered);
48000+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
48001+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
48002+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
48003 end_cycles = get_cycles();
48004- atomic_add(((unsigned int) ((end_cycles - start_cycles)
48005+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
48006 >> 10)), /* Kcycles */
48007 &dev->cpu_kcycles_used);
48008
48009@@ -748,11 +748,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
48010 dlfb_urb_completion(urb);
48011
48012 error:
48013- atomic_add(bytes_sent, &dev->bytes_sent);
48014- atomic_add(bytes_identical, &dev->bytes_identical);
48015- atomic_add(bytes_rendered, &dev->bytes_rendered);
48016+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
48017+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
48018+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
48019 end_cycles = get_cycles();
48020- atomic_add(((unsigned int) ((end_cycles - start_cycles)
48021+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
48022 >> 10)), /* Kcycles */
48023 &dev->cpu_kcycles_used);
48024 }
48025@@ -993,7 +993,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
48026 fb_deferred_io_cleanup(info);
48027 kfree(info->fbdefio);
48028 info->fbdefio = NULL;
48029- info->fbops->fb_mmap = dlfb_ops_mmap;
48030+ pax_open_kernel();
48031+ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
48032+ pax_close_kernel();
48033 }
48034
48035 pr_warn("released /dev/fb%d user=%d count=%d\n",
48036@@ -1376,7 +1378,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
48037 struct fb_info *fb_info = dev_get_drvdata(fbdev);
48038 struct dlfb_data *dev = fb_info->par;
48039 return snprintf(buf, PAGE_SIZE, "%u\n",
48040- atomic_read(&dev->bytes_rendered));
48041+ atomic_read_unchecked(&dev->bytes_rendered));
48042 }
48043
48044 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
48045@@ -1384,7 +1386,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
48046 struct fb_info *fb_info = dev_get_drvdata(fbdev);
48047 struct dlfb_data *dev = fb_info->par;
48048 return snprintf(buf, PAGE_SIZE, "%u\n",
48049- atomic_read(&dev->bytes_identical));
48050+ atomic_read_unchecked(&dev->bytes_identical));
48051 }
48052
48053 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
48054@@ -1392,7 +1394,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
48055 struct fb_info *fb_info = dev_get_drvdata(fbdev);
48056 struct dlfb_data *dev = fb_info->par;
48057 return snprintf(buf, PAGE_SIZE, "%u\n",
48058- atomic_read(&dev->bytes_sent));
48059+ atomic_read_unchecked(&dev->bytes_sent));
48060 }
48061
48062 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
48063@@ -1400,7 +1402,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
48064 struct fb_info *fb_info = dev_get_drvdata(fbdev);
48065 struct dlfb_data *dev = fb_info->par;
48066 return snprintf(buf, PAGE_SIZE, "%u\n",
48067- atomic_read(&dev->cpu_kcycles_used));
48068+ atomic_read_unchecked(&dev->cpu_kcycles_used));
48069 }
48070
48071 static ssize_t edid_show(
48072@@ -1460,10 +1462,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
48073 struct fb_info *fb_info = dev_get_drvdata(fbdev);
48074 struct dlfb_data *dev = fb_info->par;
48075
48076- atomic_set(&dev->bytes_rendered, 0);
48077- atomic_set(&dev->bytes_identical, 0);
48078- atomic_set(&dev->bytes_sent, 0);
48079- atomic_set(&dev->cpu_kcycles_used, 0);
48080+ atomic_set_unchecked(&dev->bytes_rendered, 0);
48081+ atomic_set_unchecked(&dev->bytes_identical, 0);
48082+ atomic_set_unchecked(&dev->bytes_sent, 0);
48083+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
48084
48085 return count;
48086 }
48087diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
48088index e328a61..1b08ecb 100644
48089--- a/drivers/video/uvesafb.c
48090+++ b/drivers/video/uvesafb.c
48091@@ -19,6 +19,7 @@
48092 #include <linux/io.h>
48093 #include <linux/mutex.h>
48094 #include <linux/slab.h>
48095+#include <linux/moduleloader.h>
48096 #include <video/edid.h>
48097 #include <video/uvesafb.h>
48098 #ifdef CONFIG_X86
48099@@ -569,10 +570,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
48100 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
48101 par->pmi_setpal = par->ypan = 0;
48102 } else {
48103+
48104+#ifdef CONFIG_PAX_KERNEXEC
48105+#ifdef CONFIG_MODULES
48106+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
48107+#endif
48108+ if (!par->pmi_code) {
48109+ par->pmi_setpal = par->ypan = 0;
48110+ return 0;
48111+ }
48112+#endif
48113+
48114 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
48115 + task->t.regs.edi);
48116+
48117+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
48118+ pax_open_kernel();
48119+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
48120+ pax_close_kernel();
48121+
48122+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
48123+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
48124+#else
48125 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
48126 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
48127+#endif
48128+
48129 printk(KERN_INFO "uvesafb: protected mode interface info at "
48130 "%04x:%04x\n",
48131 (u16)task->t.regs.es, (u16)task->t.regs.edi);
48132@@ -817,13 +840,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
48133 par->ypan = ypan;
48134
48135 if (par->pmi_setpal || par->ypan) {
48136+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
48137 if (__supported_pte_mask & _PAGE_NX) {
48138 par->pmi_setpal = par->ypan = 0;
48139 printk(KERN_WARNING "uvesafb: NX protection is actively."
48140 "We have better not to use the PMI.\n");
48141- } else {
48142+ } else
48143+#endif
48144 uvesafb_vbe_getpmi(task, par);
48145- }
48146 }
48147 #else
48148 /* The protected mode interface is not available on non-x86. */
48149@@ -1457,8 +1481,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
48150 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
48151
48152 /* Disable blanking if the user requested so. */
48153- if (!blank)
48154- info->fbops->fb_blank = NULL;
48155+ if (!blank) {
48156+ pax_open_kernel();
48157+ *(void **)&info->fbops->fb_blank = NULL;
48158+ pax_close_kernel();
48159+ }
48160
48161 /*
48162 * Find out how much IO memory is required for the mode with
48163@@ -1534,8 +1561,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
48164 info->flags = FBINFO_FLAG_DEFAULT |
48165 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
48166
48167- if (!par->ypan)
48168- info->fbops->fb_pan_display = NULL;
48169+ if (!par->ypan) {
48170+ pax_open_kernel();
48171+ *(void **)&info->fbops->fb_pan_display = NULL;
48172+ pax_close_kernel();
48173+ }
48174 }
48175
48176 static void uvesafb_init_mtrr(struct fb_info *info)
48177@@ -1836,6 +1866,11 @@ out:
48178 if (par->vbe_modes)
48179 kfree(par->vbe_modes);
48180
48181+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
48182+ if (par->pmi_code)
48183+ module_free_exec(NULL, par->pmi_code);
48184+#endif
48185+
48186 framebuffer_release(info);
48187 return err;
48188 }
48189@@ -1862,6 +1897,12 @@ static int uvesafb_remove(struct platform_device *dev)
48190 kfree(par->vbe_state_orig);
48191 if (par->vbe_state_saved)
48192 kfree(par->vbe_state_saved);
48193+
48194+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
48195+ if (par->pmi_code)
48196+ module_free_exec(NULL, par->pmi_code);
48197+#endif
48198+
48199 }
48200
48201 framebuffer_release(info);
48202diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
48203index 501b340..d80aa17 100644
48204--- a/drivers/video/vesafb.c
48205+++ b/drivers/video/vesafb.c
48206@@ -9,6 +9,7 @@
48207 */
48208
48209 #include <linux/module.h>
48210+#include <linux/moduleloader.h>
48211 #include <linux/kernel.h>
48212 #include <linux/errno.h>
48213 #include <linux/string.h>
48214@@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
48215 static int vram_total __initdata; /* Set total amount of memory */
48216 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
48217 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
48218-static void (*pmi_start)(void) __read_mostly;
48219-static void (*pmi_pal) (void) __read_mostly;
48220+static void (*pmi_start)(void) __read_only;
48221+static void (*pmi_pal) (void) __read_only;
48222 static int depth __read_mostly;
48223 static int vga_compat __read_mostly;
48224 /* --------------------------------------------------------------------- */
48225@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
48226 unsigned int size_vmode;
48227 unsigned int size_remap;
48228 unsigned int size_total;
48229+ void *pmi_code = NULL;
48230
48231 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
48232 return -ENODEV;
48233@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
48234 size_remap = size_total;
48235 vesafb_fix.smem_len = size_remap;
48236
48237-#ifndef __i386__
48238- screen_info.vesapm_seg = 0;
48239-#endif
48240-
48241 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
48242 printk(KERN_WARNING
48243 "vesafb: cannot reserve video memory at 0x%lx\n",
48244@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
48245 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
48246 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
48247
48248+#ifdef __i386__
48249+
48250+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
48251+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
48252+ if (!pmi_code)
48253+#elif !defined(CONFIG_PAX_KERNEXEC)
48254+ if (0)
48255+#endif
48256+
48257+#endif
48258+ screen_info.vesapm_seg = 0;
48259+
48260 if (screen_info.vesapm_seg) {
48261- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
48262- screen_info.vesapm_seg,screen_info.vesapm_off);
48263+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
48264+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
48265 }
48266
48267 if (screen_info.vesapm_seg < 0xc000)
48268@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
48269
48270 if (ypan || pmi_setpal) {
48271 unsigned short *pmi_base;
48272+
48273 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
48274- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
48275- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
48276+
48277+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
48278+ pax_open_kernel();
48279+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
48280+#else
48281+ pmi_code = pmi_base;
48282+#endif
48283+
48284+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
48285+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
48286+
48287+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
48288+ pmi_start = ktva_ktla(pmi_start);
48289+ pmi_pal = ktva_ktla(pmi_pal);
48290+ pax_close_kernel();
48291+#endif
48292+
48293 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
48294 if (pmi_base[3]) {
48295 printk(KERN_INFO "vesafb: pmi: ports = ");
48296@@ -472,8 +498,11 @@ static int __init vesafb_probe(struct platform_device *dev)
48297 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
48298 (ypan ? FBINFO_HWACCEL_YPAN : 0);
48299
48300- if (!ypan)
48301- info->fbops->fb_pan_display = NULL;
48302+ if (!ypan) {
48303+ pax_open_kernel();
48304+ *(void **)&info->fbops->fb_pan_display = NULL;
48305+ pax_close_kernel();
48306+ }
48307
48308 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
48309 err = -ENOMEM;
48310@@ -488,6 +517,11 @@ static int __init vesafb_probe(struct platform_device *dev)
48311 info->node, info->fix.id);
48312 return 0;
48313 err:
48314+
48315+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
48316+ module_free_exec(NULL, pmi_code);
48317+#endif
48318+
48319 if (info->screen_base)
48320 iounmap(info->screen_base);
48321 framebuffer_release(info);
48322diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
48323index 88714ae..16c2e11 100644
48324--- a/drivers/video/via/via_clock.h
48325+++ b/drivers/video/via/via_clock.h
48326@@ -56,7 +56,7 @@ struct via_clock {
48327
48328 void (*set_engine_pll_state)(u8 state);
48329 void (*set_engine_pll)(struct via_pll_config config);
48330-};
48331+} __no_const;
48332
48333
48334 static inline u32 get_pll_internal_frequency(u32 ref_freq,
48335diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
48336index fef20db..d28b1ab 100644
48337--- a/drivers/xen/xenfs/xenstored.c
48338+++ b/drivers/xen/xenfs/xenstored.c
48339@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
48340 static int xsd_kva_open(struct inode *inode, struct file *file)
48341 {
48342 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
48343+#ifdef CONFIG_GRKERNSEC_HIDESYM
48344+ NULL);
48345+#else
48346 xen_store_interface);
48347+#endif
48348+
48349 if (!file->private_data)
48350 return -ENOMEM;
48351 return 0;
48352diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
48353index 055562c..fdfb10d 100644
48354--- a/fs/9p/vfs_addr.c
48355+++ b/fs/9p/vfs_addr.c
48356@@ -186,7 +186,7 @@ static int v9fs_vfs_writepage_locked(struct page *page)
48357
48358 retval = v9fs_file_write_internal(inode,
48359 v9inode->writeback_fid,
48360- (__force const char __user *)buffer,
48361+ (const char __force_user *)buffer,
48362 len, &offset, 0);
48363 if (retval > 0)
48364 retval = 0;
48365diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
48366index d86edc8..40ff2fb 100644
48367--- a/fs/9p/vfs_inode.c
48368+++ b/fs/9p/vfs_inode.c
48369@@ -1314,7 +1314,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
48370 void
48371 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
48372 {
48373- char *s = nd_get_link(nd);
48374+ const char *s = nd_get_link(nd);
48375
48376 p9_debug(P9_DEBUG_VFS, " %s %s\n",
48377 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
48378diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
48379index 370b24c..ff0be7b 100644
48380--- a/fs/Kconfig.binfmt
48381+++ b/fs/Kconfig.binfmt
48382@@ -103,7 +103,7 @@ config HAVE_AOUT
48383
48384 config BINFMT_AOUT
48385 tristate "Kernel support for a.out and ECOFF binaries"
48386- depends on HAVE_AOUT
48387+ depends on HAVE_AOUT && BROKEN
48388 ---help---
48389 A.out (Assembler.OUTput) is a set of formats for libraries and
48390 executables used in the earliest versions of UNIX. Linux used
48391diff --git a/fs/aio.c b/fs/aio.c
48392index 2bbcacf..8614116 100644
48393--- a/fs/aio.c
48394+++ b/fs/aio.c
48395@@ -160,7 +160,7 @@ static int aio_setup_ring(struct kioctx *ctx)
48396 size += sizeof(struct io_event) * nr_events;
48397 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
48398
48399- if (nr_pages < 0)
48400+ if (nr_pages <= 0)
48401 return -EINVAL;
48402
48403 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
48404@@ -950,6 +950,7 @@ static ssize_t aio_rw_vect_retry(struct kiocb *iocb, int rw, aio_rw_op *rw_op)
48405 static ssize_t aio_setup_vectored_rw(int rw, struct kiocb *kiocb, bool compat)
48406 {
48407 ssize_t ret;
48408+ struct iovec iovstack;
48409
48410 kiocb->ki_nr_segs = kiocb->ki_nbytes;
48411
48412@@ -957,17 +958,22 @@ static ssize_t aio_setup_vectored_rw(int rw, struct kiocb *kiocb, bool compat)
48413 if (compat)
48414 ret = compat_rw_copy_check_uvector(rw,
48415 (struct compat_iovec __user *)kiocb->ki_buf,
48416- kiocb->ki_nr_segs, 1, &kiocb->ki_inline_vec,
48417+ kiocb->ki_nr_segs, 1, &iovstack,
48418 &kiocb->ki_iovec);
48419 else
48420 #endif
48421 ret = rw_copy_check_uvector(rw,
48422 (struct iovec __user *)kiocb->ki_buf,
48423- kiocb->ki_nr_segs, 1, &kiocb->ki_inline_vec,
48424+ kiocb->ki_nr_segs, 1, &iovstack,
48425 &kiocb->ki_iovec);
48426 if (ret < 0)
48427 return ret;
48428
48429+ if (kiocb->ki_iovec == &iovstack) {
48430+ kiocb->ki_inline_vec = iovstack;
48431+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
48432+ }
48433+
48434 /* ki_nbytes now reflect bytes instead of segs */
48435 kiocb->ki_nbytes = ret;
48436 return 0;
48437diff --git a/fs/attr.c b/fs/attr.c
48438index 1449adb..a2038c2 100644
48439--- a/fs/attr.c
48440+++ b/fs/attr.c
48441@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
48442 unsigned long limit;
48443
48444 limit = rlimit(RLIMIT_FSIZE);
48445+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
48446 if (limit != RLIM_INFINITY && offset > limit)
48447 goto out_sig;
48448 if (offset > inode->i_sb->s_maxbytes)
48449diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
48450index 3db70da..7aeec5b 100644
48451--- a/fs/autofs4/waitq.c
48452+++ b/fs/autofs4/waitq.c
48453@@ -59,7 +59,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
48454 {
48455 unsigned long sigpipe, flags;
48456 mm_segment_t fs;
48457- const char *data = (const char *)addr;
48458+ const char __user *data = (const char __force_user *)addr;
48459 ssize_t wr = 0;
48460
48461 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
48462@@ -346,6 +346,10 @@ static int validate_request(struct autofs_wait_queue **wait,
48463 return 1;
48464 }
48465
48466+#ifdef CONFIG_GRKERNSEC_HIDESYM
48467+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
48468+#endif
48469+
48470 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
48471 enum autofs_notify notify)
48472 {
48473@@ -379,7 +383,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
48474
48475 /* If this is a direct mount request create a dummy name */
48476 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
48477+#ifdef CONFIG_GRKERNSEC_HIDESYM
48478+ /* this name does get written to userland via autofs4_write() */
48479+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
48480+#else
48481 qstr.len = sprintf(name, "%p", dentry);
48482+#endif
48483 else {
48484 qstr.len = autofs4_getpath(sbi, dentry, &name);
48485 if (!qstr.len) {
48486diff --git a/fs/befs/endian.h b/fs/befs/endian.h
48487index 2722387..c8dd2a7 100644
48488--- a/fs/befs/endian.h
48489+++ b/fs/befs/endian.h
48490@@ -11,7 +11,7 @@
48491
48492 #include <asm/byteorder.h>
48493
48494-static inline u64
48495+static inline u64 __intentional_overflow(-1)
48496 fs64_to_cpu(const struct super_block *sb, fs64 n)
48497 {
48498 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
48499@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
48500 return (__force fs64)cpu_to_be64(n);
48501 }
48502
48503-static inline u32
48504+static inline u32 __intentional_overflow(-1)
48505 fs32_to_cpu(const struct super_block *sb, fs32 n)
48506 {
48507 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
48508diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
48509index f95dddc..b1e2c1c 100644
48510--- a/fs/befs/linuxvfs.c
48511+++ b/fs/befs/linuxvfs.c
48512@@ -510,7 +510,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
48513 {
48514 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
48515 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
48516- char *link = nd_get_link(nd);
48517+ const char *link = nd_get_link(nd);
48518 if (!IS_ERR(link))
48519 kfree(link);
48520 }
48521diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
48522index bce8769..7fc7544 100644
48523--- a/fs/binfmt_aout.c
48524+++ b/fs/binfmt_aout.c
48525@@ -16,6 +16,7 @@
48526 #include <linux/string.h>
48527 #include <linux/fs.h>
48528 #include <linux/file.h>
48529+#include <linux/security.h>
48530 #include <linux/stat.h>
48531 #include <linux/fcntl.h>
48532 #include <linux/ptrace.h>
48533@@ -59,6 +60,8 @@ static int aout_core_dump(struct coredump_params *cprm)
48534 #endif
48535 # define START_STACK(u) ((void __user *)u.start_stack)
48536
48537+ memset(&dump, 0, sizeof(dump));
48538+
48539 fs = get_fs();
48540 set_fs(KERNEL_DS);
48541 has_dumped = 1;
48542@@ -69,10 +72,12 @@ static int aout_core_dump(struct coredump_params *cprm)
48543
48544 /* If the size of the dump file exceeds the rlimit, then see what would happen
48545 if we wrote the stack, but not the data area. */
48546+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
48547 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
48548 dump.u_dsize = 0;
48549
48550 /* Make sure we have enough room to write the stack and data areas. */
48551+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
48552 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
48553 dump.u_ssize = 0;
48554
48555@@ -233,6 +238,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
48556 rlim = rlimit(RLIMIT_DATA);
48557 if (rlim >= RLIM_INFINITY)
48558 rlim = ~0;
48559+
48560+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
48561 if (ex.a_data + ex.a_bss > rlim)
48562 return -ENOMEM;
48563
48564@@ -267,6 +274,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
48565
48566 install_exec_creds(bprm);
48567
48568+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
48569+ current->mm->pax_flags = 0UL;
48570+#endif
48571+
48572+#ifdef CONFIG_PAX_PAGEEXEC
48573+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
48574+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
48575+
48576+#ifdef CONFIG_PAX_EMUTRAMP
48577+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
48578+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
48579+#endif
48580+
48581+#ifdef CONFIG_PAX_MPROTECT
48582+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
48583+ current->mm->pax_flags |= MF_PAX_MPROTECT;
48584+#endif
48585+
48586+ }
48587+#endif
48588+
48589 if (N_MAGIC(ex) == OMAGIC) {
48590 unsigned long text_addr, map_size;
48591 loff_t pos;
48592@@ -324,7 +352,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
48593 }
48594
48595 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
48596- PROT_READ | PROT_WRITE | PROT_EXEC,
48597+ PROT_READ | PROT_WRITE,
48598 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
48599 fd_offset + ex.a_text);
48600 if (error != N_DATADDR(ex)) {
48601diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
48602index f8a0b0e..8186af0 100644
48603--- a/fs/binfmt_elf.c
48604+++ b/fs/binfmt_elf.c
48605@@ -34,6 +34,7 @@
48606 #include <linux/utsname.h>
48607 #include <linux/coredump.h>
48608 #include <linux/sched.h>
48609+#include <linux/xattr.h>
48610 #include <asm/uaccess.h>
48611 #include <asm/param.h>
48612 #include <asm/page.h>
48613@@ -60,6 +61,10 @@ static int elf_core_dump(struct coredump_params *cprm);
48614 #define elf_core_dump NULL
48615 #endif
48616
48617+#ifdef CONFIG_PAX_MPROTECT
48618+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
48619+#endif
48620+
48621 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
48622 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
48623 #else
48624@@ -79,6 +84,11 @@ static struct linux_binfmt elf_format = {
48625 .load_binary = load_elf_binary,
48626 .load_shlib = load_elf_library,
48627 .core_dump = elf_core_dump,
48628+
48629+#ifdef CONFIG_PAX_MPROTECT
48630+ .handle_mprotect= elf_handle_mprotect,
48631+#endif
48632+
48633 .min_coredump = ELF_EXEC_PAGESIZE,
48634 };
48635
48636@@ -86,6 +96,8 @@ static struct linux_binfmt elf_format = {
48637
48638 static int set_brk(unsigned long start, unsigned long end)
48639 {
48640+ unsigned long e = end;
48641+
48642 start = ELF_PAGEALIGN(start);
48643 end = ELF_PAGEALIGN(end);
48644 if (end > start) {
48645@@ -94,7 +106,7 @@ static int set_brk(unsigned long start, unsigned long end)
48646 if (BAD_ADDR(addr))
48647 return addr;
48648 }
48649- current->mm->start_brk = current->mm->brk = end;
48650+ current->mm->start_brk = current->mm->brk = e;
48651 return 0;
48652 }
48653
48654@@ -155,12 +167,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
48655 elf_addr_t __user *u_rand_bytes;
48656 const char *k_platform = ELF_PLATFORM;
48657 const char *k_base_platform = ELF_BASE_PLATFORM;
48658- unsigned char k_rand_bytes[16];
48659+ u32 k_rand_bytes[4];
48660 int items;
48661 elf_addr_t *elf_info;
48662 int ei_index = 0;
48663 const struct cred *cred = current_cred();
48664 struct vm_area_struct *vma;
48665+ unsigned long saved_auxv[AT_VECTOR_SIZE];
48666
48667 /*
48668 * In some cases (e.g. Hyper-Threading), we want to avoid L1
48669@@ -202,8 +215,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
48670 * Generate 16 random bytes for userspace PRNG seeding.
48671 */
48672 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
48673- u_rand_bytes = (elf_addr_t __user *)
48674- STACK_ALLOC(p, sizeof(k_rand_bytes));
48675+ prandom_seed(k_rand_bytes[0] ^ prandom_u32());
48676+ prandom_seed(k_rand_bytes[1] ^ prandom_u32());
48677+ prandom_seed(k_rand_bytes[2] ^ prandom_u32());
48678+ prandom_seed(k_rand_bytes[3] ^ prandom_u32());
48679+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
48680+ u_rand_bytes = (elf_addr_t __user *) p;
48681 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
48682 return -EFAULT;
48683
48684@@ -318,9 +335,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
48685 return -EFAULT;
48686 current->mm->env_end = p;
48687
48688+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
48689+
48690 /* Put the elf_info on the stack in the right place. */
48691 sp = (elf_addr_t __user *)envp + 1;
48692- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
48693+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
48694 return -EFAULT;
48695 return 0;
48696 }
48697@@ -388,15 +407,14 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
48698 an ELF header */
48699
48700 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
48701- struct file *interpreter, unsigned long *interp_map_addr,
48702- unsigned long no_base)
48703+ struct file *interpreter, unsigned long no_base)
48704 {
48705 struct elf_phdr *elf_phdata;
48706 struct elf_phdr *eppnt;
48707- unsigned long load_addr = 0;
48708+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
48709 int load_addr_set = 0;
48710 unsigned long last_bss = 0, elf_bss = 0;
48711- unsigned long error = ~0UL;
48712+ unsigned long error = -EINVAL;
48713 unsigned long total_size;
48714 int retval, i, size;
48715
48716@@ -442,6 +460,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
48717 goto out_close;
48718 }
48719
48720+#ifdef CONFIG_PAX_SEGMEXEC
48721+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
48722+ pax_task_size = SEGMEXEC_TASK_SIZE;
48723+#endif
48724+
48725 eppnt = elf_phdata;
48726 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
48727 if (eppnt->p_type == PT_LOAD) {
48728@@ -465,8 +488,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
48729 map_addr = elf_map(interpreter, load_addr + vaddr,
48730 eppnt, elf_prot, elf_type, total_size);
48731 total_size = 0;
48732- if (!*interp_map_addr)
48733- *interp_map_addr = map_addr;
48734 error = map_addr;
48735 if (BAD_ADDR(map_addr))
48736 goto out_close;
48737@@ -485,8 +506,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
48738 k = load_addr + eppnt->p_vaddr;
48739 if (BAD_ADDR(k) ||
48740 eppnt->p_filesz > eppnt->p_memsz ||
48741- eppnt->p_memsz > TASK_SIZE ||
48742- TASK_SIZE - eppnt->p_memsz < k) {
48743+ eppnt->p_memsz > pax_task_size ||
48744+ pax_task_size - eppnt->p_memsz < k) {
48745 error = -ENOMEM;
48746 goto out_close;
48747 }
48748@@ -538,6 +559,315 @@ out:
48749 return error;
48750 }
48751
48752+#ifdef CONFIG_PAX_PT_PAX_FLAGS
48753+#ifdef CONFIG_PAX_SOFTMODE
48754+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
48755+{
48756+ unsigned long pax_flags = 0UL;
48757+
48758+#ifdef CONFIG_PAX_PAGEEXEC
48759+ if (elf_phdata->p_flags & PF_PAGEEXEC)
48760+ pax_flags |= MF_PAX_PAGEEXEC;
48761+#endif
48762+
48763+#ifdef CONFIG_PAX_SEGMEXEC
48764+ if (elf_phdata->p_flags & PF_SEGMEXEC)
48765+ pax_flags |= MF_PAX_SEGMEXEC;
48766+#endif
48767+
48768+#ifdef CONFIG_PAX_EMUTRAMP
48769+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
48770+ pax_flags |= MF_PAX_EMUTRAMP;
48771+#endif
48772+
48773+#ifdef CONFIG_PAX_MPROTECT
48774+ if (elf_phdata->p_flags & PF_MPROTECT)
48775+ pax_flags |= MF_PAX_MPROTECT;
48776+#endif
48777+
48778+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
48779+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
48780+ pax_flags |= MF_PAX_RANDMMAP;
48781+#endif
48782+
48783+ return pax_flags;
48784+}
48785+#endif
48786+
48787+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
48788+{
48789+ unsigned long pax_flags = 0UL;
48790+
48791+#ifdef CONFIG_PAX_PAGEEXEC
48792+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
48793+ pax_flags |= MF_PAX_PAGEEXEC;
48794+#endif
48795+
48796+#ifdef CONFIG_PAX_SEGMEXEC
48797+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
48798+ pax_flags |= MF_PAX_SEGMEXEC;
48799+#endif
48800+
48801+#ifdef CONFIG_PAX_EMUTRAMP
48802+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
48803+ pax_flags |= MF_PAX_EMUTRAMP;
48804+#endif
48805+
48806+#ifdef CONFIG_PAX_MPROTECT
48807+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
48808+ pax_flags |= MF_PAX_MPROTECT;
48809+#endif
48810+
48811+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
48812+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
48813+ pax_flags |= MF_PAX_RANDMMAP;
48814+#endif
48815+
48816+ return pax_flags;
48817+}
48818+#endif
48819+
48820+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
48821+#ifdef CONFIG_PAX_SOFTMODE
48822+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
48823+{
48824+ unsigned long pax_flags = 0UL;
48825+
48826+#ifdef CONFIG_PAX_PAGEEXEC
48827+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
48828+ pax_flags |= MF_PAX_PAGEEXEC;
48829+#endif
48830+
48831+#ifdef CONFIG_PAX_SEGMEXEC
48832+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
48833+ pax_flags |= MF_PAX_SEGMEXEC;
48834+#endif
48835+
48836+#ifdef CONFIG_PAX_EMUTRAMP
48837+ if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
48838+ pax_flags |= MF_PAX_EMUTRAMP;
48839+#endif
48840+
48841+#ifdef CONFIG_PAX_MPROTECT
48842+ if (pax_flags_softmode & MF_PAX_MPROTECT)
48843+ pax_flags |= MF_PAX_MPROTECT;
48844+#endif
48845+
48846+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
48847+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
48848+ pax_flags |= MF_PAX_RANDMMAP;
48849+#endif
48850+
48851+ return pax_flags;
48852+}
48853+#endif
48854+
48855+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
48856+{
48857+ unsigned long pax_flags = 0UL;
48858+
48859+#ifdef CONFIG_PAX_PAGEEXEC
48860+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
48861+ pax_flags |= MF_PAX_PAGEEXEC;
48862+#endif
48863+
48864+#ifdef CONFIG_PAX_SEGMEXEC
48865+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
48866+ pax_flags |= MF_PAX_SEGMEXEC;
48867+#endif
48868+
48869+#ifdef CONFIG_PAX_EMUTRAMP
48870+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
48871+ pax_flags |= MF_PAX_EMUTRAMP;
48872+#endif
48873+
48874+#ifdef CONFIG_PAX_MPROTECT
48875+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
48876+ pax_flags |= MF_PAX_MPROTECT;
48877+#endif
48878+
48879+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
48880+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
48881+ pax_flags |= MF_PAX_RANDMMAP;
48882+#endif
48883+
48884+ return pax_flags;
48885+}
48886+#endif
48887+
48888+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
48889+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
48890+{
48891+ unsigned long pax_flags = 0UL;
48892+
48893+#ifdef CONFIG_PAX_EI_PAX
48894+
48895+#ifdef CONFIG_PAX_PAGEEXEC
48896+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
48897+ pax_flags |= MF_PAX_PAGEEXEC;
48898+#endif
48899+
48900+#ifdef CONFIG_PAX_SEGMEXEC
48901+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
48902+ pax_flags |= MF_PAX_SEGMEXEC;
48903+#endif
48904+
48905+#ifdef CONFIG_PAX_EMUTRAMP
48906+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
48907+ pax_flags |= MF_PAX_EMUTRAMP;
48908+#endif
48909+
48910+#ifdef CONFIG_PAX_MPROTECT
48911+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
48912+ pax_flags |= MF_PAX_MPROTECT;
48913+#endif
48914+
48915+#ifdef CONFIG_PAX_ASLR
48916+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
48917+ pax_flags |= MF_PAX_RANDMMAP;
48918+#endif
48919+
48920+#else
48921+
48922+#ifdef CONFIG_PAX_PAGEEXEC
48923+ pax_flags |= MF_PAX_PAGEEXEC;
48924+#endif
48925+
48926+#ifdef CONFIG_PAX_SEGMEXEC
48927+ pax_flags |= MF_PAX_SEGMEXEC;
48928+#endif
48929+
48930+#ifdef CONFIG_PAX_MPROTECT
48931+ pax_flags |= MF_PAX_MPROTECT;
48932+#endif
48933+
48934+#ifdef CONFIG_PAX_RANDMMAP
48935+ if (randomize_va_space)
48936+ pax_flags |= MF_PAX_RANDMMAP;
48937+#endif
48938+
48939+#endif
48940+
48941+ return pax_flags;
48942+}
48943+
48944+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
48945+{
48946+
48947+#ifdef CONFIG_PAX_PT_PAX_FLAGS
48948+ unsigned long i;
48949+
48950+ for (i = 0UL; i < elf_ex->e_phnum; i++)
48951+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
48952+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
48953+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
48954+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
48955+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
48956+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
48957+ return ~0UL;
48958+
48959+#ifdef CONFIG_PAX_SOFTMODE
48960+ if (pax_softmode)
48961+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
48962+ else
48963+#endif
48964+
48965+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
48966+ break;
48967+ }
48968+#endif
48969+
48970+ return ~0UL;
48971+}
48972+
48973+static unsigned long pax_parse_xattr_pax(struct file * const file)
48974+{
48975+
48976+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
48977+ ssize_t xattr_size, i;
48978+ unsigned char xattr_value[sizeof("pemrs") - 1];
48979+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
48980+
48981+ xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
48982+ if (xattr_size <= 0 || xattr_size > sizeof xattr_value)
48983+ return ~0UL;
48984+
48985+ for (i = 0; i < xattr_size; i++)
48986+ switch (xattr_value[i]) {
48987+ default:
48988+ return ~0UL;
48989+
48990+#define parse_flag(option1, option2, flag) \
48991+ case option1: \
48992+ if (pax_flags_hardmode & MF_PAX_##flag) \
48993+ return ~0UL; \
48994+ pax_flags_hardmode |= MF_PAX_##flag; \
48995+ break; \
48996+ case option2: \
48997+ if (pax_flags_softmode & MF_PAX_##flag) \
48998+ return ~0UL; \
48999+ pax_flags_softmode |= MF_PAX_##flag; \
49000+ break;
49001+
49002+ parse_flag('p', 'P', PAGEEXEC);
49003+ parse_flag('e', 'E', EMUTRAMP);
49004+ parse_flag('m', 'M', MPROTECT);
49005+ parse_flag('r', 'R', RANDMMAP);
49006+ parse_flag('s', 'S', SEGMEXEC);
49007+
49008+#undef parse_flag
49009+ }
49010+
49011+ if (pax_flags_hardmode & pax_flags_softmode)
49012+ return ~0UL;
49013+
49014+#ifdef CONFIG_PAX_SOFTMODE
49015+ if (pax_softmode)
49016+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
49017+ else
49018+#endif
49019+
49020+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
49021+#else
49022+ return ~0UL;
49023+#endif
49024+
49025+}
49026+
49027+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
49028+{
49029+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
49030+
49031+ pax_flags = pax_parse_ei_pax(elf_ex);
49032+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
49033+ xattr_pax_flags = pax_parse_xattr_pax(file);
49034+
49035+ if (pt_pax_flags == ~0UL)
49036+ pt_pax_flags = xattr_pax_flags;
49037+ else if (xattr_pax_flags == ~0UL)
49038+ xattr_pax_flags = pt_pax_flags;
49039+ if (pt_pax_flags != xattr_pax_flags)
49040+ return -EINVAL;
49041+ if (pt_pax_flags != ~0UL)
49042+ pax_flags = pt_pax_flags;
49043+
49044+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
49045+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
49046+ if ((__supported_pte_mask & _PAGE_NX))
49047+ pax_flags &= ~MF_PAX_SEGMEXEC;
49048+ else
49049+ pax_flags &= ~MF_PAX_PAGEEXEC;
49050+ }
49051+#endif
49052+
49053+ if (0 > pax_check_flags(&pax_flags))
49054+ return -EINVAL;
49055+
49056+ current->mm->pax_flags = pax_flags;
49057+ return 0;
49058+}
49059+#endif
49060+
49061 /*
49062 * These are the functions used to load ELF style executables and shared
49063 * libraries. There is no binary dependent code anywhere else.
49064@@ -554,6 +884,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
49065 {
49066 unsigned int random_variable = 0;
49067
49068+#ifdef CONFIG_PAX_RANDUSTACK
49069+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
49070+ return stack_top - current->mm->delta_stack;
49071+#endif
49072+
49073 if ((current->flags & PF_RANDOMIZE) &&
49074 !(current->personality & ADDR_NO_RANDOMIZE)) {
49075 random_variable = get_random_int() & STACK_RND_MASK;
49076@@ -572,7 +907,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
49077 unsigned long load_addr = 0, load_bias = 0;
49078 int load_addr_set = 0;
49079 char * elf_interpreter = NULL;
49080- unsigned long error;
49081+ unsigned long error = 0;
49082 struct elf_phdr *elf_ppnt, *elf_phdata;
49083 unsigned long elf_bss, elf_brk;
49084 int retval, i;
49085@@ -582,12 +917,12 @@ static int load_elf_binary(struct linux_binprm *bprm)
49086 unsigned long start_code, end_code, start_data, end_data;
49087 unsigned long reloc_func_desc __maybe_unused = 0;
49088 int executable_stack = EXSTACK_DEFAULT;
49089- unsigned long def_flags = 0;
49090 struct pt_regs *regs = current_pt_regs();
49091 struct {
49092 struct elfhdr elf_ex;
49093 struct elfhdr interp_elf_ex;
49094 } *loc;
49095+ unsigned long pax_task_size = TASK_SIZE;
49096
49097 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
49098 if (!loc) {
49099@@ -723,11 +1058,81 @@ static int load_elf_binary(struct linux_binprm *bprm)
49100 goto out_free_dentry;
49101
49102 /* OK, This is the point of no return */
49103- current->mm->def_flags = def_flags;
49104+
49105+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
49106+ current->mm->pax_flags = 0UL;
49107+#endif
49108+
49109+#ifdef CONFIG_PAX_DLRESOLVE
49110+ current->mm->call_dl_resolve = 0UL;
49111+#endif
49112+
49113+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
49114+ current->mm->call_syscall = 0UL;
49115+#endif
49116+
49117+#ifdef CONFIG_PAX_ASLR
49118+ current->mm->delta_mmap = 0UL;
49119+ current->mm->delta_stack = 0UL;
49120+#endif
49121+
49122+ current->mm->def_flags = 0;
49123+
49124+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
49125+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
49126+ send_sig(SIGKILL, current, 0);
49127+ goto out_free_dentry;
49128+ }
49129+#endif
49130+
49131+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
49132+ pax_set_initial_flags(bprm);
49133+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
49134+ if (pax_set_initial_flags_func)
49135+ (pax_set_initial_flags_func)(bprm);
49136+#endif
49137+
49138+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
49139+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
49140+ current->mm->context.user_cs_limit = PAGE_SIZE;
49141+ current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
49142+ }
49143+#endif
49144+
49145+#ifdef CONFIG_PAX_SEGMEXEC
49146+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
49147+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
49148+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
49149+ pax_task_size = SEGMEXEC_TASK_SIZE;
49150+ current->mm->def_flags |= VM_NOHUGEPAGE;
49151+ }
49152+#endif
49153+
49154+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
49155+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
49156+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
49157+ put_cpu();
49158+ }
49159+#endif
49160
49161 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
49162 may depend on the personality. */
49163 SET_PERSONALITY(loc->elf_ex);
49164+
49165+#ifdef CONFIG_PAX_ASLR
49166+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
49167+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
49168+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
49169+ }
49170+#endif
49171+
49172+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
49173+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
49174+ executable_stack = EXSTACK_DISABLE_X;
49175+ current->personality &= ~READ_IMPLIES_EXEC;
49176+ } else
49177+#endif
49178+
49179 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
49180 current->personality |= READ_IMPLIES_EXEC;
49181
49182@@ -819,6 +1224,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
49183 #else
49184 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
49185 #endif
49186+
49187+#ifdef CONFIG_PAX_RANDMMAP
49188+ /* PaX: randomize base address at the default exe base if requested */
49189+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
49190+#ifdef CONFIG_SPARC64
49191+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
49192+#else
49193+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
49194+#endif
49195+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
49196+ elf_flags |= MAP_FIXED;
49197+ }
49198+#endif
49199+
49200 }
49201
49202 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
49203@@ -851,9 +1270,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
49204 * allowed task size. Note that p_filesz must always be
49205 * <= p_memsz so it is only necessary to check p_memsz.
49206 */
49207- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
49208- elf_ppnt->p_memsz > TASK_SIZE ||
49209- TASK_SIZE - elf_ppnt->p_memsz < k) {
49210+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
49211+ elf_ppnt->p_memsz > pax_task_size ||
49212+ pax_task_size - elf_ppnt->p_memsz < k) {
49213 /* set_brk can never work. Avoid overflows. */
49214 send_sig(SIGKILL, current, 0);
49215 retval = -EINVAL;
49216@@ -892,17 +1311,45 @@ static int load_elf_binary(struct linux_binprm *bprm)
49217 goto out_free_dentry;
49218 }
49219 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
49220- send_sig(SIGSEGV, current, 0);
49221- retval = -EFAULT; /* Nobody gets to see this, but.. */
49222- goto out_free_dentry;
49223+ /*
49224+ * This bss-zeroing can fail if the ELF
49225+ * file specifies odd protections. So
49226+ * we don't check the return value
49227+ */
49228 }
49229
49230+#ifdef CONFIG_PAX_RANDMMAP
49231+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
49232+ unsigned long start, size, flags;
49233+ vm_flags_t vm_flags;
49234+
49235+ start = ELF_PAGEALIGN(elf_brk);
49236+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
49237+ flags = MAP_FIXED | MAP_PRIVATE;
49238+ vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
49239+
49240+ down_write(&current->mm->mmap_sem);
49241+ start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
49242+ retval = -ENOMEM;
49243+ if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
49244+// if (current->personality & ADDR_NO_RANDOMIZE)
49245+// vm_flags |= VM_READ | VM_MAYREAD;
49246+ start = mmap_region(NULL, start, PAGE_ALIGN(size), vm_flags, 0);
49247+ retval = IS_ERR_VALUE(start) ? start : 0;
49248+ }
49249+ up_write(&current->mm->mmap_sem);
49250+ if (retval == 0)
49251+ retval = set_brk(start + size, start + size + PAGE_SIZE);
49252+ if (retval < 0) {
49253+ send_sig(SIGKILL, current, 0);
49254+ goto out_free_dentry;
49255+ }
49256+ }
49257+#endif
49258+
49259 if (elf_interpreter) {
49260- unsigned long interp_map_addr = 0;
49261-
49262 elf_entry = load_elf_interp(&loc->interp_elf_ex,
49263 interpreter,
49264- &interp_map_addr,
49265 load_bias);
49266 if (!IS_ERR((void *)elf_entry)) {
49267 /*
49268@@ -1124,7 +1571,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
49269 * Decide what to dump of a segment, part, all or none.
49270 */
49271 static unsigned long vma_dump_size(struct vm_area_struct *vma,
49272- unsigned long mm_flags)
49273+ unsigned long mm_flags, long signr)
49274 {
49275 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
49276
49277@@ -1162,7 +1609,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
49278 if (vma->vm_file == NULL)
49279 return 0;
49280
49281- if (FILTER(MAPPED_PRIVATE))
49282+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
49283 goto whole;
49284
49285 /*
49286@@ -1387,9 +1834,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
49287 {
49288 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
49289 int i = 0;
49290- do
49291+ do {
49292 i += 2;
49293- while (auxv[i - 2] != AT_NULL);
49294+ } while (auxv[i - 2] != AT_NULL);
49295 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
49296 }
49297
49298@@ -1398,7 +1845,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
49299 {
49300 mm_segment_t old_fs = get_fs();
49301 set_fs(KERNEL_DS);
49302- copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
49303+ copy_siginfo_to_user((user_siginfo_t __force_user *) csigdata, siginfo);
49304 set_fs(old_fs);
49305 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
49306 }
49307@@ -2019,14 +2466,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
49308 }
49309
49310 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
49311- unsigned long mm_flags)
49312+ struct coredump_params *cprm)
49313 {
49314 struct vm_area_struct *vma;
49315 size_t size = 0;
49316
49317 for (vma = first_vma(current, gate_vma); vma != NULL;
49318 vma = next_vma(vma, gate_vma))
49319- size += vma_dump_size(vma, mm_flags);
49320+ size += vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
49321 return size;
49322 }
49323
49324@@ -2119,7 +2566,7 @@ static int elf_core_dump(struct coredump_params *cprm)
49325
49326 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
49327
49328- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
49329+ offset += elf_core_vma_data_size(gate_vma, cprm);
49330 offset += elf_core_extra_data_size();
49331 e_shoff = offset;
49332
49333@@ -2133,10 +2580,12 @@ static int elf_core_dump(struct coredump_params *cprm)
49334 offset = dataoff;
49335
49336 size += sizeof(*elf);
49337+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
49338 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
49339 goto end_coredump;
49340
49341 size += sizeof(*phdr4note);
49342+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
49343 if (size > cprm->limit
49344 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
49345 goto end_coredump;
49346@@ -2150,7 +2599,7 @@ static int elf_core_dump(struct coredump_params *cprm)
49347 phdr.p_offset = offset;
49348 phdr.p_vaddr = vma->vm_start;
49349 phdr.p_paddr = 0;
49350- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
49351+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
49352 phdr.p_memsz = vma->vm_end - vma->vm_start;
49353 offset += phdr.p_filesz;
49354 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
49355@@ -2161,6 +2610,7 @@ static int elf_core_dump(struct coredump_params *cprm)
49356 phdr.p_align = ELF_EXEC_PAGESIZE;
49357
49358 size += sizeof(phdr);
49359+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
49360 if (size > cprm->limit
49361 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
49362 goto end_coredump;
49363@@ -2185,7 +2635,7 @@ static int elf_core_dump(struct coredump_params *cprm)
49364 unsigned long addr;
49365 unsigned long end;
49366
49367- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
49368+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
49369
49370 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
49371 struct page *page;
49372@@ -2194,6 +2644,7 @@ static int elf_core_dump(struct coredump_params *cprm)
49373 page = get_dump_page(addr);
49374 if (page) {
49375 void *kaddr = kmap(page);
49376+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
49377 stop = ((size += PAGE_SIZE) > cprm->limit) ||
49378 !dump_write(cprm->file, kaddr,
49379 PAGE_SIZE);
49380@@ -2211,6 +2662,7 @@ static int elf_core_dump(struct coredump_params *cprm)
49381
49382 if (e_phnum == PN_XNUM) {
49383 size += sizeof(*shdr4extnum);
49384+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
49385 if (size > cprm->limit
49386 || !dump_write(cprm->file, shdr4extnum,
49387 sizeof(*shdr4extnum)))
49388@@ -2231,6 +2683,97 @@ out:
49389
49390 #endif /* CONFIG_ELF_CORE */
49391
49392+#ifdef CONFIG_PAX_MPROTECT
49393+/* PaX: non-PIC ELF libraries need relocations on their executable segments
49394+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
49395+ * we'll remove VM_MAYWRITE for good on RELRO segments.
49396+ *
49397+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
49398+ * basis because we want to allow the common case and not the special ones.
49399+ */
49400+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
49401+{
49402+ struct elfhdr elf_h;
49403+ struct elf_phdr elf_p;
49404+ unsigned long i;
49405+ unsigned long oldflags;
49406+ bool is_textrel_rw, is_textrel_rx, is_relro;
49407+
49408+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
49409+ return;
49410+
49411+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
49412+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
49413+
49414+#ifdef CONFIG_PAX_ELFRELOCS
49415+ /* possible TEXTREL */
49416+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
49417+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
49418+#else
49419+ is_textrel_rw = false;
49420+ is_textrel_rx = false;
49421+#endif
49422+
49423+ /* possible RELRO */
49424+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
49425+
49426+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
49427+ return;
49428+
49429+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
49430+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
49431+
49432+#ifdef CONFIG_PAX_ETEXECRELOCS
49433+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
49434+#else
49435+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
49436+#endif
49437+
49438+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
49439+ !elf_check_arch(&elf_h) ||
49440+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
49441+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
49442+ return;
49443+
49444+ for (i = 0UL; i < elf_h.e_phnum; i++) {
49445+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
49446+ return;
49447+ switch (elf_p.p_type) {
49448+ case PT_DYNAMIC:
49449+ if (!is_textrel_rw && !is_textrel_rx)
49450+ continue;
49451+ i = 0UL;
49452+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
49453+ elf_dyn dyn;
49454+
49455+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
49456+ return;
49457+ if (dyn.d_tag == DT_NULL)
49458+ return;
49459+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
49460+ gr_log_textrel(vma);
49461+ if (is_textrel_rw)
49462+ vma->vm_flags |= VM_MAYWRITE;
49463+ else
49464+ /* PaX: disallow write access after relocs are done, hopefully noone else needs it... */
49465+ vma->vm_flags &= ~VM_MAYWRITE;
49466+ return;
49467+ }
49468+ i++;
49469+ }
49470+ return;
49471+
49472+ case PT_GNU_RELRO:
49473+ if (!is_relro)
49474+ continue;
49475+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
49476+ vma->vm_flags &= ~VM_MAYWRITE;
49477+ return;
49478+ }
49479+ }
49480+}
49481+#endif
49482+
49483 static int __init init_elf_binfmt(void)
49484 {
49485 register_binfmt(&elf_format);
49486diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
49487index d50bbe5..af3b649 100644
49488--- a/fs/binfmt_flat.c
49489+++ b/fs/binfmt_flat.c
49490@@ -566,7 +566,9 @@ static int load_flat_file(struct linux_binprm * bprm,
49491 realdatastart = (unsigned long) -ENOMEM;
49492 printk("Unable to allocate RAM for process data, errno %d\n",
49493 (int)-realdatastart);
49494+ down_write(&current->mm->mmap_sem);
49495 vm_munmap(textpos, text_len);
49496+ up_write(&current->mm->mmap_sem);
49497 ret = realdatastart;
49498 goto err;
49499 }
49500@@ -590,8 +592,10 @@ static int load_flat_file(struct linux_binprm * bprm,
49501 }
49502 if (IS_ERR_VALUE(result)) {
49503 printk("Unable to read data+bss, errno %d\n", (int)-result);
49504+ down_write(&current->mm->mmap_sem);
49505 vm_munmap(textpos, text_len);
49506 vm_munmap(realdatastart, len);
49507+ up_write(&current->mm->mmap_sem);
49508 ret = result;
49509 goto err;
49510 }
49511@@ -653,8 +657,10 @@ static int load_flat_file(struct linux_binprm * bprm,
49512 }
49513 if (IS_ERR_VALUE(result)) {
49514 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
49515+ down_write(&current->mm->mmap_sem);
49516 vm_munmap(textpos, text_len + data_len + extra +
49517 MAX_SHARED_LIBS * sizeof(unsigned long));
49518+ up_write(&current->mm->mmap_sem);
49519 ret = result;
49520 goto err;
49521 }
49522diff --git a/fs/bio.c b/fs/bio.c
49523index 94bbc04..6fe78a4 100644
49524--- a/fs/bio.c
49525+++ b/fs/bio.c
49526@@ -1096,7 +1096,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
49527 /*
49528 * Overflow, abort
49529 */
49530- if (end < start)
49531+ if (end < start || end - start > INT_MAX - nr_pages)
49532 return ERR_PTR(-EINVAL);
49533
49534 nr_pages += end - start;
49535@@ -1230,7 +1230,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
49536 /*
49537 * Overflow, abort
49538 */
49539- if (end < start)
49540+ if (end < start || end - start > INT_MAX - nr_pages)
49541 return ERR_PTR(-EINVAL);
49542
49543 nr_pages += end - start;
49544@@ -1492,7 +1492,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
49545 const int read = bio_data_dir(bio) == READ;
49546 struct bio_map_data *bmd = bio->bi_private;
49547 int i;
49548- char *p = bmd->sgvecs[0].iov_base;
49549+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
49550
49551 bio_for_each_segment_all(bvec, bio, i) {
49552 char *addr = page_address(bvec->bv_page);
49553diff --git a/fs/block_dev.c b/fs/block_dev.c
49554index 2091db8..81dafe9 100644
49555--- a/fs/block_dev.c
49556+++ b/fs/block_dev.c
49557@@ -651,7 +651,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
49558 else if (bdev->bd_contains == bdev)
49559 return true; /* is a whole device which isn't held */
49560
49561- else if (whole->bd_holder == bd_may_claim)
49562+ else if (whole->bd_holder == (void *)bd_may_claim)
49563 return true; /* is a partition of a device that is being partitioned */
49564 else if (whole->bd_holder != NULL)
49565 return false; /* is a partition of a held device */
49566diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
49567index 02fae7f..48da375 100644
49568--- a/fs/btrfs/ctree.c
49569+++ b/fs/btrfs/ctree.c
49570@@ -1076,9 +1076,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
49571 free_extent_buffer(buf);
49572 add_root_to_dirty_list(root);
49573 } else {
49574- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
49575- parent_start = parent->start;
49576- else
49577+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
49578+ if (parent)
49579+ parent_start = parent->start;
49580+ else
49581+ parent_start = 0;
49582+ } else
49583 parent_start = 0;
49584
49585 WARN_ON(trans->transid != btrfs_header_generation(parent));
49586diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
49587index 0f81d67..0ad55fe 100644
49588--- a/fs/btrfs/ioctl.c
49589+++ b/fs/btrfs/ioctl.c
49590@@ -3084,9 +3084,12 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
49591 for (i = 0; i < num_types; i++) {
49592 struct btrfs_space_info *tmp;
49593
49594+ /* Don't copy in more than we allocated */
49595 if (!slot_count)
49596 break;
49597
49598+ slot_count--;
49599+
49600 info = NULL;
49601 rcu_read_lock();
49602 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
49603@@ -3108,10 +3111,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
49604 memcpy(dest, &space, sizeof(space));
49605 dest++;
49606 space_args.total_spaces++;
49607- slot_count--;
49608 }
49609- if (!slot_count)
49610- break;
49611 }
49612 up_read(&info->groups_sem);
49613 }
49614diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
49615index f0857e0..e7023c5 100644
49616--- a/fs/btrfs/super.c
49617+++ b/fs/btrfs/super.c
49618@@ -265,7 +265,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
49619 function, line, errstr);
49620 return;
49621 }
49622- ACCESS_ONCE(trans->transaction->aborted) = errno;
49623+ ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
49624 __btrfs_std_error(root->fs_info, function, line, errno, NULL);
49625 }
49626 /*
49627diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
49628index 622f469..e8d2d55 100644
49629--- a/fs/cachefiles/bind.c
49630+++ b/fs/cachefiles/bind.c
49631@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
49632 args);
49633
49634 /* start by checking things over */
49635- ASSERT(cache->fstop_percent >= 0 &&
49636- cache->fstop_percent < cache->fcull_percent &&
49637+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
49638 cache->fcull_percent < cache->frun_percent &&
49639 cache->frun_percent < 100);
49640
49641- ASSERT(cache->bstop_percent >= 0 &&
49642- cache->bstop_percent < cache->bcull_percent &&
49643+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
49644 cache->bcull_percent < cache->brun_percent &&
49645 cache->brun_percent < 100);
49646
49647diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
49648index 0a1467b..6a53245 100644
49649--- a/fs/cachefiles/daemon.c
49650+++ b/fs/cachefiles/daemon.c
49651@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
49652 if (n > buflen)
49653 return -EMSGSIZE;
49654
49655- if (copy_to_user(_buffer, buffer, n) != 0)
49656+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
49657 return -EFAULT;
49658
49659 return n;
49660@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
49661 if (test_bit(CACHEFILES_DEAD, &cache->flags))
49662 return -EIO;
49663
49664- if (datalen < 0 || datalen > PAGE_SIZE - 1)
49665+ if (datalen > PAGE_SIZE - 1)
49666 return -EOPNOTSUPP;
49667
49668 /* drag the command string into the kernel so we can parse it */
49669@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
49670 if (args[0] != '%' || args[1] != '\0')
49671 return -EINVAL;
49672
49673- if (fstop < 0 || fstop >= cache->fcull_percent)
49674+ if (fstop >= cache->fcull_percent)
49675 return cachefiles_daemon_range_error(cache, args);
49676
49677 cache->fstop_percent = fstop;
49678@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
49679 if (args[0] != '%' || args[1] != '\0')
49680 return -EINVAL;
49681
49682- if (bstop < 0 || bstop >= cache->bcull_percent)
49683+ if (bstop >= cache->bcull_percent)
49684 return cachefiles_daemon_range_error(cache, args);
49685
49686 cache->bstop_percent = bstop;
49687diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
49688index 4938251..7e01445 100644
49689--- a/fs/cachefiles/internal.h
49690+++ b/fs/cachefiles/internal.h
49691@@ -59,7 +59,7 @@ struct cachefiles_cache {
49692 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
49693 struct rb_root active_nodes; /* active nodes (can't be culled) */
49694 rwlock_t active_lock; /* lock for active_nodes */
49695- atomic_t gravecounter; /* graveyard uniquifier */
49696+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
49697 unsigned frun_percent; /* when to stop culling (% files) */
49698 unsigned fcull_percent; /* when to start culling (% files) */
49699 unsigned fstop_percent; /* when to stop allocating (% files) */
49700@@ -171,19 +171,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
49701 * proc.c
49702 */
49703 #ifdef CONFIG_CACHEFILES_HISTOGRAM
49704-extern atomic_t cachefiles_lookup_histogram[HZ];
49705-extern atomic_t cachefiles_mkdir_histogram[HZ];
49706-extern atomic_t cachefiles_create_histogram[HZ];
49707+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
49708+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
49709+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
49710
49711 extern int __init cachefiles_proc_init(void);
49712 extern void cachefiles_proc_cleanup(void);
49713 static inline
49714-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
49715+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
49716 {
49717 unsigned long jif = jiffies - start_jif;
49718 if (jif >= HZ)
49719 jif = HZ - 1;
49720- atomic_inc(&histogram[jif]);
49721+ atomic_inc_unchecked(&histogram[jif]);
49722 }
49723
49724 #else
49725diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
49726index 8c01c5fc..15f982e 100644
49727--- a/fs/cachefiles/namei.c
49728+++ b/fs/cachefiles/namei.c
49729@@ -317,7 +317,7 @@ try_again:
49730 /* first step is to make up a grave dentry in the graveyard */
49731 sprintf(nbuffer, "%08x%08x",
49732 (uint32_t) get_seconds(),
49733- (uint32_t) atomic_inc_return(&cache->gravecounter));
49734+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
49735
49736 /* do the multiway lock magic */
49737 trap = lock_rename(cache->graveyard, dir);
49738diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
49739index eccd339..4c1d995 100644
49740--- a/fs/cachefiles/proc.c
49741+++ b/fs/cachefiles/proc.c
49742@@ -14,9 +14,9 @@
49743 #include <linux/seq_file.h>
49744 #include "internal.h"
49745
49746-atomic_t cachefiles_lookup_histogram[HZ];
49747-atomic_t cachefiles_mkdir_histogram[HZ];
49748-atomic_t cachefiles_create_histogram[HZ];
49749+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
49750+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
49751+atomic_unchecked_t cachefiles_create_histogram[HZ];
49752
49753 /*
49754 * display the latency histogram
49755@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
49756 return 0;
49757 default:
49758 index = (unsigned long) v - 3;
49759- x = atomic_read(&cachefiles_lookup_histogram[index]);
49760- y = atomic_read(&cachefiles_mkdir_histogram[index]);
49761- z = atomic_read(&cachefiles_create_histogram[index]);
49762+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
49763+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
49764+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
49765 if (x == 0 && y == 0 && z == 0)
49766 return 0;
49767
49768diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
49769index 317f9ee..3d24511 100644
49770--- a/fs/cachefiles/rdwr.c
49771+++ b/fs/cachefiles/rdwr.c
49772@@ -966,7 +966,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
49773 old_fs = get_fs();
49774 set_fs(KERNEL_DS);
49775 ret = file->f_op->write(
49776- file, (const void __user *) data, len, &pos);
49777+ file, (const void __force_user *) data, len, &pos);
49778 set_fs(old_fs);
49779 kunmap(page);
49780 file_end_write(file);
49781diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
49782index f02d82b..2632cf86 100644
49783--- a/fs/ceph/dir.c
49784+++ b/fs/ceph/dir.c
49785@@ -243,7 +243,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
49786 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
49787 struct ceph_mds_client *mdsc = fsc->mdsc;
49788 unsigned frag = fpos_frag(filp->f_pos);
49789- int off = fpos_off(filp->f_pos);
49790+ unsigned int off = fpos_off(filp->f_pos);
49791 int err;
49792 u32 ftype;
49793 struct ceph_mds_reply_info_parsed *rinfo;
49794diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
49795index d597483..747901b 100644
49796--- a/fs/cifs/cifs_debug.c
49797+++ b/fs/cifs/cifs_debug.c
49798@@ -284,8 +284,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
49799
49800 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
49801 #ifdef CONFIG_CIFS_STATS2
49802- atomic_set(&totBufAllocCount, 0);
49803- atomic_set(&totSmBufAllocCount, 0);
49804+ atomic_set_unchecked(&totBufAllocCount, 0);
49805+ atomic_set_unchecked(&totSmBufAllocCount, 0);
49806 #endif /* CONFIG_CIFS_STATS2 */
49807 spin_lock(&cifs_tcp_ses_lock);
49808 list_for_each(tmp1, &cifs_tcp_ses_list) {
49809@@ -298,7 +298,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
49810 tcon = list_entry(tmp3,
49811 struct cifs_tcon,
49812 tcon_list);
49813- atomic_set(&tcon->num_smbs_sent, 0);
49814+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
49815 if (server->ops->clear_stats)
49816 server->ops->clear_stats(tcon);
49817 }
49818@@ -330,8 +330,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
49819 smBufAllocCount.counter, cifs_min_small);
49820 #ifdef CONFIG_CIFS_STATS2
49821 seq_printf(m, "Total Large %d Small %d Allocations\n",
49822- atomic_read(&totBufAllocCount),
49823- atomic_read(&totSmBufAllocCount));
49824+ atomic_read_unchecked(&totBufAllocCount),
49825+ atomic_read_unchecked(&totSmBufAllocCount));
49826 #endif /* CONFIG_CIFS_STATS2 */
49827
49828 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
49829@@ -360,7 +360,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
49830 if (tcon->need_reconnect)
49831 seq_puts(m, "\tDISCONNECTED ");
49832 seq_printf(m, "\nSMBs: %d",
49833- atomic_read(&tcon->num_smbs_sent));
49834+ atomic_read_unchecked(&tcon->num_smbs_sent));
49835 if (server->ops->print_stats)
49836 server->ops->print_stats(m, tcon);
49837 }
49838diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
49839index 3752b9f..8db5569 100644
49840--- a/fs/cifs/cifsfs.c
49841+++ b/fs/cifs/cifsfs.c
49842@@ -1035,7 +1035,7 @@ cifs_init_request_bufs(void)
49843 */
49844 cifs_req_cachep = kmem_cache_create("cifs_request",
49845 CIFSMaxBufSize + max_hdr_size, 0,
49846- SLAB_HWCACHE_ALIGN, NULL);
49847+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
49848 if (cifs_req_cachep == NULL)
49849 return -ENOMEM;
49850
49851@@ -1062,7 +1062,7 @@ cifs_init_request_bufs(void)
49852 efficient to alloc 1 per page off the slab compared to 17K (5page)
49853 alloc of large cifs buffers even when page debugging is on */
49854 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
49855- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
49856+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
49857 NULL);
49858 if (cifs_sm_req_cachep == NULL) {
49859 mempool_destroy(cifs_req_poolp);
49860@@ -1147,8 +1147,8 @@ init_cifs(void)
49861 atomic_set(&bufAllocCount, 0);
49862 atomic_set(&smBufAllocCount, 0);
49863 #ifdef CONFIG_CIFS_STATS2
49864- atomic_set(&totBufAllocCount, 0);
49865- atomic_set(&totSmBufAllocCount, 0);
49866+ atomic_set_unchecked(&totBufAllocCount, 0);
49867+ atomic_set_unchecked(&totSmBufAllocCount, 0);
49868 #endif /* CONFIG_CIFS_STATS2 */
49869
49870 atomic_set(&midCount, 0);
49871diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
49872index 4f07f6f..55de8ce 100644
49873--- a/fs/cifs/cifsglob.h
49874+++ b/fs/cifs/cifsglob.h
49875@@ -751,35 +751,35 @@ struct cifs_tcon {
49876 __u16 Flags; /* optional support bits */
49877 enum statusEnum tidStatus;
49878 #ifdef CONFIG_CIFS_STATS
49879- atomic_t num_smbs_sent;
49880+ atomic_unchecked_t num_smbs_sent;
49881 union {
49882 struct {
49883- atomic_t num_writes;
49884- atomic_t num_reads;
49885- atomic_t num_flushes;
49886- atomic_t num_oplock_brks;
49887- atomic_t num_opens;
49888- atomic_t num_closes;
49889- atomic_t num_deletes;
49890- atomic_t num_mkdirs;
49891- atomic_t num_posixopens;
49892- atomic_t num_posixmkdirs;
49893- atomic_t num_rmdirs;
49894- atomic_t num_renames;
49895- atomic_t num_t2renames;
49896- atomic_t num_ffirst;
49897- atomic_t num_fnext;
49898- atomic_t num_fclose;
49899- atomic_t num_hardlinks;
49900- atomic_t num_symlinks;
49901- atomic_t num_locks;
49902- atomic_t num_acl_get;
49903- atomic_t num_acl_set;
49904+ atomic_unchecked_t num_writes;
49905+ atomic_unchecked_t num_reads;
49906+ atomic_unchecked_t num_flushes;
49907+ atomic_unchecked_t num_oplock_brks;
49908+ atomic_unchecked_t num_opens;
49909+ atomic_unchecked_t num_closes;
49910+ atomic_unchecked_t num_deletes;
49911+ atomic_unchecked_t num_mkdirs;
49912+ atomic_unchecked_t num_posixopens;
49913+ atomic_unchecked_t num_posixmkdirs;
49914+ atomic_unchecked_t num_rmdirs;
49915+ atomic_unchecked_t num_renames;
49916+ atomic_unchecked_t num_t2renames;
49917+ atomic_unchecked_t num_ffirst;
49918+ atomic_unchecked_t num_fnext;
49919+ atomic_unchecked_t num_fclose;
49920+ atomic_unchecked_t num_hardlinks;
49921+ atomic_unchecked_t num_symlinks;
49922+ atomic_unchecked_t num_locks;
49923+ atomic_unchecked_t num_acl_get;
49924+ atomic_unchecked_t num_acl_set;
49925 } cifs_stats;
49926 #ifdef CONFIG_CIFS_SMB2
49927 struct {
49928- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
49929- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
49930+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
49931+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
49932 } smb2_stats;
49933 #endif /* CONFIG_CIFS_SMB2 */
49934 } stats;
49935@@ -1080,7 +1080,7 @@ convert_delimiter(char *path, char delim)
49936 }
49937
49938 #ifdef CONFIG_CIFS_STATS
49939-#define cifs_stats_inc atomic_inc
49940+#define cifs_stats_inc atomic_inc_unchecked
49941
49942 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
49943 unsigned int bytes)
49944@@ -1445,8 +1445,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
49945 /* Various Debug counters */
49946 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
49947 #ifdef CONFIG_CIFS_STATS2
49948-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
49949-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
49950+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
49951+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
49952 #endif
49953 GLOBAL_EXTERN atomic_t smBufAllocCount;
49954 GLOBAL_EXTERN atomic_t midCount;
49955diff --git a/fs/cifs/link.c b/fs/cifs/link.c
49956index b83c3f5..6437caa 100644
49957--- a/fs/cifs/link.c
49958+++ b/fs/cifs/link.c
49959@@ -616,7 +616,7 @@ symlink_exit:
49960
49961 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
49962 {
49963- char *p = nd_get_link(nd);
49964+ const char *p = nd_get_link(nd);
49965 if (!IS_ERR(p))
49966 kfree(p);
49967 }
49968diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
49969index 1bec014..f329411 100644
49970--- a/fs/cifs/misc.c
49971+++ b/fs/cifs/misc.c
49972@@ -169,7 +169,7 @@ cifs_buf_get(void)
49973 memset(ret_buf, 0, buf_size + 3);
49974 atomic_inc(&bufAllocCount);
49975 #ifdef CONFIG_CIFS_STATS2
49976- atomic_inc(&totBufAllocCount);
49977+ atomic_inc_unchecked(&totBufAllocCount);
49978 #endif /* CONFIG_CIFS_STATS2 */
49979 }
49980
49981@@ -204,7 +204,7 @@ cifs_small_buf_get(void)
49982 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
49983 atomic_inc(&smBufAllocCount);
49984 #ifdef CONFIG_CIFS_STATS2
49985- atomic_inc(&totSmBufAllocCount);
49986+ atomic_inc_unchecked(&totSmBufAllocCount);
49987 #endif /* CONFIG_CIFS_STATS2 */
49988
49989 }
49990diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
49991index 3efdb9d..e845a5e 100644
49992--- a/fs/cifs/smb1ops.c
49993+++ b/fs/cifs/smb1ops.c
49994@@ -591,27 +591,27 @@ static void
49995 cifs_clear_stats(struct cifs_tcon *tcon)
49996 {
49997 #ifdef CONFIG_CIFS_STATS
49998- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
49999- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
50000- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
50001- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
50002- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
50003- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
50004- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
50005- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
50006- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
50007- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
50008- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
50009- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
50010- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
50011- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
50012- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
50013- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
50014- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
50015- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
50016- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
50017- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
50018- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
50019+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
50020+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
50021+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
50022+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
50023+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
50024+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
50025+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
50026+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
50027+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
50028+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
50029+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
50030+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
50031+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
50032+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
50033+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
50034+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
50035+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
50036+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
50037+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
50038+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
50039+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
50040 #endif
50041 }
50042
50043@@ -620,36 +620,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
50044 {
50045 #ifdef CONFIG_CIFS_STATS
50046 seq_printf(m, " Oplocks breaks: %d",
50047- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
50048+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
50049 seq_printf(m, "\nReads: %d Bytes: %llu",
50050- atomic_read(&tcon->stats.cifs_stats.num_reads),
50051+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
50052 (long long)(tcon->bytes_read));
50053 seq_printf(m, "\nWrites: %d Bytes: %llu",
50054- atomic_read(&tcon->stats.cifs_stats.num_writes),
50055+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
50056 (long long)(tcon->bytes_written));
50057 seq_printf(m, "\nFlushes: %d",
50058- atomic_read(&tcon->stats.cifs_stats.num_flushes));
50059+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
50060 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
50061- atomic_read(&tcon->stats.cifs_stats.num_locks),
50062- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
50063- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
50064+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
50065+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
50066+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
50067 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
50068- atomic_read(&tcon->stats.cifs_stats.num_opens),
50069- atomic_read(&tcon->stats.cifs_stats.num_closes),
50070- atomic_read(&tcon->stats.cifs_stats.num_deletes));
50071+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
50072+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
50073+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
50074 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
50075- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
50076- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
50077+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
50078+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
50079 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
50080- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
50081- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
50082+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
50083+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
50084 seq_printf(m, "\nRenames: %d T2 Renames %d",
50085- atomic_read(&tcon->stats.cifs_stats.num_renames),
50086- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
50087+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
50088+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
50089 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
50090- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
50091- atomic_read(&tcon->stats.cifs_stats.num_fnext),
50092- atomic_read(&tcon->stats.cifs_stats.num_fclose));
50093+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
50094+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
50095+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
50096 #endif
50097 }
50098
50099diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
50100index f2e76f3..c44fac7 100644
50101--- a/fs/cifs/smb2ops.c
50102+++ b/fs/cifs/smb2ops.c
50103@@ -274,8 +274,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
50104 #ifdef CONFIG_CIFS_STATS
50105 int i;
50106 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
50107- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
50108- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
50109+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
50110+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
50111 }
50112 #endif
50113 }
50114@@ -284,66 +284,66 @@ static void
50115 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
50116 {
50117 #ifdef CONFIG_CIFS_STATS
50118- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
50119- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
50120+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
50121+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
50122 seq_printf(m, "\nNegotiates: %d sent %d failed",
50123- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
50124- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
50125+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
50126+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
50127 seq_printf(m, "\nSessionSetups: %d sent %d failed",
50128- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
50129- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
50130+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
50131+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
50132 #define SMB2LOGOFF 0x0002 /* trivial request/resp */
50133 seq_printf(m, "\nLogoffs: %d sent %d failed",
50134- atomic_read(&sent[SMB2_LOGOFF_HE]),
50135- atomic_read(&failed[SMB2_LOGOFF_HE]));
50136+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
50137+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
50138 seq_printf(m, "\nTreeConnects: %d sent %d failed",
50139- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
50140- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
50141+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
50142+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
50143 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
50144- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
50145- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
50146+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
50147+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
50148 seq_printf(m, "\nCreates: %d sent %d failed",
50149- atomic_read(&sent[SMB2_CREATE_HE]),
50150- atomic_read(&failed[SMB2_CREATE_HE]));
50151+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
50152+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
50153 seq_printf(m, "\nCloses: %d sent %d failed",
50154- atomic_read(&sent[SMB2_CLOSE_HE]),
50155- atomic_read(&failed[SMB2_CLOSE_HE]));
50156+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
50157+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
50158 seq_printf(m, "\nFlushes: %d sent %d failed",
50159- atomic_read(&sent[SMB2_FLUSH_HE]),
50160- atomic_read(&failed[SMB2_FLUSH_HE]));
50161+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
50162+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
50163 seq_printf(m, "\nReads: %d sent %d failed",
50164- atomic_read(&sent[SMB2_READ_HE]),
50165- atomic_read(&failed[SMB2_READ_HE]));
50166+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
50167+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
50168 seq_printf(m, "\nWrites: %d sent %d failed",
50169- atomic_read(&sent[SMB2_WRITE_HE]),
50170- atomic_read(&failed[SMB2_WRITE_HE]));
50171+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
50172+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
50173 seq_printf(m, "\nLocks: %d sent %d failed",
50174- atomic_read(&sent[SMB2_LOCK_HE]),
50175- atomic_read(&failed[SMB2_LOCK_HE]));
50176+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
50177+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
50178 seq_printf(m, "\nIOCTLs: %d sent %d failed",
50179- atomic_read(&sent[SMB2_IOCTL_HE]),
50180- atomic_read(&failed[SMB2_IOCTL_HE]));
50181+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
50182+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
50183 seq_printf(m, "\nCancels: %d sent %d failed",
50184- atomic_read(&sent[SMB2_CANCEL_HE]),
50185- atomic_read(&failed[SMB2_CANCEL_HE]));
50186+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
50187+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
50188 seq_printf(m, "\nEchos: %d sent %d failed",
50189- atomic_read(&sent[SMB2_ECHO_HE]),
50190- atomic_read(&failed[SMB2_ECHO_HE]));
50191+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
50192+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
50193 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
50194- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
50195- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
50196+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
50197+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
50198 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
50199- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
50200- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
50201+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
50202+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
50203 seq_printf(m, "\nQueryInfos: %d sent %d failed",
50204- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
50205- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
50206+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
50207+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
50208 seq_printf(m, "\nSetInfos: %d sent %d failed",
50209- atomic_read(&sent[SMB2_SET_INFO_HE]),
50210- atomic_read(&failed[SMB2_SET_INFO_HE]));
50211+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
50212+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
50213 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
50214- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
50215- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
50216+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
50217+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
50218 #endif
50219 }
50220
50221diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
50222index 2b95ce2..d079d75 100644
50223--- a/fs/cifs/smb2pdu.c
50224+++ b/fs/cifs/smb2pdu.c
50225@@ -1760,8 +1760,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
50226 default:
50227 cifs_dbg(VFS, "info level %u isn't supported\n",
50228 srch_inf->info_level);
50229- rc = -EINVAL;
50230- goto qdir_exit;
50231+ return -EINVAL;
50232 }
50233
50234 req->FileIndex = cpu_to_le32(index);
50235diff --git a/fs/coda/cache.c b/fs/coda/cache.c
50236index 1da168c..8bc7ff6 100644
50237--- a/fs/coda/cache.c
50238+++ b/fs/coda/cache.c
50239@@ -24,7 +24,7 @@
50240 #include "coda_linux.h"
50241 #include "coda_cache.h"
50242
50243-static atomic_t permission_epoch = ATOMIC_INIT(0);
50244+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
50245
50246 /* replace or extend an acl cache hit */
50247 void coda_cache_enter(struct inode *inode, int mask)
50248@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
50249 struct coda_inode_info *cii = ITOC(inode);
50250
50251 spin_lock(&cii->c_lock);
50252- cii->c_cached_epoch = atomic_read(&permission_epoch);
50253+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
50254 if (!uid_eq(cii->c_uid, current_fsuid())) {
50255 cii->c_uid = current_fsuid();
50256 cii->c_cached_perm = mask;
50257@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
50258 {
50259 struct coda_inode_info *cii = ITOC(inode);
50260 spin_lock(&cii->c_lock);
50261- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
50262+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
50263 spin_unlock(&cii->c_lock);
50264 }
50265
50266 /* remove all acl caches */
50267 void coda_cache_clear_all(struct super_block *sb)
50268 {
50269- atomic_inc(&permission_epoch);
50270+ atomic_inc_unchecked(&permission_epoch);
50271 }
50272
50273
50274@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
50275 spin_lock(&cii->c_lock);
50276 hit = (mask & cii->c_cached_perm) == mask &&
50277 uid_eq(cii->c_uid, current_fsuid()) &&
50278- cii->c_cached_epoch == atomic_read(&permission_epoch);
50279+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
50280 spin_unlock(&cii->c_lock);
50281
50282 return hit;
50283diff --git a/fs/compat.c b/fs/compat.c
50284index fc3b55d..7b568ae 100644
50285--- a/fs/compat.c
50286+++ b/fs/compat.c
50287@@ -54,7 +54,7 @@
50288 #include <asm/ioctls.h>
50289 #include "internal.h"
50290
50291-int compat_log = 1;
50292+int compat_log = 0;
50293
50294 int compat_printk(const char *fmt, ...)
50295 {
50296@@ -488,7 +488,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
50297
50298 set_fs(KERNEL_DS);
50299 /* The __user pointer cast is valid because of the set_fs() */
50300- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
50301+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
50302 set_fs(oldfs);
50303 /* truncating is ok because it's a user address */
50304 if (!ret)
50305@@ -546,7 +546,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
50306 goto out;
50307
50308 ret = -EINVAL;
50309- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
50310+ if (nr_segs > UIO_MAXIOV)
50311 goto out;
50312 if (nr_segs > fast_segs) {
50313 ret = -ENOMEM;
50314@@ -833,6 +833,7 @@ struct compat_old_linux_dirent {
50315
50316 struct compat_readdir_callback {
50317 struct compat_old_linux_dirent __user *dirent;
50318+ struct file * file;
50319 int result;
50320 };
50321
50322@@ -850,6 +851,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
50323 buf->result = -EOVERFLOW;
50324 return -EOVERFLOW;
50325 }
50326+
50327+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
50328+ return 0;
50329+
50330 buf->result++;
50331 dirent = buf->dirent;
50332 if (!access_ok(VERIFY_WRITE, dirent,
50333@@ -880,6 +885,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
50334
50335 buf.result = 0;
50336 buf.dirent = dirent;
50337+ buf.file = f.file;
50338
50339 error = vfs_readdir(f.file, compat_fillonedir, &buf);
50340 if (buf.result)
50341@@ -899,6 +905,7 @@ struct compat_linux_dirent {
50342 struct compat_getdents_callback {
50343 struct compat_linux_dirent __user *current_dir;
50344 struct compat_linux_dirent __user *previous;
50345+ struct file * file;
50346 int count;
50347 int error;
50348 };
50349@@ -920,6 +927,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
50350 buf->error = -EOVERFLOW;
50351 return -EOVERFLOW;
50352 }
50353+
50354+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
50355+ return 0;
50356+
50357 dirent = buf->previous;
50358 if (dirent) {
50359 if (__put_user(offset, &dirent->d_off))
50360@@ -965,6 +976,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
50361 buf.previous = NULL;
50362 buf.count = count;
50363 buf.error = 0;
50364+ buf.file = f.file;
50365
50366 error = vfs_readdir(f.file, compat_filldir, &buf);
50367 if (error >= 0)
50368@@ -985,6 +997,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
50369 struct compat_getdents_callback64 {
50370 struct linux_dirent64 __user *current_dir;
50371 struct linux_dirent64 __user *previous;
50372+ struct file * file;
50373 int count;
50374 int error;
50375 };
50376@@ -1001,6 +1014,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
50377 buf->error = -EINVAL; /* only used if we fail.. */
50378 if (reclen > buf->count)
50379 return -EINVAL;
50380+
50381+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
50382+ return 0;
50383+
50384 dirent = buf->previous;
50385
50386 if (dirent) {
50387@@ -1050,13 +1067,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
50388 buf.previous = NULL;
50389 buf.count = count;
50390 buf.error = 0;
50391+ buf.file = f.file;
50392
50393 error = vfs_readdir(f.file, compat_filldir64, &buf);
50394 if (error >= 0)
50395 error = buf.error;
50396 lastdirent = buf.previous;
50397 if (lastdirent) {
50398- typeof(lastdirent->d_off) d_off = f.file->f_pos;
50399+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = f.file->f_pos;
50400 if (__put_user_unaligned(d_off, &lastdirent->d_off))
50401 error = -EFAULT;
50402 else
50403diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
50404index a81147e..20bf2b5 100644
50405--- a/fs/compat_binfmt_elf.c
50406+++ b/fs/compat_binfmt_elf.c
50407@@ -30,11 +30,13 @@
50408 #undef elf_phdr
50409 #undef elf_shdr
50410 #undef elf_note
50411+#undef elf_dyn
50412 #undef elf_addr_t
50413 #define elfhdr elf32_hdr
50414 #define elf_phdr elf32_phdr
50415 #define elf_shdr elf32_shdr
50416 #define elf_note elf32_note
50417+#define elf_dyn Elf32_Dyn
50418 #define elf_addr_t Elf32_Addr
50419
50420 /*
50421diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
50422index 996cdc5..15e2f33 100644
50423--- a/fs/compat_ioctl.c
50424+++ b/fs/compat_ioctl.c
50425@@ -622,7 +622,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
50426 return -EFAULT;
50427 if (__get_user(udata, &ss32->iomem_base))
50428 return -EFAULT;
50429- ss.iomem_base = compat_ptr(udata);
50430+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
50431 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
50432 __get_user(ss.port_high, &ss32->port_high))
50433 return -EFAULT;
50434@@ -703,8 +703,8 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
50435 for (i = 0; i < nmsgs; i++) {
50436 if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
50437 return -EFAULT;
50438- if (get_user(datap, &umsgs[i].buf) ||
50439- put_user(compat_ptr(datap), &tmsgs[i].buf))
50440+ if (get_user(datap, (u8 __user * __user *)&umsgs[i].buf) ||
50441+ put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
50442 return -EFAULT;
50443 }
50444 return sys_ioctl(fd, cmd, (unsigned long)tdata);
50445@@ -797,7 +797,7 @@ static int compat_ioctl_preallocate(struct file *file,
50446 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
50447 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
50448 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
50449- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
50450+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
50451 return -EFAULT;
50452
50453 return ioctl_preallocate(file, p);
50454@@ -1619,8 +1619,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
50455 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
50456 {
50457 unsigned int a, b;
50458- a = *(unsigned int *)p;
50459- b = *(unsigned int *)q;
50460+ a = *(const unsigned int *)p;
50461+ b = *(const unsigned int *)q;
50462 if (a > b)
50463 return 1;
50464 if (a < b)
50465diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
50466index 7aabc6a..34c1197 100644
50467--- a/fs/configfs/dir.c
50468+++ b/fs/configfs/dir.c
50469@@ -1565,7 +1565,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
50470 }
50471 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
50472 struct configfs_dirent *next;
50473- const char * name;
50474+ const unsigned char * name;
50475+ char d_name[sizeof(next->s_dentry->d_iname)];
50476 int len;
50477 struct inode *inode = NULL;
50478
50479@@ -1575,7 +1576,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
50480 continue;
50481
50482 name = configfs_get_name(next);
50483- len = strlen(name);
50484+ if (next->s_dentry && name == next->s_dentry->d_iname) {
50485+ len = next->s_dentry->d_name.len;
50486+ memcpy(d_name, name, len);
50487+ name = d_name;
50488+ } else
50489+ len = strlen(name);
50490
50491 /*
50492 * We'll have a dentry and an inode for
50493diff --git a/fs/coredump.c b/fs/coredump.c
50494index dafafba..10b3b27 100644
50495--- a/fs/coredump.c
50496+++ b/fs/coredump.c
50497@@ -52,7 +52,7 @@ struct core_name {
50498 char *corename;
50499 int used, size;
50500 };
50501-static atomic_t call_count = ATOMIC_INIT(1);
50502+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
50503
50504 /* The maximal length of core_pattern is also specified in sysctl.c */
50505
50506@@ -60,7 +60,7 @@ static int expand_corename(struct core_name *cn)
50507 {
50508 char *old_corename = cn->corename;
50509
50510- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
50511+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
50512 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
50513
50514 if (!cn->corename) {
50515@@ -157,7 +157,7 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm)
50516 int pid_in_pattern = 0;
50517 int err = 0;
50518
50519- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
50520+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
50521 cn->corename = kmalloc(cn->size, GFP_KERNEL);
50522 cn->used = 0;
50523
50524@@ -435,8 +435,8 @@ static void wait_for_dump_helpers(struct file *file)
50525 struct pipe_inode_info *pipe = file->private_data;
50526
50527 pipe_lock(pipe);
50528- pipe->readers++;
50529- pipe->writers--;
50530+ atomic_inc(&pipe->readers);
50531+ atomic_dec(&pipe->writers);
50532 wake_up_interruptible_sync(&pipe->wait);
50533 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
50534 pipe_unlock(pipe);
50535@@ -445,11 +445,11 @@ static void wait_for_dump_helpers(struct file *file)
50536 * We actually want wait_event_freezable() but then we need
50537 * to clear TIF_SIGPENDING and improve dump_interrupted().
50538 */
50539- wait_event_interruptible(pipe->wait, pipe->readers == 1);
50540+ wait_event_interruptible(pipe->wait, atomic_read(&pipe->readers) == 1);
50541
50542 pipe_lock(pipe);
50543- pipe->readers--;
50544- pipe->writers++;
50545+ atomic_dec(&pipe->readers);
50546+ atomic_inc(&pipe->writers);
50547 pipe_unlock(pipe);
50548 }
50549
50550@@ -496,7 +496,8 @@ void do_coredump(siginfo_t *siginfo)
50551 struct files_struct *displaced;
50552 bool need_nonrelative = false;
50553 bool core_dumped = false;
50554- static atomic_t core_dump_count = ATOMIC_INIT(0);
50555+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
50556+ long signr = siginfo->si_signo;
50557 struct coredump_params cprm = {
50558 .siginfo = siginfo,
50559 .regs = signal_pt_regs(),
50560@@ -509,7 +510,10 @@ void do_coredump(siginfo_t *siginfo)
50561 .mm_flags = mm->flags,
50562 };
50563
50564- audit_core_dumps(siginfo->si_signo);
50565+ audit_core_dumps(signr);
50566+
50567+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
50568+ gr_handle_brute_attach(cprm.mm_flags);
50569
50570 binfmt = mm->binfmt;
50571 if (!binfmt || !binfmt->core_dump)
50572@@ -533,7 +537,7 @@ void do_coredump(siginfo_t *siginfo)
50573 need_nonrelative = true;
50574 }
50575
50576- retval = coredump_wait(siginfo->si_signo, &core_state);
50577+ retval = coredump_wait(signr, &core_state);
50578 if (retval < 0)
50579 goto fail_creds;
50580
50581@@ -576,7 +580,7 @@ void do_coredump(siginfo_t *siginfo)
50582 }
50583 cprm.limit = RLIM_INFINITY;
50584
50585- dump_count = atomic_inc_return(&core_dump_count);
50586+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
50587 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
50588 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
50589 task_tgid_vnr(current), current->comm);
50590@@ -608,6 +612,8 @@ void do_coredump(siginfo_t *siginfo)
50591 } else {
50592 struct inode *inode;
50593
50594+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
50595+
50596 if (cprm.limit < binfmt->min_coredump)
50597 goto fail_unlock;
50598
50599@@ -666,7 +672,7 @@ close_fail:
50600 filp_close(cprm.file, NULL);
50601 fail_dropcount:
50602 if (ispipe)
50603- atomic_dec(&core_dump_count);
50604+ atomic_dec_unchecked(&core_dump_count);
50605 fail_unlock:
50606 kfree(cn.corename);
50607 fail_corename:
50608@@ -687,7 +693,7 @@ int dump_write(struct file *file, const void *addr, int nr)
50609 {
50610 return !dump_interrupted() &&
50611 access_ok(VERIFY_READ, addr, nr) &&
50612- file->f_op->write(file, addr, nr, &file->f_pos) == nr;
50613+ file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
50614 }
50615 EXPORT_SYMBOL(dump_write);
50616
50617diff --git a/fs/dcache.c b/fs/dcache.c
50618index f09b908..4dd10d8 100644
50619--- a/fs/dcache.c
50620+++ b/fs/dcache.c
50621@@ -3086,7 +3086,7 @@ void __init vfs_caches_init(unsigned long mempages)
50622 mempages -= reserve;
50623
50624 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
50625- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
50626+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY, NULL);
50627
50628 dcache_init();
50629 inode_init();
50630diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
50631index 4888cb3..e0f7cf8 100644
50632--- a/fs/debugfs/inode.c
50633+++ b/fs/debugfs/inode.c
50634@@ -415,7 +415,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
50635 */
50636 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
50637 {
50638+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
50639+ return __create_file(name, S_IFDIR | S_IRWXU,
50640+#else
50641 return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
50642+#endif
50643 parent, NULL, NULL);
50644 }
50645 EXPORT_SYMBOL_GPL(debugfs_create_dir);
50646diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
50647index 5eab400..810a3f5 100644
50648--- a/fs/ecryptfs/inode.c
50649+++ b/fs/ecryptfs/inode.c
50650@@ -674,7 +674,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
50651 old_fs = get_fs();
50652 set_fs(get_ds());
50653 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
50654- (char __user *)lower_buf,
50655+ (char __force_user *)lower_buf,
50656 PATH_MAX);
50657 set_fs(old_fs);
50658 if (rc < 0)
50659@@ -706,7 +706,7 @@ out:
50660 static void
50661 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
50662 {
50663- char *buf = nd_get_link(nd);
50664+ const char *buf = nd_get_link(nd);
50665 if (!IS_ERR(buf)) {
50666 /* Free the char* */
50667 kfree(buf);
50668diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
50669index e4141f2..d8263e8 100644
50670--- a/fs/ecryptfs/miscdev.c
50671+++ b/fs/ecryptfs/miscdev.c
50672@@ -304,7 +304,7 @@ check_list:
50673 goto out_unlock_msg_ctx;
50674 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
50675 if (msg_ctx->msg) {
50676- if (copy_to_user(&buf[i], packet_length, packet_length_size))
50677+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
50678 goto out_unlock_msg_ctx;
50679 i += packet_length_size;
50680 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
50681diff --git a/fs/exec.c b/fs/exec.c
50682index ffd7a81..e38107f 100644
50683--- a/fs/exec.c
50684+++ b/fs/exec.c
50685@@ -55,8 +55,20 @@
50686 #include <linux/pipe_fs_i.h>
50687 #include <linux/oom.h>
50688 #include <linux/compat.h>
50689+#include <linux/random.h>
50690+#include <linux/seq_file.h>
50691+#include <linux/coredump.h>
50692+#include <linux/mman.h>
50693+
50694+#ifdef CONFIG_PAX_REFCOUNT
50695+#include <linux/kallsyms.h>
50696+#include <linux/kdebug.h>
50697+#endif
50698+
50699+#include <trace/events/fs.h>
50700
50701 #include <asm/uaccess.h>
50702+#include <asm/sections.h>
50703 #include <asm/mmu_context.h>
50704 #include <asm/tlb.h>
50705
50706@@ -66,17 +78,32 @@
50707
50708 #include <trace/events/sched.h>
50709
50710+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
50711+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
50712+{
50713+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
50714+}
50715+#endif
50716+
50717+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
50718+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
50719+EXPORT_SYMBOL(pax_set_initial_flags_func);
50720+#endif
50721+
50722 int suid_dumpable = 0;
50723
50724 static LIST_HEAD(formats);
50725 static DEFINE_RWLOCK(binfmt_lock);
50726
50727+extern int gr_process_kernel_exec_ban(void);
50728+extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm);
50729+
50730 void __register_binfmt(struct linux_binfmt * fmt, int insert)
50731 {
50732 BUG_ON(!fmt);
50733 write_lock(&binfmt_lock);
50734- insert ? list_add(&fmt->lh, &formats) :
50735- list_add_tail(&fmt->lh, &formats);
50736+ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
50737+ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
50738 write_unlock(&binfmt_lock);
50739 }
50740
50741@@ -85,7 +112,7 @@ EXPORT_SYMBOL(__register_binfmt);
50742 void unregister_binfmt(struct linux_binfmt * fmt)
50743 {
50744 write_lock(&binfmt_lock);
50745- list_del(&fmt->lh);
50746+ pax_list_del((struct list_head *)&fmt->lh);
50747 write_unlock(&binfmt_lock);
50748 }
50749
50750@@ -180,18 +207,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
50751 int write)
50752 {
50753 struct page *page;
50754- int ret;
50755
50756-#ifdef CONFIG_STACK_GROWSUP
50757- if (write) {
50758- ret = expand_downwards(bprm->vma, pos);
50759- if (ret < 0)
50760- return NULL;
50761- }
50762-#endif
50763- ret = get_user_pages(current, bprm->mm, pos,
50764- 1, write, 1, &page, NULL);
50765- if (ret <= 0)
50766+ if (0 > expand_downwards(bprm->vma, pos))
50767+ return NULL;
50768+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
50769 return NULL;
50770
50771 if (write) {
50772@@ -207,6 +226,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
50773 if (size <= ARG_MAX)
50774 return page;
50775
50776+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50777+ // only allow 512KB for argv+env on suid/sgid binaries
50778+ // to prevent easy ASLR exhaustion
50779+ if (((!uid_eq(bprm->cred->euid, current_euid())) ||
50780+ (!gid_eq(bprm->cred->egid, current_egid()))) &&
50781+ (size > (512 * 1024))) {
50782+ put_page(page);
50783+ return NULL;
50784+ }
50785+#endif
50786+
50787 /*
50788 * Limit to 1/4-th the stack size for the argv+env strings.
50789 * This ensures that:
50790@@ -266,6 +296,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
50791 vma->vm_end = STACK_TOP_MAX;
50792 vma->vm_start = vma->vm_end - PAGE_SIZE;
50793 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
50794+
50795+#ifdef CONFIG_PAX_SEGMEXEC
50796+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
50797+#endif
50798+
50799 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
50800 INIT_LIST_HEAD(&vma->anon_vma_chain);
50801
50802@@ -276,6 +311,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
50803 mm->stack_vm = mm->total_vm = 1;
50804 up_write(&mm->mmap_sem);
50805 bprm->p = vma->vm_end - sizeof(void *);
50806+
50807+#ifdef CONFIG_PAX_RANDUSTACK
50808+ if (randomize_va_space)
50809+ bprm->p ^= prandom_u32() & ~PAGE_MASK;
50810+#endif
50811+
50812 return 0;
50813 err:
50814 up_write(&mm->mmap_sem);
50815@@ -396,7 +437,7 @@ struct user_arg_ptr {
50816 } ptr;
50817 };
50818
50819-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
50820+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
50821 {
50822 const char __user *native;
50823
50824@@ -405,14 +446,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
50825 compat_uptr_t compat;
50826
50827 if (get_user(compat, argv.ptr.compat + nr))
50828- return ERR_PTR(-EFAULT);
50829+ return (const char __force_user *)ERR_PTR(-EFAULT);
50830
50831 return compat_ptr(compat);
50832 }
50833 #endif
50834
50835 if (get_user(native, argv.ptr.native + nr))
50836- return ERR_PTR(-EFAULT);
50837+ return (const char __force_user *)ERR_PTR(-EFAULT);
50838
50839 return native;
50840 }
50841@@ -431,7 +472,7 @@ static int count(struct user_arg_ptr argv, int max)
50842 if (!p)
50843 break;
50844
50845- if (IS_ERR(p))
50846+ if (IS_ERR((const char __force_kernel *)p))
50847 return -EFAULT;
50848
50849 if (i >= max)
50850@@ -466,7 +507,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
50851
50852 ret = -EFAULT;
50853 str = get_user_arg_ptr(argv, argc);
50854- if (IS_ERR(str))
50855+ if (IS_ERR((const char __force_kernel *)str))
50856 goto out;
50857
50858 len = strnlen_user(str, MAX_ARG_STRLEN);
50859@@ -548,7 +589,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
50860 int r;
50861 mm_segment_t oldfs = get_fs();
50862 struct user_arg_ptr argv = {
50863- .ptr.native = (const char __user *const __user *)__argv,
50864+ .ptr.native = (const char __force_user * const __force_user *)__argv,
50865 };
50866
50867 set_fs(KERNEL_DS);
50868@@ -583,7 +624,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
50869 unsigned long new_end = old_end - shift;
50870 struct mmu_gather tlb;
50871
50872- BUG_ON(new_start > new_end);
50873+ if (new_start >= new_end || new_start < mmap_min_addr)
50874+ return -ENOMEM;
50875
50876 /*
50877 * ensure there are no vmas between where we want to go
50878@@ -592,6 +634,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
50879 if (vma != find_vma(mm, new_start))
50880 return -EFAULT;
50881
50882+#ifdef CONFIG_PAX_SEGMEXEC
50883+ BUG_ON(pax_find_mirror_vma(vma));
50884+#endif
50885+
50886 /*
50887 * cover the whole range: [new_start, old_end)
50888 */
50889@@ -672,10 +718,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
50890 stack_top = arch_align_stack(stack_top);
50891 stack_top = PAGE_ALIGN(stack_top);
50892
50893- if (unlikely(stack_top < mmap_min_addr) ||
50894- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
50895- return -ENOMEM;
50896-
50897 stack_shift = vma->vm_end - stack_top;
50898
50899 bprm->p -= stack_shift;
50900@@ -687,8 +729,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
50901 bprm->exec -= stack_shift;
50902
50903 down_write(&mm->mmap_sem);
50904+
50905+ /* Move stack pages down in memory. */
50906+ if (stack_shift) {
50907+ ret = shift_arg_pages(vma, stack_shift);
50908+ if (ret)
50909+ goto out_unlock;
50910+ }
50911+
50912 vm_flags = VM_STACK_FLAGS;
50913
50914+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
50915+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
50916+ vm_flags &= ~VM_EXEC;
50917+
50918+#ifdef CONFIG_PAX_MPROTECT
50919+ if (mm->pax_flags & MF_PAX_MPROTECT)
50920+ vm_flags &= ~VM_MAYEXEC;
50921+#endif
50922+
50923+ }
50924+#endif
50925+
50926 /*
50927 * Adjust stack execute permissions; explicitly enable for
50928 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
50929@@ -707,13 +769,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
50930 goto out_unlock;
50931 BUG_ON(prev != vma);
50932
50933- /* Move stack pages down in memory. */
50934- if (stack_shift) {
50935- ret = shift_arg_pages(vma, stack_shift);
50936- if (ret)
50937- goto out_unlock;
50938- }
50939-
50940 /* mprotect_fixup is overkill to remove the temporary stack flags */
50941 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
50942
50943@@ -737,6 +792,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
50944 #endif
50945 current->mm->start_stack = bprm->p;
50946 ret = expand_stack(vma, stack_base);
50947+
50948+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
50949+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
50950+ unsigned long size;
50951+ vm_flags_t vm_flags;
50952+
50953+ size = STACK_TOP - vma->vm_end;
50954+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
50955+
50956+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, vm_flags, 0);
50957+
50958+#ifdef CONFIG_X86
50959+ if (!ret) {
50960+ size = mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
50961+ ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), vm_flags, 0);
50962+ }
50963+#endif
50964+
50965+ }
50966+#endif
50967+
50968 if (ret)
50969 ret = -EFAULT;
50970
50971@@ -772,6 +848,8 @@ struct file *open_exec(const char *name)
50972
50973 fsnotify_open(file);
50974
50975+ trace_open_exec(name);
50976+
50977 err = deny_write_access(file);
50978 if (err)
50979 goto exit;
50980@@ -795,7 +873,7 @@ int kernel_read(struct file *file, loff_t offset,
50981 old_fs = get_fs();
50982 set_fs(get_ds());
50983 /* The cast to a user pointer is valid due to the set_fs() */
50984- result = vfs_read(file, (void __user *)addr, count, &pos);
50985+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
50986 set_fs(old_fs);
50987 return result;
50988 }
50989@@ -1251,7 +1329,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
50990 }
50991 rcu_read_unlock();
50992
50993- if (p->fs->users > n_fs) {
50994+ if (atomic_read(&p->fs->users) > n_fs) {
50995 bprm->unsafe |= LSM_UNSAFE_SHARE;
50996 } else {
50997 res = -EAGAIN;
50998@@ -1451,6 +1529,31 @@ int search_binary_handler(struct linux_binprm *bprm)
50999
51000 EXPORT_SYMBOL(search_binary_handler);
51001
51002+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51003+static DEFINE_PER_CPU(u64, exec_counter);
51004+static int __init init_exec_counters(void)
51005+{
51006+ unsigned int cpu;
51007+
51008+ for_each_possible_cpu(cpu) {
51009+ per_cpu(exec_counter, cpu) = (u64)cpu;
51010+ }
51011+
51012+ return 0;
51013+}
51014+early_initcall(init_exec_counters);
51015+static inline void increment_exec_counter(void)
51016+{
51017+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
51018+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
51019+}
51020+#else
51021+static inline void increment_exec_counter(void) {}
51022+#endif
51023+
51024+extern void gr_handle_exec_args(struct linux_binprm *bprm,
51025+ struct user_arg_ptr argv);
51026+
51027 /*
51028 * sys_execve() executes a new program.
51029 */
51030@@ -1458,6 +1561,11 @@ static int do_execve_common(const char *filename,
51031 struct user_arg_ptr argv,
51032 struct user_arg_ptr envp)
51033 {
51034+#ifdef CONFIG_GRKERNSEC
51035+ struct file *old_exec_file;
51036+ struct acl_subject_label *old_acl;
51037+ struct rlimit old_rlim[RLIM_NLIMITS];
51038+#endif
51039 struct linux_binprm *bprm;
51040 struct file *file;
51041 struct files_struct *displaced;
51042@@ -1465,6 +1573,8 @@ static int do_execve_common(const char *filename,
51043 int retval;
51044 const struct cred *cred = current_cred();
51045
51046+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&cred->user->processes), 1);
51047+
51048 /*
51049 * We move the actual failure in case of RLIMIT_NPROC excess from
51050 * set*uid() to execve() because too many poorly written programs
51051@@ -1505,12 +1615,22 @@ static int do_execve_common(const char *filename,
51052 if (IS_ERR(file))
51053 goto out_unmark;
51054
51055+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
51056+ retval = -EPERM;
51057+ goto out_file;
51058+ }
51059+
51060 sched_exec();
51061
51062 bprm->file = file;
51063 bprm->filename = filename;
51064 bprm->interp = filename;
51065
51066+ if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) {
51067+ retval = -EACCES;
51068+ goto out_file;
51069+ }
51070+
51071 retval = bprm_mm_init(bprm);
51072 if (retval)
51073 goto out_file;
51074@@ -1527,24 +1647,70 @@ static int do_execve_common(const char *filename,
51075 if (retval < 0)
51076 goto out;
51077
51078+#ifdef CONFIG_GRKERNSEC
51079+ old_acl = current->acl;
51080+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
51081+ old_exec_file = current->exec_file;
51082+ get_file(file);
51083+ current->exec_file = file;
51084+#endif
51085+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51086+ /* limit suid stack to 8MB
51087+ * we saved the old limits above and will restore them if this exec fails
51088+ */
51089+ if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
51090+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
51091+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
51092+#endif
51093+
51094+ if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) {
51095+ retval = -EPERM;
51096+ goto out_fail;
51097+ }
51098+
51099+ if (!gr_tpe_allow(file)) {
51100+ retval = -EACCES;
51101+ goto out_fail;
51102+ }
51103+
51104+ if (gr_check_crash_exec(file)) {
51105+ retval = -EACCES;
51106+ goto out_fail;
51107+ }
51108+
51109+ retval = gr_set_proc_label(file->f_path.dentry, file->f_path.mnt,
51110+ bprm->unsafe);
51111+ if (retval < 0)
51112+ goto out_fail;
51113+
51114 retval = copy_strings_kernel(1, &bprm->filename, bprm);
51115 if (retval < 0)
51116- goto out;
51117+ goto out_fail;
51118
51119 bprm->exec = bprm->p;
51120 retval = copy_strings(bprm->envc, envp, bprm);
51121 if (retval < 0)
51122- goto out;
51123+ goto out_fail;
51124
51125 retval = copy_strings(bprm->argc, argv, bprm);
51126 if (retval < 0)
51127- goto out;
51128+ goto out_fail;
51129+
51130+ gr_log_chroot_exec(file->f_path.dentry, file->f_path.mnt);
51131+
51132+ gr_handle_exec_args(bprm, argv);
51133
51134 retval = search_binary_handler(bprm);
51135 if (retval < 0)
51136- goto out;
51137+ goto out_fail;
51138+#ifdef CONFIG_GRKERNSEC
51139+ if (old_exec_file)
51140+ fput(old_exec_file);
51141+#endif
51142
51143 /* execve succeeded */
51144+
51145+ increment_exec_counter();
51146 current->fs->in_exec = 0;
51147 current->in_execve = 0;
51148 acct_update_integrals(current);
51149@@ -1553,6 +1719,14 @@ static int do_execve_common(const char *filename,
51150 put_files_struct(displaced);
51151 return retval;
51152
51153+out_fail:
51154+#ifdef CONFIG_GRKERNSEC
51155+ current->acl = old_acl;
51156+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
51157+ fput(current->exec_file);
51158+ current->exec_file = old_exec_file;
51159+#endif
51160+
51161 out:
51162 if (bprm->mm) {
51163 acct_arg_size(bprm, 0);
51164@@ -1701,3 +1875,283 @@ asmlinkage long compat_sys_execve(const char __user * filename,
51165 return error;
51166 }
51167 #endif
51168+
51169+int pax_check_flags(unsigned long *flags)
51170+{
51171+ int retval = 0;
51172+
51173+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
51174+ if (*flags & MF_PAX_SEGMEXEC)
51175+ {
51176+ *flags &= ~MF_PAX_SEGMEXEC;
51177+ retval = -EINVAL;
51178+ }
51179+#endif
51180+
51181+ if ((*flags & MF_PAX_PAGEEXEC)
51182+
51183+#ifdef CONFIG_PAX_PAGEEXEC
51184+ && (*flags & MF_PAX_SEGMEXEC)
51185+#endif
51186+
51187+ )
51188+ {
51189+ *flags &= ~MF_PAX_PAGEEXEC;
51190+ retval = -EINVAL;
51191+ }
51192+
51193+ if ((*flags & MF_PAX_MPROTECT)
51194+
51195+#ifdef CONFIG_PAX_MPROTECT
51196+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
51197+#endif
51198+
51199+ )
51200+ {
51201+ *flags &= ~MF_PAX_MPROTECT;
51202+ retval = -EINVAL;
51203+ }
51204+
51205+ if ((*flags & MF_PAX_EMUTRAMP)
51206+
51207+#ifdef CONFIG_PAX_EMUTRAMP
51208+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
51209+#endif
51210+
51211+ )
51212+ {
51213+ *flags &= ~MF_PAX_EMUTRAMP;
51214+ retval = -EINVAL;
51215+ }
51216+
51217+ return retval;
51218+}
51219+
51220+EXPORT_SYMBOL(pax_check_flags);
51221+
51222+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
51223+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
51224+{
51225+ struct task_struct *tsk = current;
51226+ struct mm_struct *mm = current->mm;
51227+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
51228+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
51229+ char *path_exec = NULL;
51230+ char *path_fault = NULL;
51231+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
51232+ siginfo_t info = { };
51233+
51234+ if (buffer_exec && buffer_fault) {
51235+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
51236+
51237+ down_read(&mm->mmap_sem);
51238+ vma = mm->mmap;
51239+ while (vma && (!vma_exec || !vma_fault)) {
51240+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
51241+ vma_exec = vma;
51242+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
51243+ vma_fault = vma;
51244+ vma = vma->vm_next;
51245+ }
51246+ if (vma_exec) {
51247+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
51248+ if (IS_ERR(path_exec))
51249+ path_exec = "<path too long>";
51250+ else {
51251+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
51252+ if (path_exec) {
51253+ *path_exec = 0;
51254+ path_exec = buffer_exec;
51255+ } else
51256+ path_exec = "<path too long>";
51257+ }
51258+ }
51259+ if (vma_fault) {
51260+ start = vma_fault->vm_start;
51261+ end = vma_fault->vm_end;
51262+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
51263+ if (vma_fault->vm_file) {
51264+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
51265+ if (IS_ERR(path_fault))
51266+ path_fault = "<path too long>";
51267+ else {
51268+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
51269+ if (path_fault) {
51270+ *path_fault = 0;
51271+ path_fault = buffer_fault;
51272+ } else
51273+ path_fault = "<path too long>";
51274+ }
51275+ } else
51276+ path_fault = "<anonymous mapping>";
51277+ }
51278+ up_read(&mm->mmap_sem);
51279+ }
51280+ if (tsk->signal->curr_ip)
51281+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
51282+ else
51283+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
51284+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
51285+ from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
51286+ free_page((unsigned long)buffer_exec);
51287+ free_page((unsigned long)buffer_fault);
51288+ pax_report_insns(regs, pc, sp);
51289+ info.si_signo = SIGKILL;
51290+ info.si_errno = 0;
51291+ info.si_code = SI_KERNEL;
51292+ info.si_pid = 0;
51293+ info.si_uid = 0;
51294+ do_coredump(&info);
51295+}
51296+#endif
51297+
51298+#ifdef CONFIG_PAX_REFCOUNT
51299+void pax_report_refcount_overflow(struct pt_regs *regs)
51300+{
51301+ if (current->signal->curr_ip)
51302+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
51303+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
51304+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
51305+ else
51306+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
51307+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
51308+ print_symbol(KERN_ERR "PAX: refcount overflow occured at: %s\n", instruction_pointer(regs));
51309+ show_regs(regs);
51310+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
51311+}
51312+#endif
51313+
51314+#ifdef CONFIG_PAX_USERCOPY
51315+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
51316+static noinline int check_stack_object(const void *obj, unsigned long len)
51317+{
51318+ const void * const stack = task_stack_page(current);
51319+ const void * const stackend = stack + THREAD_SIZE;
51320+
51321+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
51322+ const void *frame = NULL;
51323+ const void *oldframe;
51324+#endif
51325+
51326+ if (obj + len < obj)
51327+ return -1;
51328+
51329+ if (obj + len <= stack || stackend <= obj)
51330+ return 0;
51331+
51332+ if (obj < stack || stackend < obj + len)
51333+ return -1;
51334+
51335+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
51336+ oldframe = __builtin_frame_address(1);
51337+ if (oldframe)
51338+ frame = __builtin_frame_address(2);
51339+ /*
51340+ low ----------------------------------------------> high
51341+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
51342+ ^----------------^
51343+ allow copies only within here
51344+ */
51345+ while (stack <= frame && frame < stackend) {
51346+ /* if obj + len extends past the last frame, this
51347+ check won't pass and the next frame will be 0,
51348+ causing us to bail out and correctly report
51349+ the copy as invalid
51350+ */
51351+ if (obj + len <= frame)
51352+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
51353+ oldframe = frame;
51354+ frame = *(const void * const *)frame;
51355+ }
51356+ return -1;
51357+#else
51358+ return 1;
51359+#endif
51360+}
51361+
51362+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
51363+{
51364+ if (current->signal->curr_ip)
51365+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
51366+ &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
51367+ else
51368+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
51369+ to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
51370+ dump_stack();
51371+ gr_handle_kernel_exploit();
51372+ do_group_exit(SIGKILL);
51373+}
51374+#endif
51375+
51376+#ifdef CONFIG_PAX_USERCOPY
51377+static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
51378+{
51379+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
51380+ unsigned long textlow = ktla_ktva((unsigned long)_stext);
51381+#ifdef CONFIG_MODULES
51382+ unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
51383+#else
51384+ unsigned long texthigh = ktla_ktva((unsigned long)_etext);
51385+#endif
51386+
51387+#else
51388+ unsigned long textlow = (unsigned long)_stext;
51389+ unsigned long texthigh = (unsigned long)_etext;
51390+#endif
51391+
51392+ if (high <= textlow || low > texthigh)
51393+ return false;
51394+ else
51395+ return true;
51396+}
51397+#endif
51398+
51399+void __check_object_size(const void *ptr, unsigned long n, bool to_user)
51400+{
51401+
51402+#ifdef CONFIG_PAX_USERCOPY
51403+ const char *type;
51404+
51405+ if (!n)
51406+ return;
51407+
51408+ type = check_heap_object(ptr, n);
51409+ if (!type) {
51410+ int ret = check_stack_object(ptr, n);
51411+ if (ret == 1 || ret == 2)
51412+ return;
51413+ if (ret == 0) {
51414+ if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
51415+ type = "<kernel text>";
51416+ else
51417+ return;
51418+ } else
51419+ type = "<process stack>";
51420+ }
51421+
51422+ pax_report_usercopy(ptr, n, to_user, type);
51423+#endif
51424+
51425+}
51426+EXPORT_SYMBOL(__check_object_size);
51427+
51428+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
51429+void pax_track_stack(void)
51430+{
51431+ unsigned long sp = (unsigned long)&sp;
51432+ if (sp < current_thread_info()->lowest_stack &&
51433+ sp > (unsigned long)task_stack_page(current))
51434+ current_thread_info()->lowest_stack = sp;
51435+}
51436+EXPORT_SYMBOL(pax_track_stack);
51437+#endif
51438+
51439+#ifdef CONFIG_PAX_SIZE_OVERFLOW
51440+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
51441+{
51442+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s", func, file, line, ssa_name);
51443+ dump_stack();
51444+ do_group_exit(SIGKILL);
51445+}
51446+EXPORT_SYMBOL(report_size_overflow);
51447+#endif
51448diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
51449index 9f9992b..8b59411 100644
51450--- a/fs/ext2/balloc.c
51451+++ b/fs/ext2/balloc.c
51452@@ -1184,10 +1184,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
51453
51454 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
51455 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
51456- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
51457+ if (free_blocks < root_blocks + 1 &&
51458 !uid_eq(sbi->s_resuid, current_fsuid()) &&
51459 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
51460- !in_group_p (sbi->s_resgid))) {
51461+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
51462 return 0;
51463 }
51464 return 1;
51465diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
51466index 22548f5..41521d8 100644
51467--- a/fs/ext3/balloc.c
51468+++ b/fs/ext3/balloc.c
51469@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
51470
51471 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
51472 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
51473- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
51474+ if (free_blocks < root_blocks + 1 &&
51475 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
51476 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
51477- !in_group_p (sbi->s_resgid))) {
51478+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
51479 return 0;
51480 }
51481 return 1;
51482diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
51483index d0f13ea..4b2dadd 100644
51484--- a/fs/ext4/balloc.c
51485+++ b/fs/ext4/balloc.c
51486@@ -528,8 +528,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
51487 /* Hm, nope. Are (enough) root reserved clusters available? */
51488 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
51489 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
51490- capable(CAP_SYS_RESOURCE) ||
51491- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
51492+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
51493+ capable_nolog(CAP_SYS_RESOURCE)) {
51494
51495 if (free_clusters >= (nclusters + dirty_clusters +
51496 resv_clusters))
51497diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
51498index 5aae3d1..b5da7f8 100644
51499--- a/fs/ext4/ext4.h
51500+++ b/fs/ext4/ext4.h
51501@@ -1252,19 +1252,19 @@ struct ext4_sb_info {
51502 unsigned long s_mb_last_start;
51503
51504 /* stats for buddy allocator */
51505- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
51506- atomic_t s_bal_success; /* we found long enough chunks */
51507- atomic_t s_bal_allocated; /* in blocks */
51508- atomic_t s_bal_ex_scanned; /* total extents scanned */
51509- atomic_t s_bal_goals; /* goal hits */
51510- atomic_t s_bal_breaks; /* too long searches */
51511- atomic_t s_bal_2orders; /* 2^order hits */
51512+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
51513+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
51514+ atomic_unchecked_t s_bal_allocated; /* in blocks */
51515+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
51516+ atomic_unchecked_t s_bal_goals; /* goal hits */
51517+ atomic_unchecked_t s_bal_breaks; /* too long searches */
51518+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
51519 spinlock_t s_bal_lock;
51520 unsigned long s_mb_buddies_generated;
51521 unsigned long long s_mb_generation_time;
51522- atomic_t s_mb_lost_chunks;
51523- atomic_t s_mb_preallocated;
51524- atomic_t s_mb_discarded;
51525+ atomic_unchecked_t s_mb_lost_chunks;
51526+ atomic_unchecked_t s_mb_preallocated;
51527+ atomic_unchecked_t s_mb_discarded;
51528 atomic_t s_lock_busy;
51529
51530 /* locality groups */
51531diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
51532index def8408..8253d11 100644
51533--- a/fs/ext4/mballoc.c
51534+++ b/fs/ext4/mballoc.c
51535@@ -1865,7 +1865,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
51536 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
51537
51538 if (EXT4_SB(sb)->s_mb_stats)
51539- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
51540+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
51541
51542 break;
51543 }
51544@@ -2170,7 +2170,7 @@ repeat:
51545 ac->ac_status = AC_STATUS_CONTINUE;
51546 ac->ac_flags |= EXT4_MB_HINT_FIRST;
51547 cr = 3;
51548- atomic_inc(&sbi->s_mb_lost_chunks);
51549+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
51550 goto repeat;
51551 }
51552 }
51553@@ -2678,25 +2678,25 @@ int ext4_mb_release(struct super_block *sb)
51554 if (sbi->s_mb_stats) {
51555 ext4_msg(sb, KERN_INFO,
51556 "mballoc: %u blocks %u reqs (%u success)",
51557- atomic_read(&sbi->s_bal_allocated),
51558- atomic_read(&sbi->s_bal_reqs),
51559- atomic_read(&sbi->s_bal_success));
51560+ atomic_read_unchecked(&sbi->s_bal_allocated),
51561+ atomic_read_unchecked(&sbi->s_bal_reqs),
51562+ atomic_read_unchecked(&sbi->s_bal_success));
51563 ext4_msg(sb, KERN_INFO,
51564 "mballoc: %u extents scanned, %u goal hits, "
51565 "%u 2^N hits, %u breaks, %u lost",
51566- atomic_read(&sbi->s_bal_ex_scanned),
51567- atomic_read(&sbi->s_bal_goals),
51568- atomic_read(&sbi->s_bal_2orders),
51569- atomic_read(&sbi->s_bal_breaks),
51570- atomic_read(&sbi->s_mb_lost_chunks));
51571+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
51572+ atomic_read_unchecked(&sbi->s_bal_goals),
51573+ atomic_read_unchecked(&sbi->s_bal_2orders),
51574+ atomic_read_unchecked(&sbi->s_bal_breaks),
51575+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
51576 ext4_msg(sb, KERN_INFO,
51577 "mballoc: %lu generated and it took %Lu",
51578 sbi->s_mb_buddies_generated,
51579 sbi->s_mb_generation_time);
51580 ext4_msg(sb, KERN_INFO,
51581 "mballoc: %u preallocated, %u discarded",
51582- atomic_read(&sbi->s_mb_preallocated),
51583- atomic_read(&sbi->s_mb_discarded));
51584+ atomic_read_unchecked(&sbi->s_mb_preallocated),
51585+ atomic_read_unchecked(&sbi->s_mb_discarded));
51586 }
51587
51588 free_percpu(sbi->s_locality_groups);
51589@@ -3150,16 +3150,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
51590 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
51591
51592 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
51593- atomic_inc(&sbi->s_bal_reqs);
51594- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
51595+ atomic_inc_unchecked(&sbi->s_bal_reqs);
51596+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
51597 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
51598- atomic_inc(&sbi->s_bal_success);
51599- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
51600+ atomic_inc_unchecked(&sbi->s_bal_success);
51601+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
51602 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
51603 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
51604- atomic_inc(&sbi->s_bal_goals);
51605+ atomic_inc_unchecked(&sbi->s_bal_goals);
51606 if (ac->ac_found > sbi->s_mb_max_to_scan)
51607- atomic_inc(&sbi->s_bal_breaks);
51608+ atomic_inc_unchecked(&sbi->s_bal_breaks);
51609 }
51610
51611 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
51612@@ -3559,7 +3559,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
51613 trace_ext4_mb_new_inode_pa(ac, pa);
51614
51615 ext4_mb_use_inode_pa(ac, pa);
51616- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
51617+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
51618
51619 ei = EXT4_I(ac->ac_inode);
51620 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
51621@@ -3619,7 +3619,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
51622 trace_ext4_mb_new_group_pa(ac, pa);
51623
51624 ext4_mb_use_group_pa(ac, pa);
51625- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
51626+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
51627
51628 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
51629 lg = ac->ac_lg;
51630@@ -3708,7 +3708,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
51631 * from the bitmap and continue.
51632 */
51633 }
51634- atomic_add(free, &sbi->s_mb_discarded);
51635+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
51636
51637 return err;
51638 }
51639@@ -3726,7 +3726,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
51640 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
51641 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
51642 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
51643- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
51644+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
51645 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
51646
51647 return 0;
51648diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
51649index 214461e..3614c89 100644
51650--- a/fs/ext4/mmp.c
51651+++ b/fs/ext4/mmp.c
51652@@ -113,7 +113,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
51653 void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
51654 const char *function, unsigned int line, const char *msg)
51655 {
51656- __ext4_warning(sb, function, line, msg);
51657+ __ext4_warning(sb, function, line, "%s", msg);
51658 __ext4_warning(sb, function, line,
51659 "MMP failure info: last update time: %llu, last update "
51660 "node: %s, last update device: %s\n",
51661diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
51662index b27c96d..6ed4df2 100644
51663--- a/fs/ext4/resize.c
51664+++ b/fs/ext4/resize.c
51665@@ -79,12 +79,20 @@ static int verify_group_input(struct super_block *sb,
51666 ext4_fsblk_t end = start + input->blocks_count;
51667 ext4_group_t group = input->group;
51668 ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group;
51669- unsigned overhead = ext4_group_overhead_blocks(sb, group);
51670- ext4_fsblk_t metaend = start + overhead;
51671+ unsigned overhead;
51672+ ext4_fsblk_t metaend;
51673 struct buffer_head *bh = NULL;
51674 ext4_grpblk_t free_blocks_count, offset;
51675 int err = -EINVAL;
51676
51677+ if (group != sbi->s_groups_count) {
51678+ ext4_warning(sb, "Cannot add at group %u (only %u groups)",
51679+ input->group, sbi->s_groups_count);
51680+ return -EINVAL;
51681+ }
51682+
51683+ overhead = ext4_group_overhead_blocks(sb, group);
51684+ metaend = start + overhead;
51685 input->free_blocks_count = free_blocks_count =
51686 input->blocks_count - 2 - overhead - sbi->s_itb_per_group;
51687
51688@@ -96,10 +104,7 @@ static int verify_group_input(struct super_block *sb,
51689 free_blocks_count, input->reserved_blocks);
51690
51691 ext4_get_group_no_and_offset(sb, start, NULL, &offset);
51692- if (group != sbi->s_groups_count)
51693- ext4_warning(sb, "Cannot add at group %u (only %u groups)",
51694- input->group, sbi->s_groups_count);
51695- else if (offset != 0)
51696+ if (offset != 0)
51697 ext4_warning(sb, "Last group not full");
51698 else if (input->reserved_blocks > input->blocks_count / 5)
51699 ext4_warning(sb, "Reserved blocks too high (%u)",
51700diff --git a/fs/ext4/super.c b/fs/ext4/super.c
51701index 94cc84d..2490974 100644
51702--- a/fs/ext4/super.c
51703+++ b/fs/ext4/super.c
51704@@ -1236,7 +1236,7 @@ static ext4_fsblk_t get_sb_block(void **data)
51705 }
51706
51707 #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
51708-static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
51709+static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
51710 "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
51711
51712 #ifdef CONFIG_QUOTA
51713@@ -2378,7 +2378,7 @@ struct ext4_attr {
51714 ssize_t (*store)(struct ext4_attr *, struct ext4_sb_info *,
51715 const char *, size_t);
51716 int offset;
51717-};
51718+} __do_const;
51719
51720 static int parse_strtoull(const char *buf,
51721 unsigned long long max, unsigned long long *value)
51722diff --git a/fs/fcntl.c b/fs/fcntl.c
51723index 6599222..e7bf0de 100644
51724--- a/fs/fcntl.c
51725+++ b/fs/fcntl.c
51726@@ -107,6 +107,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
51727 if (err)
51728 return err;
51729
51730+ if (gr_handle_chroot_fowner(pid, type))
51731+ return -ENOENT;
51732+ if (gr_check_protected_task_fowner(pid, type))
51733+ return -EACCES;
51734+
51735 f_modown(filp, pid, type, force);
51736 return 0;
51737 }
51738diff --git a/fs/fhandle.c b/fs/fhandle.c
51739index 999ff5c..41f4109 100644
51740--- a/fs/fhandle.c
51741+++ b/fs/fhandle.c
51742@@ -67,8 +67,7 @@ static long do_sys_name_to_handle(struct path *path,
51743 } else
51744 retval = 0;
51745 /* copy the mount id */
51746- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
51747- sizeof(*mnt_id)) ||
51748+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
51749 copy_to_user(ufh, handle,
51750 sizeof(struct file_handle) + handle_bytes))
51751 retval = -EFAULT;
51752diff --git a/fs/file.c b/fs/file.c
51753index 4a78f98..9447397 100644
51754--- a/fs/file.c
51755+++ b/fs/file.c
51756@@ -16,6 +16,7 @@
51757 #include <linux/slab.h>
51758 #include <linux/vmalloc.h>
51759 #include <linux/file.h>
51760+#include <linux/security.h>
51761 #include <linux/fdtable.h>
51762 #include <linux/bitops.h>
51763 #include <linux/interrupt.h>
51764@@ -828,6 +829,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
51765 if (!file)
51766 return __close_fd(files, fd);
51767
51768+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
51769 if (fd >= rlimit(RLIMIT_NOFILE))
51770 return -EBADF;
51771
51772@@ -854,6 +856,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
51773 if (unlikely(oldfd == newfd))
51774 return -EINVAL;
51775
51776+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
51777 if (newfd >= rlimit(RLIMIT_NOFILE))
51778 return -EBADF;
51779
51780@@ -909,6 +912,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
51781 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
51782 {
51783 int err;
51784+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
51785 if (from >= rlimit(RLIMIT_NOFILE))
51786 return -EINVAL;
51787 err = alloc_fd(from, flags);
51788diff --git a/fs/filesystems.c b/fs/filesystems.c
51789index 92567d9..fcd8cbf 100644
51790--- a/fs/filesystems.c
51791+++ b/fs/filesystems.c
51792@@ -273,7 +273,11 @@ struct file_system_type *get_fs_type(const char *name)
51793 int len = dot ? dot - name : strlen(name);
51794
51795 fs = __get_fs_type(name, len);
51796+#ifdef CONFIG_GRKERNSEC_MODHARDEN
51797+ if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
51798+#else
51799 if (!fs && (request_module("fs-%.*s", len, name) == 0))
51800+#endif
51801 fs = __get_fs_type(name, len);
51802
51803 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
51804diff --git a/fs/fs_struct.c b/fs/fs_struct.c
51805index d8ac61d..79a36f0 100644
51806--- a/fs/fs_struct.c
51807+++ b/fs/fs_struct.c
51808@@ -4,6 +4,7 @@
51809 #include <linux/path.h>
51810 #include <linux/slab.h>
51811 #include <linux/fs_struct.h>
51812+#include <linux/grsecurity.h>
51813 #include "internal.h"
51814
51815 /*
51816@@ -19,6 +20,7 @@ void set_fs_root(struct fs_struct *fs, const struct path *path)
51817 write_seqcount_begin(&fs->seq);
51818 old_root = fs->root;
51819 fs->root = *path;
51820+ gr_set_chroot_entries(current, path);
51821 write_seqcount_end(&fs->seq);
51822 spin_unlock(&fs->lock);
51823 if (old_root.dentry)
51824@@ -67,6 +69,10 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
51825 int hits = 0;
51826 spin_lock(&fs->lock);
51827 write_seqcount_begin(&fs->seq);
51828+ /* this root replacement is only done by pivot_root,
51829+ leave grsec's chroot tagging alone for this task
51830+ so that a pivoted root isn't treated as a chroot
51831+ */
51832 hits += replace_path(&fs->root, old_root, new_root);
51833 hits += replace_path(&fs->pwd, old_root, new_root);
51834 write_seqcount_end(&fs->seq);
51835@@ -99,7 +105,8 @@ void exit_fs(struct task_struct *tsk)
51836 task_lock(tsk);
51837 spin_lock(&fs->lock);
51838 tsk->fs = NULL;
51839- kill = !--fs->users;
51840+ gr_clear_chroot_entries(tsk);
51841+ kill = !atomic_dec_return(&fs->users);
51842 spin_unlock(&fs->lock);
51843 task_unlock(tsk);
51844 if (kill)
51845@@ -112,7 +119,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
51846 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
51847 /* We don't need to lock fs - think why ;-) */
51848 if (fs) {
51849- fs->users = 1;
51850+ atomic_set(&fs->users, 1);
51851 fs->in_exec = 0;
51852 spin_lock_init(&fs->lock);
51853 seqcount_init(&fs->seq);
51854@@ -121,6 +128,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
51855 spin_lock(&old->lock);
51856 fs->root = old->root;
51857 path_get(&fs->root);
51858+ /* instead of calling gr_set_chroot_entries here,
51859+ we call it from every caller of this function
51860+ */
51861 fs->pwd = old->pwd;
51862 path_get(&fs->pwd);
51863 spin_unlock(&old->lock);
51864@@ -139,8 +149,9 @@ int unshare_fs_struct(void)
51865
51866 task_lock(current);
51867 spin_lock(&fs->lock);
51868- kill = !--fs->users;
51869+ kill = !atomic_dec_return(&fs->users);
51870 current->fs = new_fs;
51871+ gr_set_chroot_entries(current, &new_fs->root);
51872 spin_unlock(&fs->lock);
51873 task_unlock(current);
51874
51875@@ -153,13 +164,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
51876
51877 int current_umask(void)
51878 {
51879- return current->fs->umask;
51880+ return current->fs->umask | gr_acl_umask();
51881 }
51882 EXPORT_SYMBOL(current_umask);
51883
51884 /* to be mentioned only in INIT_TASK */
51885 struct fs_struct init_fs = {
51886- .users = 1,
51887+ .users = ATOMIC_INIT(1),
51888 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
51889 .seq = SEQCNT_ZERO,
51890 .umask = 0022,
51891diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
51892index e2cba1f..17a25bb 100644
51893--- a/fs/fscache/cookie.c
51894+++ b/fs/fscache/cookie.c
51895@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
51896 parent ? (char *) parent->def->name : "<no-parent>",
51897 def->name, netfs_data);
51898
51899- fscache_stat(&fscache_n_acquires);
51900+ fscache_stat_unchecked(&fscache_n_acquires);
51901
51902 /* if there's no parent cookie, then we don't create one here either */
51903 if (!parent) {
51904- fscache_stat(&fscache_n_acquires_null);
51905+ fscache_stat_unchecked(&fscache_n_acquires_null);
51906 _leave(" [no parent]");
51907 return NULL;
51908 }
51909@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
51910 /* allocate and initialise a cookie */
51911 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
51912 if (!cookie) {
51913- fscache_stat(&fscache_n_acquires_oom);
51914+ fscache_stat_unchecked(&fscache_n_acquires_oom);
51915 _leave(" [ENOMEM]");
51916 return NULL;
51917 }
51918@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
51919
51920 switch (cookie->def->type) {
51921 case FSCACHE_COOKIE_TYPE_INDEX:
51922- fscache_stat(&fscache_n_cookie_index);
51923+ fscache_stat_unchecked(&fscache_n_cookie_index);
51924 break;
51925 case FSCACHE_COOKIE_TYPE_DATAFILE:
51926- fscache_stat(&fscache_n_cookie_data);
51927+ fscache_stat_unchecked(&fscache_n_cookie_data);
51928 break;
51929 default:
51930- fscache_stat(&fscache_n_cookie_special);
51931+ fscache_stat_unchecked(&fscache_n_cookie_special);
51932 break;
51933 }
51934
51935@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
51936 if (fscache_acquire_non_index_cookie(cookie) < 0) {
51937 atomic_dec(&parent->n_children);
51938 __fscache_cookie_put(cookie);
51939- fscache_stat(&fscache_n_acquires_nobufs);
51940+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
51941 _leave(" = NULL");
51942 return NULL;
51943 }
51944 }
51945
51946- fscache_stat(&fscache_n_acquires_ok);
51947+ fscache_stat_unchecked(&fscache_n_acquires_ok);
51948 _leave(" = %p", cookie);
51949 return cookie;
51950 }
51951@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
51952 cache = fscache_select_cache_for_object(cookie->parent);
51953 if (!cache) {
51954 up_read(&fscache_addremove_sem);
51955- fscache_stat(&fscache_n_acquires_no_cache);
51956+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
51957 _leave(" = -ENOMEDIUM [no cache]");
51958 return -ENOMEDIUM;
51959 }
51960@@ -255,12 +255,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
51961 object = cache->ops->alloc_object(cache, cookie);
51962 fscache_stat_d(&fscache_n_cop_alloc_object);
51963 if (IS_ERR(object)) {
51964- fscache_stat(&fscache_n_object_no_alloc);
51965+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
51966 ret = PTR_ERR(object);
51967 goto error;
51968 }
51969
51970- fscache_stat(&fscache_n_object_alloc);
51971+ fscache_stat_unchecked(&fscache_n_object_alloc);
51972
51973 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
51974
51975@@ -376,7 +376,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
51976
51977 _enter("{%s}", cookie->def->name);
51978
51979- fscache_stat(&fscache_n_invalidates);
51980+ fscache_stat_unchecked(&fscache_n_invalidates);
51981
51982 /* Only permit invalidation of data files. Invalidating an index will
51983 * require the caller to release all its attachments to the tree rooted
51984@@ -434,10 +434,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
51985 {
51986 struct fscache_object *object;
51987
51988- fscache_stat(&fscache_n_updates);
51989+ fscache_stat_unchecked(&fscache_n_updates);
51990
51991 if (!cookie) {
51992- fscache_stat(&fscache_n_updates_null);
51993+ fscache_stat_unchecked(&fscache_n_updates_null);
51994 _leave(" [no cookie]");
51995 return;
51996 }
51997@@ -471,12 +471,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
51998 struct fscache_object *object;
51999 unsigned long event;
52000
52001- fscache_stat(&fscache_n_relinquishes);
52002+ fscache_stat_unchecked(&fscache_n_relinquishes);
52003 if (retire)
52004- fscache_stat(&fscache_n_relinquishes_retire);
52005+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
52006
52007 if (!cookie) {
52008- fscache_stat(&fscache_n_relinquishes_null);
52009+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
52010 _leave(" [no cookie]");
52011 return;
52012 }
52013@@ -492,7 +492,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
52014
52015 /* wait for the cookie to finish being instantiated (or to fail) */
52016 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
52017- fscache_stat(&fscache_n_relinquishes_waitcrt);
52018+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
52019 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
52020 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
52021 }
52022diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
52023index ee38fef..0a326d4 100644
52024--- a/fs/fscache/internal.h
52025+++ b/fs/fscache/internal.h
52026@@ -148,101 +148,101 @@ extern void fscache_proc_cleanup(void);
52027 * stats.c
52028 */
52029 #ifdef CONFIG_FSCACHE_STATS
52030-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
52031-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
52032+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
52033+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
52034
52035-extern atomic_t fscache_n_op_pend;
52036-extern atomic_t fscache_n_op_run;
52037-extern atomic_t fscache_n_op_enqueue;
52038-extern atomic_t fscache_n_op_deferred_release;
52039-extern atomic_t fscache_n_op_release;
52040-extern atomic_t fscache_n_op_gc;
52041-extern atomic_t fscache_n_op_cancelled;
52042-extern atomic_t fscache_n_op_rejected;
52043+extern atomic_unchecked_t fscache_n_op_pend;
52044+extern atomic_unchecked_t fscache_n_op_run;
52045+extern atomic_unchecked_t fscache_n_op_enqueue;
52046+extern atomic_unchecked_t fscache_n_op_deferred_release;
52047+extern atomic_unchecked_t fscache_n_op_release;
52048+extern atomic_unchecked_t fscache_n_op_gc;
52049+extern atomic_unchecked_t fscache_n_op_cancelled;
52050+extern atomic_unchecked_t fscache_n_op_rejected;
52051
52052-extern atomic_t fscache_n_attr_changed;
52053-extern atomic_t fscache_n_attr_changed_ok;
52054-extern atomic_t fscache_n_attr_changed_nobufs;
52055-extern atomic_t fscache_n_attr_changed_nomem;
52056-extern atomic_t fscache_n_attr_changed_calls;
52057+extern atomic_unchecked_t fscache_n_attr_changed;
52058+extern atomic_unchecked_t fscache_n_attr_changed_ok;
52059+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
52060+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
52061+extern atomic_unchecked_t fscache_n_attr_changed_calls;
52062
52063-extern atomic_t fscache_n_allocs;
52064-extern atomic_t fscache_n_allocs_ok;
52065-extern atomic_t fscache_n_allocs_wait;
52066-extern atomic_t fscache_n_allocs_nobufs;
52067-extern atomic_t fscache_n_allocs_intr;
52068-extern atomic_t fscache_n_allocs_object_dead;
52069-extern atomic_t fscache_n_alloc_ops;
52070-extern atomic_t fscache_n_alloc_op_waits;
52071+extern atomic_unchecked_t fscache_n_allocs;
52072+extern atomic_unchecked_t fscache_n_allocs_ok;
52073+extern atomic_unchecked_t fscache_n_allocs_wait;
52074+extern atomic_unchecked_t fscache_n_allocs_nobufs;
52075+extern atomic_unchecked_t fscache_n_allocs_intr;
52076+extern atomic_unchecked_t fscache_n_allocs_object_dead;
52077+extern atomic_unchecked_t fscache_n_alloc_ops;
52078+extern atomic_unchecked_t fscache_n_alloc_op_waits;
52079
52080-extern atomic_t fscache_n_retrievals;
52081-extern atomic_t fscache_n_retrievals_ok;
52082-extern atomic_t fscache_n_retrievals_wait;
52083-extern atomic_t fscache_n_retrievals_nodata;
52084-extern atomic_t fscache_n_retrievals_nobufs;
52085-extern atomic_t fscache_n_retrievals_intr;
52086-extern atomic_t fscache_n_retrievals_nomem;
52087-extern atomic_t fscache_n_retrievals_object_dead;
52088-extern atomic_t fscache_n_retrieval_ops;
52089-extern atomic_t fscache_n_retrieval_op_waits;
52090+extern atomic_unchecked_t fscache_n_retrievals;
52091+extern atomic_unchecked_t fscache_n_retrievals_ok;
52092+extern atomic_unchecked_t fscache_n_retrievals_wait;
52093+extern atomic_unchecked_t fscache_n_retrievals_nodata;
52094+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
52095+extern atomic_unchecked_t fscache_n_retrievals_intr;
52096+extern atomic_unchecked_t fscache_n_retrievals_nomem;
52097+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
52098+extern atomic_unchecked_t fscache_n_retrieval_ops;
52099+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
52100
52101-extern atomic_t fscache_n_stores;
52102-extern atomic_t fscache_n_stores_ok;
52103-extern atomic_t fscache_n_stores_again;
52104-extern atomic_t fscache_n_stores_nobufs;
52105-extern atomic_t fscache_n_stores_oom;
52106-extern atomic_t fscache_n_store_ops;
52107-extern atomic_t fscache_n_store_calls;
52108-extern atomic_t fscache_n_store_pages;
52109-extern atomic_t fscache_n_store_radix_deletes;
52110-extern atomic_t fscache_n_store_pages_over_limit;
52111+extern atomic_unchecked_t fscache_n_stores;
52112+extern atomic_unchecked_t fscache_n_stores_ok;
52113+extern atomic_unchecked_t fscache_n_stores_again;
52114+extern atomic_unchecked_t fscache_n_stores_nobufs;
52115+extern atomic_unchecked_t fscache_n_stores_oom;
52116+extern atomic_unchecked_t fscache_n_store_ops;
52117+extern atomic_unchecked_t fscache_n_store_calls;
52118+extern atomic_unchecked_t fscache_n_store_pages;
52119+extern atomic_unchecked_t fscache_n_store_radix_deletes;
52120+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
52121
52122-extern atomic_t fscache_n_store_vmscan_not_storing;
52123-extern atomic_t fscache_n_store_vmscan_gone;
52124-extern atomic_t fscache_n_store_vmscan_busy;
52125-extern atomic_t fscache_n_store_vmscan_cancelled;
52126-extern atomic_t fscache_n_store_vmscan_wait;
52127+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
52128+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
52129+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
52130+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
52131+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
52132
52133-extern atomic_t fscache_n_marks;
52134-extern atomic_t fscache_n_uncaches;
52135+extern atomic_unchecked_t fscache_n_marks;
52136+extern atomic_unchecked_t fscache_n_uncaches;
52137
52138-extern atomic_t fscache_n_acquires;
52139-extern atomic_t fscache_n_acquires_null;
52140-extern atomic_t fscache_n_acquires_no_cache;
52141-extern atomic_t fscache_n_acquires_ok;
52142-extern atomic_t fscache_n_acquires_nobufs;
52143-extern atomic_t fscache_n_acquires_oom;
52144+extern atomic_unchecked_t fscache_n_acquires;
52145+extern atomic_unchecked_t fscache_n_acquires_null;
52146+extern atomic_unchecked_t fscache_n_acquires_no_cache;
52147+extern atomic_unchecked_t fscache_n_acquires_ok;
52148+extern atomic_unchecked_t fscache_n_acquires_nobufs;
52149+extern atomic_unchecked_t fscache_n_acquires_oom;
52150
52151-extern atomic_t fscache_n_invalidates;
52152-extern atomic_t fscache_n_invalidates_run;
52153+extern atomic_unchecked_t fscache_n_invalidates;
52154+extern atomic_unchecked_t fscache_n_invalidates_run;
52155
52156-extern atomic_t fscache_n_updates;
52157-extern atomic_t fscache_n_updates_null;
52158-extern atomic_t fscache_n_updates_run;
52159+extern atomic_unchecked_t fscache_n_updates;
52160+extern atomic_unchecked_t fscache_n_updates_null;
52161+extern atomic_unchecked_t fscache_n_updates_run;
52162
52163-extern atomic_t fscache_n_relinquishes;
52164-extern atomic_t fscache_n_relinquishes_null;
52165-extern atomic_t fscache_n_relinquishes_waitcrt;
52166-extern atomic_t fscache_n_relinquishes_retire;
52167+extern atomic_unchecked_t fscache_n_relinquishes;
52168+extern atomic_unchecked_t fscache_n_relinquishes_null;
52169+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
52170+extern atomic_unchecked_t fscache_n_relinquishes_retire;
52171
52172-extern atomic_t fscache_n_cookie_index;
52173-extern atomic_t fscache_n_cookie_data;
52174-extern atomic_t fscache_n_cookie_special;
52175+extern atomic_unchecked_t fscache_n_cookie_index;
52176+extern atomic_unchecked_t fscache_n_cookie_data;
52177+extern atomic_unchecked_t fscache_n_cookie_special;
52178
52179-extern atomic_t fscache_n_object_alloc;
52180-extern atomic_t fscache_n_object_no_alloc;
52181-extern atomic_t fscache_n_object_lookups;
52182-extern atomic_t fscache_n_object_lookups_negative;
52183-extern atomic_t fscache_n_object_lookups_positive;
52184-extern atomic_t fscache_n_object_lookups_timed_out;
52185-extern atomic_t fscache_n_object_created;
52186-extern atomic_t fscache_n_object_avail;
52187-extern atomic_t fscache_n_object_dead;
52188+extern atomic_unchecked_t fscache_n_object_alloc;
52189+extern atomic_unchecked_t fscache_n_object_no_alloc;
52190+extern atomic_unchecked_t fscache_n_object_lookups;
52191+extern atomic_unchecked_t fscache_n_object_lookups_negative;
52192+extern atomic_unchecked_t fscache_n_object_lookups_positive;
52193+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
52194+extern atomic_unchecked_t fscache_n_object_created;
52195+extern atomic_unchecked_t fscache_n_object_avail;
52196+extern atomic_unchecked_t fscache_n_object_dead;
52197
52198-extern atomic_t fscache_n_checkaux_none;
52199-extern atomic_t fscache_n_checkaux_okay;
52200-extern atomic_t fscache_n_checkaux_update;
52201-extern atomic_t fscache_n_checkaux_obsolete;
52202+extern atomic_unchecked_t fscache_n_checkaux_none;
52203+extern atomic_unchecked_t fscache_n_checkaux_okay;
52204+extern atomic_unchecked_t fscache_n_checkaux_update;
52205+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
52206
52207 extern atomic_t fscache_n_cop_alloc_object;
52208 extern atomic_t fscache_n_cop_lookup_object;
52209@@ -267,6 +267,11 @@ static inline void fscache_stat(atomic_t *stat)
52210 atomic_inc(stat);
52211 }
52212
52213+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
52214+{
52215+ atomic_inc_unchecked(stat);
52216+}
52217+
52218 static inline void fscache_stat_d(atomic_t *stat)
52219 {
52220 atomic_dec(stat);
52221@@ -279,6 +284,7 @@ extern const struct file_operations fscache_stats_fops;
52222
52223 #define __fscache_stat(stat) (NULL)
52224 #define fscache_stat(stat) do {} while (0)
52225+#define fscache_stat_unchecked(stat) do {} while (0)
52226 #define fscache_stat_d(stat) do {} while (0)
52227 #endif
52228
52229diff --git a/fs/fscache/object.c b/fs/fscache/object.c
52230index 50d41c1..10ee117 100644
52231--- a/fs/fscache/object.c
52232+++ b/fs/fscache/object.c
52233@@ -143,7 +143,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
52234 /* Invalidate an object on disk */
52235 case FSCACHE_OBJECT_INVALIDATING:
52236 clear_bit(FSCACHE_OBJECT_EV_INVALIDATE, &object->events);
52237- fscache_stat(&fscache_n_invalidates_run);
52238+ fscache_stat_unchecked(&fscache_n_invalidates_run);
52239 fscache_stat(&fscache_n_cop_invalidate_object);
52240 fscache_invalidate_object(object);
52241 fscache_stat_d(&fscache_n_cop_invalidate_object);
52242@@ -153,7 +153,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
52243 /* update the object metadata on disk */
52244 case FSCACHE_OBJECT_UPDATING:
52245 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
52246- fscache_stat(&fscache_n_updates_run);
52247+ fscache_stat_unchecked(&fscache_n_updates_run);
52248 fscache_stat(&fscache_n_cop_update_object);
52249 object->cache->ops->update_object(object);
52250 fscache_stat_d(&fscache_n_cop_update_object);
52251@@ -242,7 +242,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
52252 spin_lock(&object->lock);
52253 object->state = FSCACHE_OBJECT_DEAD;
52254 spin_unlock(&object->lock);
52255- fscache_stat(&fscache_n_object_dead);
52256+ fscache_stat_unchecked(&fscache_n_object_dead);
52257 goto terminal_transit;
52258
52259 /* handle the parent cache of this object being withdrawn from
52260@@ -257,7 +257,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
52261 spin_lock(&object->lock);
52262 object->state = FSCACHE_OBJECT_DEAD;
52263 spin_unlock(&object->lock);
52264- fscache_stat(&fscache_n_object_dead);
52265+ fscache_stat_unchecked(&fscache_n_object_dead);
52266 goto terminal_transit;
52267
52268 /* complain about the object being woken up once it is
52269@@ -495,7 +495,7 @@ static void fscache_lookup_object(struct fscache_object *object)
52270 parent->cookie->def->name, cookie->def->name,
52271 object->cache->tag->name);
52272
52273- fscache_stat(&fscache_n_object_lookups);
52274+ fscache_stat_unchecked(&fscache_n_object_lookups);
52275 fscache_stat(&fscache_n_cop_lookup_object);
52276 ret = object->cache->ops->lookup_object(object);
52277 fscache_stat_d(&fscache_n_cop_lookup_object);
52278@@ -506,7 +506,7 @@ static void fscache_lookup_object(struct fscache_object *object)
52279 if (ret == -ETIMEDOUT) {
52280 /* probably stuck behind another object, so move this one to
52281 * the back of the queue */
52282- fscache_stat(&fscache_n_object_lookups_timed_out);
52283+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
52284 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
52285 }
52286
52287@@ -529,7 +529,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
52288
52289 spin_lock(&object->lock);
52290 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
52291- fscache_stat(&fscache_n_object_lookups_negative);
52292+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
52293
52294 /* transit here to allow write requests to begin stacking up
52295 * and read requests to begin returning ENODATA */
52296@@ -575,7 +575,7 @@ void fscache_obtained_object(struct fscache_object *object)
52297 * result, in which case there may be data available */
52298 spin_lock(&object->lock);
52299 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
52300- fscache_stat(&fscache_n_object_lookups_positive);
52301+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
52302
52303 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
52304
52305@@ -589,7 +589,7 @@ void fscache_obtained_object(struct fscache_object *object)
52306 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
52307 } else {
52308 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
52309- fscache_stat(&fscache_n_object_created);
52310+ fscache_stat_unchecked(&fscache_n_object_created);
52311
52312 object->state = FSCACHE_OBJECT_AVAILABLE;
52313 spin_unlock(&object->lock);
52314@@ -634,7 +634,7 @@ static void fscache_object_available(struct fscache_object *object)
52315 fscache_enqueue_dependents(object);
52316
52317 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
52318- fscache_stat(&fscache_n_object_avail);
52319+ fscache_stat_unchecked(&fscache_n_object_avail);
52320
52321 _leave("");
52322 }
52323@@ -894,7 +894,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
52324 enum fscache_checkaux result;
52325
52326 if (!object->cookie->def->check_aux) {
52327- fscache_stat(&fscache_n_checkaux_none);
52328+ fscache_stat_unchecked(&fscache_n_checkaux_none);
52329 return FSCACHE_CHECKAUX_OKAY;
52330 }
52331
52332@@ -903,17 +903,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
52333 switch (result) {
52334 /* entry okay as is */
52335 case FSCACHE_CHECKAUX_OKAY:
52336- fscache_stat(&fscache_n_checkaux_okay);
52337+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
52338 break;
52339
52340 /* entry requires update */
52341 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
52342- fscache_stat(&fscache_n_checkaux_update);
52343+ fscache_stat_unchecked(&fscache_n_checkaux_update);
52344 break;
52345
52346 /* entry requires deletion */
52347 case FSCACHE_CHECKAUX_OBSOLETE:
52348- fscache_stat(&fscache_n_checkaux_obsolete);
52349+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
52350 break;
52351
52352 default:
52353diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
52354index 762a9ec..2023284 100644
52355--- a/fs/fscache/operation.c
52356+++ b/fs/fscache/operation.c
52357@@ -17,7 +17,7 @@
52358 #include <linux/slab.h>
52359 #include "internal.h"
52360
52361-atomic_t fscache_op_debug_id;
52362+atomic_unchecked_t fscache_op_debug_id;
52363 EXPORT_SYMBOL(fscache_op_debug_id);
52364
52365 /**
52366@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
52367 ASSERTCMP(atomic_read(&op->usage), >, 0);
52368 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
52369
52370- fscache_stat(&fscache_n_op_enqueue);
52371+ fscache_stat_unchecked(&fscache_n_op_enqueue);
52372 switch (op->flags & FSCACHE_OP_TYPE) {
52373 case FSCACHE_OP_ASYNC:
52374 _debug("queue async");
52375@@ -73,7 +73,7 @@ static void fscache_run_op(struct fscache_object *object,
52376 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
52377 if (op->processor)
52378 fscache_enqueue_operation(op);
52379- fscache_stat(&fscache_n_op_run);
52380+ fscache_stat_unchecked(&fscache_n_op_run);
52381 }
52382
52383 /*
52384@@ -105,11 +105,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
52385 if (object->n_in_progress > 0) {
52386 atomic_inc(&op->usage);
52387 list_add_tail(&op->pend_link, &object->pending_ops);
52388- fscache_stat(&fscache_n_op_pend);
52389+ fscache_stat_unchecked(&fscache_n_op_pend);
52390 } else if (!list_empty(&object->pending_ops)) {
52391 atomic_inc(&op->usage);
52392 list_add_tail(&op->pend_link, &object->pending_ops);
52393- fscache_stat(&fscache_n_op_pend);
52394+ fscache_stat_unchecked(&fscache_n_op_pend);
52395 fscache_start_operations(object);
52396 } else {
52397 ASSERTCMP(object->n_in_progress, ==, 0);
52398@@ -125,7 +125,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
52399 object->n_exclusive++; /* reads and writes must wait */
52400 atomic_inc(&op->usage);
52401 list_add_tail(&op->pend_link, &object->pending_ops);
52402- fscache_stat(&fscache_n_op_pend);
52403+ fscache_stat_unchecked(&fscache_n_op_pend);
52404 ret = 0;
52405 } else {
52406 /* If we're in any other state, there must have been an I/O
52407@@ -215,11 +215,11 @@ int fscache_submit_op(struct fscache_object *object,
52408 if (object->n_exclusive > 0) {
52409 atomic_inc(&op->usage);
52410 list_add_tail(&op->pend_link, &object->pending_ops);
52411- fscache_stat(&fscache_n_op_pend);
52412+ fscache_stat_unchecked(&fscache_n_op_pend);
52413 } else if (!list_empty(&object->pending_ops)) {
52414 atomic_inc(&op->usage);
52415 list_add_tail(&op->pend_link, &object->pending_ops);
52416- fscache_stat(&fscache_n_op_pend);
52417+ fscache_stat_unchecked(&fscache_n_op_pend);
52418 fscache_start_operations(object);
52419 } else {
52420 ASSERTCMP(object->n_exclusive, ==, 0);
52421@@ -231,12 +231,12 @@ int fscache_submit_op(struct fscache_object *object,
52422 object->n_ops++;
52423 atomic_inc(&op->usage);
52424 list_add_tail(&op->pend_link, &object->pending_ops);
52425- fscache_stat(&fscache_n_op_pend);
52426+ fscache_stat_unchecked(&fscache_n_op_pend);
52427 ret = 0;
52428 } else if (object->state == FSCACHE_OBJECT_DYING ||
52429 object->state == FSCACHE_OBJECT_LC_DYING ||
52430 object->state == FSCACHE_OBJECT_WITHDRAWING) {
52431- fscache_stat(&fscache_n_op_rejected);
52432+ fscache_stat_unchecked(&fscache_n_op_rejected);
52433 op->state = FSCACHE_OP_ST_CANCELLED;
52434 ret = -ENOBUFS;
52435 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
52436@@ -315,7 +315,7 @@ int fscache_cancel_op(struct fscache_operation *op,
52437 ret = -EBUSY;
52438 if (op->state == FSCACHE_OP_ST_PENDING) {
52439 ASSERT(!list_empty(&op->pend_link));
52440- fscache_stat(&fscache_n_op_cancelled);
52441+ fscache_stat_unchecked(&fscache_n_op_cancelled);
52442 list_del_init(&op->pend_link);
52443 if (do_cancel)
52444 do_cancel(op);
52445@@ -347,7 +347,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
52446 while (!list_empty(&object->pending_ops)) {
52447 op = list_entry(object->pending_ops.next,
52448 struct fscache_operation, pend_link);
52449- fscache_stat(&fscache_n_op_cancelled);
52450+ fscache_stat_unchecked(&fscache_n_op_cancelled);
52451 list_del_init(&op->pend_link);
52452
52453 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
52454@@ -419,7 +419,7 @@ void fscache_put_operation(struct fscache_operation *op)
52455 op->state, ==, FSCACHE_OP_ST_CANCELLED);
52456 op->state = FSCACHE_OP_ST_DEAD;
52457
52458- fscache_stat(&fscache_n_op_release);
52459+ fscache_stat_unchecked(&fscache_n_op_release);
52460
52461 if (op->release) {
52462 op->release(op);
52463@@ -442,7 +442,7 @@ void fscache_put_operation(struct fscache_operation *op)
52464 * lock, and defer it otherwise */
52465 if (!spin_trylock(&object->lock)) {
52466 _debug("defer put");
52467- fscache_stat(&fscache_n_op_deferred_release);
52468+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
52469
52470 cache = object->cache;
52471 spin_lock(&cache->op_gc_list_lock);
52472@@ -495,7 +495,7 @@ void fscache_operation_gc(struct work_struct *work)
52473
52474 _debug("GC DEFERRED REL OBJ%x OP%x",
52475 object->debug_id, op->debug_id);
52476- fscache_stat(&fscache_n_op_gc);
52477+ fscache_stat_unchecked(&fscache_n_op_gc);
52478
52479 ASSERTCMP(atomic_read(&op->usage), ==, 0);
52480 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
52481diff --git a/fs/fscache/page.c b/fs/fscache/page.c
52482index ff000e5..c44ec6d 100644
52483--- a/fs/fscache/page.c
52484+++ b/fs/fscache/page.c
52485@@ -61,7 +61,7 @@ try_again:
52486 val = radix_tree_lookup(&cookie->stores, page->index);
52487 if (!val) {
52488 rcu_read_unlock();
52489- fscache_stat(&fscache_n_store_vmscan_not_storing);
52490+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
52491 __fscache_uncache_page(cookie, page);
52492 return true;
52493 }
52494@@ -91,11 +91,11 @@ try_again:
52495 spin_unlock(&cookie->stores_lock);
52496
52497 if (xpage) {
52498- fscache_stat(&fscache_n_store_vmscan_cancelled);
52499- fscache_stat(&fscache_n_store_radix_deletes);
52500+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
52501+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
52502 ASSERTCMP(xpage, ==, page);
52503 } else {
52504- fscache_stat(&fscache_n_store_vmscan_gone);
52505+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
52506 }
52507
52508 wake_up_bit(&cookie->flags, 0);
52509@@ -110,11 +110,11 @@ page_busy:
52510 * sleeping on memory allocation, so we may need to impose a timeout
52511 * too. */
52512 if (!(gfp & __GFP_WAIT)) {
52513- fscache_stat(&fscache_n_store_vmscan_busy);
52514+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
52515 return false;
52516 }
52517
52518- fscache_stat(&fscache_n_store_vmscan_wait);
52519+ fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
52520 __fscache_wait_on_page_write(cookie, page);
52521 gfp &= ~__GFP_WAIT;
52522 goto try_again;
52523@@ -140,7 +140,7 @@ static void fscache_end_page_write(struct fscache_object *object,
52524 FSCACHE_COOKIE_STORING_TAG);
52525 if (!radix_tree_tag_get(&cookie->stores, page->index,
52526 FSCACHE_COOKIE_PENDING_TAG)) {
52527- fscache_stat(&fscache_n_store_radix_deletes);
52528+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
52529 xpage = radix_tree_delete(&cookie->stores, page->index);
52530 }
52531 spin_unlock(&cookie->stores_lock);
52532@@ -161,7 +161,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
52533
52534 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
52535
52536- fscache_stat(&fscache_n_attr_changed_calls);
52537+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
52538
52539 if (fscache_object_is_active(object)) {
52540 fscache_stat(&fscache_n_cop_attr_changed);
52541@@ -187,11 +187,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
52542
52543 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
52544
52545- fscache_stat(&fscache_n_attr_changed);
52546+ fscache_stat_unchecked(&fscache_n_attr_changed);
52547
52548 op = kzalloc(sizeof(*op), GFP_KERNEL);
52549 if (!op) {
52550- fscache_stat(&fscache_n_attr_changed_nomem);
52551+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
52552 _leave(" = -ENOMEM");
52553 return -ENOMEM;
52554 }
52555@@ -209,7 +209,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
52556 if (fscache_submit_exclusive_op(object, op) < 0)
52557 goto nobufs;
52558 spin_unlock(&cookie->lock);
52559- fscache_stat(&fscache_n_attr_changed_ok);
52560+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
52561 fscache_put_operation(op);
52562 _leave(" = 0");
52563 return 0;
52564@@ -217,7 +217,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
52565 nobufs:
52566 spin_unlock(&cookie->lock);
52567 kfree(op);
52568- fscache_stat(&fscache_n_attr_changed_nobufs);
52569+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
52570 _leave(" = %d", -ENOBUFS);
52571 return -ENOBUFS;
52572 }
52573@@ -255,7 +255,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
52574 /* allocate a retrieval operation and attempt to submit it */
52575 op = kzalloc(sizeof(*op), GFP_NOIO);
52576 if (!op) {
52577- fscache_stat(&fscache_n_retrievals_nomem);
52578+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
52579 return NULL;
52580 }
52581
52582@@ -283,13 +283,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
52583 return 0;
52584 }
52585
52586- fscache_stat(&fscache_n_retrievals_wait);
52587+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
52588
52589 jif = jiffies;
52590 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
52591 fscache_wait_bit_interruptible,
52592 TASK_INTERRUPTIBLE) != 0) {
52593- fscache_stat(&fscache_n_retrievals_intr);
52594+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
52595 _leave(" = -ERESTARTSYS");
52596 return -ERESTARTSYS;
52597 }
52598@@ -318,8 +318,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
52599 */
52600 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
52601 struct fscache_retrieval *op,
52602- atomic_t *stat_op_waits,
52603- atomic_t *stat_object_dead)
52604+ atomic_unchecked_t *stat_op_waits,
52605+ atomic_unchecked_t *stat_object_dead)
52606 {
52607 int ret;
52608
52609@@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
52610 goto check_if_dead;
52611
52612 _debug(">>> WT");
52613- fscache_stat(stat_op_waits);
52614+ fscache_stat_unchecked(stat_op_waits);
52615 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
52616 fscache_wait_bit_interruptible,
52617 TASK_INTERRUPTIBLE) != 0) {
52618@@ -344,14 +344,14 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
52619
52620 check_if_dead:
52621 if (op->op.state == FSCACHE_OP_ST_CANCELLED) {
52622- fscache_stat(stat_object_dead);
52623+ fscache_stat_unchecked(stat_object_dead);
52624 _leave(" = -ENOBUFS [cancelled]");
52625 return -ENOBUFS;
52626 }
52627 if (unlikely(fscache_object_is_dead(object))) {
52628 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->op.state);
52629 fscache_cancel_op(&op->op, fscache_do_cancel_retrieval);
52630- fscache_stat(stat_object_dead);
52631+ fscache_stat_unchecked(stat_object_dead);
52632 return -ENOBUFS;
52633 }
52634 return 0;
52635@@ -378,7 +378,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
52636
52637 _enter("%p,%p,,,", cookie, page);
52638
52639- fscache_stat(&fscache_n_retrievals);
52640+ fscache_stat_unchecked(&fscache_n_retrievals);
52641
52642 if (hlist_empty(&cookie->backing_objects))
52643 goto nobufs;
52644@@ -417,7 +417,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
52645 goto nobufs_unlock_dec;
52646 spin_unlock(&cookie->lock);
52647
52648- fscache_stat(&fscache_n_retrieval_ops);
52649+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
52650
52651 /* pin the netfs read context in case we need to do the actual netfs
52652 * read because we've encountered a cache read failure */
52653@@ -447,15 +447,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
52654
52655 error:
52656 if (ret == -ENOMEM)
52657- fscache_stat(&fscache_n_retrievals_nomem);
52658+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
52659 else if (ret == -ERESTARTSYS)
52660- fscache_stat(&fscache_n_retrievals_intr);
52661+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
52662 else if (ret == -ENODATA)
52663- fscache_stat(&fscache_n_retrievals_nodata);
52664+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
52665 else if (ret < 0)
52666- fscache_stat(&fscache_n_retrievals_nobufs);
52667+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
52668 else
52669- fscache_stat(&fscache_n_retrievals_ok);
52670+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
52671
52672 fscache_put_retrieval(op);
52673 _leave(" = %d", ret);
52674@@ -467,7 +467,7 @@ nobufs_unlock:
52675 spin_unlock(&cookie->lock);
52676 kfree(op);
52677 nobufs:
52678- fscache_stat(&fscache_n_retrievals_nobufs);
52679+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
52680 _leave(" = -ENOBUFS");
52681 return -ENOBUFS;
52682 }
52683@@ -505,7 +505,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
52684
52685 _enter("%p,,%d,,,", cookie, *nr_pages);
52686
52687- fscache_stat(&fscache_n_retrievals);
52688+ fscache_stat_unchecked(&fscache_n_retrievals);
52689
52690 if (hlist_empty(&cookie->backing_objects))
52691 goto nobufs;
52692@@ -541,7 +541,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
52693 goto nobufs_unlock_dec;
52694 spin_unlock(&cookie->lock);
52695
52696- fscache_stat(&fscache_n_retrieval_ops);
52697+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
52698
52699 /* pin the netfs read context in case we need to do the actual netfs
52700 * read because we've encountered a cache read failure */
52701@@ -571,15 +571,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
52702
52703 error:
52704 if (ret == -ENOMEM)
52705- fscache_stat(&fscache_n_retrievals_nomem);
52706+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
52707 else if (ret == -ERESTARTSYS)
52708- fscache_stat(&fscache_n_retrievals_intr);
52709+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
52710 else if (ret == -ENODATA)
52711- fscache_stat(&fscache_n_retrievals_nodata);
52712+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
52713 else if (ret < 0)
52714- fscache_stat(&fscache_n_retrievals_nobufs);
52715+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
52716 else
52717- fscache_stat(&fscache_n_retrievals_ok);
52718+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
52719
52720 fscache_put_retrieval(op);
52721 _leave(" = %d", ret);
52722@@ -591,7 +591,7 @@ nobufs_unlock:
52723 spin_unlock(&cookie->lock);
52724 kfree(op);
52725 nobufs:
52726- fscache_stat(&fscache_n_retrievals_nobufs);
52727+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
52728 _leave(" = -ENOBUFS");
52729 return -ENOBUFS;
52730 }
52731@@ -615,7 +615,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
52732
52733 _enter("%p,%p,,,", cookie, page);
52734
52735- fscache_stat(&fscache_n_allocs);
52736+ fscache_stat_unchecked(&fscache_n_allocs);
52737
52738 if (hlist_empty(&cookie->backing_objects))
52739 goto nobufs;
52740@@ -647,7 +647,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
52741 goto nobufs_unlock;
52742 spin_unlock(&cookie->lock);
52743
52744- fscache_stat(&fscache_n_alloc_ops);
52745+ fscache_stat_unchecked(&fscache_n_alloc_ops);
52746
52747 ret = fscache_wait_for_retrieval_activation(
52748 object, op,
52749@@ -663,11 +663,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
52750
52751 error:
52752 if (ret == -ERESTARTSYS)
52753- fscache_stat(&fscache_n_allocs_intr);
52754+ fscache_stat_unchecked(&fscache_n_allocs_intr);
52755 else if (ret < 0)
52756- fscache_stat(&fscache_n_allocs_nobufs);
52757+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
52758 else
52759- fscache_stat(&fscache_n_allocs_ok);
52760+ fscache_stat_unchecked(&fscache_n_allocs_ok);
52761
52762 fscache_put_retrieval(op);
52763 _leave(" = %d", ret);
52764@@ -677,7 +677,7 @@ nobufs_unlock:
52765 spin_unlock(&cookie->lock);
52766 kfree(op);
52767 nobufs:
52768- fscache_stat(&fscache_n_allocs_nobufs);
52769+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
52770 _leave(" = -ENOBUFS");
52771 return -ENOBUFS;
52772 }
52773@@ -736,7 +736,7 @@ static void fscache_write_op(struct fscache_operation *_op)
52774
52775 spin_lock(&cookie->stores_lock);
52776
52777- fscache_stat(&fscache_n_store_calls);
52778+ fscache_stat_unchecked(&fscache_n_store_calls);
52779
52780 /* find a page to store */
52781 page = NULL;
52782@@ -747,7 +747,7 @@ static void fscache_write_op(struct fscache_operation *_op)
52783 page = results[0];
52784 _debug("gang %d [%lx]", n, page->index);
52785 if (page->index > op->store_limit) {
52786- fscache_stat(&fscache_n_store_pages_over_limit);
52787+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
52788 goto superseded;
52789 }
52790
52791@@ -759,7 +759,7 @@ static void fscache_write_op(struct fscache_operation *_op)
52792 spin_unlock(&cookie->stores_lock);
52793 spin_unlock(&object->lock);
52794
52795- fscache_stat(&fscache_n_store_pages);
52796+ fscache_stat_unchecked(&fscache_n_store_pages);
52797 fscache_stat(&fscache_n_cop_write_page);
52798 ret = object->cache->ops->write_page(op, page);
52799 fscache_stat_d(&fscache_n_cop_write_page);
52800@@ -860,7 +860,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
52801 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
52802 ASSERT(PageFsCache(page));
52803
52804- fscache_stat(&fscache_n_stores);
52805+ fscache_stat_unchecked(&fscache_n_stores);
52806
52807 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
52808 _leave(" = -ENOBUFS [invalidating]");
52809@@ -916,7 +916,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
52810 spin_unlock(&cookie->stores_lock);
52811 spin_unlock(&object->lock);
52812
52813- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
52814+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
52815 op->store_limit = object->store_limit;
52816
52817 if (fscache_submit_op(object, &op->op) < 0)
52818@@ -924,8 +924,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
52819
52820 spin_unlock(&cookie->lock);
52821 radix_tree_preload_end();
52822- fscache_stat(&fscache_n_store_ops);
52823- fscache_stat(&fscache_n_stores_ok);
52824+ fscache_stat_unchecked(&fscache_n_store_ops);
52825+ fscache_stat_unchecked(&fscache_n_stores_ok);
52826
52827 /* the work queue now carries its own ref on the object */
52828 fscache_put_operation(&op->op);
52829@@ -933,14 +933,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
52830 return 0;
52831
52832 already_queued:
52833- fscache_stat(&fscache_n_stores_again);
52834+ fscache_stat_unchecked(&fscache_n_stores_again);
52835 already_pending:
52836 spin_unlock(&cookie->stores_lock);
52837 spin_unlock(&object->lock);
52838 spin_unlock(&cookie->lock);
52839 radix_tree_preload_end();
52840 kfree(op);
52841- fscache_stat(&fscache_n_stores_ok);
52842+ fscache_stat_unchecked(&fscache_n_stores_ok);
52843 _leave(" = 0");
52844 return 0;
52845
52846@@ -959,14 +959,14 @@ nobufs:
52847 spin_unlock(&cookie->lock);
52848 radix_tree_preload_end();
52849 kfree(op);
52850- fscache_stat(&fscache_n_stores_nobufs);
52851+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
52852 _leave(" = -ENOBUFS");
52853 return -ENOBUFS;
52854
52855 nomem_free:
52856 kfree(op);
52857 nomem:
52858- fscache_stat(&fscache_n_stores_oom);
52859+ fscache_stat_unchecked(&fscache_n_stores_oom);
52860 _leave(" = -ENOMEM");
52861 return -ENOMEM;
52862 }
52863@@ -984,7 +984,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
52864 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
52865 ASSERTCMP(page, !=, NULL);
52866
52867- fscache_stat(&fscache_n_uncaches);
52868+ fscache_stat_unchecked(&fscache_n_uncaches);
52869
52870 /* cache withdrawal may beat us to it */
52871 if (!PageFsCache(page))
52872@@ -1035,7 +1035,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
52873 struct fscache_cookie *cookie = op->op.object->cookie;
52874
52875 #ifdef CONFIG_FSCACHE_STATS
52876- atomic_inc(&fscache_n_marks);
52877+ atomic_inc_unchecked(&fscache_n_marks);
52878 #endif
52879
52880 _debug("- mark %p{%lx}", page, page->index);
52881diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
52882index 40d13c7..ddf52b9 100644
52883--- a/fs/fscache/stats.c
52884+++ b/fs/fscache/stats.c
52885@@ -18,99 +18,99 @@
52886 /*
52887 * operation counters
52888 */
52889-atomic_t fscache_n_op_pend;
52890-atomic_t fscache_n_op_run;
52891-atomic_t fscache_n_op_enqueue;
52892-atomic_t fscache_n_op_requeue;
52893-atomic_t fscache_n_op_deferred_release;
52894-atomic_t fscache_n_op_release;
52895-atomic_t fscache_n_op_gc;
52896-atomic_t fscache_n_op_cancelled;
52897-atomic_t fscache_n_op_rejected;
52898+atomic_unchecked_t fscache_n_op_pend;
52899+atomic_unchecked_t fscache_n_op_run;
52900+atomic_unchecked_t fscache_n_op_enqueue;
52901+atomic_unchecked_t fscache_n_op_requeue;
52902+atomic_unchecked_t fscache_n_op_deferred_release;
52903+atomic_unchecked_t fscache_n_op_release;
52904+atomic_unchecked_t fscache_n_op_gc;
52905+atomic_unchecked_t fscache_n_op_cancelled;
52906+atomic_unchecked_t fscache_n_op_rejected;
52907
52908-atomic_t fscache_n_attr_changed;
52909-atomic_t fscache_n_attr_changed_ok;
52910-atomic_t fscache_n_attr_changed_nobufs;
52911-atomic_t fscache_n_attr_changed_nomem;
52912-atomic_t fscache_n_attr_changed_calls;
52913+atomic_unchecked_t fscache_n_attr_changed;
52914+atomic_unchecked_t fscache_n_attr_changed_ok;
52915+atomic_unchecked_t fscache_n_attr_changed_nobufs;
52916+atomic_unchecked_t fscache_n_attr_changed_nomem;
52917+atomic_unchecked_t fscache_n_attr_changed_calls;
52918
52919-atomic_t fscache_n_allocs;
52920-atomic_t fscache_n_allocs_ok;
52921-atomic_t fscache_n_allocs_wait;
52922-atomic_t fscache_n_allocs_nobufs;
52923-atomic_t fscache_n_allocs_intr;
52924-atomic_t fscache_n_allocs_object_dead;
52925-atomic_t fscache_n_alloc_ops;
52926-atomic_t fscache_n_alloc_op_waits;
52927+atomic_unchecked_t fscache_n_allocs;
52928+atomic_unchecked_t fscache_n_allocs_ok;
52929+atomic_unchecked_t fscache_n_allocs_wait;
52930+atomic_unchecked_t fscache_n_allocs_nobufs;
52931+atomic_unchecked_t fscache_n_allocs_intr;
52932+atomic_unchecked_t fscache_n_allocs_object_dead;
52933+atomic_unchecked_t fscache_n_alloc_ops;
52934+atomic_unchecked_t fscache_n_alloc_op_waits;
52935
52936-atomic_t fscache_n_retrievals;
52937-atomic_t fscache_n_retrievals_ok;
52938-atomic_t fscache_n_retrievals_wait;
52939-atomic_t fscache_n_retrievals_nodata;
52940-atomic_t fscache_n_retrievals_nobufs;
52941-atomic_t fscache_n_retrievals_intr;
52942-atomic_t fscache_n_retrievals_nomem;
52943-atomic_t fscache_n_retrievals_object_dead;
52944-atomic_t fscache_n_retrieval_ops;
52945-atomic_t fscache_n_retrieval_op_waits;
52946+atomic_unchecked_t fscache_n_retrievals;
52947+atomic_unchecked_t fscache_n_retrievals_ok;
52948+atomic_unchecked_t fscache_n_retrievals_wait;
52949+atomic_unchecked_t fscache_n_retrievals_nodata;
52950+atomic_unchecked_t fscache_n_retrievals_nobufs;
52951+atomic_unchecked_t fscache_n_retrievals_intr;
52952+atomic_unchecked_t fscache_n_retrievals_nomem;
52953+atomic_unchecked_t fscache_n_retrievals_object_dead;
52954+atomic_unchecked_t fscache_n_retrieval_ops;
52955+atomic_unchecked_t fscache_n_retrieval_op_waits;
52956
52957-atomic_t fscache_n_stores;
52958-atomic_t fscache_n_stores_ok;
52959-atomic_t fscache_n_stores_again;
52960-atomic_t fscache_n_stores_nobufs;
52961-atomic_t fscache_n_stores_oom;
52962-atomic_t fscache_n_store_ops;
52963-atomic_t fscache_n_store_calls;
52964-atomic_t fscache_n_store_pages;
52965-atomic_t fscache_n_store_radix_deletes;
52966-atomic_t fscache_n_store_pages_over_limit;
52967+atomic_unchecked_t fscache_n_stores;
52968+atomic_unchecked_t fscache_n_stores_ok;
52969+atomic_unchecked_t fscache_n_stores_again;
52970+atomic_unchecked_t fscache_n_stores_nobufs;
52971+atomic_unchecked_t fscache_n_stores_oom;
52972+atomic_unchecked_t fscache_n_store_ops;
52973+atomic_unchecked_t fscache_n_store_calls;
52974+atomic_unchecked_t fscache_n_store_pages;
52975+atomic_unchecked_t fscache_n_store_radix_deletes;
52976+atomic_unchecked_t fscache_n_store_pages_over_limit;
52977
52978-atomic_t fscache_n_store_vmscan_not_storing;
52979-atomic_t fscache_n_store_vmscan_gone;
52980-atomic_t fscache_n_store_vmscan_busy;
52981-atomic_t fscache_n_store_vmscan_cancelled;
52982-atomic_t fscache_n_store_vmscan_wait;
52983+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
52984+atomic_unchecked_t fscache_n_store_vmscan_gone;
52985+atomic_unchecked_t fscache_n_store_vmscan_busy;
52986+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
52987+atomic_unchecked_t fscache_n_store_vmscan_wait;
52988
52989-atomic_t fscache_n_marks;
52990-atomic_t fscache_n_uncaches;
52991+atomic_unchecked_t fscache_n_marks;
52992+atomic_unchecked_t fscache_n_uncaches;
52993
52994-atomic_t fscache_n_acquires;
52995-atomic_t fscache_n_acquires_null;
52996-atomic_t fscache_n_acquires_no_cache;
52997-atomic_t fscache_n_acquires_ok;
52998-atomic_t fscache_n_acquires_nobufs;
52999-atomic_t fscache_n_acquires_oom;
53000+atomic_unchecked_t fscache_n_acquires;
53001+atomic_unchecked_t fscache_n_acquires_null;
53002+atomic_unchecked_t fscache_n_acquires_no_cache;
53003+atomic_unchecked_t fscache_n_acquires_ok;
53004+atomic_unchecked_t fscache_n_acquires_nobufs;
53005+atomic_unchecked_t fscache_n_acquires_oom;
53006
53007-atomic_t fscache_n_invalidates;
53008-atomic_t fscache_n_invalidates_run;
53009+atomic_unchecked_t fscache_n_invalidates;
53010+atomic_unchecked_t fscache_n_invalidates_run;
53011
53012-atomic_t fscache_n_updates;
53013-atomic_t fscache_n_updates_null;
53014-atomic_t fscache_n_updates_run;
53015+atomic_unchecked_t fscache_n_updates;
53016+atomic_unchecked_t fscache_n_updates_null;
53017+atomic_unchecked_t fscache_n_updates_run;
53018
53019-atomic_t fscache_n_relinquishes;
53020-atomic_t fscache_n_relinquishes_null;
53021-atomic_t fscache_n_relinquishes_waitcrt;
53022-atomic_t fscache_n_relinquishes_retire;
53023+atomic_unchecked_t fscache_n_relinquishes;
53024+atomic_unchecked_t fscache_n_relinquishes_null;
53025+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
53026+atomic_unchecked_t fscache_n_relinquishes_retire;
53027
53028-atomic_t fscache_n_cookie_index;
53029-atomic_t fscache_n_cookie_data;
53030-atomic_t fscache_n_cookie_special;
53031+atomic_unchecked_t fscache_n_cookie_index;
53032+atomic_unchecked_t fscache_n_cookie_data;
53033+atomic_unchecked_t fscache_n_cookie_special;
53034
53035-atomic_t fscache_n_object_alloc;
53036-atomic_t fscache_n_object_no_alloc;
53037-atomic_t fscache_n_object_lookups;
53038-atomic_t fscache_n_object_lookups_negative;
53039-atomic_t fscache_n_object_lookups_positive;
53040-atomic_t fscache_n_object_lookups_timed_out;
53041-atomic_t fscache_n_object_created;
53042-atomic_t fscache_n_object_avail;
53043-atomic_t fscache_n_object_dead;
53044+atomic_unchecked_t fscache_n_object_alloc;
53045+atomic_unchecked_t fscache_n_object_no_alloc;
53046+atomic_unchecked_t fscache_n_object_lookups;
53047+atomic_unchecked_t fscache_n_object_lookups_negative;
53048+atomic_unchecked_t fscache_n_object_lookups_positive;
53049+atomic_unchecked_t fscache_n_object_lookups_timed_out;
53050+atomic_unchecked_t fscache_n_object_created;
53051+atomic_unchecked_t fscache_n_object_avail;
53052+atomic_unchecked_t fscache_n_object_dead;
53053
53054-atomic_t fscache_n_checkaux_none;
53055-atomic_t fscache_n_checkaux_okay;
53056-atomic_t fscache_n_checkaux_update;
53057-atomic_t fscache_n_checkaux_obsolete;
53058+atomic_unchecked_t fscache_n_checkaux_none;
53059+atomic_unchecked_t fscache_n_checkaux_okay;
53060+atomic_unchecked_t fscache_n_checkaux_update;
53061+atomic_unchecked_t fscache_n_checkaux_obsolete;
53062
53063 atomic_t fscache_n_cop_alloc_object;
53064 atomic_t fscache_n_cop_lookup_object;
53065@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
53066 seq_puts(m, "FS-Cache statistics\n");
53067
53068 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
53069- atomic_read(&fscache_n_cookie_index),
53070- atomic_read(&fscache_n_cookie_data),
53071- atomic_read(&fscache_n_cookie_special));
53072+ atomic_read_unchecked(&fscache_n_cookie_index),
53073+ atomic_read_unchecked(&fscache_n_cookie_data),
53074+ atomic_read_unchecked(&fscache_n_cookie_special));
53075
53076 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
53077- atomic_read(&fscache_n_object_alloc),
53078- atomic_read(&fscache_n_object_no_alloc),
53079- atomic_read(&fscache_n_object_avail),
53080- atomic_read(&fscache_n_object_dead));
53081+ atomic_read_unchecked(&fscache_n_object_alloc),
53082+ atomic_read_unchecked(&fscache_n_object_no_alloc),
53083+ atomic_read_unchecked(&fscache_n_object_avail),
53084+ atomic_read_unchecked(&fscache_n_object_dead));
53085 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
53086- atomic_read(&fscache_n_checkaux_none),
53087- atomic_read(&fscache_n_checkaux_okay),
53088- atomic_read(&fscache_n_checkaux_update),
53089- atomic_read(&fscache_n_checkaux_obsolete));
53090+ atomic_read_unchecked(&fscache_n_checkaux_none),
53091+ atomic_read_unchecked(&fscache_n_checkaux_okay),
53092+ atomic_read_unchecked(&fscache_n_checkaux_update),
53093+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
53094
53095 seq_printf(m, "Pages : mrk=%u unc=%u\n",
53096- atomic_read(&fscache_n_marks),
53097- atomic_read(&fscache_n_uncaches));
53098+ atomic_read_unchecked(&fscache_n_marks),
53099+ atomic_read_unchecked(&fscache_n_uncaches));
53100
53101 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
53102 " oom=%u\n",
53103- atomic_read(&fscache_n_acquires),
53104- atomic_read(&fscache_n_acquires_null),
53105- atomic_read(&fscache_n_acquires_no_cache),
53106- atomic_read(&fscache_n_acquires_ok),
53107- atomic_read(&fscache_n_acquires_nobufs),
53108- atomic_read(&fscache_n_acquires_oom));
53109+ atomic_read_unchecked(&fscache_n_acquires),
53110+ atomic_read_unchecked(&fscache_n_acquires_null),
53111+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
53112+ atomic_read_unchecked(&fscache_n_acquires_ok),
53113+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
53114+ atomic_read_unchecked(&fscache_n_acquires_oom));
53115
53116 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
53117- atomic_read(&fscache_n_object_lookups),
53118- atomic_read(&fscache_n_object_lookups_negative),
53119- atomic_read(&fscache_n_object_lookups_positive),
53120- atomic_read(&fscache_n_object_created),
53121- atomic_read(&fscache_n_object_lookups_timed_out));
53122+ atomic_read_unchecked(&fscache_n_object_lookups),
53123+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
53124+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
53125+ atomic_read_unchecked(&fscache_n_object_created),
53126+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
53127
53128 seq_printf(m, "Invals : n=%u run=%u\n",
53129- atomic_read(&fscache_n_invalidates),
53130- atomic_read(&fscache_n_invalidates_run));
53131+ atomic_read_unchecked(&fscache_n_invalidates),
53132+ atomic_read_unchecked(&fscache_n_invalidates_run));
53133
53134 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
53135- atomic_read(&fscache_n_updates),
53136- atomic_read(&fscache_n_updates_null),
53137- atomic_read(&fscache_n_updates_run));
53138+ atomic_read_unchecked(&fscache_n_updates),
53139+ atomic_read_unchecked(&fscache_n_updates_null),
53140+ atomic_read_unchecked(&fscache_n_updates_run));
53141
53142 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
53143- atomic_read(&fscache_n_relinquishes),
53144- atomic_read(&fscache_n_relinquishes_null),
53145- atomic_read(&fscache_n_relinquishes_waitcrt),
53146- atomic_read(&fscache_n_relinquishes_retire));
53147+ atomic_read_unchecked(&fscache_n_relinquishes),
53148+ atomic_read_unchecked(&fscache_n_relinquishes_null),
53149+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
53150+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
53151
53152 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
53153- atomic_read(&fscache_n_attr_changed),
53154- atomic_read(&fscache_n_attr_changed_ok),
53155- atomic_read(&fscache_n_attr_changed_nobufs),
53156- atomic_read(&fscache_n_attr_changed_nomem),
53157- atomic_read(&fscache_n_attr_changed_calls));
53158+ atomic_read_unchecked(&fscache_n_attr_changed),
53159+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
53160+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
53161+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
53162+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
53163
53164 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
53165- atomic_read(&fscache_n_allocs),
53166- atomic_read(&fscache_n_allocs_ok),
53167- atomic_read(&fscache_n_allocs_wait),
53168- atomic_read(&fscache_n_allocs_nobufs),
53169- atomic_read(&fscache_n_allocs_intr));
53170+ atomic_read_unchecked(&fscache_n_allocs),
53171+ atomic_read_unchecked(&fscache_n_allocs_ok),
53172+ atomic_read_unchecked(&fscache_n_allocs_wait),
53173+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
53174+ atomic_read_unchecked(&fscache_n_allocs_intr));
53175 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
53176- atomic_read(&fscache_n_alloc_ops),
53177- atomic_read(&fscache_n_alloc_op_waits),
53178- atomic_read(&fscache_n_allocs_object_dead));
53179+ atomic_read_unchecked(&fscache_n_alloc_ops),
53180+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
53181+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
53182
53183 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
53184 " int=%u oom=%u\n",
53185- atomic_read(&fscache_n_retrievals),
53186- atomic_read(&fscache_n_retrievals_ok),
53187- atomic_read(&fscache_n_retrievals_wait),
53188- atomic_read(&fscache_n_retrievals_nodata),
53189- atomic_read(&fscache_n_retrievals_nobufs),
53190- atomic_read(&fscache_n_retrievals_intr),
53191- atomic_read(&fscache_n_retrievals_nomem));
53192+ atomic_read_unchecked(&fscache_n_retrievals),
53193+ atomic_read_unchecked(&fscache_n_retrievals_ok),
53194+ atomic_read_unchecked(&fscache_n_retrievals_wait),
53195+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
53196+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
53197+ atomic_read_unchecked(&fscache_n_retrievals_intr),
53198+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
53199 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
53200- atomic_read(&fscache_n_retrieval_ops),
53201- atomic_read(&fscache_n_retrieval_op_waits),
53202- atomic_read(&fscache_n_retrievals_object_dead));
53203+ atomic_read_unchecked(&fscache_n_retrieval_ops),
53204+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
53205+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
53206
53207 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
53208- atomic_read(&fscache_n_stores),
53209- atomic_read(&fscache_n_stores_ok),
53210- atomic_read(&fscache_n_stores_again),
53211- atomic_read(&fscache_n_stores_nobufs),
53212- atomic_read(&fscache_n_stores_oom));
53213+ atomic_read_unchecked(&fscache_n_stores),
53214+ atomic_read_unchecked(&fscache_n_stores_ok),
53215+ atomic_read_unchecked(&fscache_n_stores_again),
53216+ atomic_read_unchecked(&fscache_n_stores_nobufs),
53217+ atomic_read_unchecked(&fscache_n_stores_oom));
53218 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
53219- atomic_read(&fscache_n_store_ops),
53220- atomic_read(&fscache_n_store_calls),
53221- atomic_read(&fscache_n_store_pages),
53222- atomic_read(&fscache_n_store_radix_deletes),
53223- atomic_read(&fscache_n_store_pages_over_limit));
53224+ atomic_read_unchecked(&fscache_n_store_ops),
53225+ atomic_read_unchecked(&fscache_n_store_calls),
53226+ atomic_read_unchecked(&fscache_n_store_pages),
53227+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
53228+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
53229
53230 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
53231- atomic_read(&fscache_n_store_vmscan_not_storing),
53232- atomic_read(&fscache_n_store_vmscan_gone),
53233- atomic_read(&fscache_n_store_vmscan_busy),
53234- atomic_read(&fscache_n_store_vmscan_cancelled),
53235- atomic_read(&fscache_n_store_vmscan_wait));
53236+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
53237+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
53238+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
53239+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
53240+ atomic_read_unchecked(&fscache_n_store_vmscan_wait));
53241
53242 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
53243- atomic_read(&fscache_n_op_pend),
53244- atomic_read(&fscache_n_op_run),
53245- atomic_read(&fscache_n_op_enqueue),
53246- atomic_read(&fscache_n_op_cancelled),
53247- atomic_read(&fscache_n_op_rejected));
53248+ atomic_read_unchecked(&fscache_n_op_pend),
53249+ atomic_read_unchecked(&fscache_n_op_run),
53250+ atomic_read_unchecked(&fscache_n_op_enqueue),
53251+ atomic_read_unchecked(&fscache_n_op_cancelled),
53252+ atomic_read_unchecked(&fscache_n_op_rejected));
53253 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
53254- atomic_read(&fscache_n_op_deferred_release),
53255- atomic_read(&fscache_n_op_release),
53256- atomic_read(&fscache_n_op_gc));
53257+ atomic_read_unchecked(&fscache_n_op_deferred_release),
53258+ atomic_read_unchecked(&fscache_n_op_release),
53259+ atomic_read_unchecked(&fscache_n_op_gc));
53260
53261 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
53262 atomic_read(&fscache_n_cop_alloc_object),
53263diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
53264index aef34b1..59bfd7b 100644
53265--- a/fs/fuse/cuse.c
53266+++ b/fs/fuse/cuse.c
53267@@ -600,10 +600,12 @@ static int __init cuse_init(void)
53268 INIT_LIST_HEAD(&cuse_conntbl[i]);
53269
53270 /* inherit and extend fuse_dev_operations */
53271- cuse_channel_fops = fuse_dev_operations;
53272- cuse_channel_fops.owner = THIS_MODULE;
53273- cuse_channel_fops.open = cuse_channel_open;
53274- cuse_channel_fops.release = cuse_channel_release;
53275+ pax_open_kernel();
53276+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
53277+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
53278+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
53279+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
53280+ pax_close_kernel();
53281
53282 cuse_class = class_create(THIS_MODULE, "cuse");
53283 if (IS_ERR(cuse_class))
53284diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
53285index 1d55f94..088da65 100644
53286--- a/fs/fuse/dev.c
53287+++ b/fs/fuse/dev.c
53288@@ -1339,7 +1339,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
53289 ret = 0;
53290 pipe_lock(pipe);
53291
53292- if (!pipe->readers) {
53293+ if (!atomic_read(&pipe->readers)) {
53294 send_sig(SIGPIPE, current, 0);
53295 if (!ret)
53296 ret = -EPIPE;
53297@@ -1364,7 +1364,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
53298 page_nr++;
53299 ret += buf->len;
53300
53301- if (pipe->files)
53302+ if (atomic_read(&pipe->files))
53303 do_wakeup = 1;
53304 }
53305
53306diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
53307index f3f783d..bf11a8e 100644
53308--- a/fs/fuse/dir.c
53309+++ b/fs/fuse/dir.c
53310@@ -1415,7 +1415,7 @@ static char *read_link(struct dentry *dentry)
53311 return link;
53312 }
53313
53314-static void free_link(char *link)
53315+static void free_link(const char *link)
53316 {
53317 if (!IS_ERR(link))
53318 free_page((unsigned long) link);
53319diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
53320index 62b484e..0f9a140 100644
53321--- a/fs/gfs2/inode.c
53322+++ b/fs/gfs2/inode.c
53323@@ -1441,7 +1441,7 @@ out:
53324
53325 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
53326 {
53327- char *s = nd_get_link(nd);
53328+ const char *s = nd_get_link(nd);
53329 if (!IS_ERR(s))
53330 kfree(s);
53331 }
53332diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
53333index a3f868a..bb308ae 100644
53334--- a/fs/hugetlbfs/inode.c
53335+++ b/fs/hugetlbfs/inode.c
53336@@ -152,6 +152,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
53337 struct mm_struct *mm = current->mm;
53338 struct vm_area_struct *vma;
53339 struct hstate *h = hstate_file(file);
53340+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
53341 struct vm_unmapped_area_info info;
53342
53343 if (len & ~huge_page_mask(h))
53344@@ -165,17 +166,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
53345 return addr;
53346 }
53347
53348+#ifdef CONFIG_PAX_RANDMMAP
53349+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
53350+#endif
53351+
53352 if (addr) {
53353 addr = ALIGN(addr, huge_page_size(h));
53354 vma = find_vma(mm, addr);
53355- if (TASK_SIZE - len >= addr &&
53356- (!vma || addr + len <= vma->vm_start))
53357+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
53358 return addr;
53359 }
53360
53361 info.flags = 0;
53362 info.length = len;
53363 info.low_limit = TASK_UNMAPPED_BASE;
53364+
53365+#ifdef CONFIG_PAX_RANDMMAP
53366+ if (mm->pax_flags & MF_PAX_RANDMMAP)
53367+ info.low_limit += mm->delta_mmap;
53368+#endif
53369+
53370 info.high_limit = TASK_SIZE;
53371 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
53372 info.align_offset = 0;
53373@@ -898,7 +908,7 @@ static struct file_system_type hugetlbfs_fs_type = {
53374 };
53375 MODULE_ALIAS_FS("hugetlbfs");
53376
53377-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
53378+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
53379
53380 static int can_do_hugetlb_shm(void)
53381 {
53382diff --git a/fs/inode.c b/fs/inode.c
53383index 00d5fc3..98ce7d7 100644
53384--- a/fs/inode.c
53385+++ b/fs/inode.c
53386@@ -878,8 +878,8 @@ unsigned int get_next_ino(void)
53387
53388 #ifdef CONFIG_SMP
53389 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
53390- static atomic_t shared_last_ino;
53391- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
53392+ static atomic_unchecked_t shared_last_ino;
53393+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
53394
53395 res = next - LAST_INO_BATCH;
53396 }
53397diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
53398index 4a6cf28..d3a29d3 100644
53399--- a/fs/jffs2/erase.c
53400+++ b/fs/jffs2/erase.c
53401@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
53402 struct jffs2_unknown_node marker = {
53403 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
53404 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
53405- .totlen = cpu_to_je32(c->cleanmarker_size)
53406+ .totlen = cpu_to_je32(c->cleanmarker_size),
53407+ .hdr_crc = cpu_to_je32(0)
53408 };
53409
53410 jffs2_prealloc_raw_node_refs(c, jeb, 1);
53411diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
53412index a6597d6..41b30ec 100644
53413--- a/fs/jffs2/wbuf.c
53414+++ b/fs/jffs2/wbuf.c
53415@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
53416 {
53417 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
53418 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
53419- .totlen = constant_cpu_to_je32(8)
53420+ .totlen = constant_cpu_to_je32(8),
53421+ .hdr_crc = constant_cpu_to_je32(0)
53422 };
53423
53424 /*
53425diff --git a/fs/jfs/super.c b/fs/jfs/super.c
53426index 788e0a9..8433098 100644
53427--- a/fs/jfs/super.c
53428+++ b/fs/jfs/super.c
53429@@ -878,7 +878,7 @@ static int __init init_jfs_fs(void)
53430
53431 jfs_inode_cachep =
53432 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
53433- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
53434+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
53435 init_once);
53436 if (jfs_inode_cachep == NULL)
53437 return -ENOMEM;
53438diff --git a/fs/libfs.c b/fs/libfs.c
53439index 916da8c..1588998 100644
53440--- a/fs/libfs.c
53441+++ b/fs/libfs.c
53442@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
53443
53444 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
53445 struct dentry *next;
53446+ char d_name[sizeof(next->d_iname)];
53447+ const unsigned char *name;
53448+
53449 next = list_entry(p, struct dentry, d_u.d_child);
53450 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
53451 if (!simple_positive(next)) {
53452@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
53453
53454 spin_unlock(&next->d_lock);
53455 spin_unlock(&dentry->d_lock);
53456- if (filldir(dirent, next->d_name.name,
53457+ name = next->d_name.name;
53458+ if (name == next->d_iname) {
53459+ memcpy(d_name, name, next->d_name.len);
53460+ name = d_name;
53461+ }
53462+ if (filldir(dirent, name,
53463 next->d_name.len, filp->f_pos,
53464 next->d_inode->i_ino,
53465 dt_type(next->d_inode)) < 0)
53466diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
53467index 9760ecb..9b838ef 100644
53468--- a/fs/lockd/clntproc.c
53469+++ b/fs/lockd/clntproc.c
53470@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
53471 /*
53472 * Cookie counter for NLM requests
53473 */
53474-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
53475+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
53476
53477 void nlmclnt_next_cookie(struct nlm_cookie *c)
53478 {
53479- u32 cookie = atomic_inc_return(&nlm_cookie);
53480+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
53481
53482 memcpy(c->data, &cookie, 4);
53483 c->len=4;
53484diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
53485index a2aa97d..10d6c41 100644
53486--- a/fs/lockd/svc.c
53487+++ b/fs/lockd/svc.c
53488@@ -305,7 +305,7 @@ static int lockd_start_svc(struct svc_serv *serv)
53489 svc_sock_update_bufs(serv);
53490 serv->sv_maxconn = nlm_max_connections;
53491
53492- nlmsvc_task = kthread_run(lockd, nlmsvc_rqst, serv->sv_name);
53493+ nlmsvc_task = kthread_run(lockd, nlmsvc_rqst, "%s", serv->sv_name);
53494 if (IS_ERR(nlmsvc_task)) {
53495 error = PTR_ERR(nlmsvc_task);
53496 printk(KERN_WARNING
53497diff --git a/fs/locks.c b/fs/locks.c
53498index cb424a4..850e4dd 100644
53499--- a/fs/locks.c
53500+++ b/fs/locks.c
53501@@ -2064,16 +2064,16 @@ void locks_remove_flock(struct file *filp)
53502 return;
53503
53504 if (filp->f_op && filp->f_op->flock) {
53505- struct file_lock fl = {
53506+ struct file_lock flock = {
53507 .fl_pid = current->tgid,
53508 .fl_file = filp,
53509 .fl_flags = FL_FLOCK,
53510 .fl_type = F_UNLCK,
53511 .fl_end = OFFSET_MAX,
53512 };
53513- filp->f_op->flock(filp, F_SETLKW, &fl);
53514- if (fl.fl_ops && fl.fl_ops->fl_release_private)
53515- fl.fl_ops->fl_release_private(&fl);
53516+ filp->f_op->flock(filp, F_SETLKW, &flock);
53517+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
53518+ flock.fl_ops->fl_release_private(&flock);
53519 }
53520
53521 lock_flocks();
53522diff --git a/fs/namei.c b/fs/namei.c
53523index 9ed9361..2b72db1 100644
53524--- a/fs/namei.c
53525+++ b/fs/namei.c
53526@@ -319,16 +319,32 @@ int generic_permission(struct inode *inode, int mask)
53527 if (ret != -EACCES)
53528 return ret;
53529
53530+#ifdef CONFIG_GRKERNSEC
53531+ /* we'll block if we have to log due to a denied capability use */
53532+ if (mask & MAY_NOT_BLOCK)
53533+ return -ECHILD;
53534+#endif
53535+
53536 if (S_ISDIR(inode->i_mode)) {
53537 /* DACs are overridable for directories */
53538- if (inode_capable(inode, CAP_DAC_OVERRIDE))
53539- return 0;
53540 if (!(mask & MAY_WRITE))
53541- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
53542+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
53543+ inode_capable(inode, CAP_DAC_READ_SEARCH))
53544 return 0;
53545+ if (inode_capable(inode, CAP_DAC_OVERRIDE))
53546+ return 0;
53547 return -EACCES;
53548 }
53549 /*
53550+ * Searching includes executable on directories, else just read.
53551+ */
53552+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
53553+ if (mask == MAY_READ)
53554+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
53555+ inode_capable(inode, CAP_DAC_READ_SEARCH))
53556+ return 0;
53557+
53558+ /*
53559 * Read/write DACs are always overridable.
53560 * Executable DACs are overridable when there is
53561 * at least one exec bit set.
53562@@ -337,14 +353,6 @@ int generic_permission(struct inode *inode, int mask)
53563 if (inode_capable(inode, CAP_DAC_OVERRIDE))
53564 return 0;
53565
53566- /*
53567- * Searching includes executable on directories, else just read.
53568- */
53569- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
53570- if (mask == MAY_READ)
53571- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
53572- return 0;
53573-
53574 return -EACCES;
53575 }
53576
53577@@ -820,7 +828,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
53578 {
53579 struct dentry *dentry = link->dentry;
53580 int error;
53581- char *s;
53582+ const char *s;
53583
53584 BUG_ON(nd->flags & LOOKUP_RCU);
53585
53586@@ -841,6 +849,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
53587 if (error)
53588 goto out_put_nd_path;
53589
53590+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
53591+ dentry->d_inode, dentry, nd->path.mnt)) {
53592+ error = -EACCES;
53593+ goto out_put_nd_path;
53594+ }
53595+
53596 nd->last_type = LAST_BIND;
53597 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
53598 error = PTR_ERR(*p);
53599@@ -1588,6 +1602,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
53600 if (res)
53601 break;
53602 res = walk_component(nd, path, LOOKUP_FOLLOW);
53603+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
53604+ res = -EACCES;
53605 put_link(nd, &link, cookie);
53606 } while (res > 0);
53607
53608@@ -1686,7 +1702,7 @@ EXPORT_SYMBOL(full_name_hash);
53609 static inline unsigned long hash_name(const char *name, unsigned int *hashp)
53610 {
53611 unsigned long a, b, adata, bdata, mask, hash, len;
53612- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
53613+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
53614
53615 hash = a = 0;
53616 len = -sizeof(unsigned long);
53617@@ -1968,6 +1984,8 @@ static int path_lookupat(int dfd, const char *name,
53618 if (err)
53619 break;
53620 err = lookup_last(nd, &path);
53621+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
53622+ err = -EACCES;
53623 put_link(nd, &link, cookie);
53624 }
53625 }
53626@@ -1975,6 +1993,13 @@ static int path_lookupat(int dfd, const char *name,
53627 if (!err)
53628 err = complete_walk(nd);
53629
53630+ if (!err && !(nd->flags & LOOKUP_PARENT)) {
53631+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
53632+ path_put(&nd->path);
53633+ err = -ENOENT;
53634+ }
53635+ }
53636+
53637 if (!err && nd->flags & LOOKUP_DIRECTORY) {
53638 if (!can_lookup(nd->inode)) {
53639 path_put(&nd->path);
53640@@ -2002,8 +2027,15 @@ static int filename_lookup(int dfd, struct filename *name,
53641 retval = path_lookupat(dfd, name->name,
53642 flags | LOOKUP_REVAL, nd);
53643
53644- if (likely(!retval))
53645+ if (likely(!retval)) {
53646 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
53647+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
53648+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
53649+ path_put(&nd->path);
53650+ return -ENOENT;
53651+ }
53652+ }
53653+ }
53654 return retval;
53655 }
53656
53657@@ -2381,6 +2413,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
53658 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
53659 return -EPERM;
53660
53661+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
53662+ return -EPERM;
53663+ if (gr_handle_rawio(inode))
53664+ return -EPERM;
53665+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
53666+ return -EACCES;
53667+
53668 return 0;
53669 }
53670
53671@@ -2602,7 +2641,7 @@ looked_up:
53672 * cleared otherwise prior to returning.
53673 */
53674 static int lookup_open(struct nameidata *nd, struct path *path,
53675- struct file *file,
53676+ struct path *link, struct file *file,
53677 const struct open_flags *op,
53678 bool got_write, int *opened)
53679 {
53680@@ -2637,6 +2676,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
53681 /* Negative dentry, just create the file */
53682 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
53683 umode_t mode = op->mode;
53684+
53685+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
53686+ error = -EACCES;
53687+ goto out_dput;
53688+ }
53689+
53690+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
53691+ error = -EACCES;
53692+ goto out_dput;
53693+ }
53694+
53695 if (!IS_POSIXACL(dir->d_inode))
53696 mode &= ~current_umask();
53697 /*
53698@@ -2658,6 +2708,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
53699 nd->flags & LOOKUP_EXCL);
53700 if (error)
53701 goto out_dput;
53702+ else
53703+ gr_handle_create(dentry, nd->path.mnt);
53704 }
53705 out_no_open:
53706 path->dentry = dentry;
53707@@ -2672,7 +2724,7 @@ out_dput:
53708 /*
53709 * Handle the last step of open()
53710 */
53711-static int do_last(struct nameidata *nd, struct path *path,
53712+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
53713 struct file *file, const struct open_flags *op,
53714 int *opened, struct filename *name)
53715 {
53716@@ -2701,16 +2753,32 @@ static int do_last(struct nameidata *nd, struct path *path,
53717 error = complete_walk(nd);
53718 if (error)
53719 return error;
53720+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
53721+ error = -ENOENT;
53722+ goto out;
53723+ }
53724 audit_inode(name, nd->path.dentry, 0);
53725 if (open_flag & O_CREAT) {
53726 error = -EISDIR;
53727 goto out;
53728 }
53729+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
53730+ error = -EACCES;
53731+ goto out;
53732+ }
53733 goto finish_open;
53734 case LAST_BIND:
53735 error = complete_walk(nd);
53736 if (error)
53737 return error;
53738+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
53739+ error = -ENOENT;
53740+ goto out;
53741+ }
53742+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
53743+ error = -EACCES;
53744+ goto out;
53745+ }
53746 audit_inode(name, dir, 0);
53747 goto finish_open;
53748 }
53749@@ -2759,7 +2827,7 @@ retry_lookup:
53750 */
53751 }
53752 mutex_lock(&dir->d_inode->i_mutex);
53753- error = lookup_open(nd, path, file, op, got_write, opened);
53754+ error = lookup_open(nd, path, link, file, op, got_write, opened);
53755 mutex_unlock(&dir->d_inode->i_mutex);
53756
53757 if (error <= 0) {
53758@@ -2783,11 +2851,28 @@ retry_lookup:
53759 goto finish_open_created;
53760 }
53761
53762+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
53763+ error = -ENOENT;
53764+ goto exit_dput;
53765+ }
53766+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
53767+ error = -EACCES;
53768+ goto exit_dput;
53769+ }
53770+
53771 /*
53772 * create/update audit record if it already exists.
53773 */
53774- if (path->dentry->d_inode)
53775+ if (path->dentry->d_inode) {
53776+ /* only check if O_CREAT is specified, all other checks need to go
53777+ into may_open */
53778+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
53779+ error = -EACCES;
53780+ goto exit_dput;
53781+ }
53782+
53783 audit_inode(name, path->dentry, 0);
53784+ }
53785
53786 /*
53787 * If atomic_open() acquired write access it is dropped now due to
53788@@ -2828,6 +2913,11 @@ finish_lookup:
53789 }
53790 }
53791 BUG_ON(inode != path->dentry->d_inode);
53792+ /* if we're resolving a symlink to another symlink */
53793+ if (link && gr_handle_symlink_owner(link, inode)) {
53794+ error = -EACCES;
53795+ goto out;
53796+ }
53797 return 1;
53798 }
53799
53800@@ -2837,7 +2927,6 @@ finish_lookup:
53801 save_parent.dentry = nd->path.dentry;
53802 save_parent.mnt = mntget(path->mnt);
53803 nd->path.dentry = path->dentry;
53804-
53805 }
53806 nd->inode = inode;
53807 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
53808@@ -2846,6 +2935,16 @@ finish_lookup:
53809 path_put(&save_parent);
53810 return error;
53811 }
53812+
53813+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
53814+ error = -ENOENT;
53815+ goto out;
53816+ }
53817+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
53818+ error = -EACCES;
53819+ goto out;
53820+ }
53821+
53822 error = -EISDIR;
53823 if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode))
53824 goto out;
53825@@ -2944,7 +3043,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
53826 if (unlikely(error))
53827 goto out;
53828
53829- error = do_last(nd, &path, file, op, &opened, pathname);
53830+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
53831 while (unlikely(error > 0)) { /* trailing symlink */
53832 struct path link = path;
53833 void *cookie;
53834@@ -2962,7 +3061,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
53835 error = follow_link(&link, nd, &cookie);
53836 if (unlikely(error))
53837 break;
53838- error = do_last(nd, &path, file, op, &opened, pathname);
53839+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
53840 put_link(nd, &link, cookie);
53841 }
53842 out:
53843@@ -3062,8 +3161,12 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
53844 goto unlock;
53845
53846 error = -EEXIST;
53847- if (dentry->d_inode)
53848+ if (dentry->d_inode) {
53849+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
53850+ error = -ENOENT;
53851+ }
53852 goto fail;
53853+ }
53854 /*
53855 * Special case - lookup gave negative, but... we had foo/bar/
53856 * From the vfs_mknod() POV we just have a negative dentry -
53857@@ -3115,6 +3218,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
53858 }
53859 EXPORT_SYMBOL(user_path_create);
53860
53861+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
53862+{
53863+ struct filename *tmp = getname(pathname);
53864+ struct dentry *res;
53865+ if (IS_ERR(tmp))
53866+ return ERR_CAST(tmp);
53867+ res = kern_path_create(dfd, tmp->name, path, lookup_flags);
53868+ if (IS_ERR(res))
53869+ putname(tmp);
53870+ else
53871+ *to = tmp;
53872+ return res;
53873+}
53874+
53875 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
53876 {
53877 int error = may_create(dir, dentry);
53878@@ -3177,6 +3294,17 @@ retry:
53879
53880 if (!IS_POSIXACL(path.dentry->d_inode))
53881 mode &= ~current_umask();
53882+
53883+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
53884+ error = -EPERM;
53885+ goto out;
53886+ }
53887+
53888+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
53889+ error = -EACCES;
53890+ goto out;
53891+ }
53892+
53893 error = security_path_mknod(&path, dentry, mode, dev);
53894 if (error)
53895 goto out;
53896@@ -3193,6 +3321,8 @@ retry:
53897 break;
53898 }
53899 out:
53900+ if (!error)
53901+ gr_handle_create(dentry, path.mnt);
53902 done_path_create(&path, dentry);
53903 if (retry_estale(error, lookup_flags)) {
53904 lookup_flags |= LOOKUP_REVAL;
53905@@ -3245,9 +3375,16 @@ retry:
53906
53907 if (!IS_POSIXACL(path.dentry->d_inode))
53908 mode &= ~current_umask();
53909+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
53910+ error = -EACCES;
53911+ goto out;
53912+ }
53913 error = security_path_mkdir(&path, dentry, mode);
53914 if (!error)
53915 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
53916+ if (!error)
53917+ gr_handle_create(dentry, path.mnt);
53918+out:
53919 done_path_create(&path, dentry);
53920 if (retry_estale(error, lookup_flags)) {
53921 lookup_flags |= LOOKUP_REVAL;
53922@@ -3328,6 +3465,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
53923 struct filename *name;
53924 struct dentry *dentry;
53925 struct nameidata nd;
53926+ ino_t saved_ino = 0;
53927+ dev_t saved_dev = 0;
53928 unsigned int lookup_flags = 0;
53929 retry:
53930 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
53931@@ -3360,10 +3499,21 @@ retry:
53932 error = -ENOENT;
53933 goto exit3;
53934 }
53935+
53936+ saved_ino = dentry->d_inode->i_ino;
53937+ saved_dev = gr_get_dev_from_dentry(dentry);
53938+
53939+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
53940+ error = -EACCES;
53941+ goto exit3;
53942+ }
53943+
53944 error = security_path_rmdir(&nd.path, dentry);
53945 if (error)
53946 goto exit3;
53947 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
53948+ if (!error && (saved_dev || saved_ino))
53949+ gr_handle_delete(saved_ino, saved_dev);
53950 exit3:
53951 dput(dentry);
53952 exit2:
53953@@ -3429,6 +3579,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
53954 struct dentry *dentry;
53955 struct nameidata nd;
53956 struct inode *inode = NULL;
53957+ ino_t saved_ino = 0;
53958+ dev_t saved_dev = 0;
53959 unsigned int lookup_flags = 0;
53960 retry:
53961 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
53962@@ -3455,10 +3607,22 @@ retry:
53963 if (!inode)
53964 goto slashes;
53965 ihold(inode);
53966+
53967+ if (inode->i_nlink <= 1) {
53968+ saved_ino = inode->i_ino;
53969+ saved_dev = gr_get_dev_from_dentry(dentry);
53970+ }
53971+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
53972+ error = -EACCES;
53973+ goto exit2;
53974+ }
53975+
53976 error = security_path_unlink(&nd.path, dentry);
53977 if (error)
53978 goto exit2;
53979 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
53980+ if (!error && (saved_ino || saved_dev))
53981+ gr_handle_delete(saved_ino, saved_dev);
53982 exit2:
53983 dput(dentry);
53984 }
53985@@ -3536,9 +3700,17 @@ retry:
53986 if (IS_ERR(dentry))
53987 goto out_putname;
53988
53989+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
53990+ error = -EACCES;
53991+ goto out;
53992+ }
53993+
53994 error = security_path_symlink(&path, dentry, from->name);
53995 if (!error)
53996 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
53997+ if (!error)
53998+ gr_handle_create(dentry, path.mnt);
53999+out:
54000 done_path_create(&path, dentry);
54001 if (retry_estale(error, lookup_flags)) {
54002 lookup_flags |= LOOKUP_REVAL;
54003@@ -3612,6 +3784,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
54004 {
54005 struct dentry *new_dentry;
54006 struct path old_path, new_path;
54007+ struct filename *to = NULL;
54008 int how = 0;
54009 int error;
54010
54011@@ -3635,7 +3808,7 @@ retry:
54012 if (error)
54013 return error;
54014
54015- new_dentry = user_path_create(newdfd, newname, &new_path,
54016+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
54017 (how & LOOKUP_REVAL));
54018 error = PTR_ERR(new_dentry);
54019 if (IS_ERR(new_dentry))
54020@@ -3647,11 +3820,28 @@ retry:
54021 error = may_linkat(&old_path);
54022 if (unlikely(error))
54023 goto out_dput;
54024+
54025+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
54026+ old_path.dentry->d_inode,
54027+ old_path.dentry->d_inode->i_mode, to)) {
54028+ error = -EACCES;
54029+ goto out_dput;
54030+ }
54031+
54032+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
54033+ old_path.dentry, old_path.mnt, to)) {
54034+ error = -EACCES;
54035+ goto out_dput;
54036+ }
54037+
54038 error = security_path_link(old_path.dentry, &new_path, new_dentry);
54039 if (error)
54040 goto out_dput;
54041 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
54042+ if (!error)
54043+ gr_handle_create(new_dentry, new_path.mnt);
54044 out_dput:
54045+ putname(to);
54046 done_path_create(&new_path, new_dentry);
54047 if (retry_estale(error, how)) {
54048 how |= LOOKUP_REVAL;
54049@@ -3897,12 +4087,21 @@ retry:
54050 if (new_dentry == trap)
54051 goto exit5;
54052
54053+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
54054+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
54055+ to);
54056+ if (error)
54057+ goto exit5;
54058+
54059 error = security_path_rename(&oldnd.path, old_dentry,
54060 &newnd.path, new_dentry);
54061 if (error)
54062 goto exit5;
54063 error = vfs_rename(old_dir->d_inode, old_dentry,
54064 new_dir->d_inode, new_dentry);
54065+ if (!error)
54066+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
54067+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
54068 exit5:
54069 dput(new_dentry);
54070 exit4:
54071@@ -3934,6 +4133,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
54072
54073 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
54074 {
54075+ char tmpbuf[64];
54076+ const char *newlink;
54077 int len;
54078
54079 len = PTR_ERR(link);
54080@@ -3943,7 +4144,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
54081 len = strlen(link);
54082 if (len > (unsigned) buflen)
54083 len = buflen;
54084- if (copy_to_user(buffer, link, len))
54085+
54086+ if (len < sizeof(tmpbuf)) {
54087+ memcpy(tmpbuf, link, len);
54088+ newlink = tmpbuf;
54089+ } else
54090+ newlink = link;
54091+
54092+ if (copy_to_user(buffer, newlink, len))
54093 len = -EFAULT;
54094 out:
54095 return len;
54096diff --git a/fs/namespace.c b/fs/namespace.c
54097index 7b1ca9b..6faeccf 100644
54098--- a/fs/namespace.c
54099+++ b/fs/namespace.c
54100@@ -1265,6 +1265,9 @@ static int do_umount(struct mount *mnt, int flags)
54101 if (!(sb->s_flags & MS_RDONLY))
54102 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
54103 up_write(&sb->s_umount);
54104+
54105+ gr_log_remount(mnt->mnt_devname, retval);
54106+
54107 return retval;
54108 }
54109
54110@@ -1283,6 +1286,9 @@ static int do_umount(struct mount *mnt, int flags)
54111 }
54112 br_write_unlock(&vfsmount_lock);
54113 namespace_unlock();
54114+
54115+ gr_log_unmount(mnt->mnt_devname, retval);
54116+
54117 return retval;
54118 }
54119
54120@@ -1302,7 +1308,7 @@ static inline bool may_mount(void)
54121 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
54122 */
54123
54124-SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
54125+SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
54126 {
54127 struct path path;
54128 struct mount *mnt;
54129@@ -1342,7 +1348,7 @@ out:
54130 /*
54131 * The 2.0 compatible umount. No flags.
54132 */
54133-SYSCALL_DEFINE1(oldumount, char __user *, name)
54134+SYSCALL_DEFINE1(oldumount, const char __user *, name)
54135 {
54136 return sys_umount(name, 0);
54137 }
54138@@ -2313,6 +2319,16 @@ long do_mount(const char *dev_name, const char *dir_name,
54139 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
54140 MS_STRICTATIME);
54141
54142+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
54143+ retval = -EPERM;
54144+ goto dput_out;
54145+ }
54146+
54147+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
54148+ retval = -EPERM;
54149+ goto dput_out;
54150+ }
54151+
54152 if (flags & MS_REMOUNT)
54153 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
54154 data_page);
54155@@ -2327,6 +2343,9 @@ long do_mount(const char *dev_name, const char *dir_name,
54156 dev_name, data_page);
54157 dput_out:
54158 path_put(&path);
54159+
54160+ gr_log_mount(dev_name, dir_name, retval);
54161+
54162 return retval;
54163 }
54164
54165@@ -2500,8 +2519,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
54166 }
54167 EXPORT_SYMBOL(mount_subtree);
54168
54169-SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
54170- char __user *, type, unsigned long, flags, void __user *, data)
54171+SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name,
54172+ const char __user *, type, unsigned long, flags, void __user *, data)
54173 {
54174 int ret;
54175 char *kernel_type;
54176@@ -2614,6 +2633,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
54177 if (error)
54178 goto out2;
54179
54180+ if (gr_handle_chroot_pivot()) {
54181+ error = -EPERM;
54182+ goto out2;
54183+ }
54184+
54185 get_fs_root(current->fs, &root);
54186 old_mp = lock_mount(&old);
54187 error = PTR_ERR(old_mp);
54188@@ -2864,7 +2888,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
54189 !nsown_capable(CAP_SYS_ADMIN))
54190 return -EPERM;
54191
54192- if (fs->users != 1)
54193+ if (atomic_read(&fs->users) != 1)
54194 return -EINVAL;
54195
54196 get_mnt_ns(mnt_ns);
54197diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
54198index cff089a..4c3d57a 100644
54199--- a/fs/nfs/callback.c
54200+++ b/fs/nfs/callback.c
54201@@ -211,7 +211,6 @@ static int nfs_callback_start_svc(int minorversion, struct rpc_xprt *xprt,
54202 struct svc_rqst *rqstp;
54203 int (*callback_svc)(void *vrqstp);
54204 struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion];
54205- char svc_name[12];
54206 int ret;
54207
54208 nfs_callback_bc_serv(minorversion, xprt, serv);
54209@@ -235,10 +234,9 @@ static int nfs_callback_start_svc(int minorversion, struct rpc_xprt *xprt,
54210
54211 svc_sock_update_bufs(serv);
54212
54213- sprintf(svc_name, "nfsv4.%u-svc", minorversion);
54214 cb_info->serv = serv;
54215 cb_info->rqst = rqstp;
54216- cb_info->task = kthread_run(callback_svc, cb_info->rqst, svc_name);
54217+ cb_info->task = kthread_run(callback_svc, cb_info->rqst, "nfsv4.%u-svc", minorversion);
54218 if (IS_ERR(cb_info->task)) {
54219 ret = PTR_ERR(cb_info->task);
54220 svc_exit_thread(cb_info->rqst);
54221diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
54222index a35582c..ebbdcd5 100644
54223--- a/fs/nfs/callback_xdr.c
54224+++ b/fs/nfs/callback_xdr.c
54225@@ -51,7 +51,7 @@ struct callback_op {
54226 callback_decode_arg_t decode_args;
54227 callback_encode_res_t encode_res;
54228 long res_maxsize;
54229-};
54230+} __do_const;
54231
54232 static struct callback_op callback_ops[];
54233
54234diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
54235index c1c7a9d..7afa0b8 100644
54236--- a/fs/nfs/inode.c
54237+++ b/fs/nfs/inode.c
54238@@ -1043,16 +1043,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
54239 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
54240 }
54241
54242-static atomic_long_t nfs_attr_generation_counter;
54243+static atomic_long_unchecked_t nfs_attr_generation_counter;
54244
54245 static unsigned long nfs_read_attr_generation_counter(void)
54246 {
54247- return atomic_long_read(&nfs_attr_generation_counter);
54248+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
54249 }
54250
54251 unsigned long nfs_inc_attr_generation_counter(void)
54252 {
54253- return atomic_long_inc_return(&nfs_attr_generation_counter);
54254+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
54255 }
54256
54257 void nfs_fattr_init(struct nfs_fattr *fattr)
54258diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
54259index 2c37442..9b9538b 100644
54260--- a/fs/nfs/nfs4state.c
54261+++ b/fs/nfs/nfs4state.c
54262@@ -1193,7 +1193,7 @@ void nfs4_schedule_state_manager(struct nfs_client *clp)
54263 snprintf(buf, sizeof(buf), "%s-manager",
54264 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR));
54265 rcu_read_unlock();
54266- task = kthread_run(nfs4_run_state_manager, clp, buf);
54267+ task = kthread_run(nfs4_run_state_manager, clp, "%s", buf);
54268 if (IS_ERR(task)) {
54269 printk(KERN_ERR "%s: kthread_run: %ld\n",
54270 __func__, PTR_ERR(task));
54271diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
54272index 27d74a2..c4c2a73 100644
54273--- a/fs/nfsd/nfs4proc.c
54274+++ b/fs/nfsd/nfs4proc.c
54275@@ -1126,7 +1126,7 @@ struct nfsd4_operation {
54276 nfsd4op_rsize op_rsize_bop;
54277 stateid_getter op_get_currentstateid;
54278 stateid_setter op_set_currentstateid;
54279-};
54280+} __do_const;
54281
54282 static struct nfsd4_operation nfsd4_ops[];
54283
54284diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
54285index 582321a..0224663 100644
54286--- a/fs/nfsd/nfs4xdr.c
54287+++ b/fs/nfsd/nfs4xdr.c
54288@@ -1458,7 +1458,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
54289
54290 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
54291
54292-static nfsd4_dec nfsd4_dec_ops[] = {
54293+static const nfsd4_dec nfsd4_dec_ops[] = {
54294 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
54295 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
54296 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
54297@@ -1498,7 +1498,7 @@ static nfsd4_dec nfsd4_dec_ops[] = {
54298 [OP_RELEASE_LOCKOWNER] = (nfsd4_dec)nfsd4_decode_release_lockowner,
54299 };
54300
54301-static nfsd4_dec nfsd41_dec_ops[] = {
54302+static const nfsd4_dec nfsd41_dec_ops[] = {
54303 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
54304 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
54305 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
54306@@ -1560,7 +1560,7 @@ static nfsd4_dec nfsd41_dec_ops[] = {
54307 };
54308
54309 struct nfsd4_minorversion_ops {
54310- nfsd4_dec *decoders;
54311+ const nfsd4_dec *decoders;
54312 int nops;
54313 };
54314
54315diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
54316index e76244e..9fe8f2f1 100644
54317--- a/fs/nfsd/nfscache.c
54318+++ b/fs/nfsd/nfscache.c
54319@@ -526,14 +526,17 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
54320 {
54321 struct svc_cacherep *rp = rqstp->rq_cacherep;
54322 struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
54323- int len;
54324+ long len;
54325 size_t bufsize = 0;
54326
54327 if (!rp)
54328 return;
54329
54330- len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
54331- len >>= 2;
54332+ if (statp) {
54333+ len = (char*)statp - (char*)resv->iov_base;
54334+ len = resv->iov_len - len;
54335+ len >>= 2;
54336+ }
54337
54338 /* Don't cache excessive amounts of data and XDR failures */
54339 if (!statp || len > (256 >> 2)) {
54340diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
54341index 84ce601..633d226 100644
54342--- a/fs/nfsd/vfs.c
54343+++ b/fs/nfsd/vfs.c
54344@@ -939,7 +939,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
54345 } else {
54346 oldfs = get_fs();
54347 set_fs(KERNEL_DS);
54348- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
54349+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
54350 set_fs(oldfs);
54351 }
54352
54353@@ -1026,7 +1026,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
54354
54355 /* Write the data. */
54356 oldfs = get_fs(); set_fs(KERNEL_DS);
54357- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
54358+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
54359 set_fs(oldfs);
54360 if (host_err < 0)
54361 goto out_nfserr;
54362@@ -1572,7 +1572,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
54363 */
54364
54365 oldfs = get_fs(); set_fs(KERNEL_DS);
54366- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
54367+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
54368 set_fs(oldfs);
54369
54370 if (host_err < 0)
54371diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
54372index fea6bd5..8ee9d81 100644
54373--- a/fs/nls/nls_base.c
54374+++ b/fs/nls/nls_base.c
54375@@ -234,20 +234,22 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
54376
54377 int register_nls(struct nls_table * nls)
54378 {
54379- struct nls_table ** tmp = &tables;
54380+ struct nls_table *tmp = tables;
54381
54382 if (nls->next)
54383 return -EBUSY;
54384
54385 spin_lock(&nls_lock);
54386- while (*tmp) {
54387- if (nls == *tmp) {
54388+ while (tmp) {
54389+ if (nls == tmp) {
54390 spin_unlock(&nls_lock);
54391 return -EBUSY;
54392 }
54393- tmp = &(*tmp)->next;
54394+ tmp = tmp->next;
54395 }
54396- nls->next = tables;
54397+ pax_open_kernel();
54398+ *(struct nls_table **)&nls->next = tables;
54399+ pax_close_kernel();
54400 tables = nls;
54401 spin_unlock(&nls_lock);
54402 return 0;
54403@@ -255,12 +257,14 @@ int register_nls(struct nls_table * nls)
54404
54405 int unregister_nls(struct nls_table * nls)
54406 {
54407- struct nls_table ** tmp = &tables;
54408+ struct nls_table * const * tmp = &tables;
54409
54410 spin_lock(&nls_lock);
54411 while (*tmp) {
54412 if (nls == *tmp) {
54413- *tmp = nls->next;
54414+ pax_open_kernel();
54415+ *(struct nls_table **)tmp = nls->next;
54416+ pax_close_kernel();
54417 spin_unlock(&nls_lock);
54418 return 0;
54419 }
54420diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
54421index 7424929..35f6be5 100644
54422--- a/fs/nls/nls_euc-jp.c
54423+++ b/fs/nls/nls_euc-jp.c
54424@@ -561,8 +561,10 @@ static int __init init_nls_euc_jp(void)
54425 p_nls = load_nls("cp932");
54426
54427 if (p_nls) {
54428- table.charset2upper = p_nls->charset2upper;
54429- table.charset2lower = p_nls->charset2lower;
54430+ pax_open_kernel();
54431+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
54432+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
54433+ pax_close_kernel();
54434 return register_nls(&table);
54435 }
54436
54437diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
54438index e7bc1d7..06bd4bb 100644
54439--- a/fs/nls/nls_koi8-ru.c
54440+++ b/fs/nls/nls_koi8-ru.c
54441@@ -63,8 +63,10 @@ static int __init init_nls_koi8_ru(void)
54442 p_nls = load_nls("koi8-u");
54443
54444 if (p_nls) {
54445- table.charset2upper = p_nls->charset2upper;
54446- table.charset2lower = p_nls->charset2lower;
54447+ pax_open_kernel();
54448+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
54449+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
54450+ pax_close_kernel();
54451 return register_nls(&table);
54452 }
54453
54454diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
54455index 6c80083..a1e6299 100644
54456--- a/fs/notify/fanotify/fanotify_user.c
54457+++ b/fs/notify/fanotify/fanotify_user.c
54458@@ -122,6 +122,7 @@ static int fill_event_metadata(struct fsnotify_group *group,
54459 metadata->event_len = FAN_EVENT_METADATA_LEN;
54460 metadata->metadata_len = FAN_EVENT_METADATA_LEN;
54461 metadata->vers = FANOTIFY_METADATA_VERSION;
54462+ metadata->reserved = 0;
54463 metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS;
54464 metadata->pid = pid_vnr(event->tgid);
54465 if (unlikely(event->mask & FAN_Q_OVERFLOW))
54466@@ -252,8 +253,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
54467
54468 fd = fanotify_event_metadata.fd;
54469 ret = -EFAULT;
54470- if (copy_to_user(buf, &fanotify_event_metadata,
54471- fanotify_event_metadata.event_len))
54472+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
54473+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
54474 goto out_close_fd;
54475
54476 ret = prepare_for_access_response(group, event, fd);
54477diff --git a/fs/notify/notification.c b/fs/notify/notification.c
54478index 7b51b05..5ea5ef6 100644
54479--- a/fs/notify/notification.c
54480+++ b/fs/notify/notification.c
54481@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
54482 * get set to 0 so it will never get 'freed'
54483 */
54484 static struct fsnotify_event *q_overflow_event;
54485-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
54486+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
54487
54488 /**
54489 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
54490@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
54491 */
54492 u32 fsnotify_get_cookie(void)
54493 {
54494- return atomic_inc_return(&fsnotify_sync_cookie);
54495+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
54496 }
54497 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
54498
54499diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
54500index aa411c3..c260a84 100644
54501--- a/fs/ntfs/dir.c
54502+++ b/fs/ntfs/dir.c
54503@@ -1329,7 +1329,7 @@ find_next_index_buffer:
54504 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
54505 ~(s64)(ndir->itype.index.block_size - 1)));
54506 /* Bounds checks. */
54507- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
54508+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
54509 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
54510 "inode 0x%lx or driver bug.", vdir->i_ino);
54511 goto err_out;
54512diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
54513index c5670b8..01a3656 100644
54514--- a/fs/ntfs/file.c
54515+++ b/fs/ntfs/file.c
54516@@ -2241,6 +2241,6 @@ const struct inode_operations ntfs_file_inode_ops = {
54517 #endif /* NTFS_RW */
54518 };
54519
54520-const struct file_operations ntfs_empty_file_ops = {};
54521+const struct file_operations ntfs_empty_file_ops __read_only;
54522
54523-const struct inode_operations ntfs_empty_inode_ops = {};
54524+const struct inode_operations ntfs_empty_inode_ops __read_only;
54525diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
54526index aebeacd..0dcdd26 100644
54527--- a/fs/ocfs2/localalloc.c
54528+++ b/fs/ocfs2/localalloc.c
54529@@ -1278,7 +1278,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
54530 goto bail;
54531 }
54532
54533- atomic_inc(&osb->alloc_stats.moves);
54534+ atomic_inc_unchecked(&osb->alloc_stats.moves);
54535
54536 bail:
54537 if (handle)
54538diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
54539index d355e6e..578d905 100644
54540--- a/fs/ocfs2/ocfs2.h
54541+++ b/fs/ocfs2/ocfs2.h
54542@@ -235,11 +235,11 @@ enum ocfs2_vol_state
54543
54544 struct ocfs2_alloc_stats
54545 {
54546- atomic_t moves;
54547- atomic_t local_data;
54548- atomic_t bitmap_data;
54549- atomic_t bg_allocs;
54550- atomic_t bg_extends;
54551+ atomic_unchecked_t moves;
54552+ atomic_unchecked_t local_data;
54553+ atomic_unchecked_t bitmap_data;
54554+ atomic_unchecked_t bg_allocs;
54555+ atomic_unchecked_t bg_extends;
54556 };
54557
54558 enum ocfs2_local_alloc_state
54559diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
54560index b7e74b5..19c6536 100644
54561--- a/fs/ocfs2/suballoc.c
54562+++ b/fs/ocfs2/suballoc.c
54563@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
54564 mlog_errno(status);
54565 goto bail;
54566 }
54567- atomic_inc(&osb->alloc_stats.bg_extends);
54568+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
54569
54570 /* You should never ask for this much metadata */
54571 BUG_ON(bits_wanted >
54572@@ -2007,7 +2007,7 @@ int ocfs2_claim_metadata(handle_t *handle,
54573 mlog_errno(status);
54574 goto bail;
54575 }
54576- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
54577+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
54578
54579 *suballoc_loc = res.sr_bg_blkno;
54580 *suballoc_bit_start = res.sr_bit_offset;
54581@@ -2171,7 +2171,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
54582 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
54583 res->sr_bits);
54584
54585- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
54586+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
54587
54588 BUG_ON(res->sr_bits != 1);
54589
54590@@ -2213,7 +2213,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
54591 mlog_errno(status);
54592 goto bail;
54593 }
54594- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
54595+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
54596
54597 BUG_ON(res.sr_bits != 1);
54598
54599@@ -2317,7 +2317,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
54600 cluster_start,
54601 num_clusters);
54602 if (!status)
54603- atomic_inc(&osb->alloc_stats.local_data);
54604+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
54605 } else {
54606 if (min_clusters > (osb->bitmap_cpg - 1)) {
54607 /* The only paths asking for contiguousness
54608@@ -2343,7 +2343,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
54609 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
54610 res.sr_bg_blkno,
54611 res.sr_bit_offset);
54612- atomic_inc(&osb->alloc_stats.bitmap_data);
54613+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
54614 *num_clusters = res.sr_bits;
54615 }
54616 }
54617diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
54618index 01b8516..579c4df 100644
54619--- a/fs/ocfs2/super.c
54620+++ b/fs/ocfs2/super.c
54621@@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
54622 "%10s => GlobalAllocs: %d LocalAllocs: %d "
54623 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
54624 "Stats",
54625- atomic_read(&osb->alloc_stats.bitmap_data),
54626- atomic_read(&osb->alloc_stats.local_data),
54627- atomic_read(&osb->alloc_stats.bg_allocs),
54628- atomic_read(&osb->alloc_stats.moves),
54629- atomic_read(&osb->alloc_stats.bg_extends));
54630+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
54631+ atomic_read_unchecked(&osb->alloc_stats.local_data),
54632+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
54633+ atomic_read_unchecked(&osb->alloc_stats.moves),
54634+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
54635
54636 out += snprintf(buf + out, len - out,
54637 "%10s => State: %u Descriptor: %llu Size: %u bits "
54638@@ -2122,11 +2122,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
54639 spin_lock_init(&osb->osb_xattr_lock);
54640 ocfs2_init_steal_slots(osb);
54641
54642- atomic_set(&osb->alloc_stats.moves, 0);
54643- atomic_set(&osb->alloc_stats.local_data, 0);
54644- atomic_set(&osb->alloc_stats.bitmap_data, 0);
54645- atomic_set(&osb->alloc_stats.bg_allocs, 0);
54646- atomic_set(&osb->alloc_stats.bg_extends, 0);
54647+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
54648+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
54649+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
54650+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
54651+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
54652
54653 /* Copy the blockcheck stats from the superblock probe */
54654 osb->osb_ecc_stats = *stats;
54655diff --git a/fs/open.c b/fs/open.c
54656index 8c74100..4239c48 100644
54657--- a/fs/open.c
54658+++ b/fs/open.c
54659@@ -32,6 +32,8 @@
54660 #include <linux/dnotify.h>
54661 #include <linux/compat.h>
54662
54663+#define CREATE_TRACE_POINTS
54664+#include <trace/events/fs.h>
54665 #include "internal.h"
54666
54667 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
54668@@ -102,6 +104,8 @@ long vfs_truncate(struct path *path, loff_t length)
54669 error = locks_verify_truncate(inode, NULL, length);
54670 if (!error)
54671 error = security_path_truncate(path);
54672+ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
54673+ error = -EACCES;
54674 if (!error)
54675 error = do_truncate(path->dentry, length, 0, NULL);
54676
54677@@ -186,6 +190,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
54678 error = locks_verify_truncate(inode, f.file, length);
54679 if (!error)
54680 error = security_path_truncate(&f.file->f_path);
54681+ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
54682+ error = -EACCES;
54683 if (!error)
54684 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
54685 sb_end_write(inode->i_sb);
54686@@ -360,6 +366,9 @@ retry:
54687 if (__mnt_is_readonly(path.mnt))
54688 res = -EROFS;
54689
54690+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
54691+ res = -EACCES;
54692+
54693 out_path_release:
54694 path_put(&path);
54695 if (retry_estale(res, lookup_flags)) {
54696@@ -391,6 +400,8 @@ retry:
54697 if (error)
54698 goto dput_and_out;
54699
54700+ gr_log_chdir(path.dentry, path.mnt);
54701+
54702 set_fs_pwd(current->fs, &path);
54703
54704 dput_and_out:
54705@@ -420,6 +431,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
54706 goto out_putf;
54707
54708 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
54709+
54710+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
54711+ error = -EPERM;
54712+
54713+ if (!error)
54714+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
54715+
54716 if (!error)
54717 set_fs_pwd(current->fs, &f.file->f_path);
54718 out_putf:
54719@@ -449,7 +467,13 @@ retry:
54720 if (error)
54721 goto dput_and_out;
54722
54723+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
54724+ goto dput_and_out;
54725+
54726 set_fs_root(current->fs, &path);
54727+
54728+ gr_handle_chroot_chdir(&path);
54729+
54730 error = 0;
54731 dput_and_out:
54732 path_put(&path);
54733@@ -471,6 +495,16 @@ static int chmod_common(struct path *path, umode_t mode)
54734 if (error)
54735 return error;
54736 mutex_lock(&inode->i_mutex);
54737+
54738+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
54739+ error = -EACCES;
54740+ goto out_unlock;
54741+ }
54742+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
54743+ error = -EACCES;
54744+ goto out_unlock;
54745+ }
54746+
54747 error = security_path_chmod(path, mode);
54748 if (error)
54749 goto out_unlock;
54750@@ -531,6 +565,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
54751 uid = make_kuid(current_user_ns(), user);
54752 gid = make_kgid(current_user_ns(), group);
54753
54754+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
54755+ return -EACCES;
54756+
54757 newattrs.ia_valid = ATTR_CTIME;
54758 if (user != (uid_t) -1) {
54759 if (!uid_valid(uid))
54760@@ -946,6 +983,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
54761 } else {
54762 fsnotify_open(f);
54763 fd_install(fd, f);
54764+ trace_do_sys_open(tmp->name, flags, mode);
54765 }
54766 }
54767 putname(tmp);
54768diff --git a/fs/pipe.c b/fs/pipe.c
54769index d2c45e1..009fe1c 100644
54770--- a/fs/pipe.c
54771+++ b/fs/pipe.c
54772@@ -56,7 +56,7 @@ unsigned int pipe_min_size = PAGE_SIZE;
54773
54774 static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
54775 {
54776- if (pipe->files)
54777+ if (atomic_read(&pipe->files))
54778 mutex_lock_nested(&pipe->mutex, subclass);
54779 }
54780
54781@@ -71,7 +71,7 @@ EXPORT_SYMBOL(pipe_lock);
54782
54783 void pipe_unlock(struct pipe_inode_info *pipe)
54784 {
54785- if (pipe->files)
54786+ if (atomic_read(&pipe->files))
54787 mutex_unlock(&pipe->mutex);
54788 }
54789 EXPORT_SYMBOL(pipe_unlock);
54790@@ -449,9 +449,9 @@ redo:
54791 }
54792 if (bufs) /* More to do? */
54793 continue;
54794- if (!pipe->writers)
54795+ if (!atomic_read(&pipe->writers))
54796 break;
54797- if (!pipe->waiting_writers) {
54798+ if (!atomic_read(&pipe->waiting_writers)) {
54799 /* syscall merging: Usually we must not sleep
54800 * if O_NONBLOCK is set, or if we got some data.
54801 * But if a writer sleeps in kernel space, then
54802@@ -513,7 +513,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
54803 ret = 0;
54804 __pipe_lock(pipe);
54805
54806- if (!pipe->readers) {
54807+ if (!atomic_read(&pipe->readers)) {
54808 send_sig(SIGPIPE, current, 0);
54809 ret = -EPIPE;
54810 goto out;
54811@@ -562,7 +562,7 @@ redo1:
54812 for (;;) {
54813 int bufs;
54814
54815- if (!pipe->readers) {
54816+ if (!atomic_read(&pipe->readers)) {
54817 send_sig(SIGPIPE, current, 0);
54818 if (!ret)
54819 ret = -EPIPE;
54820@@ -653,9 +653,9 @@ redo2:
54821 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
54822 do_wakeup = 0;
54823 }
54824- pipe->waiting_writers++;
54825+ atomic_inc(&pipe->waiting_writers);
54826 pipe_wait(pipe);
54827- pipe->waiting_writers--;
54828+ atomic_dec(&pipe->waiting_writers);
54829 }
54830 out:
54831 __pipe_unlock(pipe);
54832@@ -709,7 +709,7 @@ pipe_poll(struct file *filp, poll_table *wait)
54833 mask = 0;
54834 if (filp->f_mode & FMODE_READ) {
54835 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
54836- if (!pipe->writers && filp->f_version != pipe->w_counter)
54837+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
54838 mask |= POLLHUP;
54839 }
54840
54841@@ -719,7 +719,7 @@ pipe_poll(struct file *filp, poll_table *wait)
54842 * Most Unices do not set POLLERR for FIFOs but on Linux they
54843 * behave exactly like pipes for poll().
54844 */
54845- if (!pipe->readers)
54846+ if (!atomic_read(&pipe->readers))
54847 mask |= POLLERR;
54848 }
54849
54850@@ -734,17 +734,17 @@ pipe_release(struct inode *inode, struct file *file)
54851
54852 __pipe_lock(pipe);
54853 if (file->f_mode & FMODE_READ)
54854- pipe->readers--;
54855+ atomic_dec(&pipe->readers);
54856 if (file->f_mode & FMODE_WRITE)
54857- pipe->writers--;
54858+ atomic_dec(&pipe->writers);
54859
54860- if (pipe->readers || pipe->writers) {
54861+ if (atomic_read(&pipe->readers) || atomic_read(&pipe->writers)) {
54862 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
54863 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
54864 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
54865 }
54866 spin_lock(&inode->i_lock);
54867- if (!--pipe->files) {
54868+ if (atomic_dec_and_test(&pipe->files)) {
54869 inode->i_pipe = NULL;
54870 kill = 1;
54871 }
54872@@ -811,7 +811,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
54873 kfree(pipe);
54874 }
54875
54876-static struct vfsmount *pipe_mnt __read_mostly;
54877+struct vfsmount *pipe_mnt __read_mostly;
54878
54879 /*
54880 * pipefs_dname() is called from d_path().
54881@@ -841,8 +841,9 @@ static struct inode * get_pipe_inode(void)
54882 goto fail_iput;
54883
54884 inode->i_pipe = pipe;
54885- pipe->files = 2;
54886- pipe->readers = pipe->writers = 1;
54887+ atomic_set(&pipe->files, 2);
54888+ atomic_set(&pipe->readers, 1);
54889+ atomic_set(&pipe->writers, 1);
54890 inode->i_fop = &pipefifo_fops;
54891
54892 /*
54893@@ -1022,17 +1023,17 @@ static int fifo_open(struct inode *inode, struct file *filp)
54894 spin_lock(&inode->i_lock);
54895 if (inode->i_pipe) {
54896 pipe = inode->i_pipe;
54897- pipe->files++;
54898+ atomic_inc(&pipe->files);
54899 spin_unlock(&inode->i_lock);
54900 } else {
54901 spin_unlock(&inode->i_lock);
54902 pipe = alloc_pipe_info();
54903 if (!pipe)
54904 return -ENOMEM;
54905- pipe->files = 1;
54906+ atomic_set(&pipe->files, 1);
54907 spin_lock(&inode->i_lock);
54908 if (unlikely(inode->i_pipe)) {
54909- inode->i_pipe->files++;
54910+ atomic_inc(&inode->i_pipe->files);
54911 spin_unlock(&inode->i_lock);
54912 free_pipe_info(pipe);
54913 pipe = inode->i_pipe;
54914@@ -1057,10 +1058,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
54915 * opened, even when there is no process writing the FIFO.
54916 */
54917 pipe->r_counter++;
54918- if (pipe->readers++ == 0)
54919+ if (atomic_inc_return(&pipe->readers) == 1)
54920 wake_up_partner(pipe);
54921
54922- if (!is_pipe && !pipe->writers) {
54923+ if (!is_pipe && !atomic_read(&pipe->writers)) {
54924 if ((filp->f_flags & O_NONBLOCK)) {
54925 /* suppress POLLHUP until we have
54926 * seen a writer */
54927@@ -1079,14 +1080,14 @@ static int fifo_open(struct inode *inode, struct file *filp)
54928 * errno=ENXIO when there is no process reading the FIFO.
54929 */
54930 ret = -ENXIO;
54931- if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
54932+ if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
54933 goto err;
54934
54935 pipe->w_counter++;
54936- if (!pipe->writers++)
54937+ if (atomic_inc_return(&pipe->writers) == 1)
54938 wake_up_partner(pipe);
54939
54940- if (!is_pipe && !pipe->readers) {
54941+ if (!is_pipe && !atomic_read(&pipe->readers)) {
54942 if (wait_for_partner(pipe, &pipe->r_counter))
54943 goto err_wr;
54944 }
54945@@ -1100,11 +1101,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
54946 * the process can at least talk to itself.
54947 */
54948
54949- pipe->readers++;
54950- pipe->writers++;
54951+ atomic_inc(&pipe->readers);
54952+ atomic_inc(&pipe->writers);
54953 pipe->r_counter++;
54954 pipe->w_counter++;
54955- if (pipe->readers == 1 || pipe->writers == 1)
54956+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
54957 wake_up_partner(pipe);
54958 break;
54959
54960@@ -1118,20 +1119,20 @@ static int fifo_open(struct inode *inode, struct file *filp)
54961 return 0;
54962
54963 err_rd:
54964- if (!--pipe->readers)
54965+ if (atomic_dec_and_test(&pipe->readers))
54966 wake_up_interruptible(&pipe->wait);
54967 ret = -ERESTARTSYS;
54968 goto err;
54969
54970 err_wr:
54971- if (!--pipe->writers)
54972+ if (atomic_dec_and_test(&pipe->writers))
54973 wake_up_interruptible(&pipe->wait);
54974 ret = -ERESTARTSYS;
54975 goto err;
54976
54977 err:
54978 spin_lock(&inode->i_lock);
54979- if (!--pipe->files) {
54980+ if (atomic_dec_and_test(&pipe->files)) {
54981 inode->i_pipe = NULL;
54982 kill = 1;
54983 }
54984diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
54985index 15af622..0e9f4467 100644
54986--- a/fs/proc/Kconfig
54987+++ b/fs/proc/Kconfig
54988@@ -30,12 +30,12 @@ config PROC_FS
54989
54990 config PROC_KCORE
54991 bool "/proc/kcore support" if !ARM
54992- depends on PROC_FS && MMU
54993+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
54994
54995 config PROC_VMCORE
54996 bool "/proc/vmcore support"
54997- depends on PROC_FS && CRASH_DUMP
54998- default y
54999+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
55000+ default n
55001 help
55002 Exports the dump image of crashed kernel in ELF format.
55003
55004@@ -59,8 +59,8 @@ config PROC_SYSCTL
55005 limited in memory.
55006
55007 config PROC_PAGE_MONITOR
55008- default y
55009- depends on PROC_FS && MMU
55010+ default n
55011+ depends on PROC_FS && MMU && !GRKERNSEC
55012 bool "Enable /proc page monitoring" if EXPERT
55013 help
55014 Various /proc files exist to monitor process memory utilization:
55015diff --git a/fs/proc/array.c b/fs/proc/array.c
55016index cbd0f1b..adec3f0 100644
55017--- a/fs/proc/array.c
55018+++ b/fs/proc/array.c
55019@@ -60,6 +60,7 @@
55020 #include <linux/tty.h>
55021 #include <linux/string.h>
55022 #include <linux/mman.h>
55023+#include <linux/grsecurity.h>
55024 #include <linux/proc_fs.h>
55025 #include <linux/ioport.h>
55026 #include <linux/uaccess.h>
55027@@ -363,6 +364,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
55028 seq_putc(m, '\n');
55029 }
55030
55031+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
55032+static inline void task_pax(struct seq_file *m, struct task_struct *p)
55033+{
55034+ if (p->mm)
55035+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
55036+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
55037+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
55038+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
55039+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
55040+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
55041+ else
55042+ seq_printf(m, "PaX:\t-----\n");
55043+}
55044+#endif
55045+
55046 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
55047 struct pid *pid, struct task_struct *task)
55048 {
55049@@ -381,9 +397,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
55050 task_cpus_allowed(m, task);
55051 cpuset_task_status_allowed(m, task);
55052 task_context_switch_counts(m, task);
55053+
55054+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
55055+ task_pax(m, task);
55056+#endif
55057+
55058+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
55059+ task_grsec_rbac(m, task);
55060+#endif
55061+
55062 return 0;
55063 }
55064
55065+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55066+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
55067+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
55068+ _mm->pax_flags & MF_PAX_SEGMEXEC))
55069+#endif
55070+
55071 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
55072 struct pid *pid, struct task_struct *task, int whole)
55073 {
55074@@ -405,6 +436,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
55075 char tcomm[sizeof(task->comm)];
55076 unsigned long flags;
55077
55078+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55079+ if (current->exec_id != m->exec_id) {
55080+ gr_log_badprocpid("stat");
55081+ return 0;
55082+ }
55083+#endif
55084+
55085 state = *get_task_state(task);
55086 vsize = eip = esp = 0;
55087 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
55088@@ -476,6 +514,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
55089 gtime = task_gtime(task);
55090 }
55091
55092+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55093+ if (PAX_RAND_FLAGS(mm)) {
55094+ eip = 0;
55095+ esp = 0;
55096+ wchan = 0;
55097+ }
55098+#endif
55099+#ifdef CONFIG_GRKERNSEC_HIDESYM
55100+ wchan = 0;
55101+ eip =0;
55102+ esp =0;
55103+#endif
55104+
55105 /* scale priority and nice values from timeslices to -20..20 */
55106 /* to make it look like a "normal" Unix priority/nice value */
55107 priority = task_prio(task);
55108@@ -512,9 +563,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
55109 seq_put_decimal_ull(m, ' ', vsize);
55110 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
55111 seq_put_decimal_ull(m, ' ', rsslim);
55112+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55113+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
55114+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
55115+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
55116+#else
55117 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
55118 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
55119 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
55120+#endif
55121 seq_put_decimal_ull(m, ' ', esp);
55122 seq_put_decimal_ull(m, ' ', eip);
55123 /* The signal information here is obsolete.
55124@@ -536,7 +593,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
55125 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
55126 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
55127
55128- if (mm && permitted) {
55129+ if (mm && permitted
55130+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55131+ && !PAX_RAND_FLAGS(mm)
55132+#endif
55133+ ) {
55134 seq_put_decimal_ull(m, ' ', mm->start_data);
55135 seq_put_decimal_ull(m, ' ', mm->end_data);
55136 seq_put_decimal_ull(m, ' ', mm->start_brk);
55137@@ -574,8 +635,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
55138 struct pid *pid, struct task_struct *task)
55139 {
55140 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
55141- struct mm_struct *mm = get_task_mm(task);
55142+ struct mm_struct *mm;
55143
55144+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55145+ if (current->exec_id != m->exec_id) {
55146+ gr_log_badprocpid("statm");
55147+ return 0;
55148+ }
55149+#endif
55150+ mm = get_task_mm(task);
55151 if (mm) {
55152 size = task_statm(mm, &shared, &text, &data, &resident);
55153 mmput(mm);
55154@@ -598,6 +666,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
55155 return 0;
55156 }
55157
55158+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
55159+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
55160+{
55161+ return sprintf(buffer, "%pI4\n", &task->signal->curr_ip);
55162+}
55163+#endif
55164+
55165 #ifdef CONFIG_CHECKPOINT_RESTORE
55166 static struct pid *
55167 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
55168diff --git a/fs/proc/base.c b/fs/proc/base.c
55169index c3834da..b402b2b 100644
55170--- a/fs/proc/base.c
55171+++ b/fs/proc/base.c
55172@@ -113,6 +113,14 @@ struct pid_entry {
55173 union proc_op op;
55174 };
55175
55176+struct getdents_callback {
55177+ struct linux_dirent __user * current_dir;
55178+ struct linux_dirent __user * previous;
55179+ struct file * file;
55180+ int count;
55181+ int error;
55182+};
55183+
55184 #define NOD(NAME, MODE, IOP, FOP, OP) { \
55185 .name = (NAME), \
55186 .len = sizeof(NAME) - 1, \
55187@@ -210,6 +218,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
55188 if (!mm->arg_end)
55189 goto out_mm; /* Shh! No looking before we're done */
55190
55191+ if (gr_acl_handle_procpidmem(task))
55192+ goto out_mm;
55193+
55194 len = mm->arg_end - mm->arg_start;
55195
55196 if (len > PAGE_SIZE)
55197@@ -237,12 +248,28 @@ out:
55198 return res;
55199 }
55200
55201+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55202+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
55203+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
55204+ _mm->pax_flags & MF_PAX_SEGMEXEC))
55205+#endif
55206+
55207 static int proc_pid_auxv(struct task_struct *task, char *buffer)
55208 {
55209 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
55210 int res = PTR_ERR(mm);
55211 if (mm && !IS_ERR(mm)) {
55212 unsigned int nwords = 0;
55213+
55214+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55215+ /* allow if we're currently ptracing this task */
55216+ if (PAX_RAND_FLAGS(mm) &&
55217+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
55218+ mmput(mm);
55219+ return 0;
55220+ }
55221+#endif
55222+
55223 do {
55224 nwords += 2;
55225 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
55226@@ -256,7 +283,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
55227 }
55228
55229
55230-#ifdef CONFIG_KALLSYMS
55231+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
55232 /*
55233 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
55234 * Returns the resolved symbol. If that fails, simply return the address.
55235@@ -295,7 +322,7 @@ static void unlock_trace(struct task_struct *task)
55236 mutex_unlock(&task->signal->cred_guard_mutex);
55237 }
55238
55239-#ifdef CONFIG_STACKTRACE
55240+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
55241
55242 #define MAX_STACK_TRACE_DEPTH 64
55243
55244@@ -518,7 +545,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
55245 return count;
55246 }
55247
55248-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
55249+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
55250 static int proc_pid_syscall(struct task_struct *task, char *buffer)
55251 {
55252 long nr;
55253@@ -547,7 +574,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
55254 /************************************************************************/
55255
55256 /* permission checks */
55257-static int proc_fd_access_allowed(struct inode *inode)
55258+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
55259 {
55260 struct task_struct *task;
55261 int allowed = 0;
55262@@ -557,7 +584,10 @@ static int proc_fd_access_allowed(struct inode *inode)
55263 */
55264 task = get_proc_task(inode);
55265 if (task) {
55266- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
55267+ if (log)
55268+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
55269+ else
55270+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
55271 put_task_struct(task);
55272 }
55273 return allowed;
55274@@ -588,10 +618,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
55275 struct task_struct *task,
55276 int hide_pid_min)
55277 {
55278+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
55279+ return false;
55280+
55281+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55282+ rcu_read_lock();
55283+ {
55284+ const struct cred *tmpcred = current_cred();
55285+ const struct cred *cred = __task_cred(task);
55286+
55287+ if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
55288+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
55289+ || in_group_p(grsec_proc_gid)
55290+#endif
55291+ ) {
55292+ rcu_read_unlock();
55293+ return true;
55294+ }
55295+ }
55296+ rcu_read_unlock();
55297+
55298+ if (!pid->hide_pid)
55299+ return false;
55300+#endif
55301+
55302 if (pid->hide_pid < hide_pid_min)
55303 return true;
55304 if (in_group_p(pid->pid_gid))
55305 return true;
55306+
55307 return ptrace_may_access(task, PTRACE_MODE_READ);
55308 }
55309
55310@@ -609,7 +664,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
55311 put_task_struct(task);
55312
55313 if (!has_perms) {
55314+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55315+ {
55316+#else
55317 if (pid->hide_pid == 2) {
55318+#endif
55319 /*
55320 * Let's make getdents(), stat(), and open()
55321 * consistent with each other. If a process
55322@@ -707,6 +766,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
55323 if (!task)
55324 return -ESRCH;
55325
55326+ if (gr_acl_handle_procpidmem(task)) {
55327+ put_task_struct(task);
55328+ return -EPERM;
55329+ }
55330+
55331 mm = mm_access(task, mode);
55332 put_task_struct(task);
55333
55334@@ -722,6 +786,10 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
55335
55336 file->private_data = mm;
55337
55338+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55339+ file->f_version = current->exec_id;
55340+#endif
55341+
55342 return 0;
55343 }
55344
55345@@ -743,6 +811,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
55346 ssize_t copied;
55347 char *page;
55348
55349+#ifdef CONFIG_GRKERNSEC
55350+ if (write)
55351+ return -EPERM;
55352+#endif
55353+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55354+ if (file->f_version != current->exec_id) {
55355+ gr_log_badprocpid("mem");
55356+ return 0;
55357+ }
55358+#endif
55359+
55360 if (!mm)
55361 return 0;
55362
55363@@ -755,7 +834,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
55364 goto free;
55365
55366 while (count > 0) {
55367- int this_len = min_t(int, count, PAGE_SIZE);
55368+ ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
55369
55370 if (write && copy_from_user(page, buf, this_len)) {
55371 copied = -EFAULT;
55372@@ -847,6 +926,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
55373 if (!mm)
55374 return 0;
55375
55376+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55377+ if (file->f_version != current->exec_id) {
55378+ gr_log_badprocpid("environ");
55379+ return 0;
55380+ }
55381+#endif
55382+
55383 page = (char *)__get_free_page(GFP_TEMPORARY);
55384 if (!page)
55385 return -ENOMEM;
55386@@ -856,7 +942,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
55387 goto free;
55388 while (count > 0) {
55389 size_t this_len, max_len;
55390- int retval;
55391+ ssize_t retval;
55392
55393 if (src >= (mm->env_end - mm->env_start))
55394 break;
55395@@ -1461,7 +1547,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
55396 int error = -EACCES;
55397
55398 /* Are we allowed to snoop on the tasks file descriptors? */
55399- if (!proc_fd_access_allowed(inode))
55400+ if (!proc_fd_access_allowed(inode, 0))
55401 goto out;
55402
55403 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
55404@@ -1505,8 +1591,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
55405 struct path path;
55406
55407 /* Are we allowed to snoop on the tasks file descriptors? */
55408- if (!proc_fd_access_allowed(inode))
55409- goto out;
55410+ /* logging this is needed for learning on chromium to work properly,
55411+ but we don't want to flood the logs from 'ps' which does a readlink
55412+ on /proc/fd/2 of tasks in the listing, nor do we want 'ps' to learn
55413+ CAP_SYS_PTRACE as it's not necessary for its basic functionality
55414+ */
55415+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
55416+ if (!proc_fd_access_allowed(inode,0))
55417+ goto out;
55418+ } else {
55419+ if (!proc_fd_access_allowed(inode,1))
55420+ goto out;
55421+ }
55422
55423 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
55424 if (error)
55425@@ -1556,7 +1652,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
55426 rcu_read_lock();
55427 cred = __task_cred(task);
55428 inode->i_uid = cred->euid;
55429+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
55430+ inode->i_gid = grsec_proc_gid;
55431+#else
55432 inode->i_gid = cred->egid;
55433+#endif
55434 rcu_read_unlock();
55435 }
55436 security_task_to_inode(task, inode);
55437@@ -1592,10 +1692,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
55438 return -ENOENT;
55439 }
55440 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
55441+#ifdef CONFIG_GRKERNSEC_PROC_USER
55442+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
55443+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55444+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
55445+#endif
55446 task_dumpable(task)) {
55447 cred = __task_cred(task);
55448 stat->uid = cred->euid;
55449+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
55450+ stat->gid = grsec_proc_gid;
55451+#else
55452 stat->gid = cred->egid;
55453+#endif
55454 }
55455 }
55456 rcu_read_unlock();
55457@@ -1633,11 +1742,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
55458
55459 if (task) {
55460 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
55461+#ifdef CONFIG_GRKERNSEC_PROC_USER
55462+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
55463+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55464+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
55465+#endif
55466 task_dumpable(task)) {
55467 rcu_read_lock();
55468 cred = __task_cred(task);
55469 inode->i_uid = cred->euid;
55470+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
55471+ inode->i_gid = grsec_proc_gid;
55472+#else
55473 inode->i_gid = cred->egid;
55474+#endif
55475 rcu_read_unlock();
55476 } else {
55477 inode->i_uid = GLOBAL_ROOT_UID;
55478@@ -2196,6 +2314,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
55479 if (!task)
55480 goto out_no_task;
55481
55482+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
55483+ goto out;
55484+
55485 /*
55486 * Yes, it does not scale. And it should not. Don't add
55487 * new entries into /proc/<tgid>/ without very good reasons.
55488@@ -2240,6 +2361,9 @@ static int proc_pident_readdir(struct file *filp,
55489 if (!task)
55490 goto out_no_task;
55491
55492+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
55493+ goto out;
55494+
55495 ret = 0;
55496 i = filp->f_pos;
55497 switch (i) {
55498@@ -2653,7 +2777,7 @@ static const struct pid_entry tgid_base_stuff[] = {
55499 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
55500 #endif
55501 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
55502-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
55503+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
55504 INF("syscall", S_IRUGO, proc_pid_syscall),
55505 #endif
55506 INF("cmdline", S_IRUGO, proc_pid_cmdline),
55507@@ -2678,10 +2802,10 @@ static const struct pid_entry tgid_base_stuff[] = {
55508 #ifdef CONFIG_SECURITY
55509 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
55510 #endif
55511-#ifdef CONFIG_KALLSYMS
55512+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
55513 INF("wchan", S_IRUGO, proc_pid_wchan),
55514 #endif
55515-#ifdef CONFIG_STACKTRACE
55516+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
55517 ONE("stack", S_IRUGO, proc_pid_stack),
55518 #endif
55519 #ifdef CONFIG_SCHEDSTATS
55520@@ -2715,6 +2839,9 @@ static const struct pid_entry tgid_base_stuff[] = {
55521 #ifdef CONFIG_HARDWALL
55522 INF("hardwall", S_IRUGO, proc_pid_hardwall),
55523 #endif
55524+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
55525+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
55526+#endif
55527 #ifdef CONFIG_USER_NS
55528 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
55529 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
55530@@ -2847,7 +2974,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
55531 if (!inode)
55532 goto out;
55533
55534+#ifdef CONFIG_GRKERNSEC_PROC_USER
55535+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
55536+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55537+ inode->i_gid = grsec_proc_gid;
55538+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
55539+#else
55540 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
55541+#endif
55542 inode->i_op = &proc_tgid_base_inode_operations;
55543 inode->i_fop = &proc_tgid_base_operations;
55544 inode->i_flags|=S_IMMUTABLE;
55545@@ -2885,7 +3019,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
55546 if (!task)
55547 goto out;
55548
55549+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
55550+ goto out_put_task;
55551+
55552 result = proc_pid_instantiate(dir, dentry, task, NULL);
55553+out_put_task:
55554 put_task_struct(task);
55555 out:
55556 return result;
55557@@ -2948,6 +3086,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
55558 static int fake_filldir(void *buf, const char *name, int namelen,
55559 loff_t offset, u64 ino, unsigned d_type)
55560 {
55561+ struct getdents_callback * __buf = (struct getdents_callback *) buf;
55562+ __buf->error = -EINVAL;
55563 return 0;
55564 }
55565
55566@@ -3007,7 +3147,7 @@ static const struct pid_entry tid_base_stuff[] = {
55567 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
55568 #endif
55569 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
55570-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
55571+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
55572 INF("syscall", S_IRUGO, proc_pid_syscall),
55573 #endif
55574 INF("cmdline", S_IRUGO, proc_pid_cmdline),
55575@@ -3034,10 +3174,10 @@ static const struct pid_entry tid_base_stuff[] = {
55576 #ifdef CONFIG_SECURITY
55577 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
55578 #endif
55579-#ifdef CONFIG_KALLSYMS
55580+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
55581 INF("wchan", S_IRUGO, proc_pid_wchan),
55582 #endif
55583-#ifdef CONFIG_STACKTRACE
55584+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
55585 ONE("stack", S_IRUGO, proc_pid_stack),
55586 #endif
55587 #ifdef CONFIG_SCHEDSTATS
55588diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
55589index 82676e3..5f8518a 100644
55590--- a/fs/proc/cmdline.c
55591+++ b/fs/proc/cmdline.c
55592@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
55593
55594 static int __init proc_cmdline_init(void)
55595 {
55596+#ifdef CONFIG_GRKERNSEC_PROC_ADD
55597+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
55598+#else
55599 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
55600+#endif
55601 return 0;
55602 }
55603 module_init(proc_cmdline_init);
55604diff --git a/fs/proc/devices.c b/fs/proc/devices.c
55605index b143471..bb105e5 100644
55606--- a/fs/proc/devices.c
55607+++ b/fs/proc/devices.c
55608@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
55609
55610 static int __init proc_devices_init(void)
55611 {
55612+#ifdef CONFIG_GRKERNSEC_PROC_ADD
55613+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
55614+#else
55615 proc_create("devices", 0, NULL, &proc_devinfo_operations);
55616+#endif
55617 return 0;
55618 }
55619 module_init(proc_devices_init);
55620diff --git a/fs/proc/fd.c b/fs/proc/fd.c
55621index d7a4a28..0201742 100644
55622--- a/fs/proc/fd.c
55623+++ b/fs/proc/fd.c
55624@@ -25,7 +25,8 @@ static int seq_show(struct seq_file *m, void *v)
55625 if (!task)
55626 return -ENOENT;
55627
55628- files = get_files_struct(task);
55629+ if (!gr_acl_handle_procpidmem(task))
55630+ files = get_files_struct(task);
55631 put_task_struct(task);
55632
55633 if (files) {
55634@@ -302,11 +303,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
55635 */
55636 int proc_fd_permission(struct inode *inode, int mask)
55637 {
55638+ struct task_struct *task;
55639 int rv = generic_permission(inode, mask);
55640- if (rv == 0)
55641- return 0;
55642+
55643 if (task_pid(current) == proc_pid(inode))
55644 rv = 0;
55645+
55646+ task = get_proc_task(inode);
55647+ if (task == NULL)
55648+ return rv;
55649+
55650+ if (gr_acl_handle_procpidmem(task))
55651+ rv = -EACCES;
55652+
55653+ put_task_struct(task);
55654+
55655 return rv;
55656 }
55657
55658diff --git a/fs/proc/inode.c b/fs/proc/inode.c
55659index 073aea6..0630370 100644
55660--- a/fs/proc/inode.c
55661+++ b/fs/proc/inode.c
55662@@ -23,11 +23,17 @@
55663 #include <linux/slab.h>
55664 #include <linux/mount.h>
55665 #include <linux/magic.h>
55666+#include <linux/grsecurity.h>
55667
55668 #include <asm/uaccess.h>
55669
55670 #include "internal.h"
55671
55672+#ifdef CONFIG_PROC_SYSCTL
55673+extern const struct inode_operations proc_sys_inode_operations;
55674+extern const struct inode_operations proc_sys_dir_operations;
55675+#endif
55676+
55677 static void proc_evict_inode(struct inode *inode)
55678 {
55679 struct proc_dir_entry *de;
55680@@ -55,6 +61,13 @@ static void proc_evict_inode(struct inode *inode)
55681 ns = PROC_I(inode)->ns.ns;
55682 if (ns_ops && ns)
55683 ns_ops->put(ns);
55684+
55685+#ifdef CONFIG_PROC_SYSCTL
55686+ if (inode->i_op == &proc_sys_inode_operations ||
55687+ inode->i_op == &proc_sys_dir_operations)
55688+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
55689+#endif
55690+
55691 }
55692
55693 static struct kmem_cache * proc_inode_cachep;
55694@@ -385,7 +398,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
55695 if (de->mode) {
55696 inode->i_mode = de->mode;
55697 inode->i_uid = de->uid;
55698+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
55699+ inode->i_gid = grsec_proc_gid;
55700+#else
55701 inode->i_gid = de->gid;
55702+#endif
55703 }
55704 if (de->size)
55705 inode->i_size = de->size;
55706diff --git a/fs/proc/internal.h b/fs/proc/internal.h
55707index d600fb0..3b495fe 100644
55708--- a/fs/proc/internal.h
55709+++ b/fs/proc/internal.h
55710@@ -155,6 +155,9 @@ extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
55711 struct pid *, struct task_struct *);
55712 extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
55713 struct pid *, struct task_struct *);
55714+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
55715+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
55716+#endif
55717
55718 /*
55719 * base.c
55720diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
55721index 0a22194..a9fc8c1 100644
55722--- a/fs/proc/kcore.c
55723+++ b/fs/proc/kcore.c
55724@@ -484,9 +484,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
55725 * the addresses in the elf_phdr on our list.
55726 */
55727 start = kc_offset_to_vaddr(*fpos - elf_buflen);
55728- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
55729+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
55730+ if (tsz > buflen)
55731 tsz = buflen;
55732-
55733+
55734 while (buflen) {
55735 struct kcore_list *m;
55736
55737@@ -515,20 +516,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
55738 kfree(elf_buf);
55739 } else {
55740 if (kern_addr_valid(start)) {
55741- unsigned long n;
55742+ char *elf_buf;
55743+ mm_segment_t oldfs;
55744
55745- n = copy_to_user(buffer, (char *)start, tsz);
55746- /*
55747- * We cannot distinguish between fault on source
55748- * and fault on destination. When this happens
55749- * we clear too and hope it will trigger the
55750- * EFAULT again.
55751- */
55752- if (n) {
55753- if (clear_user(buffer + tsz - n,
55754- n))
55755+ elf_buf = kmalloc(tsz, GFP_KERNEL);
55756+ if (!elf_buf)
55757+ return -ENOMEM;
55758+ oldfs = get_fs();
55759+ set_fs(KERNEL_DS);
55760+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
55761+ set_fs(oldfs);
55762+ if (copy_to_user(buffer, elf_buf, tsz)) {
55763+ kfree(elf_buf);
55764 return -EFAULT;
55765+ }
55766 }
55767+ set_fs(oldfs);
55768+ kfree(elf_buf);
55769 } else {
55770 if (clear_user(buffer, tsz))
55771 return -EFAULT;
55772@@ -548,6 +552,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
55773
55774 static int open_kcore(struct inode *inode, struct file *filp)
55775 {
55776+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
55777+ return -EPERM;
55778+#endif
55779 if (!capable(CAP_SYS_RAWIO))
55780 return -EPERM;
55781 if (kcore_need_update)
55782diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
55783index 5aa847a..f77c8d4 100644
55784--- a/fs/proc/meminfo.c
55785+++ b/fs/proc/meminfo.c
55786@@ -159,7 +159,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
55787 vmi.used >> 10,
55788 vmi.largest_chunk >> 10
55789 #ifdef CONFIG_MEMORY_FAILURE
55790- ,atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
55791+ ,atomic_long_read_unchecked(&num_poisoned_pages) << (PAGE_SHIFT - 10)
55792 #endif
55793 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
55794 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
55795diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
55796index ccfd99b..1b7e255 100644
55797--- a/fs/proc/nommu.c
55798+++ b/fs/proc/nommu.c
55799@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
55800 if (len < 1)
55801 len = 1;
55802 seq_printf(m, "%*c", len, ' ');
55803- seq_path(m, &file->f_path, "");
55804+ seq_path(m, &file->f_path, "\n\\");
55805 }
55806
55807 seq_putc(m, '\n');
55808diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
55809index 986e832..6e8e859 100644
55810--- a/fs/proc/proc_net.c
55811+++ b/fs/proc/proc_net.c
55812@@ -23,6 +23,7 @@
55813 #include <linux/nsproxy.h>
55814 #include <net/net_namespace.h>
55815 #include <linux/seq_file.h>
55816+#include <linux/grsecurity.h>
55817
55818 #include "internal.h"
55819
55820@@ -109,6 +110,17 @@ static struct net *get_proc_task_net(struct inode *dir)
55821 struct task_struct *task;
55822 struct nsproxy *ns;
55823 struct net *net = NULL;
55824+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55825+ const struct cred *cred = current_cred();
55826+#endif
55827+
55828+#ifdef CONFIG_GRKERNSEC_PROC_USER
55829+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
55830+ return net;
55831+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55832+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
55833+ return net;
55834+#endif
55835
55836 rcu_read_lock();
55837 task = pid_task(proc_pid(dir), PIDTYPE_PID);
55838diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
55839index ac05f33..1e6dc7e 100644
55840--- a/fs/proc/proc_sysctl.c
55841+++ b/fs/proc/proc_sysctl.c
55842@@ -13,11 +13,15 @@
55843 #include <linux/module.h>
55844 #include "internal.h"
55845
55846+extern int gr_handle_chroot_sysctl(const int op);
55847+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
55848+ const int op);
55849+
55850 static const struct dentry_operations proc_sys_dentry_operations;
55851 static const struct file_operations proc_sys_file_operations;
55852-static const struct inode_operations proc_sys_inode_operations;
55853+const struct inode_operations proc_sys_inode_operations;
55854 static const struct file_operations proc_sys_dir_file_operations;
55855-static const struct inode_operations proc_sys_dir_operations;
55856+const struct inode_operations proc_sys_dir_operations;
55857
55858 void proc_sys_poll_notify(struct ctl_table_poll *poll)
55859 {
55860@@ -467,6 +471,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
55861
55862 err = NULL;
55863 d_set_d_op(dentry, &proc_sys_dentry_operations);
55864+
55865+ gr_handle_proc_create(dentry, inode);
55866+
55867 d_add(dentry, inode);
55868
55869 out:
55870@@ -482,6 +489,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
55871 struct inode *inode = file_inode(filp);
55872 struct ctl_table_header *head = grab_header(inode);
55873 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
55874+ int op = write ? MAY_WRITE : MAY_READ;
55875 ssize_t error;
55876 size_t res;
55877
55878@@ -493,7 +501,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
55879 * and won't be until we finish.
55880 */
55881 error = -EPERM;
55882- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
55883+ if (sysctl_perm(head, table, op))
55884 goto out;
55885
55886 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
55887@@ -501,6 +509,22 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
55888 if (!table->proc_handler)
55889 goto out;
55890
55891+#ifdef CONFIG_GRKERNSEC
55892+ error = -EPERM;
55893+ if (gr_handle_chroot_sysctl(op))
55894+ goto out;
55895+ dget(filp->f_path.dentry);
55896+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
55897+ dput(filp->f_path.dentry);
55898+ goto out;
55899+ }
55900+ dput(filp->f_path.dentry);
55901+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
55902+ goto out;
55903+ if (write && !capable(CAP_SYS_ADMIN))
55904+ goto out;
55905+#endif
55906+
55907 /* careful: calling conventions are nasty here */
55908 res = count;
55909 error = table->proc_handler(table, write, buf, &res, ppos);
55910@@ -598,6 +622,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
55911 return -ENOMEM;
55912 } else {
55913 d_set_d_op(child, &proc_sys_dentry_operations);
55914+
55915+ gr_handle_proc_create(child, inode);
55916+
55917 d_add(child, inode);
55918 }
55919 } else {
55920@@ -641,6 +668,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
55921 if ((*pos)++ < file->f_pos)
55922 return 0;
55923
55924+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
55925+ return 0;
55926+
55927 if (unlikely(S_ISLNK(table->mode)))
55928 res = proc_sys_link_fill_cache(file, dirent, filldir, head, table);
55929 else
55930@@ -751,6 +781,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
55931 if (IS_ERR(head))
55932 return PTR_ERR(head);
55933
55934+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
55935+ return -ENOENT;
55936+
55937 generic_fillattr(inode, stat);
55938 if (table)
55939 stat->mode = (stat->mode & S_IFMT) | table->mode;
55940@@ -773,13 +806,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
55941 .llseek = generic_file_llseek,
55942 };
55943
55944-static const struct inode_operations proc_sys_inode_operations = {
55945+const struct inode_operations proc_sys_inode_operations = {
55946 .permission = proc_sys_permission,
55947 .setattr = proc_sys_setattr,
55948 .getattr = proc_sys_getattr,
55949 };
55950
55951-static const struct inode_operations proc_sys_dir_operations = {
55952+const struct inode_operations proc_sys_dir_operations = {
55953 .lookup = proc_sys_lookup,
55954 .permission = proc_sys_permission,
55955 .setattr = proc_sys_setattr,
55956@@ -855,7 +888,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
55957 static struct ctl_dir *new_dir(struct ctl_table_set *set,
55958 const char *name, int namelen)
55959 {
55960- struct ctl_table *table;
55961+ ctl_table_no_const *table;
55962 struct ctl_dir *new;
55963 struct ctl_node *node;
55964 char *new_name;
55965@@ -867,7 +900,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
55966 return NULL;
55967
55968 node = (struct ctl_node *)(new + 1);
55969- table = (struct ctl_table *)(node + 1);
55970+ table = (ctl_table_no_const *)(node + 1);
55971 new_name = (char *)(table + 2);
55972 memcpy(new_name, name, namelen);
55973 new_name[namelen] = '\0';
55974@@ -1036,7 +1069,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
55975 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
55976 struct ctl_table_root *link_root)
55977 {
55978- struct ctl_table *link_table, *entry, *link;
55979+ ctl_table_no_const *link_table, *link;
55980+ struct ctl_table *entry;
55981 struct ctl_table_header *links;
55982 struct ctl_node *node;
55983 char *link_name;
55984@@ -1059,7 +1093,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
55985 return NULL;
55986
55987 node = (struct ctl_node *)(links + 1);
55988- link_table = (struct ctl_table *)(node + nr_entries);
55989+ link_table = (ctl_table_no_const *)(node + nr_entries);
55990 link_name = (char *)&link_table[nr_entries + 1];
55991
55992 for (link = link_table, entry = table; entry->procname; link++, entry++) {
55993@@ -1307,8 +1341,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
55994 struct ctl_table_header ***subheader, struct ctl_table_set *set,
55995 struct ctl_table *table)
55996 {
55997- struct ctl_table *ctl_table_arg = NULL;
55998- struct ctl_table *entry, *files;
55999+ ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
56000+ struct ctl_table *entry;
56001 int nr_files = 0;
56002 int nr_dirs = 0;
56003 int err = -ENOMEM;
56004@@ -1320,10 +1354,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
56005 nr_files++;
56006 }
56007
56008- files = table;
56009 /* If there are mixed files and directories we need a new table */
56010 if (nr_dirs && nr_files) {
56011- struct ctl_table *new;
56012+ ctl_table_no_const *new;
56013 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
56014 GFP_KERNEL);
56015 if (!files)
56016@@ -1341,7 +1374,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
56017 /* Register everything except a directory full of subdirectories */
56018 if (nr_files || !nr_dirs) {
56019 struct ctl_table_header *header;
56020- header = __register_sysctl_table(set, path, files);
56021+ header = __register_sysctl_table(set, path, files ? files : table);
56022 if (!header) {
56023 kfree(ctl_table_arg);
56024 goto out;
56025diff --git a/fs/proc/root.c b/fs/proc/root.c
56026index 41a6ea9..23eaa92 100644
56027--- a/fs/proc/root.c
56028+++ b/fs/proc/root.c
56029@@ -182,7 +182,15 @@ void __init proc_root_init(void)
56030 #ifdef CONFIG_PROC_DEVICETREE
56031 proc_device_tree_init();
56032 #endif
56033+#ifdef CONFIG_GRKERNSEC_PROC_ADD
56034+#ifdef CONFIG_GRKERNSEC_PROC_USER
56035+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
56036+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
56037+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
56038+#endif
56039+#else
56040 proc_mkdir("bus", NULL);
56041+#endif
56042 proc_sys_init();
56043 }
56044
56045diff --git a/fs/proc/self.c b/fs/proc/self.c
56046index 6b6a993..807cccc 100644
56047--- a/fs/proc/self.c
56048+++ b/fs/proc/self.c
56049@@ -39,7 +39,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
56050 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
56051 void *cookie)
56052 {
56053- char *s = nd_get_link(nd);
56054+ const char *s = nd_get_link(nd);
56055 if (!IS_ERR(s))
56056 kfree(s);
56057 }
56058diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
56059index 3e636d8..83e3b71 100644
56060--- a/fs/proc/task_mmu.c
56061+++ b/fs/proc/task_mmu.c
56062@@ -11,12 +11,19 @@
56063 #include <linux/rmap.h>
56064 #include <linux/swap.h>
56065 #include <linux/swapops.h>
56066+#include <linux/grsecurity.h>
56067
56068 #include <asm/elf.h>
56069 #include <asm/uaccess.h>
56070 #include <asm/tlbflush.h>
56071 #include "internal.h"
56072
56073+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56074+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
56075+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
56076+ _mm->pax_flags & MF_PAX_SEGMEXEC))
56077+#endif
56078+
56079 void task_mem(struct seq_file *m, struct mm_struct *mm)
56080 {
56081 unsigned long data, text, lib, swap;
56082@@ -52,8 +59,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
56083 "VmExe:\t%8lu kB\n"
56084 "VmLib:\t%8lu kB\n"
56085 "VmPTE:\t%8lu kB\n"
56086- "VmSwap:\t%8lu kB\n",
56087- hiwater_vm << (PAGE_SHIFT-10),
56088+ "VmSwap:\t%8lu kB\n"
56089+
56090+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
56091+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
56092+#endif
56093+
56094+ ,hiwater_vm << (PAGE_SHIFT-10),
56095 total_vm << (PAGE_SHIFT-10),
56096 mm->locked_vm << (PAGE_SHIFT-10),
56097 mm->pinned_vm << (PAGE_SHIFT-10),
56098@@ -62,7 +74,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
56099 data << (PAGE_SHIFT-10),
56100 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
56101 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
56102- swap << (PAGE_SHIFT-10));
56103+ swap << (PAGE_SHIFT-10)
56104+
56105+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
56106+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56107+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
56108+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
56109+#else
56110+ , mm->context.user_cs_base
56111+ , mm->context.user_cs_limit
56112+#endif
56113+#endif
56114+
56115+ );
56116 }
56117
56118 unsigned long task_vsize(struct mm_struct *mm)
56119@@ -277,13 +301,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
56120 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
56121 }
56122
56123- /* We don't show the stack guard page in /proc/maps */
56124+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56125+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
56126+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
56127+#else
56128 start = vma->vm_start;
56129- if (stack_guard_page_start(vma, start))
56130- start += PAGE_SIZE;
56131 end = vma->vm_end;
56132- if (stack_guard_page_end(vma, end))
56133- end -= PAGE_SIZE;
56134+#endif
56135
56136 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
56137 start,
56138@@ -292,7 +316,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
56139 flags & VM_WRITE ? 'w' : '-',
56140 flags & VM_EXEC ? 'x' : '-',
56141 flags & VM_MAYSHARE ? 's' : 'p',
56142+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56143+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
56144+#else
56145 pgoff,
56146+#endif
56147 MAJOR(dev), MINOR(dev), ino, &len);
56148
56149 /*
56150@@ -301,7 +329,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
56151 */
56152 if (file) {
56153 pad_len_spaces(m, len);
56154- seq_path(m, &file->f_path, "\n");
56155+ seq_path(m, &file->f_path, "\n\\");
56156 goto done;
56157 }
56158
56159@@ -327,8 +355,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
56160 * Thread stack in /proc/PID/task/TID/maps or
56161 * the main process stack.
56162 */
56163- if (!is_pid || (vma->vm_start <= mm->start_stack &&
56164- vma->vm_end >= mm->start_stack)) {
56165+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
56166+ (vma->vm_start <= mm->start_stack &&
56167+ vma->vm_end >= mm->start_stack)) {
56168 name = "[stack]";
56169 } else {
56170 /* Thread stack in /proc/PID/maps */
56171@@ -352,6 +381,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
56172 struct proc_maps_private *priv = m->private;
56173 struct task_struct *task = priv->task;
56174
56175+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56176+ if (current->exec_id != m->exec_id) {
56177+ gr_log_badprocpid("maps");
56178+ return 0;
56179+ }
56180+#endif
56181+
56182 show_map_vma(m, vma, is_pid);
56183
56184 if (m->count < m->size) /* vma is copied successfully */
56185@@ -589,12 +625,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
56186 .private = &mss,
56187 };
56188
56189+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56190+ if (current->exec_id != m->exec_id) {
56191+ gr_log_badprocpid("smaps");
56192+ return 0;
56193+ }
56194+#endif
56195 memset(&mss, 0, sizeof mss);
56196- mss.vma = vma;
56197- /* mmap_sem is held in m_start */
56198- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
56199- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
56200-
56201+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56202+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
56203+#endif
56204+ mss.vma = vma;
56205+ /* mmap_sem is held in m_start */
56206+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
56207+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
56208+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56209+ }
56210+#endif
56211 show_map_vma(m, vma, is_pid);
56212
56213 seq_printf(m,
56214@@ -612,7 +659,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
56215 "KernelPageSize: %8lu kB\n"
56216 "MMUPageSize: %8lu kB\n"
56217 "Locked: %8lu kB\n",
56218+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56219+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
56220+#else
56221 (vma->vm_end - vma->vm_start) >> 10,
56222+#endif
56223 mss.resident >> 10,
56224 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
56225 mss.shared_clean >> 10,
56226@@ -1264,6 +1315,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
56227 int n;
56228 char buffer[50];
56229
56230+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56231+ if (current->exec_id != m->exec_id) {
56232+ gr_log_badprocpid("numa_maps");
56233+ return 0;
56234+ }
56235+#endif
56236+
56237 if (!mm)
56238 return 0;
56239
56240@@ -1281,11 +1339,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
56241 mpol_to_str(buffer, sizeof(buffer), pol);
56242 mpol_cond_put(pol);
56243
56244+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56245+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
56246+#else
56247 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
56248+#endif
56249
56250 if (file) {
56251 seq_printf(m, " file=");
56252- seq_path(m, &file->f_path, "\n\t= ");
56253+ seq_path(m, &file->f_path, "\n\t\\= ");
56254 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
56255 seq_printf(m, " heap");
56256 } else {
56257diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
56258index 56123a6..5a2f6ec 100644
56259--- a/fs/proc/task_nommu.c
56260+++ b/fs/proc/task_nommu.c
56261@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
56262 else
56263 bytes += kobjsize(mm);
56264
56265- if (current->fs && current->fs->users > 1)
56266+ if (current->fs && atomic_read(&current->fs->users) > 1)
56267 sbytes += kobjsize(current->fs);
56268 else
56269 bytes += kobjsize(current->fs);
56270@@ -168,7 +168,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
56271
56272 if (file) {
56273 pad_len_spaces(m, len);
56274- seq_path(m, &file->f_path, "");
56275+ seq_path(m, &file->f_path, "\n\\");
56276 } else if (mm) {
56277 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
56278
56279diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
56280index 17f7e08..e4b1529 100644
56281--- a/fs/proc/vmcore.c
56282+++ b/fs/proc/vmcore.c
56283@@ -99,9 +99,13 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
56284 nr_bytes = count;
56285
56286 /* If pfn is not ram, return zeros for sparse dump files */
56287- if (pfn_is_ram(pfn) == 0)
56288- memset(buf, 0, nr_bytes);
56289- else {
56290+ if (pfn_is_ram(pfn) == 0) {
56291+ if (userbuf) {
56292+ if (clear_user((char __force_user *)buf, nr_bytes))
56293+ return -EFAULT;
56294+ } else
56295+ memset(buf, 0, nr_bytes);
56296+ } else {
56297 tmp = copy_oldmem_page(pfn, buf, nr_bytes,
56298 offset, userbuf);
56299 if (tmp < 0)
56300@@ -186,7 +190,7 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer,
56301 if (tsz > nr_bytes)
56302 tsz = nr_bytes;
56303
56304- tmp = read_from_oldmem(buffer, tsz, &start, 1);
56305+ tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, 1);
56306 if (tmp < 0)
56307 return tmp;
56308 buflen -= tsz;
56309diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
56310index b00fcc9..e0c6381 100644
56311--- a/fs/qnx6/qnx6.h
56312+++ b/fs/qnx6/qnx6.h
56313@@ -74,7 +74,7 @@ enum {
56314 BYTESEX_BE,
56315 };
56316
56317-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
56318+static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
56319 {
56320 if (sbi->s_bytesex == BYTESEX_LE)
56321 return le64_to_cpu((__force __le64)n);
56322@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
56323 return (__force __fs64)cpu_to_be64(n);
56324 }
56325
56326-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
56327+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
56328 {
56329 if (sbi->s_bytesex == BYTESEX_LE)
56330 return le32_to_cpu((__force __le32)n);
56331diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
56332index 16e8abb..2dcf914 100644
56333--- a/fs/quota/netlink.c
56334+++ b/fs/quota/netlink.c
56335@@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
56336 void quota_send_warning(struct kqid qid, dev_t dev,
56337 const char warntype)
56338 {
56339- static atomic_t seq;
56340+ static atomic_unchecked_t seq;
56341 struct sk_buff *skb;
56342 void *msg_head;
56343 int ret;
56344@@ -49,7 +49,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
56345 "VFS: Not enough memory to send quota warning.\n");
56346 return;
56347 }
56348- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
56349+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
56350 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
56351 if (!msg_head) {
56352 printk(KERN_ERR
56353diff --git a/fs/read_write.c b/fs/read_write.c
56354index 2cefa41..c7e2fe0 100644
56355--- a/fs/read_write.c
56356+++ b/fs/read_write.c
56357@@ -411,7 +411,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
56358
56359 old_fs = get_fs();
56360 set_fs(get_ds());
56361- p = (__force const char __user *)buf;
56362+ p = (const char __force_user *)buf;
56363 if (count > MAX_RW_COUNT)
56364 count = MAX_RW_COUNT;
56365 if (file->f_op->write)
56366diff --git a/fs/readdir.c b/fs/readdir.c
56367index fee38e0..12fdf47 100644
56368--- a/fs/readdir.c
56369+++ b/fs/readdir.c
56370@@ -17,6 +17,7 @@
56371 #include <linux/security.h>
56372 #include <linux/syscalls.h>
56373 #include <linux/unistd.h>
56374+#include <linux/namei.h>
56375
56376 #include <asm/uaccess.h>
56377
56378@@ -67,6 +68,7 @@ struct old_linux_dirent {
56379
56380 struct readdir_callback {
56381 struct old_linux_dirent __user * dirent;
56382+ struct file * file;
56383 int result;
56384 };
56385
56386@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
56387 buf->result = -EOVERFLOW;
56388 return -EOVERFLOW;
56389 }
56390+
56391+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
56392+ return 0;
56393+
56394 buf->result++;
56395 dirent = buf->dirent;
56396 if (!access_ok(VERIFY_WRITE, dirent,
56397@@ -114,6 +120,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
56398
56399 buf.result = 0;
56400 buf.dirent = dirent;
56401+ buf.file = f.file;
56402
56403 error = vfs_readdir(f.file, fillonedir, &buf);
56404 if (buf.result)
56405@@ -139,6 +146,7 @@ struct linux_dirent {
56406 struct getdents_callback {
56407 struct linux_dirent __user * current_dir;
56408 struct linux_dirent __user * previous;
56409+ struct file * file;
56410 int count;
56411 int error;
56412 };
56413@@ -160,6 +168,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
56414 buf->error = -EOVERFLOW;
56415 return -EOVERFLOW;
56416 }
56417+
56418+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
56419+ return 0;
56420+
56421 dirent = buf->previous;
56422 if (dirent) {
56423 if (__put_user(offset, &dirent->d_off))
56424@@ -205,6 +217,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
56425 buf.previous = NULL;
56426 buf.count = count;
56427 buf.error = 0;
56428+ buf.file = f.file;
56429
56430 error = vfs_readdir(f.file, filldir, &buf);
56431 if (error >= 0)
56432@@ -223,6 +236,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
56433 struct getdents_callback64 {
56434 struct linux_dirent64 __user * current_dir;
56435 struct linux_dirent64 __user * previous;
56436+ struct file *file;
56437 int count;
56438 int error;
56439 };
56440@@ -238,6 +252,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
56441 buf->error = -EINVAL; /* only used if we fail.. */
56442 if (reclen > buf->count)
56443 return -EINVAL;
56444+
56445+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
56446+ return 0;
56447+
56448 dirent = buf->previous;
56449 if (dirent) {
56450 if (__put_user(offset, &dirent->d_off))
56451@@ -283,6 +301,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
56452
56453 buf.current_dir = dirent;
56454 buf.previous = NULL;
56455+ buf.file = f.file;
56456 buf.count = count;
56457 buf.error = 0;
56458
56459@@ -291,7 +310,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
56460 error = buf.error;
56461 lastdirent = buf.previous;
56462 if (lastdirent) {
56463- typeof(lastdirent->d_off) d_off = f.file->f_pos;
56464+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = f.file->f_pos;
56465 if (__put_user(d_off, &lastdirent->d_off))
56466 error = -EFAULT;
56467 else
56468diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
56469index 2b7882b..1c5ef48 100644
56470--- a/fs/reiserfs/do_balan.c
56471+++ b/fs/reiserfs/do_balan.c
56472@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
56473 return;
56474 }
56475
56476- atomic_inc(&(fs_generation(tb->tb_sb)));
56477+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
56478 do_balance_starts(tb);
56479
56480 /* balance leaf returns 0 except if combining L R and S into
56481diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
56482index 33532f7..4846ade 100644
56483--- a/fs/reiserfs/procfs.c
56484+++ b/fs/reiserfs/procfs.c
56485@@ -112,7 +112,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
56486 "SMALL_TAILS " : "NO_TAILS ",
56487 replay_only(sb) ? "REPLAY_ONLY " : "",
56488 convert_reiserfs(sb) ? "CONV " : "",
56489- atomic_read(&r->s_generation_counter),
56490+ atomic_read_unchecked(&r->s_generation_counter),
56491 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
56492 SF(s_do_balance), SF(s_unneeded_left_neighbor),
56493 SF(s_good_search_by_key_reada), SF(s_bmaps),
56494diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
56495index 157e474..65a6114 100644
56496--- a/fs/reiserfs/reiserfs.h
56497+++ b/fs/reiserfs/reiserfs.h
56498@@ -453,7 +453,7 @@ struct reiserfs_sb_info {
56499 /* Comment? -Hans */
56500 wait_queue_head_t s_wait;
56501 /* To be obsoleted soon by per buffer seals.. -Hans */
56502- atomic_t s_generation_counter; // increased by one every time the
56503+ atomic_unchecked_t s_generation_counter; // increased by one every time the
56504 // tree gets re-balanced
56505 unsigned long s_properties; /* File system properties. Currently holds
56506 on-disk FS format */
56507@@ -1978,7 +1978,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
56508 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
56509
56510 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
56511-#define get_generation(s) atomic_read (&fs_generation(s))
56512+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
56513 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
56514 #define __fs_changed(gen,s) (gen != get_generation (s))
56515 #define fs_changed(gen,s) \
56516diff --git a/fs/select.c b/fs/select.c
56517index 8c1c96c..a0f9b6d 100644
56518--- a/fs/select.c
56519+++ b/fs/select.c
56520@@ -20,6 +20,7 @@
56521 #include <linux/export.h>
56522 #include <linux/slab.h>
56523 #include <linux/poll.h>
56524+#include <linux/security.h>
56525 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
56526 #include <linux/file.h>
56527 #include <linux/fdtable.h>
56528@@ -827,6 +828,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
56529 struct poll_list *walk = head;
56530 unsigned long todo = nfds;
56531
56532+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
56533 if (nfds > rlimit(RLIMIT_NOFILE))
56534 return -EINVAL;
56535
56536diff --git a/fs/seq_file.c b/fs/seq_file.c
56537index 774c1eb..b67582a 100644
56538--- a/fs/seq_file.c
56539+++ b/fs/seq_file.c
56540@@ -10,6 +10,7 @@
56541 #include <linux/seq_file.h>
56542 #include <linux/slab.h>
56543 #include <linux/cred.h>
56544+#include <linux/sched.h>
56545
56546 #include <asm/uaccess.h>
56547 #include <asm/page.h>
56548@@ -60,6 +61,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
56549 #ifdef CONFIG_USER_NS
56550 p->user_ns = file->f_cred->user_ns;
56551 #endif
56552+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56553+ p->exec_id = current->exec_id;
56554+#endif
56555
56556 /*
56557 * Wrappers around seq_open(e.g. swaps_open) need to be
56558@@ -96,7 +100,7 @@ static int traverse(struct seq_file *m, loff_t offset)
56559 return 0;
56560 }
56561 if (!m->buf) {
56562- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
56563+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
56564 if (!m->buf)
56565 return -ENOMEM;
56566 }
56567@@ -136,7 +140,7 @@ static int traverse(struct seq_file *m, loff_t offset)
56568 Eoverflow:
56569 m->op->stop(m, p);
56570 kfree(m->buf);
56571- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
56572+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
56573 return !m->buf ? -ENOMEM : -EAGAIN;
56574 }
56575
56576@@ -191,7 +195,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
56577
56578 /* grab buffer if we didn't have one */
56579 if (!m->buf) {
56580- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
56581+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
56582 if (!m->buf)
56583 goto Enomem;
56584 }
56585@@ -232,7 +236,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
56586 goto Fill;
56587 m->op->stop(m, p);
56588 kfree(m->buf);
56589- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
56590+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
56591 if (!m->buf)
56592 goto Enomem;
56593 m->count = 0;
56594@@ -581,7 +585,7 @@ static void single_stop(struct seq_file *p, void *v)
56595 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
56596 void *data)
56597 {
56598- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
56599+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
56600 int res = -ENOMEM;
56601
56602 if (op) {
56603diff --git a/fs/splice.c b/fs/splice.c
56604index d37431d..81c3044 100644
56605--- a/fs/splice.c
56606+++ b/fs/splice.c
56607@@ -196,7 +196,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
56608 pipe_lock(pipe);
56609
56610 for (;;) {
56611- if (!pipe->readers) {
56612+ if (!atomic_read(&pipe->readers)) {
56613 send_sig(SIGPIPE, current, 0);
56614 if (!ret)
56615 ret = -EPIPE;
56616@@ -219,7 +219,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
56617 page_nr++;
56618 ret += buf->len;
56619
56620- if (pipe->files)
56621+ if (atomic_read(&pipe->files))
56622 do_wakeup = 1;
56623
56624 if (!--spd->nr_pages)
56625@@ -250,9 +250,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
56626 do_wakeup = 0;
56627 }
56628
56629- pipe->waiting_writers++;
56630+ atomic_inc(&pipe->waiting_writers);
56631 pipe_wait(pipe);
56632- pipe->waiting_writers--;
56633+ atomic_dec(&pipe->waiting_writers);
56634 }
56635
56636 pipe_unlock(pipe);
56637@@ -565,7 +565,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
56638 old_fs = get_fs();
56639 set_fs(get_ds());
56640 /* The cast to a user pointer is valid due to the set_fs() */
56641- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
56642+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
56643 set_fs(old_fs);
56644
56645 return res;
56646@@ -580,7 +580,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
56647 old_fs = get_fs();
56648 set_fs(get_ds());
56649 /* The cast to a user pointer is valid due to the set_fs() */
56650- res = vfs_write(file, (__force const char __user *)buf, count, &pos);
56651+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
56652 set_fs(old_fs);
56653
56654 return res;
56655@@ -633,7 +633,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
56656 goto err;
56657
56658 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
56659- vec[i].iov_base = (void __user *) page_address(page);
56660+ vec[i].iov_base = (void __force_user *) page_address(page);
56661 vec[i].iov_len = this_len;
56662 spd.pages[i] = page;
56663 spd.nr_pages++;
56664@@ -829,7 +829,7 @@ int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_desc *sd,
56665 ops->release(pipe, buf);
56666 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
56667 pipe->nrbufs--;
56668- if (pipe->files)
56669+ if (atomic_read(&pipe->files))
56670 sd->need_wakeup = true;
56671 }
56672
56673@@ -854,10 +854,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
56674 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
56675 {
56676 while (!pipe->nrbufs) {
56677- if (!pipe->writers)
56678+ if (!atomic_read(&pipe->writers))
56679 return 0;
56680
56681- if (!pipe->waiting_writers && sd->num_spliced)
56682+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
56683 return 0;
56684
56685 if (sd->flags & SPLICE_F_NONBLOCK)
56686@@ -1193,7 +1193,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
56687 * out of the pipe right after the splice_to_pipe(). So set
56688 * PIPE_READERS appropriately.
56689 */
56690- pipe->readers = 1;
56691+ atomic_set(&pipe->readers, 1);
56692
56693 current->splice_pipe = pipe;
56694 }
56695@@ -1769,9 +1769,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
56696 ret = -ERESTARTSYS;
56697 break;
56698 }
56699- if (!pipe->writers)
56700+ if (!atomic_read(&pipe->writers))
56701 break;
56702- if (!pipe->waiting_writers) {
56703+ if (!atomic_read(&pipe->waiting_writers)) {
56704 if (flags & SPLICE_F_NONBLOCK) {
56705 ret = -EAGAIN;
56706 break;
56707@@ -1803,7 +1803,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
56708 pipe_lock(pipe);
56709
56710 while (pipe->nrbufs >= pipe->buffers) {
56711- if (!pipe->readers) {
56712+ if (!atomic_read(&pipe->readers)) {
56713 send_sig(SIGPIPE, current, 0);
56714 ret = -EPIPE;
56715 break;
56716@@ -1816,9 +1816,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
56717 ret = -ERESTARTSYS;
56718 break;
56719 }
56720- pipe->waiting_writers++;
56721+ atomic_inc(&pipe->waiting_writers);
56722 pipe_wait(pipe);
56723- pipe->waiting_writers--;
56724+ atomic_dec(&pipe->waiting_writers);
56725 }
56726
56727 pipe_unlock(pipe);
56728@@ -1854,14 +1854,14 @@ retry:
56729 pipe_double_lock(ipipe, opipe);
56730
56731 do {
56732- if (!opipe->readers) {
56733+ if (!atomic_read(&opipe->readers)) {
56734 send_sig(SIGPIPE, current, 0);
56735 if (!ret)
56736 ret = -EPIPE;
56737 break;
56738 }
56739
56740- if (!ipipe->nrbufs && !ipipe->writers)
56741+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
56742 break;
56743
56744 /*
56745@@ -1958,7 +1958,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
56746 pipe_double_lock(ipipe, opipe);
56747
56748 do {
56749- if (!opipe->readers) {
56750+ if (!atomic_read(&opipe->readers)) {
56751 send_sig(SIGPIPE, current, 0);
56752 if (!ret)
56753 ret = -EPIPE;
56754@@ -2003,7 +2003,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
56755 * return EAGAIN if we have the potential of some data in the
56756 * future, otherwise just return 0
56757 */
56758- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
56759+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
56760 ret = -EAGAIN;
56761
56762 pipe_unlock(ipipe);
56763diff --git a/fs/stat.c b/fs/stat.c
56764index 04ce1ac..a13dd1e 100644
56765--- a/fs/stat.c
56766+++ b/fs/stat.c
56767@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
56768 stat->gid = inode->i_gid;
56769 stat->rdev = inode->i_rdev;
56770 stat->size = i_size_read(inode);
56771- stat->atime = inode->i_atime;
56772- stat->mtime = inode->i_mtime;
56773+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
56774+ stat->atime = inode->i_ctime;
56775+ stat->mtime = inode->i_ctime;
56776+ } else {
56777+ stat->atime = inode->i_atime;
56778+ stat->mtime = inode->i_mtime;
56779+ }
56780 stat->ctime = inode->i_ctime;
56781 stat->blksize = (1 << inode->i_blkbits);
56782 stat->blocks = inode->i_blocks;
56783@@ -46,8 +51,14 @@ int vfs_getattr(struct path *path, struct kstat *stat)
56784 if (retval)
56785 return retval;
56786
56787- if (inode->i_op->getattr)
56788- return inode->i_op->getattr(path->mnt, path->dentry, stat);
56789+ if (inode->i_op->getattr) {
56790+ retval = inode->i_op->getattr(path->mnt, path->dentry, stat);
56791+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
56792+ stat->atime = stat->ctime;
56793+ stat->mtime = stat->ctime;
56794+ }
56795+ return retval;
56796+ }
56797
56798 generic_fillattr(inode, stat);
56799 return 0;
56800diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
56801index 15c68f9..36a8b3e 100644
56802--- a/fs/sysfs/bin.c
56803+++ b/fs/sysfs/bin.c
56804@@ -235,13 +235,13 @@ static int bin_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
56805 return ret;
56806 }
56807
56808-static int bin_access(struct vm_area_struct *vma, unsigned long addr,
56809- void *buf, int len, int write)
56810+static ssize_t bin_access(struct vm_area_struct *vma, unsigned long addr,
56811+ void *buf, size_t len, int write)
56812 {
56813 struct file *file = vma->vm_file;
56814 struct bin_buffer *bb = file->private_data;
56815 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
56816- int ret;
56817+ ssize_t ret;
56818
56819 if (!bb->vm_ops)
56820 return -EINVAL;
56821diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
56822index e8e0e71..3ca455a 100644
56823--- a/fs/sysfs/dir.c
56824+++ b/fs/sysfs/dir.c
56825@@ -40,7 +40,7 @@ static DEFINE_IDA(sysfs_ino_ida);
56826 *
56827 * Returns 31 bit hash of ns + name (so it fits in an off_t )
56828 */
56829-static unsigned int sysfs_name_hash(const void *ns, const char *name)
56830+static unsigned int sysfs_name_hash(const void *ns, const unsigned char *name)
56831 {
56832 unsigned long hash = init_name_hash();
56833 unsigned int len = strlen(name);
56834@@ -679,6 +679,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
56835 struct sysfs_dirent *sd;
56836 int rc;
56837
56838+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
56839+ const char *parent_name = parent_sd->s_name;
56840+
56841+ mode = S_IFDIR | S_IRWXU;
56842+
56843+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
56844+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
56845+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse"))) ||
56846+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
56847+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
56848+#endif
56849+
56850 /* allocate */
56851 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
56852 if (!sd)
56853diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
56854index 602f56d..6853db8 100644
56855--- a/fs/sysfs/file.c
56856+++ b/fs/sysfs/file.c
56857@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
56858
56859 struct sysfs_open_dirent {
56860 atomic_t refcnt;
56861- atomic_t event;
56862+ atomic_unchecked_t event;
56863 wait_queue_head_t poll;
56864 struct list_head buffers; /* goes through sysfs_buffer.list */
56865 };
56866@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
56867 if (!sysfs_get_active(attr_sd))
56868 return -ENODEV;
56869
56870- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
56871+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
56872 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
56873
56874 sysfs_put_active(attr_sd);
56875@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
56876 return -ENOMEM;
56877
56878 atomic_set(&new_od->refcnt, 0);
56879- atomic_set(&new_od->event, 1);
56880+ atomic_set_unchecked(&new_od->event, 1);
56881 init_waitqueue_head(&new_od->poll);
56882 INIT_LIST_HEAD(&new_od->buffers);
56883 goto retry;
56884@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
56885
56886 sysfs_put_active(attr_sd);
56887
56888- if (buffer->event != atomic_read(&od->event))
56889+ if (buffer->event != atomic_read_unchecked(&od->event))
56890 goto trigger;
56891
56892 return DEFAULT_POLLMASK;
56893@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
56894
56895 od = sd->s_attr.open;
56896 if (od) {
56897- atomic_inc(&od->event);
56898+ atomic_inc_unchecked(&od->event);
56899 wake_up_interruptible(&od->poll);
56900 }
56901
56902diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
56903index 8c940df..25b733e 100644
56904--- a/fs/sysfs/symlink.c
56905+++ b/fs/sysfs/symlink.c
56906@@ -305,7 +305,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
56907
56908 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
56909 {
56910- char *page = nd_get_link(nd);
56911+ const char *page = nd_get_link(nd);
56912 if (!IS_ERR(page))
56913 free_page((unsigned long)page);
56914 }
56915diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
56916index 69d4889..a810bd4 100644
56917--- a/fs/sysv/sysv.h
56918+++ b/fs/sysv/sysv.h
56919@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
56920 #endif
56921 }
56922
56923-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
56924+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
56925 {
56926 if (sbi->s_bytesex == BYTESEX_PDP)
56927 return PDP_swab((__force __u32)n);
56928diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
56929index e18b988..f1d4ad0f 100644
56930--- a/fs/ubifs/io.c
56931+++ b/fs/ubifs/io.c
56932@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
56933 return err;
56934 }
56935
56936-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
56937+int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
56938 {
56939 int err;
56940
56941diff --git a/fs/udf/misc.c b/fs/udf/misc.c
56942index c175b4d..8f36a16 100644
56943--- a/fs/udf/misc.c
56944+++ b/fs/udf/misc.c
56945@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
56946
56947 u8 udf_tag_checksum(const struct tag *t)
56948 {
56949- u8 *data = (u8 *)t;
56950+ const u8 *data = (const u8 *)t;
56951 u8 checksum = 0;
56952 int i;
56953 for (i = 0; i < sizeof(struct tag); ++i)
56954diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
56955index 8d974c4..b82f6ec 100644
56956--- a/fs/ufs/swab.h
56957+++ b/fs/ufs/swab.h
56958@@ -22,7 +22,7 @@ enum {
56959 BYTESEX_BE
56960 };
56961
56962-static inline u64
56963+static inline u64 __intentional_overflow(-1)
56964 fs64_to_cpu(struct super_block *sbp, __fs64 n)
56965 {
56966 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
56967@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
56968 return (__force __fs64)cpu_to_be64(n);
56969 }
56970
56971-static inline u32
56972+static inline u32 __intentional_overflow(-1)
56973 fs32_to_cpu(struct super_block *sbp, __fs32 n)
56974 {
56975 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
56976diff --git a/fs/utimes.c b/fs/utimes.c
56977index f4fb7ec..3fe03c0 100644
56978--- a/fs/utimes.c
56979+++ b/fs/utimes.c
56980@@ -1,6 +1,7 @@
56981 #include <linux/compiler.h>
56982 #include <linux/file.h>
56983 #include <linux/fs.h>
56984+#include <linux/security.h>
56985 #include <linux/linkage.h>
56986 #include <linux/mount.h>
56987 #include <linux/namei.h>
56988@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
56989 goto mnt_drop_write_and_out;
56990 }
56991 }
56992+
56993+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
56994+ error = -EACCES;
56995+ goto mnt_drop_write_and_out;
56996+ }
56997+
56998 mutex_lock(&inode->i_mutex);
56999 error = notify_change(path->dentry, &newattrs);
57000 mutex_unlock(&inode->i_mutex);
57001diff --git a/fs/xattr.c b/fs/xattr.c
57002index 3377dff..4d074d9 100644
57003--- a/fs/xattr.c
57004+++ b/fs/xattr.c
57005@@ -227,6 +227,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
57006 return rc;
57007 }
57008
57009+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
57010+ssize_t
57011+pax_getxattr(struct dentry *dentry, void *value, size_t size)
57012+{
57013+ struct inode *inode = dentry->d_inode;
57014+ ssize_t error;
57015+
57016+ error = inode_permission(inode, MAY_EXEC);
57017+ if (error)
57018+ return error;
57019+
57020+ if (inode->i_op->getxattr)
57021+ error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
57022+ else
57023+ error = -EOPNOTSUPP;
57024+
57025+ return error;
57026+}
57027+EXPORT_SYMBOL(pax_getxattr);
57028+#endif
57029+
57030 ssize_t
57031 vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
57032 {
57033@@ -319,7 +340,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
57034 * Extended attribute SET operations
57035 */
57036 static long
57037-setxattr(struct dentry *d, const char __user *name, const void __user *value,
57038+setxattr(struct path *path, const char __user *name, const void __user *value,
57039 size_t size, int flags)
57040 {
57041 int error;
57042@@ -355,7 +376,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
57043 posix_acl_fix_xattr_from_user(kvalue, size);
57044 }
57045
57046- error = vfs_setxattr(d, kname, kvalue, size, flags);
57047+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
57048+ error = -EACCES;
57049+ goto out;
57050+ }
57051+
57052+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
57053 out:
57054 if (vvalue)
57055 vfree(vvalue);
57056@@ -377,7 +403,7 @@ retry:
57057 return error;
57058 error = mnt_want_write(path.mnt);
57059 if (!error) {
57060- error = setxattr(path.dentry, name, value, size, flags);
57061+ error = setxattr(&path, name, value, size, flags);
57062 mnt_drop_write(path.mnt);
57063 }
57064 path_put(&path);
57065@@ -401,7 +427,7 @@ retry:
57066 return error;
57067 error = mnt_want_write(path.mnt);
57068 if (!error) {
57069- error = setxattr(path.dentry, name, value, size, flags);
57070+ error = setxattr(&path, name, value, size, flags);
57071 mnt_drop_write(path.mnt);
57072 }
57073 path_put(&path);
57074@@ -416,16 +442,14 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
57075 const void __user *,value, size_t, size, int, flags)
57076 {
57077 struct fd f = fdget(fd);
57078- struct dentry *dentry;
57079 int error = -EBADF;
57080
57081 if (!f.file)
57082 return error;
57083- dentry = f.file->f_path.dentry;
57084- audit_inode(NULL, dentry, 0);
57085+ audit_inode(NULL, f.file->f_path.dentry, 0);
57086 error = mnt_want_write_file(f.file);
57087 if (!error) {
57088- error = setxattr(dentry, name, value, size, flags);
57089+ error = setxattr(&f.file->f_path, name, value, size, flags);
57090 mnt_drop_write_file(f.file);
57091 }
57092 fdput(f);
57093diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
57094index 9fbea87..6b19972 100644
57095--- a/fs/xattr_acl.c
57096+++ b/fs/xattr_acl.c
57097@@ -76,8 +76,8 @@ struct posix_acl *
57098 posix_acl_from_xattr(struct user_namespace *user_ns,
57099 const void *value, size_t size)
57100 {
57101- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
57102- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
57103+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
57104+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
57105 int count;
57106 struct posix_acl *acl;
57107 struct posix_acl_entry *acl_e;
57108diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
57109index 8904284..ee0e14b 100644
57110--- a/fs/xfs/xfs_bmap.c
57111+++ b/fs/xfs/xfs_bmap.c
57112@@ -765,7 +765,7 @@ xfs_bmap_validate_ret(
57113
57114 #else
57115 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
57116-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
57117+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
57118 #endif /* DEBUG */
57119
57120 /*
57121diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
57122index 6157424..ac98f6d 100644
57123--- a/fs/xfs/xfs_dir2_sf.c
57124+++ b/fs/xfs/xfs_dir2_sf.c
57125@@ -851,7 +851,15 @@ xfs_dir2_sf_getdents(
57126 }
57127
57128 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
57129- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
57130+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
57131+ char name[sfep->namelen];
57132+ memcpy(name, sfep->name, sfep->namelen);
57133+ if (filldir(dirent, name, sfep->namelen,
57134+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
57135+ *offset = off & 0x7fffffff;
57136+ return 0;
57137+ }
57138+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
57139 off & 0x7fffffff, ino, DT_UNKNOWN)) {
57140 *offset = off & 0x7fffffff;
57141 return 0;
57142diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
57143index 5e99968..45bd327 100644
57144--- a/fs/xfs/xfs_ioctl.c
57145+++ b/fs/xfs/xfs_ioctl.c
57146@@ -127,7 +127,7 @@ xfs_find_handle(
57147 }
57148
57149 error = -EFAULT;
57150- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
57151+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
57152 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
57153 goto out_put;
57154
57155diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
57156index ca9ecaa..60100c7 100644
57157--- a/fs/xfs/xfs_iops.c
57158+++ b/fs/xfs/xfs_iops.c
57159@@ -395,7 +395,7 @@ xfs_vn_put_link(
57160 struct nameidata *nd,
57161 void *p)
57162 {
57163- char *s = nd_get_link(nd);
57164+ const char *s = nd_get_link(nd);
57165
57166 if (!IS_ERR(s))
57167 kfree(s);
57168diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
57169new file mode 100644
57170index 0000000..c9c4ac3
57171--- /dev/null
57172+++ b/grsecurity/Kconfig
57173@@ -0,0 +1,1054 @@
57174+#
57175+# grecurity configuration
57176+#
57177+menu "Memory Protections"
57178+depends on GRKERNSEC
57179+
57180+config GRKERNSEC_KMEM
57181+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
57182+ default y if GRKERNSEC_CONFIG_AUTO
57183+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
57184+ help
57185+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
57186+ be written to or read from to modify or leak the contents of the running
57187+ kernel. /dev/port will also not be allowed to be opened and support
57188+ for /dev/cpu/*/msr will be removed. If you have module
57189+ support disabled, enabling this will close up five ways that are
57190+ currently used to insert malicious code into the running kernel.
57191+
57192+ Even with all these features enabled, we still highly recommend that
57193+ you use the RBAC system, as it is still possible for an attacker to
57194+ modify the running kernel through privileged I/O granted by ioperm/iopl.
57195+
57196+ If you are not using XFree86, you may be able to stop this additional
57197+ case by enabling the 'Disable privileged I/O' option. Though nothing
57198+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
57199+ but only to video memory, which is the only writing we allow in this
57200+ case. If /dev/kmem or /dev/mem are mmaped without PROT_WRITE, they will
57201+ not be allowed to mprotect it with PROT_WRITE later.
57202+ Enabling this feature will prevent the "cpupower" and "powertop" tools
57203+ from working.
57204+
57205+ It is highly recommended that you say Y here if you meet all the
57206+ conditions above.
57207+
57208+config GRKERNSEC_VM86
57209+ bool "Restrict VM86 mode"
57210+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
57211+ depends on X86_32
57212+
57213+ help
57214+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
57215+ make use of a special execution mode on 32bit x86 processors called
57216+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
57217+ video cards and will still work with this option enabled. The purpose
57218+ of the option is to prevent exploitation of emulation errors in
57219+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
57220+ Nearly all users should be able to enable this option.
57221+
57222+config GRKERNSEC_IO
57223+ bool "Disable privileged I/O"
57224+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
57225+ depends on X86
57226+ select RTC_CLASS
57227+ select RTC_INTF_DEV
57228+ select RTC_DRV_CMOS
57229+
57230+ help
57231+ If you say Y here, all ioperm and iopl calls will return an error.
57232+ Ioperm and iopl can be used to modify the running kernel.
57233+ Unfortunately, some programs need this access to operate properly,
57234+ the most notable of which are XFree86 and hwclock. hwclock can be
57235+ remedied by having RTC support in the kernel, so real-time
57236+ clock support is enabled if this option is enabled, to ensure
57237+ that hwclock operates correctly. XFree86 still will not
57238+ operate correctly with this option enabled, so DO NOT CHOOSE Y
57239+ IF YOU USE XFree86. If you use XFree86 and you still want to
57240+ protect your kernel against modification, use the RBAC system.
57241+
57242+config GRKERNSEC_JIT_HARDEN
57243+ bool "Harden BPF JIT against spray attacks"
57244+ default y if GRKERNSEC_CONFIG_AUTO
57245+ depends on BPF_JIT
57246+ help
57247+ If you say Y here, the native code generated by the kernel's Berkeley
57248+ Packet Filter (BPF) JIT engine will be hardened against JIT-spraying
57249+ attacks that attempt to fit attacker-beneficial instructions in
57250+ 32bit immediate fields of JIT-generated native instructions. The
57251+ attacker will generally aim to cause an unintended instruction sequence
57252+ of JIT-generated native code to execute by jumping into the middle of
57253+ a generated instruction. This feature effectively randomizes the 32bit
57254+ immediate constants present in the generated code to thwart such attacks.
57255+
57256+ If you're using KERNEXEC, it's recommended that you enable this option
57257+ to supplement the hardening of the kernel.
57258+
57259+config GRKERNSEC_PERF_HARDEN
57260+ bool "Disable unprivileged PERF_EVENTS usage by default"
57261+ default y if GRKERNSEC_CONFIG_AUTO
57262+ depends on PERF_EVENTS
57263+ help
57264+ If you say Y here, the range of acceptable values for the
57265+ /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow and
57266+ default to a new value: 3. When the sysctl is set to this value, no
57267+ unprivileged use of the PERF_EVENTS syscall interface will be permitted.
57268+
57269+ Though PERF_EVENTS can be used legitimately for performance monitoring
57270+ and low-level application profiling, it is forced on regardless of
57271+ configuration, has been at fault for several vulnerabilities, and
57272+ creates new opportunities for side channels and other information leaks.
57273+
57274+ This feature puts PERF_EVENTS into a secure default state and permits
57275+ the administrator to change out of it temporarily if unprivileged
57276+ application profiling is needed.
57277+
57278+config GRKERNSEC_RAND_THREADSTACK
57279+ bool "Insert random gaps between thread stacks"
57280+ default y if GRKERNSEC_CONFIG_AUTO
57281+ depends on PAX_RANDMMAP && !PPC
57282+ help
57283+ If you say Y here, a random-sized gap will be enforced between allocated
57284+ thread stacks. Glibc's NPTL and other threading libraries that
57285+ pass MAP_STACK to the kernel for thread stack allocation are supported.
57286+ The implementation currently provides 8 bits of entropy for the gap.
57287+
57288+ Many distributions do not compile threaded remote services with the
57289+ -fstack-check argument to GCC, causing the variable-sized stack-based
57290+ allocator, alloca(), to not probe the stack on allocation. This
57291+ permits an unbounded alloca() to skip over any guard page and potentially
57292+ modify another thread's stack reliably. An enforced random gap
57293+ reduces the reliability of such an attack and increases the chance
57294+ that such a read/write to another thread's stack instead lands in
57295+ an unmapped area, causing a crash and triggering grsecurity's
57296+ anti-bruteforcing logic.
57297+
57298+config GRKERNSEC_PROC_MEMMAP
57299+ bool "Harden ASLR against information leaks and entropy reduction"
57300+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
57301+ depends on PAX_NOEXEC || PAX_ASLR
57302+ help
57303+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
57304+ give no information about the addresses of its mappings if
57305+ PaX features that rely on random addresses are enabled on the task.
57306+ In addition to sanitizing this information and disabling other
57307+ dangerous sources of information, this option causes reads of sensitive
57308+ /proc/<pid> entries where the file descriptor was opened in a different
57309+ task than the one performing the read. Such attempts are logged.
57310+ This option also limits argv/env strings for suid/sgid binaries
57311+ to 512KB to prevent a complete exhaustion of the stack entropy provided
57312+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
57313+ binaries to prevent alternative mmap layouts from being abused.
57314+
57315+ If you use PaX it is essential that you say Y here as it closes up
57316+ several holes that make full ASLR useless locally.
57317+
57318+config GRKERNSEC_BRUTE
57319+ bool "Deter exploit bruteforcing"
57320+ default y if GRKERNSEC_CONFIG_AUTO
57321+ help
57322+ If you say Y here, attempts to bruteforce exploits against forking
57323+ daemons such as apache or sshd, as well as against suid/sgid binaries
57324+ will be deterred. When a child of a forking daemon is killed by PaX
57325+ or crashes due to an illegal instruction or other suspicious signal,
57326+ the parent process will be delayed 30 seconds upon every subsequent
57327+ fork until the administrator is able to assess the situation and
57328+ restart the daemon.
57329+ In the suid/sgid case, the attempt is logged, the user has all their
57330+ existing instances of the suid/sgid binary terminated and will
57331+ be unable to execute any suid/sgid binaries for 15 minutes.
57332+
57333+ It is recommended that you also enable signal logging in the auditing
57334+ section so that logs are generated when a process triggers a suspicious
57335+ signal.
57336+ If the sysctl option is enabled, a sysctl option with name
57337+ "deter_bruteforce" is created.
57338+
57339+
57340+config GRKERNSEC_MODHARDEN
57341+ bool "Harden module auto-loading"
57342+ default y if GRKERNSEC_CONFIG_AUTO
57343+ depends on MODULES
57344+ help
57345+ If you say Y here, module auto-loading in response to use of some
57346+ feature implemented by an unloaded module will be restricted to
57347+ root users. Enabling this option helps defend against attacks
57348+ by unprivileged users who abuse the auto-loading behavior to
57349+ cause a vulnerable module to load that is then exploited.
57350+
57351+ If this option prevents a legitimate use of auto-loading for a
57352+ non-root user, the administrator can execute modprobe manually
57353+ with the exact name of the module mentioned in the alert log.
57354+ Alternatively, the administrator can add the module to the list
57355+ of modules loaded at boot by modifying init scripts.
57356+
57357+ Modification of init scripts will most likely be needed on
57358+ Ubuntu servers with encrypted home directory support enabled,
57359+ as the first non-root user logging in will cause the ecb(aes),
57360+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
57361+
57362+config GRKERNSEC_HIDESYM
57363+ bool "Hide kernel symbols"
57364+ default y if GRKERNSEC_CONFIG_AUTO
57365+ select PAX_USERCOPY_SLABS
57366+ help
57367+ If you say Y here, getting information on loaded modules, and
57368+ displaying all kernel symbols through a syscall will be restricted
57369+ to users with CAP_SYS_MODULE. For software compatibility reasons,
57370+ /proc/kallsyms will be restricted to the root user. The RBAC
57371+ system can hide that entry even from root.
57372+
57373+ This option also prevents leaking of kernel addresses through
57374+ several /proc entries.
57375+
57376+ Note that this option is only effective provided the following
57377+ conditions are met:
57378+ 1) The kernel using grsecurity is not precompiled by some distribution
57379+ 2) You have also enabled GRKERNSEC_DMESG
57380+ 3) You are using the RBAC system and hiding other files such as your
57381+ kernel image and System.map. Alternatively, enabling this option
57382+ causes the permissions on /boot, /lib/modules, and the kernel
57383+ source directory to change at compile time to prevent
57384+ reading by non-root users.
57385+ If the above conditions are met, this option will aid in providing a
57386+ useful protection against local kernel exploitation of overflows
57387+ and arbitrary read/write vulnerabilities.
57388+
57389+ It is highly recommended that you enable GRKERNSEC_PERF_HARDEN
57390+ in addition to this feature.
57391+
57392+config GRKERNSEC_KERN_LOCKOUT
57393+ bool "Active kernel exploit response"
57394+ default y if GRKERNSEC_CONFIG_AUTO
57395+ depends on X86 || ARM || PPC || SPARC
57396+ help
57397+ If you say Y here, when a PaX alert is triggered due to suspicious
57398+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
57399+ or an OOPS occurs due to bad memory accesses, instead of just
57400+ terminating the offending process (and potentially allowing
57401+ a subsequent exploit from the same user), we will take one of two
57402+ actions:
57403+ If the user was root, we will panic the system
57404+ If the user was non-root, we will log the attempt, terminate
57405+ all processes owned by the user, then prevent them from creating
57406+ any new processes until the system is restarted
57407+ This deters repeated kernel exploitation/bruteforcing attempts
57408+ and is useful for later forensics.
57409+
57410+endmenu
57411+menu "Role Based Access Control Options"
57412+depends on GRKERNSEC
57413+
57414+config GRKERNSEC_RBAC_DEBUG
57415+ bool
57416+
57417+config GRKERNSEC_NO_RBAC
57418+ bool "Disable RBAC system"
57419+ help
57420+ If you say Y here, the /dev/grsec device will be removed from the kernel,
57421+ preventing the RBAC system from being enabled. You should only say Y
57422+ here if you have no intention of using the RBAC system, so as to prevent
57423+ an attacker with root access from misusing the RBAC system to hide files
57424+ and processes when loadable module support and /dev/[k]mem have been
57425+ locked down.
57426+
57427+config GRKERNSEC_ACL_HIDEKERN
57428+ bool "Hide kernel processes"
57429+ help
57430+ If you say Y here, all kernel threads will be hidden to all
57431+ processes but those whose subject has the "view hidden processes"
57432+ flag.
57433+
57434+config GRKERNSEC_ACL_MAXTRIES
57435+ int "Maximum tries before password lockout"
57436+ default 3
57437+ help
57438+ This option enforces the maximum number of times a user can attempt
57439+ to authorize themselves with the grsecurity RBAC system before being
57440+ denied the ability to attempt authorization again for a specified time.
57441+ The lower the number, the harder it will be to brute-force a password.
57442+
57443+config GRKERNSEC_ACL_TIMEOUT
57444+ int "Time to wait after max password tries, in seconds"
57445+ default 30
57446+ help
57447+ This option specifies the time the user must wait after attempting to
57448+ authorize to the RBAC system with the maximum number of invalid
57449+ passwords. The higher the number, the harder it will be to brute-force
57450+ a password.
57451+
57452+endmenu
57453+menu "Filesystem Protections"
57454+depends on GRKERNSEC
57455+
57456+config GRKERNSEC_PROC
57457+ bool "Proc restrictions"
57458+ default y if GRKERNSEC_CONFIG_AUTO
57459+ help
57460+ If you say Y here, the permissions of the /proc filesystem
57461+ will be altered to enhance system security and privacy. You MUST
57462+ choose either a user only restriction or a user and group restriction.
57463+ Depending upon the option you choose, you can either restrict users to
57464+ see only the processes they themselves run, or choose a group that can
57465+ view all processes and files normally restricted to root if you choose
57466+ the "restrict to user only" option. NOTE: If you're running identd or
57467+ ntpd as a non-root user, you will have to run it as the group you
57468+ specify here.
57469+
57470+config GRKERNSEC_PROC_USER
57471+ bool "Restrict /proc to user only"
57472+ depends on GRKERNSEC_PROC
57473+ help
57474+ If you say Y here, non-root users will only be able to view their own
57475+ processes, and restricts them from viewing network-related information,
57476+ and viewing kernel symbol and module information.
57477+
57478+config GRKERNSEC_PROC_USERGROUP
57479+ bool "Allow special group"
57480+ default y if GRKERNSEC_CONFIG_AUTO
57481+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
57482+ help
57483+ If you say Y here, you will be able to select a group that will be
57484+ able to view all processes and network-related information. If you've
57485+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
57486+ remain hidden. This option is useful if you want to run identd as
57487+ a non-root user. The group you select may also be chosen at boot time
57488+ via "grsec_proc_gid=" on the kernel commandline.
57489+
57490+config GRKERNSEC_PROC_GID
57491+ int "GID for special group"
57492+ depends on GRKERNSEC_PROC_USERGROUP
57493+ default 1001
57494+
57495+config GRKERNSEC_PROC_ADD
57496+ bool "Additional restrictions"
57497+ default y if GRKERNSEC_CONFIG_AUTO
57498+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
57499+ help
57500+ If you say Y here, additional restrictions will be placed on
57501+ /proc that keep normal users from viewing device information and
57502+ slabinfo information that could be useful for exploits.
57503+
57504+config GRKERNSEC_LINK
57505+ bool "Linking restrictions"
57506+ default y if GRKERNSEC_CONFIG_AUTO
57507+ help
57508+ If you say Y here, /tmp race exploits will be prevented, since users
57509+ will no longer be able to follow symlinks owned by other users in
57510+ world-writable +t directories (e.g. /tmp), unless the owner of the
57511+ symlink is the owner of the directory. users will also not be
57512+ able to hardlink to files they do not own. If the sysctl option is
57513+ enabled, a sysctl option with name "linking_restrictions" is created.
57514+
57515+config GRKERNSEC_SYMLINKOWN
57516+ bool "Kernel-enforced SymlinksIfOwnerMatch"
57517+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
57518+ help
57519+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
57520+ that prevents it from being used as a security feature. As Apache
57521+ verifies the symlink by performing a stat() against the target of
57522+ the symlink before it is followed, an attacker can setup a symlink
57523+ to point to a same-owned file, then replace the symlink with one
57524+ that targets another user's file just after Apache "validates" the
57525+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
57526+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
57527+ will be in place for the group you specify. If the sysctl option
57528+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
57529+ created.
57530+
57531+config GRKERNSEC_SYMLINKOWN_GID
57532+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
57533+ depends on GRKERNSEC_SYMLINKOWN
57534+ default 1006
57535+ help
57536+ Setting this GID determines what group kernel-enforced
57537+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
57538+ is enabled, a sysctl option with name "symlinkown_gid" is created.
57539+
57540+config GRKERNSEC_FIFO
57541+ bool "FIFO restrictions"
57542+ default y if GRKERNSEC_CONFIG_AUTO
57543+ help
57544+ If you say Y here, users will not be able to write to FIFOs they don't
57545+ own in world-writable +t directories (e.g. /tmp), unless the owner of
57546+ the FIFO is the same owner of the directory it's held in. If the sysctl
57547+ option is enabled, a sysctl option with name "fifo_restrictions" is
57548+ created.
57549+
57550+config GRKERNSEC_SYSFS_RESTRICT
57551+ bool "Sysfs/debugfs restriction"
57552+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
57553+ depends on SYSFS
57554+ help
57555+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
57556+ any filesystem normally mounted under it (e.g. debugfs) will be
57557+ mostly accessible only by root. These filesystems generally provide access
57558+ to hardware and debug information that isn't appropriate for unprivileged
57559+ users of the system. Sysfs and debugfs have also become a large source
57560+ of new vulnerabilities, ranging from infoleaks to local compromise.
57561+ There has been very little oversight with an eye toward security involved
57562+ in adding new exporters of information to these filesystems, so their
57563+ use is discouraged.
57564+ For reasons of compatibility, a few directories have been whitelisted
57565+ for access by non-root users:
57566+ /sys/fs/selinux
57567+ /sys/fs/fuse
57568+ /sys/devices/system/cpu
57569+
57570+config GRKERNSEC_ROFS
57571+ bool "Runtime read-only mount protection"
57572+ help
57573+ If you say Y here, a sysctl option with name "romount_protect" will
57574+ be created. By setting this option to 1 at runtime, filesystems
57575+ will be protected in the following ways:
57576+ * No new writable mounts will be allowed
57577+ * Existing read-only mounts won't be able to be remounted read/write
57578+ * Write operations will be denied on all block devices
57579+ This option acts independently of grsec_lock: once it is set to 1,
57580+ it cannot be turned off. Therefore, please be mindful of the resulting
57581+ behavior if this option is enabled in an init script on a read-only
57582+ filesystem. This feature is mainly intended for secure embedded systems.
57583+
57584+config GRKERNSEC_DEVICE_SIDECHANNEL
57585+ bool "Eliminate stat/notify-based device sidechannels"
57586+ default y if GRKERNSEC_CONFIG_AUTO
57587+ help
57588+ If you say Y here, timing analyses on block or character
57589+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
57590+ will be thwarted for unprivileged users. If a process without
57591+ CAP_MKNOD stats such a device, the last access and last modify times
57592+ will match the device's create time. No access or modify events
57593+ will be triggered through inotify/dnotify/fanotify for such devices.
57594+ This feature will prevent attacks that may at a minimum
57595+ allow an attacker to determine the administrator's password length.
57596+
57597+config GRKERNSEC_CHROOT
57598+ bool "Chroot jail restrictions"
57599+ default y if GRKERNSEC_CONFIG_AUTO
57600+ help
57601+ If you say Y here, you will be able to choose several options that will
57602+ make breaking out of a chrooted jail much more difficult. If you
57603+ encounter no software incompatibilities with the following options, it
57604+ is recommended that you enable each one.
57605+
57606+config GRKERNSEC_CHROOT_MOUNT
57607+ bool "Deny mounts"
57608+ default y if GRKERNSEC_CONFIG_AUTO
57609+ depends on GRKERNSEC_CHROOT
57610+ help
57611+ If you say Y here, processes inside a chroot will not be able to
57612+ mount or remount filesystems. If the sysctl option is enabled, a
57613+ sysctl option with name "chroot_deny_mount" is created.
57614+
57615+config GRKERNSEC_CHROOT_DOUBLE
57616+ bool "Deny double-chroots"
57617+ default y if GRKERNSEC_CONFIG_AUTO
57618+ depends on GRKERNSEC_CHROOT
57619+ help
57620+ If you say Y here, processes inside a chroot will not be able to chroot
57621+ again outside the chroot. This is a widely used method of breaking
57622+ out of a chroot jail and should not be allowed. If the sysctl
57623+ option is enabled, a sysctl option with name
57624+ "chroot_deny_chroot" is created.
57625+
57626+config GRKERNSEC_CHROOT_PIVOT
57627+ bool "Deny pivot_root in chroot"
57628+ default y if GRKERNSEC_CONFIG_AUTO
57629+ depends on GRKERNSEC_CHROOT
57630+ help
57631+ If you say Y here, processes inside a chroot will not be able to use
57632+ a function called pivot_root() that was introduced in Linux 2.3.41. It
57633+ works similar to chroot in that it changes the root filesystem. This
57634+ function could be misused in a chrooted process to attempt to break out
57635+ of the chroot, and therefore should not be allowed. If the sysctl
57636+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
57637+ created.
57638+
57639+config GRKERNSEC_CHROOT_CHDIR
57640+ bool "Enforce chdir(\"/\") on all chroots"
57641+ default y if GRKERNSEC_CONFIG_AUTO
57642+ depends on GRKERNSEC_CHROOT
57643+ help
57644+ If you say Y here, the current working directory of all newly-chrooted
 57645+	  applications will be set to the root directory of the chroot.
57646+ The man page on chroot(2) states:
57647+ Note that this call does not change the current working
57648+ directory, so that `.' can be outside the tree rooted at
57649+ `/'. In particular, the super-user can escape from a
57650+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
57651+
57652+ It is recommended that you say Y here, since it's not known to break
57653+ any software. If the sysctl option is enabled, a sysctl option with
57654+ name "chroot_enforce_chdir" is created.
57655+
57656+config GRKERNSEC_CHROOT_CHMOD
57657+ bool "Deny (f)chmod +s"
57658+ default y if GRKERNSEC_CONFIG_AUTO
57659+ depends on GRKERNSEC_CHROOT
57660+ help
57661+ If you say Y here, processes inside a chroot will not be able to chmod
57662+ or fchmod files to make them have suid or sgid bits. This protects
57663+ against another published method of breaking a chroot. If the sysctl
57664+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
57665+ created.
57666+
57667+config GRKERNSEC_CHROOT_FCHDIR
57668+ bool "Deny fchdir out of chroot"
57669+ default y if GRKERNSEC_CONFIG_AUTO
57670+ depends on GRKERNSEC_CHROOT
57671+ help
57672+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
57673+ to a file descriptor of the chrooting process that points to a directory
57674+ outside the filesystem will be stopped. If the sysctl option
57675+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
57676+
57677+config GRKERNSEC_CHROOT_MKNOD
57678+ bool "Deny mknod"
57679+ default y if GRKERNSEC_CONFIG_AUTO
57680+ depends on GRKERNSEC_CHROOT
57681+ help
57682+ If you say Y here, processes inside a chroot will not be allowed to
57683+ mknod. The problem with using mknod inside a chroot is that it
57684+ would allow an attacker to create a device entry that is the same
57685+ as one on the physical root of your system, which could range from
57686+ anything from the console device to a device for your harddrive (which
57687+ they could then use to wipe the drive or steal data). It is recommended
57688+ that you say Y here, unless you run into software incompatibilities.
57689+ If the sysctl option is enabled, a sysctl option with name
57690+ "chroot_deny_mknod" is created.
57691+
57692+config GRKERNSEC_CHROOT_SHMAT
57693+ bool "Deny shmat() out of chroot"
57694+ default y if GRKERNSEC_CONFIG_AUTO
57695+ depends on GRKERNSEC_CHROOT
57696+ help
57697+ If you say Y here, processes inside a chroot will not be able to attach
57698+ to shared memory segments that were created outside of the chroot jail.
57699+ It is recommended that you say Y here. If the sysctl option is enabled,
57700+ a sysctl option with name "chroot_deny_shmat" is created.
57701+
57702+config GRKERNSEC_CHROOT_UNIX
57703+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
57704+ default y if GRKERNSEC_CONFIG_AUTO
57705+ depends on GRKERNSEC_CHROOT
57706+ help
57707+ If you say Y here, processes inside a chroot will not be able to
57708+ connect to abstract (meaning not belonging to a filesystem) Unix
57709+ domain sockets that were bound outside of a chroot. It is recommended
57710+ that you say Y here. If the sysctl option is enabled, a sysctl option
57711+ with name "chroot_deny_unix" is created.
57712+
57713+config GRKERNSEC_CHROOT_FINDTASK
57714+ bool "Protect outside processes"
57715+ default y if GRKERNSEC_CONFIG_AUTO
57716+ depends on GRKERNSEC_CHROOT
57717+ help
57718+ If you say Y here, processes inside a chroot will not be able to
57719+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
57720+ getsid, or view any process outside of the chroot. If the sysctl
57721+ option is enabled, a sysctl option with name "chroot_findtask" is
57722+ created.
57723+
57724+config GRKERNSEC_CHROOT_NICE
57725+ bool "Restrict priority changes"
57726+ default y if GRKERNSEC_CONFIG_AUTO
57727+ depends on GRKERNSEC_CHROOT
57728+ help
57729+ If you say Y here, processes inside a chroot will not be able to raise
57730+ the priority of processes in the chroot, or alter the priority of
57731+ processes outside the chroot. This provides more security than simply
57732+ removing CAP_SYS_NICE from the process' capability set. If the
57733+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
57734+ is created.
57735+
57736+config GRKERNSEC_CHROOT_SYSCTL
57737+ bool "Deny sysctl writes"
57738+ default y if GRKERNSEC_CONFIG_AUTO
57739+ depends on GRKERNSEC_CHROOT
57740+ help
57741+ If you say Y here, an attacker in a chroot will not be able to
57742+ write to sysctl entries, either by sysctl(2) or through a /proc
57743+ interface. It is strongly recommended that you say Y here. If the
57744+ sysctl option is enabled, a sysctl option with name
57745+ "chroot_deny_sysctl" is created.
57746+
57747+config GRKERNSEC_CHROOT_CAPS
57748+ bool "Capability restrictions"
57749+ default y if GRKERNSEC_CONFIG_AUTO
57750+ depends on GRKERNSEC_CHROOT
57751+ help
57752+ If you say Y here, the capabilities on all processes within a
57753+ chroot jail will be lowered to stop module insertion, raw i/o,
57754+ system and net admin tasks, rebooting the system, modifying immutable
57755+ files, modifying IPC owned by another, and changing the system time.
57756+ This is left an option because it can break some apps. Disable this
57757+ if your chrooted apps are having problems performing those kinds of
57758+ tasks. If the sysctl option is enabled, a sysctl option with
57759+ name "chroot_caps" is created.
57760+
57761+config GRKERNSEC_CHROOT_INITRD
57762+ bool "Exempt initrd tasks from restrictions"
57763+ default y if GRKERNSEC_CONFIG_AUTO
57764+ depends on GRKERNSEC_CHROOT && BLK_DEV_RAM
57765+ help
57766+ If you say Y here, tasks started prior to init will be exempted from
57767+ grsecurity's chroot restrictions. This option is mainly meant to
57768+ resolve Plymouth's performing privileged operations unnecessarily
57769+ in a chroot.
57770+
57771+endmenu
57772+menu "Kernel Auditing"
57773+depends on GRKERNSEC
57774+
57775+config GRKERNSEC_AUDIT_GROUP
57776+ bool "Single group for auditing"
57777+ help
57778+ If you say Y here, the exec and chdir logging features will only operate
57779+ on a group you specify. This option is recommended if you only want to
57780+ watch certain users instead of having a large amount of logs from the
57781+ entire system. If the sysctl option is enabled, a sysctl option with
57782+ name "audit_group" is created.
57783+
57784+config GRKERNSEC_AUDIT_GID
57785+ int "GID for auditing"
57786+ depends on GRKERNSEC_AUDIT_GROUP
57787+ default 1007
57788+
57789+config GRKERNSEC_EXECLOG
57790+ bool "Exec logging"
57791+ help
57792+ If you say Y here, all execve() calls will be logged (since the
57793+ other exec*() calls are frontends to execve(), all execution
57794+ will be logged). Useful for shell-servers that like to keep track
57795+ of their users. If the sysctl option is enabled, a sysctl option with
57796+ name "exec_logging" is created.
57797+ WARNING: This option when enabled will produce a LOT of logs, especially
57798+ on an active system.
57799+
57800+config GRKERNSEC_RESLOG
57801+ bool "Resource logging"
57802+ default y if GRKERNSEC_CONFIG_AUTO
57803+ help
57804+ If you say Y here, all attempts to overstep resource limits will
57805+ be logged with the resource name, the requested size, and the current
57806+ limit. It is highly recommended that you say Y here. If the sysctl
57807+ option is enabled, a sysctl option with name "resource_logging" is
57808+ created. If the RBAC system is enabled, the sysctl value is ignored.
57809+
57810+config GRKERNSEC_CHROOT_EXECLOG
57811+ bool "Log execs within chroot"
57812+ help
57813+ If you say Y here, all executions inside a chroot jail will be logged
57814+ to syslog. This can cause a large amount of logs if certain
57815+ applications (eg. djb's daemontools) are installed on the system, and
57816+ is therefore left as an option. If the sysctl option is enabled, a
57817+ sysctl option with name "chroot_execlog" is created.
57818+
57819+config GRKERNSEC_AUDIT_PTRACE
57820+ bool "Ptrace logging"
57821+ help
57822+ If you say Y here, all attempts to attach to a process via ptrace
57823+ will be logged. If the sysctl option is enabled, a sysctl option
57824+ with name "audit_ptrace" is created.
57825+
57826+config GRKERNSEC_AUDIT_CHDIR
57827+ bool "Chdir logging"
57828+ help
57829+ If you say Y here, all chdir() calls will be logged. If the sysctl
57830+ option is enabled, a sysctl option with name "audit_chdir" is created.
57831+
57832+config GRKERNSEC_AUDIT_MOUNT
57833+ bool "(Un)Mount logging"
57834+ help
57835+ If you say Y here, all mounts and unmounts will be logged. If the
57836+ sysctl option is enabled, a sysctl option with name "audit_mount" is
57837+ created.
57838+
57839+config GRKERNSEC_SIGNAL
57840+ bool "Signal logging"
57841+ default y if GRKERNSEC_CONFIG_AUTO
57842+ help
57843+ If you say Y here, certain important signals will be logged, such as
 57844+	  SIGSEGV, which will as a result inform you of when an error in a program
57845+ occurred, which in some cases could mean a possible exploit attempt.
57846+ If the sysctl option is enabled, a sysctl option with name
57847+ "signal_logging" is created.
57848+
57849+config GRKERNSEC_FORKFAIL
57850+ bool "Fork failure logging"
57851+ help
57852+ If you say Y here, all failed fork() attempts will be logged.
57853+ This could suggest a fork bomb, or someone attempting to overstep
57854+ their process limit. If the sysctl option is enabled, a sysctl option
57855+ with name "forkfail_logging" is created.
57856+
57857+config GRKERNSEC_TIME
57858+ bool "Time change logging"
57859+ default y if GRKERNSEC_CONFIG_AUTO
57860+ help
57861+ If you say Y here, any changes of the system clock will be logged.
57862+ If the sysctl option is enabled, a sysctl option with name
57863+ "timechange_logging" is created.
57864+
57865+config GRKERNSEC_PROC_IPADDR
57866+ bool "/proc/<pid>/ipaddr support"
57867+ default y if GRKERNSEC_CONFIG_AUTO
57868+ help
57869+ If you say Y here, a new entry will be added to each /proc/<pid>
57870+ directory that contains the IP address of the person using the task.
57871+ The IP is carried across local TCP and AF_UNIX stream sockets.
57872+ This information can be useful for IDS/IPSes to perform remote response
57873+ to a local attack. The entry is readable by only the owner of the
57874+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
57875+ the RBAC system), and thus does not create privacy concerns.
57876+
57877+config GRKERNSEC_RWXMAP_LOG
57878+ bool 'Denied RWX mmap/mprotect logging'
57879+ default y if GRKERNSEC_CONFIG_AUTO
57880+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
57881+ help
57882+ If you say Y here, calls to mmap() and mprotect() with explicit
57883+ usage of PROT_WRITE and PROT_EXEC together will be logged when
57884+ denied by the PAX_MPROTECT feature. If the sysctl option is
57885+ enabled, a sysctl option with name "rwxmap_logging" is created.
57886+
57887+config GRKERNSEC_AUDIT_TEXTREL
57888+ bool 'ELF text relocations logging (READ HELP)'
57889+ depends on PAX_MPROTECT
57890+ help
57891+ If you say Y here, text relocations will be logged with the filename
57892+ of the offending library or binary. The purpose of the feature is
57893+ to help Linux distribution developers get rid of libraries and
57894+ binaries that need text relocations which hinder the future progress
57895+ of PaX. Only Linux distribution developers should say Y here, and
57896+ never on a production machine, as this option creates an information
57897+ leak that could aid an attacker in defeating the randomization of
57898+ a single memory region. If the sysctl option is enabled, a sysctl
57899+ option with name "audit_textrel" is created.
57900+
57901+endmenu
57902+
57903+menu "Executable Protections"
57904+depends on GRKERNSEC
57905+
57906+config GRKERNSEC_DMESG
57907+ bool "Dmesg(8) restriction"
57908+ default y if GRKERNSEC_CONFIG_AUTO
57909+ help
57910+ If you say Y here, non-root users will not be able to use dmesg(8)
57911+ to view the contents of the kernel's circular log buffer.
57912+ The kernel's log buffer often contains kernel addresses and other
57913+ identifying information useful to an attacker in fingerprinting a
57914+ system for a targeted exploit.
57915+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
57916+ created.
57917+
57918+config GRKERNSEC_HARDEN_PTRACE
57919+ bool "Deter ptrace-based process snooping"
57920+ default y if GRKERNSEC_CONFIG_AUTO
57921+ help
57922+ If you say Y here, TTY sniffers and other malicious monitoring
57923+ programs implemented through ptrace will be defeated. If you
57924+ have been using the RBAC system, this option has already been
57925+ enabled for several years for all users, with the ability to make
57926+ fine-grained exceptions.
57927+
57928+ This option only affects the ability of non-root users to ptrace
 57929+	  processes that are not a descendant of the ptracing process.
57930+ This means that strace ./binary and gdb ./binary will still work,
57931+ but attaching to arbitrary processes will not. If the sysctl
57932+ option is enabled, a sysctl option with name "harden_ptrace" is
57933+ created.
57934+
57935+config GRKERNSEC_PTRACE_READEXEC
57936+ bool "Require read access to ptrace sensitive binaries"
57937+ default y if GRKERNSEC_CONFIG_AUTO
57938+ help
57939+ If you say Y here, unprivileged users will not be able to ptrace unreadable
57940+ binaries. This option is useful in environments that
57941+ remove the read bits (e.g. file mode 4711) from suid binaries to
57942+ prevent infoleaking of their contents. This option adds
57943+ consistency to the use of that file mode, as the binary could normally
57944+ be read out when run without privileges while ptracing.
57945+
57946+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
57947+ is created.
57948+
57949+config GRKERNSEC_SETXID
57950+ bool "Enforce consistent multithreaded privileges"
57951+ default y if GRKERNSEC_CONFIG_AUTO
57952+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
57953+ help
57954+ If you say Y here, a change from a root uid to a non-root uid
57955+ in a multithreaded application will cause the resulting uids,
57956+ gids, supplementary groups, and capabilities in that thread
57957+ to be propagated to the other threads of the process. In most
57958+ cases this is unnecessary, as glibc will emulate this behavior
57959+ on behalf of the application. Other libcs do not act in the
57960+ same way, allowing the other threads of the process to continue
57961+ running with root privileges. If the sysctl option is enabled,
57962+ a sysctl option with name "consistent_setxid" is created.
57963+
57964+config GRKERNSEC_TPE
57965+ bool "Trusted Path Execution (TPE)"
57966+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
57967+ help
57968+ If you say Y here, you will be able to choose a gid to add to the
57969+ supplementary groups of users you want to mark as "untrusted."
57970+ These users will not be able to execute any files that are not in
57971+ root-owned directories writable only by root. If the sysctl option
57972+ is enabled, a sysctl option with name "tpe" is created.
57973+
57974+config GRKERNSEC_TPE_ALL
57975+ bool "Partially restrict all non-root users"
57976+ depends on GRKERNSEC_TPE
57977+ help
57978+ If you say Y here, all non-root users will be covered under
57979+ a weaker TPE restriction. This is separate from, and in addition to,
57980+ the main TPE options that you have selected elsewhere. Thus, if a
57981+ "trusted" GID is chosen, this restriction applies to even that GID.
57982+ Under this restriction, all non-root users will only be allowed to
57983+ execute files in directories they own that are not group or
57984+ world-writable, or in directories owned by root and writable only by
57985+ root. If the sysctl option is enabled, a sysctl option with name
57986+ "tpe_restrict_all" is created.
57987+
57988+config GRKERNSEC_TPE_INVERT
57989+ bool "Invert GID option"
57990+ depends on GRKERNSEC_TPE
57991+ help
57992+ If you say Y here, the group you specify in the TPE configuration will
57993+ decide what group TPE restrictions will be *disabled* for. This
57994+ option is useful if you want TPE restrictions to be applied to most
57995+ users on the system. If the sysctl option is enabled, a sysctl option
57996+ with name "tpe_invert" is created. Unlike other sysctl options, this
57997+ entry will default to on for backward-compatibility.
57998+
57999+config GRKERNSEC_TPE_GID
58000+ int
58001+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
58002+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
58003+
58004+config GRKERNSEC_TPE_UNTRUSTED_GID
58005+ int "GID for TPE-untrusted users"
58006+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
58007+ default 1005
58008+ help
58009+ Setting this GID determines what group TPE restrictions will be
58010+ *enabled* for. If the sysctl option is enabled, a sysctl option
58011+ with name "tpe_gid" is created.
58012+
58013+config GRKERNSEC_TPE_TRUSTED_GID
58014+ int "GID for TPE-trusted users"
58015+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
58016+ default 1005
58017+ help
58018+ Setting this GID determines what group TPE restrictions will be
58019+ *disabled* for. If the sysctl option is enabled, a sysctl option
58020+ with name "tpe_gid" is created.
58021+
58022+endmenu
58023+menu "Network Protections"
58024+depends on GRKERNSEC
58025+
58026+config GRKERNSEC_RANDNET
58027+ bool "Larger entropy pools"
58028+ default y if GRKERNSEC_CONFIG_AUTO
58029+ help
58030+ If you say Y here, the entropy pools used for many features of Linux
58031+ and grsecurity will be doubled in size. Since several grsecurity
58032+ features use additional randomness, it is recommended that you say Y
58033+ here. Saying Y here has a similar effect as modifying
58034+ /proc/sys/kernel/random/poolsize.
58035+
58036+config GRKERNSEC_BLACKHOLE
58037+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
58038+ default y if GRKERNSEC_CONFIG_AUTO
58039+ depends on NET
58040+ help
58041+ If you say Y here, neither TCP resets nor ICMP
58042+ destination-unreachable packets will be sent in response to packets
58043+ sent to ports for which no associated listening process exists.
58044+ This feature supports both IPV4 and IPV6 and exempts the
58045+ loopback interface from blackholing. Enabling this feature
58046+ makes a host more resilient to DoS attacks and reduces network
58047+ visibility against scanners.
58048+
58049+ The blackhole feature as-implemented is equivalent to the FreeBSD
58050+ blackhole feature, as it prevents RST responses to all packets, not
58051+ just SYNs. Under most application behavior this causes no
58052+ problems, but applications (like haproxy) may not close certain
58053+ connections in a way that cleanly terminates them on the remote
58054+ end, leaving the remote host in LAST_ACK state. Because of this
58055+ side-effect and to prevent intentional LAST_ACK DoSes, this
58056+ feature also adds automatic mitigation against such attacks.
58057+ The mitigation drastically reduces the amount of time a socket
58058+ can spend in LAST_ACK state. If you're using haproxy and not
58059+ all servers it connects to have this option enabled, consider
58060+ disabling this feature on the haproxy host.
58061+
58062+ If the sysctl option is enabled, two sysctl options with names
58063+ "ip_blackhole" and "lastack_retries" will be created.
58064+ While "ip_blackhole" takes the standard zero/non-zero on/off
58065+ toggle, "lastack_retries" uses the same kinds of values as
58066+ "tcp_retries1" and "tcp_retries2". The default value of 4
58067+ prevents a socket from lasting more than 45 seconds in LAST_ACK
58068+ state.
58069+
58070+config GRKERNSEC_NO_SIMULT_CONNECT
58071+ bool "Disable TCP Simultaneous Connect"
58072+ default y if GRKERNSEC_CONFIG_AUTO
58073+ depends on NET
58074+ help
58075+ If you say Y here, a feature by Willy Tarreau will be enabled that
58076+ removes a weakness in Linux's strict implementation of TCP that
58077+ allows two clients to connect to each other without either entering
58078+ a listening state. The weakness allows an attacker to easily prevent
58079+ a client from connecting to a known server provided the source port
58080+ for the connection is guessed correctly.
58081+
58082+ As the weakness could be used to prevent an antivirus or IPS from
58083+ fetching updates, or prevent an SSL gateway from fetching a CRL,
58084+ it should be eliminated by enabling this option. Though Linux is
58085+ one of few operating systems supporting simultaneous connect, it
58086+ has no legitimate use in practice and is rarely supported by firewalls.
58087+
58088+config GRKERNSEC_SOCKET
58089+ bool "Socket restrictions"
58090+ depends on NET
58091+ help
58092+ If you say Y here, you will be able to choose from several options.
58093+ If you assign a GID on your system and add it to the supplementary
58094+ groups of users you want to restrict socket access to, this patch
58095+ will perform up to three things, based on the option(s) you choose.
58096+
58097+config GRKERNSEC_SOCKET_ALL
58098+ bool "Deny any sockets to group"
58099+ depends on GRKERNSEC_SOCKET
58100+ help
58101+ If you say Y here, you will be able to choose a GID of whose users will
58102+ be unable to connect to other hosts from your machine or run server
58103+ applications from your machine. If the sysctl option is enabled, a
58104+ sysctl option with name "socket_all" is created.
58105+
58106+config GRKERNSEC_SOCKET_ALL_GID
58107+ int "GID to deny all sockets for"
58108+ depends on GRKERNSEC_SOCKET_ALL
58109+ default 1004
58110+ help
58111+ Here you can choose the GID to disable socket access for. Remember to
58112+ add the users you want socket access disabled for to the GID
58113+ specified here. If the sysctl option is enabled, a sysctl option
58114+ with name "socket_all_gid" is created.
58115+
58116+config GRKERNSEC_SOCKET_CLIENT
58117+ bool "Deny client sockets to group"
58118+ depends on GRKERNSEC_SOCKET
58119+ help
58120+ If you say Y here, you will be able to choose a GID of whose users will
58121+ be unable to connect to other hosts from your machine, but will be
58122+ able to run servers. If this option is enabled, all users in the group
58123+ you specify will have to use passive mode when initiating ftp transfers
58124+ from the shell on your machine. If the sysctl option is enabled, a
58125+ sysctl option with name "socket_client" is created.
58126+
58127+config GRKERNSEC_SOCKET_CLIENT_GID
58128+ int "GID to deny client sockets for"
58129+ depends on GRKERNSEC_SOCKET_CLIENT
58130+ default 1003
58131+ help
58132+ Here you can choose the GID to disable client socket access for.
58133+ Remember to add the users you want client socket access disabled for to
58134+ the GID specified here. If the sysctl option is enabled, a sysctl
58135+ option with name "socket_client_gid" is created.
58136+
58137+config GRKERNSEC_SOCKET_SERVER
58138+ bool "Deny server sockets to group"
58139+ depends on GRKERNSEC_SOCKET
58140+ help
58141+ If you say Y here, you will be able to choose a GID of whose users will
58142+ be unable to run server applications from your machine. If the sysctl
58143+ option is enabled, a sysctl option with name "socket_server" is created.
58144+
58145+config GRKERNSEC_SOCKET_SERVER_GID
58146+ int "GID to deny server sockets for"
58147+ depends on GRKERNSEC_SOCKET_SERVER
58148+ default 1002
58149+ help
58150+ Here you can choose the GID to disable server socket access for.
58151+ Remember to add the users you want server socket access disabled for to
58152+ the GID specified here. If the sysctl option is enabled, a sysctl
58153+ option with name "socket_server_gid" is created.
58154+
58155+endmenu
58156+menu "Sysctl Support"
58157+depends on GRKERNSEC && SYSCTL
58158+
58159+config GRKERNSEC_SYSCTL
58160+ bool "Sysctl support"
58161+ default y if GRKERNSEC_CONFIG_AUTO
58162+ help
58163+ If you say Y here, you will be able to change the options that
58164+ grsecurity runs with at bootup, without having to recompile your
58165+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
58166+ to enable (1) or disable (0) various features. All the sysctl entries
58167+ are mutable until the "grsec_lock" entry is set to a non-zero value.
58168+ All features enabled in the kernel configuration are disabled at boot
58169+ if you do not say Y to the "Turn on features by default" option.
58170+ All options should be set at startup, and the grsec_lock entry should
58171+ be set to a non-zero value after all the options are set.
58172+ *THIS IS EXTREMELY IMPORTANT*
58173+
58174+config GRKERNSEC_SYSCTL_DISTRO
58175+ bool "Extra sysctl support for distro makers (READ HELP)"
58176+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
58177+ help
58178+ If you say Y here, additional sysctl options will be created
58179+ for features that affect processes running as root. Therefore,
58180+ it is critical when using this option that the grsec_lock entry be
58181+ enabled after boot. Only distros with prebuilt kernel packages
58182+ with this option enabled that can ensure grsec_lock is enabled
58183+ after boot should use this option.
58184+ *Failure to set grsec_lock after boot makes all grsec features
58185+ this option covers useless*
58186+
58187+ Currently this option creates the following sysctl entries:
58188+ "Disable Privileged I/O": "disable_priv_io"
58189+
58190+config GRKERNSEC_SYSCTL_ON
58191+ bool "Turn on features by default"
58192+ default y if GRKERNSEC_CONFIG_AUTO
58193+ depends on GRKERNSEC_SYSCTL
58194+ help
58195+ If you say Y here, instead of having all features enabled in the
58196+ kernel configuration disabled at boot time, the features will be
58197+ enabled at boot time. It is recommended you say Y here unless
58198+ there is some reason you would want all sysctl-tunable features to
58199+ be disabled by default. As mentioned elsewhere, it is important
58200+ to enable the grsec_lock entry once you have finished modifying
58201+ the sysctl entries.
58202+
58203+endmenu
58204+menu "Logging Options"
58205+depends on GRKERNSEC
58206+
58207+config GRKERNSEC_FLOODTIME
58208+ int "Seconds in between log messages (minimum)"
58209+ default 10
58210+ help
58211+ This option allows you to enforce the number of seconds between
58212+ grsecurity log messages. The default should be suitable for most
58213+ people, however, if you choose to change it, choose a value small enough
58214+ to allow informative logs to be produced, but large enough to
58215+ prevent flooding.
58216+
58217+config GRKERNSEC_FLOODBURST
58218+ int "Number of messages in a burst (maximum)"
58219+ default 6
58220+ help
58221+ This option allows you to choose the maximum number of messages allowed
58222+ within the flood time interval you chose in a separate option. The
58223+ default should be suitable for most people, however if you find that
58224+ many of your logs are being interpreted as flooding, you may want to
58225+ raise this value.
58226+
58227+endmenu
58228diff --git a/grsecurity/Makefile b/grsecurity/Makefile
58229new file mode 100644
58230index 0000000..1b9afa9
58231--- /dev/null
58232+++ b/grsecurity/Makefile
58233@@ -0,0 +1,38 @@
58234+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
58235+# during 2001-2009 it has been completely redesigned by Brad Spengler
58236+# into an RBAC system
58237+#
58238+# All code in this directory and various hooks inserted throughout the kernel
58239+# are copyright Brad Spengler - Open Source Security, Inc., and released
58240+# under the GPL v2 or higher
58241+
58242+KBUILD_CFLAGS += -Werror
58243+
58244+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
58245+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
58246+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
58247+
58248+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
58249+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
58250+ gracl_learn.o grsec_log.o
58251+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
58252+
58253+ifdef CONFIG_NET
58254+obj-y += grsec_sock.o
58255+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
58256+endif
58257+
58258+ifndef CONFIG_GRKERNSEC
58259+obj-y += grsec_disabled.o
58260+endif
58261+
58262+ifdef CONFIG_GRKERNSEC_HIDESYM
58263+extra-y := grsec_hidesym.o
58264+$(obj)/grsec_hidesym.o:
58265+ @-chmod -f 500 /boot
58266+ @-chmod -f 500 /lib/modules
58267+ @-chmod -f 500 /lib64/modules
58268+ @-chmod -f 500 /lib32/modules
58269+ @-chmod -f 700 .
58270+ @echo ' grsec: protected kernel image paths'
58271+endif
58272diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
58273new file mode 100644
58274index 0000000..0d5c602
58275--- /dev/null
58276+++ b/grsecurity/gracl.c
58277@@ -0,0 +1,4073 @@
58278+#include <linux/kernel.h>
58279+#include <linux/module.h>
58280+#include <linux/sched.h>
58281+#include <linux/mm.h>
58282+#include <linux/file.h>
58283+#include <linux/fs.h>
58284+#include <linux/namei.h>
58285+#include <linux/mount.h>
58286+#include <linux/tty.h>
58287+#include <linux/proc_fs.h>
58288+#include <linux/lglock.h>
58289+#include <linux/slab.h>
58290+#include <linux/vmalloc.h>
58291+#include <linux/types.h>
58292+#include <linux/sysctl.h>
58293+#include <linux/netdevice.h>
58294+#include <linux/ptrace.h>
58295+#include <linux/gracl.h>
58296+#include <linux/gralloc.h>
58297+#include <linux/security.h>
58298+#include <linux/grinternal.h>
58299+#include <linux/pid_namespace.h>
58300+#include <linux/stop_machine.h>
58301+#include <linux/fdtable.h>
58302+#include <linux/percpu.h>
58303+#include <linux/lglock.h>
58304+#include <linux/hugetlb.h>
58305+#include <linux/posix-timers.h>
58306+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
58307+#include <linux/magic.h>
58308+#include <linux/pagemap.h>
58309+#include "../fs/btrfs/async-thread.h"
58310+#include "../fs/btrfs/ctree.h"
58311+#include "../fs/btrfs/btrfs_inode.h"
58312+#endif
58313+#include "../fs/mount.h"
58314+
58315+#include <asm/uaccess.h>
58316+#include <asm/errno.h>
58317+#include <asm/mman.h>
58318+
58319+extern struct lglock vfsmount_lock;
58320+
58321+static struct acl_role_db acl_role_set;
58322+static struct name_db name_set;
58323+static struct inodev_db inodev_set;
58324+
58325+/* for keeping track of userspace pointers used for subjects, so we
58326+ can share references in the kernel as well
58327+*/
58328+
58329+static struct path real_root;
58330+
58331+static struct acl_subj_map_db subj_map_set;
58332+
58333+static struct acl_role_label *default_role;
58334+
58335+static struct acl_role_label *role_list;
58336+
58337+static u16 acl_sp_role_value;
58338+
58339+extern char *gr_shared_page[4];
58340+static DEFINE_MUTEX(gr_dev_mutex);
58341+DEFINE_RWLOCK(gr_inode_lock);
58342+
58343+struct gr_arg *gr_usermode;
58344+
58345+static unsigned int gr_status __read_only = GR_STATUS_INIT;
58346+
58347+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
58348+extern void gr_clear_learn_entries(void);
58349+
58350+unsigned char *gr_system_salt;
58351+unsigned char *gr_system_sum;
58352+
58353+static struct sprole_pw **acl_special_roles = NULL;
58354+static __u16 num_sprole_pws = 0;
58355+
58356+static struct acl_role_label *kernel_role = NULL;
58357+
58358+static unsigned int gr_auth_attempts = 0;
58359+static unsigned long gr_auth_expires = 0UL;
58360+
58361+#ifdef CONFIG_NET
58362+extern struct vfsmount *sock_mnt;
58363+#endif
58364+
58365+extern struct vfsmount *pipe_mnt;
58366+extern struct vfsmount *shm_mnt;
58367+
58368+#ifdef CONFIG_HUGETLBFS
58369+extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
58370+#endif
58371+
58372+static struct acl_object_label *fakefs_obj_rw;
58373+static struct acl_object_label *fakefs_obj_rwx;
58374+
58375+extern int gr_init_uidset(void);
58376+extern void gr_free_uidset(void);
58377+extern void gr_remove_uid(uid_t uid);
58378+extern int gr_find_uid(uid_t uid);
58379+
58380+__inline__ int
58381+gr_acl_is_enabled(void)
58382+{
58383+ return (gr_status & GR_READY);
58384+}
58385+
58386+static inline dev_t __get_dev(const struct dentry *dentry)
58387+{
58388+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
58389+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
58390+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
58391+ else
58392+#endif
58393+ return dentry->d_sb->s_dev;
58394+}
58395+
58396+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
58397+{
58398+ return __get_dev(dentry);
58399+}
58400+
58401+static char gr_task_roletype_to_char(struct task_struct *task)
58402+{
58403+ switch (task->role->roletype &
58404+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
58405+ GR_ROLE_SPECIAL)) {
58406+ case GR_ROLE_DEFAULT:
58407+ return 'D';
58408+ case GR_ROLE_USER:
58409+ return 'U';
58410+ case GR_ROLE_GROUP:
58411+ return 'G';
58412+ case GR_ROLE_SPECIAL:
58413+ return 'S';
58414+ }
58415+
58416+ return 'X';
58417+}
58418+
58419+char gr_roletype_to_char(void)
58420+{
58421+ return gr_task_roletype_to_char(current);
58422+}
58423+
58424+__inline__ int
58425+gr_acl_tpe_check(void)
58426+{
58427+ if (unlikely(!(gr_status & GR_READY)))
58428+ return 0;
58429+ if (current->role->roletype & GR_ROLE_TPE)
58430+ return 1;
58431+ else
58432+ return 0;
58433+}
58434+
58435+int
58436+gr_handle_rawio(const struct inode *inode)
58437+{
58438+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
58439+ if (inode && S_ISBLK(inode->i_mode) &&
58440+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
58441+ !capable(CAP_SYS_RAWIO))
58442+ return 1;
58443+#endif
58444+ return 0;
58445+}
58446+
58447+static int
58448+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
58449+{
58450+ if (likely(lena != lenb))
58451+ return 0;
58452+
58453+ return !memcmp(a, b, lena);
58454+}
58455+
58456+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
58457+{
58458+ *buflen -= namelen;
58459+ if (*buflen < 0)
58460+ return -ENAMETOOLONG;
58461+ *buffer -= namelen;
58462+ memcpy(*buffer, str, namelen);
58463+ return 0;
58464+}
58465+
58466+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
58467+{
58468+ return prepend(buffer, buflen, name->name, name->len);
58469+}
58470+
58471+static int prepend_path(const struct path *path, struct path *root,
58472+ char **buffer, int *buflen)
58473+{
58474+ struct dentry *dentry = path->dentry;
58475+ struct vfsmount *vfsmnt = path->mnt;
58476+ struct mount *mnt = real_mount(vfsmnt);
58477+ bool slash = false;
58478+ int error = 0;
58479+
58480+ while (dentry != root->dentry || vfsmnt != root->mnt) {
58481+ struct dentry * parent;
58482+
58483+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
58484+ /* Global root? */
58485+ if (!mnt_has_parent(mnt)) {
58486+ goto out;
58487+ }
58488+ dentry = mnt->mnt_mountpoint;
58489+ mnt = mnt->mnt_parent;
58490+ vfsmnt = &mnt->mnt;
58491+ continue;
58492+ }
58493+ parent = dentry->d_parent;
58494+ prefetch(parent);
58495+ spin_lock(&dentry->d_lock);
58496+ error = prepend_name(buffer, buflen, &dentry->d_name);
58497+ spin_unlock(&dentry->d_lock);
58498+ if (!error)
58499+ error = prepend(buffer, buflen, "/", 1);
58500+ if (error)
58501+ break;
58502+
58503+ slash = true;
58504+ dentry = parent;
58505+ }
58506+
58507+out:
58508+ if (!error && !slash)
58509+ error = prepend(buffer, buflen, "/", 1);
58510+
58511+ return error;
58512+}
58513+
58514+/* this must be called with vfsmount_lock and rename_lock held */
58515+
58516+static char *__our_d_path(const struct path *path, struct path *root,
58517+ char *buf, int buflen)
58518+{
58519+ char *res = buf + buflen;
58520+ int error;
58521+
58522+ prepend(&res, &buflen, "\0", 1);
58523+ error = prepend_path(path, root, &res, &buflen);
58524+ if (error)
58525+ return ERR_PTR(error);
58526+
58527+ return res;
58528+}
58529+
58530+static char *
58531+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
58532+{
58533+ char *retval;
58534+
58535+ retval = __our_d_path(path, root, buf, buflen);
58536+ if (unlikely(IS_ERR(retval)))
58537+ retval = strcpy(buf, "<path too long>");
58538+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
58539+ retval[1] = '\0';
58540+
58541+ return retval;
58542+}
58543+
58544+static char *
58545+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
58546+ char *buf, int buflen)
58547+{
58548+ struct path path;
58549+ char *res;
58550+
58551+ path.dentry = (struct dentry *)dentry;
58552+ path.mnt = (struct vfsmount *)vfsmnt;
58553+
58554+ /* we can use real_root.dentry, real_root.mnt, because this is only called
58555+ by the RBAC system */
58556+ res = gen_full_path(&path, &real_root, buf, buflen);
58557+
58558+ return res;
58559+}
58560+
58561+static char *
58562+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
58563+ char *buf, int buflen)
58564+{
58565+ char *res;
58566+ struct path path;
58567+ struct path root;
58568+ struct task_struct *reaper = init_pid_ns.child_reaper;
58569+
58570+ path.dentry = (struct dentry *)dentry;
58571+ path.mnt = (struct vfsmount *)vfsmnt;
58572+
58573+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
58574+ get_fs_root(reaper->fs, &root);
58575+
58576+ br_read_lock(&vfsmount_lock);
58577+ write_seqlock(&rename_lock);
58578+ res = gen_full_path(&path, &root, buf, buflen);
58579+ write_sequnlock(&rename_lock);
58580+ br_read_unlock(&vfsmount_lock);
58581+
58582+ path_put(&root);
58583+ return res;
58584+}
58585+
58586+static char *
58587+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
58588+{
58589+ char *ret;
58590+ br_read_lock(&vfsmount_lock);
58591+ write_seqlock(&rename_lock);
58592+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
58593+ PAGE_SIZE);
58594+ write_sequnlock(&rename_lock);
58595+ br_read_unlock(&vfsmount_lock);
58596+ return ret;
58597+}
58598+
58599+static char *
58600+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
58601+{
58602+ char *ret;
58603+ char *buf;
58604+ int buflen;
58605+
58606+ br_read_lock(&vfsmount_lock);
58607+ write_seqlock(&rename_lock);
58608+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
58609+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
58610+ buflen = (int)(ret - buf);
58611+ if (buflen >= 5)
58612+ prepend(&ret, &buflen, "/proc", 5);
58613+ else
58614+ ret = strcpy(buf, "<path too long>");
58615+ write_sequnlock(&rename_lock);
58616+ br_read_unlock(&vfsmount_lock);
58617+ return ret;
58618+}
58619+
58620+char *
58621+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
58622+{
58623+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
58624+ PAGE_SIZE);
58625+}
58626+
58627+char *
58628+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
58629+{
58630+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
58631+ PAGE_SIZE);
58632+}
58633+
58634+char *
58635+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
58636+{
58637+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
58638+ PAGE_SIZE);
58639+}
58640+
58641+char *
58642+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
58643+{
58644+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
58645+ PAGE_SIZE);
58646+}
58647+
58648+char *
58649+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
58650+{
58651+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
58652+ PAGE_SIZE);
58653+}
58654+
58655+__inline__ __u32
58656+to_gr_audit(const __u32 reqmode)
58657+{
58658+ /* masks off auditable permission flags, then shifts them to create
58659+ auditing flags, and adds the special case of append auditing if
58660+ we're requesting write */
58661+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
58662+}
58663+
58664+struct acl_subject_label *
58665+lookup_subject_map(const struct acl_subject_label *userp)
58666+{
58667+ unsigned int index = gr_shash(userp, subj_map_set.s_size);
58668+ struct subject_map *match;
58669+
58670+ match = subj_map_set.s_hash[index];
58671+
58672+ while (match && match->user != userp)
58673+ match = match->next;
58674+
58675+ if (match != NULL)
58676+ return match->kernel;
58677+ else
58678+ return NULL;
58679+}
58680+
58681+static void
58682+insert_subj_map_entry(struct subject_map *subjmap)
58683+{
58684+ unsigned int index = gr_shash(subjmap->user, subj_map_set.s_size);
58685+ struct subject_map **curr;
58686+
58687+ subjmap->prev = NULL;
58688+
58689+ curr = &subj_map_set.s_hash[index];
58690+ if (*curr != NULL)
58691+ (*curr)->prev = subjmap;
58692+
58693+ subjmap->next = *curr;
58694+ *curr = subjmap;
58695+
58696+ return;
58697+}
58698+
58699+static struct acl_role_label *
58700+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
58701+ const gid_t gid)
58702+{
58703+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
58704+ struct acl_role_label *match;
58705+ struct role_allowed_ip *ipp;
58706+ unsigned int x;
58707+ u32 curr_ip = task->signal->curr_ip;
58708+
58709+ task->signal->saved_ip = curr_ip;
58710+
58711+ match = acl_role_set.r_hash[index];
58712+
58713+ while (match) {
58714+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
58715+ for (x = 0; x < match->domain_child_num; x++) {
58716+ if (match->domain_children[x] == uid)
58717+ goto found;
58718+ }
58719+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
58720+ break;
58721+ match = match->next;
58722+ }
58723+found:
58724+ if (match == NULL) {
58725+ try_group:
58726+ index = gr_rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
58727+ match = acl_role_set.r_hash[index];
58728+
58729+ while (match) {
58730+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
58731+ for (x = 0; x < match->domain_child_num; x++) {
58732+ if (match->domain_children[x] == gid)
58733+ goto found2;
58734+ }
58735+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
58736+ break;
58737+ match = match->next;
58738+ }
58739+found2:
58740+ if (match == NULL)
58741+ match = default_role;
58742+ if (match->allowed_ips == NULL)
58743+ return match;
58744+ else {
58745+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
58746+ if (likely
58747+ ((ntohl(curr_ip) & ipp->netmask) ==
58748+ (ntohl(ipp->addr) & ipp->netmask)))
58749+ return match;
58750+ }
58751+ match = default_role;
58752+ }
58753+ } else if (match->allowed_ips == NULL) {
58754+ return match;
58755+ } else {
58756+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
58757+ if (likely
58758+ ((ntohl(curr_ip) & ipp->netmask) ==
58759+ (ntohl(ipp->addr) & ipp->netmask)))
58760+ return match;
58761+ }
58762+ goto try_group;
58763+ }
58764+
58765+ return match;
58766+}
58767+
58768+struct acl_subject_label *
58769+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
58770+ const struct acl_role_label *role)
58771+{
58772+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
58773+ struct acl_subject_label *match;
58774+
58775+ match = role->subj_hash[index];
58776+
58777+ while (match && (match->inode != ino || match->device != dev ||
58778+ (match->mode & GR_DELETED))) {
58779+ match = match->next;
58780+ }
58781+
58782+ if (match && !(match->mode & GR_DELETED))
58783+ return match;
58784+ else
58785+ return NULL;
58786+}
58787+
58788+struct acl_subject_label *
58789+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
58790+ const struct acl_role_label *role)
58791+{
58792+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
58793+ struct acl_subject_label *match;
58794+
58795+ match = role->subj_hash[index];
58796+
58797+ while (match && (match->inode != ino || match->device != dev ||
58798+ !(match->mode & GR_DELETED))) {
58799+ match = match->next;
58800+ }
58801+
58802+ if (match && (match->mode & GR_DELETED))
58803+ return match;
58804+ else
58805+ return NULL;
58806+}
58807+
58808+static struct acl_object_label *
58809+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
58810+ const struct acl_subject_label *subj)
58811+{
58812+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
58813+ struct acl_object_label *match;
58814+
58815+ match = subj->obj_hash[index];
58816+
58817+ while (match && (match->inode != ino || match->device != dev ||
58818+ (match->mode & GR_DELETED))) {
58819+ match = match->next;
58820+ }
58821+
58822+ if (match && !(match->mode & GR_DELETED))
58823+ return match;
58824+ else
58825+ return NULL;
58826+}
58827+
58828+static struct acl_object_label *
58829+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
58830+ const struct acl_subject_label *subj)
58831+{
58832+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
58833+ struct acl_object_label *match;
58834+
58835+ match = subj->obj_hash[index];
58836+
58837+ while (match && (match->inode != ino || match->device != dev ||
58838+ !(match->mode & GR_DELETED))) {
58839+ match = match->next;
58840+ }
58841+
58842+ if (match && (match->mode & GR_DELETED))
58843+ return match;
58844+
58845+ match = subj->obj_hash[index];
58846+
58847+ while (match && (match->inode != ino || match->device != dev ||
58848+ (match->mode & GR_DELETED))) {
58849+ match = match->next;
58850+ }
58851+
58852+ if (match && !(match->mode & GR_DELETED))
58853+ return match;
58854+ else
58855+ return NULL;
58856+}
58857+
58858+static struct name_entry *
58859+lookup_name_entry(const char *name)
58860+{
58861+ unsigned int len = strlen(name);
58862+ unsigned int key = full_name_hash(name, len);
58863+ unsigned int index = key % name_set.n_size;
58864+ struct name_entry *match;
58865+
58866+ match = name_set.n_hash[index];
58867+
58868+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
58869+ match = match->next;
58870+
58871+ return match;
58872+}
58873+
58874+static struct name_entry *
58875+lookup_name_entry_create(const char *name)
58876+{
58877+ unsigned int len = strlen(name);
58878+ unsigned int key = full_name_hash(name, len);
58879+ unsigned int index = key % name_set.n_size;
58880+ struct name_entry *match;
58881+
58882+ match = name_set.n_hash[index];
58883+
58884+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
58885+ !match->deleted))
58886+ match = match->next;
58887+
58888+ if (match && match->deleted)
58889+ return match;
58890+
58891+ match = name_set.n_hash[index];
58892+
58893+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
58894+ match->deleted))
58895+ match = match->next;
58896+
58897+ if (match && !match->deleted)
58898+ return match;
58899+ else
58900+ return NULL;
58901+}
58902+
58903+static struct inodev_entry *
58904+lookup_inodev_entry(const ino_t ino, const dev_t dev)
58905+{
58906+ unsigned int index = gr_fhash(ino, dev, inodev_set.i_size);
58907+ struct inodev_entry *match;
58908+
58909+ match = inodev_set.i_hash[index];
58910+
58911+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
58912+ match = match->next;
58913+
58914+ return match;
58915+}
58916+
58917+static void
58918+insert_inodev_entry(struct inodev_entry *entry)
58919+{
58920+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
58921+ inodev_set.i_size);
58922+ struct inodev_entry **curr;
58923+
58924+ entry->prev = NULL;
58925+
58926+ curr = &inodev_set.i_hash[index];
58927+ if (*curr != NULL)
58928+ (*curr)->prev = entry;
58929+
58930+ entry->next = *curr;
58931+ *curr = entry;
58932+
58933+ return;
58934+}
58935+
58936+static void
58937+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
58938+{
58939+ unsigned int index =
58940+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
58941+ struct acl_role_label **curr;
58942+ struct acl_role_label *tmp, *tmp2;
58943+
58944+ curr = &acl_role_set.r_hash[index];
58945+
58946+ /* simple case, slot is empty, just set it to our role */
58947+ if (*curr == NULL) {
58948+ *curr = role;
58949+ } else {
58950+ /* example:
58951+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
58952+ 2 -> 3
58953+ */
58954+ /* first check to see if we can already be reached via this slot */
58955+ tmp = *curr;
58956+ while (tmp && tmp != role)
58957+ tmp = tmp->next;
58958+ if (tmp == role) {
58959+ /* we don't need to add ourselves to this slot's chain */
58960+ return;
58961+ }
58962+ /* we need to add ourselves to this chain, two cases */
58963+ if (role->next == NULL) {
58964+ /* simple case, append the current chain to our role */
58965+ role->next = *curr;
58966+ *curr = role;
58967+ } else {
58968+ /* 1 -> 2 -> 3 -> 4
58969+ 2 -> 3 -> 4
58970+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
58971+ */
58972+ /* trickier case: walk our role's chain until we find
58973+ the role for the start of the current slot's chain */
58974+ tmp = role;
58975+ tmp2 = *curr;
58976+ while (tmp->next && tmp->next != tmp2)
58977+ tmp = tmp->next;
58978+ if (tmp->next == tmp2) {
58979+ /* from example above, we found 3, so just
58980+ replace this slot's chain with ours */
58981+ *curr = role;
58982+ } else {
58983+ /* we didn't find a subset of our role's chain
58984+ in the current slot's chain, so append their
58985+ chain to ours, and set us as the first role in
58986+ the slot's chain
58987+
58988+ we could fold this case with the case above,
58989+ but making it explicit for clarity
58990+ */
58991+ tmp->next = tmp2;
58992+ *curr = role;
58993+ }
58994+ }
58995+ }
58996+
58997+ return;
58998+}
58999+
59000+static void
59001+insert_acl_role_label(struct acl_role_label *role)
59002+{
59003+ int i;
59004+
59005+ if (role_list == NULL) {
59006+ role_list = role;
59007+ role->prev = NULL;
59008+ } else {
59009+ role->prev = role_list;
59010+ role_list = role;
59011+ }
59012+
59013+ /* used for hash chains */
59014+ role->next = NULL;
59015+
59016+ if (role->roletype & GR_ROLE_DOMAIN) {
59017+ for (i = 0; i < role->domain_child_num; i++)
59018+ __insert_acl_role_label(role, role->domain_children[i]);
59019+ } else
59020+ __insert_acl_role_label(role, role->uidgid);
59021+}
59022+
59023+static int
59024+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
59025+{
59026+ struct name_entry **curr, *nentry;
59027+ struct inodev_entry *ientry;
59028+ unsigned int len = strlen(name);
59029+ unsigned int key = full_name_hash(name, len);
59030+ unsigned int index = key % name_set.n_size;
59031+
59032+ curr = &name_set.n_hash[index];
59033+
59034+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
59035+ curr = &((*curr)->next);
59036+
59037+ if (*curr != NULL)
59038+ return 1;
59039+
59040+ nentry = acl_alloc(sizeof (struct name_entry));
59041+ if (nentry == NULL)
59042+ return 0;
59043+ ientry = acl_alloc(sizeof (struct inodev_entry));
59044+ if (ientry == NULL)
59045+ return 0;
59046+ ientry->nentry = nentry;
59047+
59048+ nentry->key = key;
59049+ nentry->name = name;
59050+ nentry->inode = inode;
59051+ nentry->device = device;
59052+ nentry->len = len;
59053+ nentry->deleted = deleted;
59054+
59055+ nentry->prev = NULL;
59056+ curr = &name_set.n_hash[index];
59057+ if (*curr != NULL)
59058+ (*curr)->prev = nentry;
59059+ nentry->next = *curr;
59060+ *curr = nentry;
59061+
59062+ /* insert us into the table searchable by inode/dev */
59063+ insert_inodev_entry(ientry);
59064+
59065+ return 1;
59066+}
59067+
59068+static void
59069+insert_acl_obj_label(struct acl_object_label *obj,
59070+ struct acl_subject_label *subj)
59071+{
59072+ unsigned int index =
59073+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
59074+ struct acl_object_label **curr;
59075+
59076+
59077+ obj->prev = NULL;
59078+
59079+ curr = &subj->obj_hash[index];
59080+ if (*curr != NULL)
59081+ (*curr)->prev = obj;
59082+
59083+ obj->next = *curr;
59084+ *curr = obj;
59085+
59086+ return;
59087+}
59088+
59089+static void
59090+insert_acl_subj_label(struct acl_subject_label *obj,
59091+ struct acl_role_label *role)
59092+{
59093+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
59094+ struct acl_subject_label **curr;
59095+
59096+ obj->prev = NULL;
59097+
59098+ curr = &role->subj_hash[index];
59099+ if (*curr != NULL)
59100+ (*curr)->prev = obj;
59101+
59102+ obj->next = *curr;
59103+ *curr = obj;
59104+
59105+ return;
59106+}
59107+
59108+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
59109+
59110+static void *
59111+create_table(__u32 * len, int elementsize)
59112+{
59113+ unsigned int table_sizes[] = {
59114+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
59115+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
59116+ 4194301, 8388593, 16777213, 33554393, 67108859
59117+ };
59118+ void *newtable = NULL;
59119+ unsigned int pwr = 0;
59120+
59121+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
59122+ table_sizes[pwr] <= *len)
59123+ pwr++;
59124+
59125+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
59126+ return newtable;
59127+
59128+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
59129+ newtable =
59130+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
59131+ else
59132+ newtable = vmalloc(table_sizes[pwr] * elementsize);
59133+
59134+ *len = table_sizes[pwr];
59135+
59136+ return newtable;
59137+}
59138+
59139+static int
59140+init_variables(const struct gr_arg *arg)
59141+{
59142+ struct task_struct *reaper = init_pid_ns.child_reaper;
59143+ unsigned int stacksize;
59144+
59145+ subj_map_set.s_size = arg->role_db.num_subjects;
59146+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
59147+ name_set.n_size = arg->role_db.num_objects;
59148+ inodev_set.i_size = arg->role_db.num_objects;
59149+
59150+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
59151+ !name_set.n_size || !inodev_set.i_size)
59152+ return 1;
59153+
59154+ if (!gr_init_uidset())
59155+ return 1;
59156+
59157+ /* set up the stack that holds allocation info */
59158+
59159+ stacksize = arg->role_db.num_pointers + 5;
59160+
59161+ if (!acl_alloc_stack_init(stacksize))
59162+ return 1;
59163+
59164+ /* grab reference for the real root dentry and vfsmount */
59165+ get_fs_root(reaper->fs, &real_root);
59166+
59167+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
59168+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
59169+#endif
59170+
59171+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
59172+ if (fakefs_obj_rw == NULL)
59173+ return 1;
59174+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
59175+
59176+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
59177+ if (fakefs_obj_rwx == NULL)
59178+ return 1;
59179+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
59180+
59181+ subj_map_set.s_hash =
59182+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
59183+ acl_role_set.r_hash =
59184+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
59185+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
59186+ inodev_set.i_hash =
59187+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
59188+
59189+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
59190+ !name_set.n_hash || !inodev_set.i_hash)
59191+ return 1;
59192+
59193+ memset(subj_map_set.s_hash, 0,
59194+ sizeof(struct subject_map *) * subj_map_set.s_size);
59195+ memset(acl_role_set.r_hash, 0,
59196+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
59197+ memset(name_set.n_hash, 0,
59198+ sizeof (struct name_entry *) * name_set.n_size);
59199+ memset(inodev_set.i_hash, 0,
59200+ sizeof (struct inodev_entry *) * inodev_set.i_size);
59201+
59202+ return 0;
59203+}
59204+
59205+/* free information not needed after startup
59206+ currently contains user->kernel pointer mappings for subjects
59207+*/
59208+
59209+static void
59210+free_init_variables(void)
59211+{
59212+ __u32 i;
59213+
59214+ if (subj_map_set.s_hash) {
59215+ for (i = 0; i < subj_map_set.s_size; i++) {
59216+ if (subj_map_set.s_hash[i]) {
59217+ kfree(subj_map_set.s_hash[i]);
59218+ subj_map_set.s_hash[i] = NULL;
59219+ }
59220+ }
59221+
59222+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
59223+ PAGE_SIZE)
59224+ kfree(subj_map_set.s_hash);
59225+ else
59226+ vfree(subj_map_set.s_hash);
59227+ }
59228+
59229+ return;
59230+}
59231+
59232+static void
59233+free_variables(void)
59234+{
59235+ struct acl_subject_label *s;
59236+ struct acl_role_label *r;
59237+ struct task_struct *task, *task2;
59238+ unsigned int x;
59239+
59240+ gr_clear_learn_entries();
59241+
59242+ read_lock(&tasklist_lock);
59243+ do_each_thread(task2, task) {
59244+ task->acl_sp_role = 0;
59245+ task->acl_role_id = 0;
59246+ task->acl = NULL;
59247+ task->role = NULL;
59248+ } while_each_thread(task2, task);
59249+ read_unlock(&tasklist_lock);
59250+
59251+ /* release the reference to the real root dentry and vfsmount */
59252+ path_put(&real_root);
59253+ memset(&real_root, 0, sizeof(real_root));
59254+
59255+ /* free all object hash tables */
59256+
59257+ FOR_EACH_ROLE_START(r)
59258+ if (r->subj_hash == NULL)
59259+ goto next_role;
59260+ FOR_EACH_SUBJECT_START(r, s, x)
59261+ if (s->obj_hash == NULL)
59262+ break;
59263+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
59264+ kfree(s->obj_hash);
59265+ else
59266+ vfree(s->obj_hash);
59267+ FOR_EACH_SUBJECT_END(s, x)
59268+ FOR_EACH_NESTED_SUBJECT_START(r, s)
59269+ if (s->obj_hash == NULL)
59270+ break;
59271+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
59272+ kfree(s->obj_hash);
59273+ else
59274+ vfree(s->obj_hash);
59275+ FOR_EACH_NESTED_SUBJECT_END(s)
59276+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
59277+ kfree(r->subj_hash);
59278+ else
59279+ vfree(r->subj_hash);
59280+ r->subj_hash = NULL;
59281+next_role:
59282+ FOR_EACH_ROLE_END(r)
59283+
59284+ acl_free_all();
59285+
59286+ if (acl_role_set.r_hash) {
59287+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
59288+ PAGE_SIZE)
59289+ kfree(acl_role_set.r_hash);
59290+ else
59291+ vfree(acl_role_set.r_hash);
59292+ }
59293+ if (name_set.n_hash) {
59294+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
59295+ PAGE_SIZE)
59296+ kfree(name_set.n_hash);
59297+ else
59298+ vfree(name_set.n_hash);
59299+ }
59300+
59301+ if (inodev_set.i_hash) {
59302+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
59303+ PAGE_SIZE)
59304+ kfree(inodev_set.i_hash);
59305+ else
59306+ vfree(inodev_set.i_hash);
59307+ }
59308+
59309+ gr_free_uidset();
59310+
59311+ memset(&name_set, 0, sizeof (struct name_db));
59312+ memset(&inodev_set, 0, sizeof (struct inodev_db));
59313+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
59314+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
59315+
59316+ default_role = NULL;
59317+ kernel_role = NULL;
59318+ role_list = NULL;
59319+
59320+ return;
59321+}
59322+
59323+static __u32
59324+count_user_objs(struct acl_object_label *userp)
59325+{
59326+ struct acl_object_label o_tmp;
59327+ __u32 num = 0;
59328+
59329+ while (userp) {
59330+ if (copy_from_user(&o_tmp, userp,
59331+ sizeof (struct acl_object_label)))
59332+ break;
59333+
59334+ userp = o_tmp.prev;
59335+ num++;
59336+ }
59337+
59338+ return num;
59339+}
59340+
59341+static struct acl_subject_label *
59342+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
59343+
59344+static int
59345+copy_user_glob(struct acl_object_label *obj)
59346+{
59347+ struct acl_object_label *g_tmp, **guser;
59348+ unsigned int len;
59349+ char *tmp;
59350+
59351+ if (obj->globbed == NULL)
59352+ return 0;
59353+
59354+ guser = &obj->globbed;
59355+ while (*guser) {
59356+ g_tmp = (struct acl_object_label *)
59357+ acl_alloc(sizeof (struct acl_object_label));
59358+ if (g_tmp == NULL)
59359+ return -ENOMEM;
59360+
59361+ if (copy_from_user(g_tmp, *guser,
59362+ sizeof (struct acl_object_label)))
59363+ return -EFAULT;
59364+
59365+ len = strnlen_user(g_tmp->filename, PATH_MAX);
59366+
59367+ if (!len || len >= PATH_MAX)
59368+ return -EINVAL;
59369+
59370+ if ((tmp = (char *) acl_alloc(len)) == NULL)
59371+ return -ENOMEM;
59372+
59373+ if (copy_from_user(tmp, g_tmp->filename, len))
59374+ return -EFAULT;
59375+ tmp[len-1] = '\0';
59376+ g_tmp->filename = tmp;
59377+
59378+ *guser = g_tmp;
59379+ guser = &(g_tmp->next);
59380+ }
59381+
59382+ return 0;
59383+}
59384+
59385+static int
59386+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
59387+ struct acl_role_label *role)
59388+{
59389+ struct acl_object_label *o_tmp;
59390+ unsigned int len;
59391+ int ret;
59392+ char *tmp;
59393+
59394+ while (userp) {
59395+ if ((o_tmp = (struct acl_object_label *)
59396+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
59397+ return -ENOMEM;
59398+
59399+ if (copy_from_user(o_tmp, userp,
59400+ sizeof (struct acl_object_label)))
59401+ return -EFAULT;
59402+
59403+ userp = o_tmp->prev;
59404+
59405+ len = strnlen_user(o_tmp->filename, PATH_MAX);
59406+
59407+ if (!len || len >= PATH_MAX)
59408+ return -EINVAL;
59409+
59410+ if ((tmp = (char *) acl_alloc(len)) == NULL)
59411+ return -ENOMEM;
59412+
59413+ if (copy_from_user(tmp, o_tmp->filename, len))
59414+ return -EFAULT;
59415+ tmp[len-1] = '\0';
59416+ o_tmp->filename = tmp;
59417+
59418+ insert_acl_obj_label(o_tmp, subj);
59419+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
59420+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
59421+ return -ENOMEM;
59422+
59423+ ret = copy_user_glob(o_tmp);
59424+ if (ret)
59425+ return ret;
59426+
59427+ if (o_tmp->nested) {
59428+ int already_copied;
59429+
59430+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
59431+ if (IS_ERR(o_tmp->nested))
59432+ return PTR_ERR(o_tmp->nested);
59433+
59434+ /* insert into nested subject list if we haven't copied this one yet
59435+ to prevent duplicate entries */
59436+ if (!already_copied) {
59437+ o_tmp->nested->next = role->hash->first;
59438+ role->hash->first = o_tmp->nested;
59439+ }
59440+ }
59441+ }
59442+
59443+ return 0;
59444+}
59445+
59446+static __u32
59447+count_user_subjs(struct acl_subject_label *userp)
59448+{
59449+ struct acl_subject_label s_tmp;
59450+ __u32 num = 0;
59451+
59452+ while (userp) {
59453+ if (copy_from_user(&s_tmp, userp,
59454+ sizeof (struct acl_subject_label)))
59455+ break;
59456+
59457+ userp = s_tmp.prev;
59458+ }
59459+
59460+ return num;
59461+}
59462+
59463+static int
59464+copy_user_allowedips(struct acl_role_label *rolep)
59465+{
59466+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
59467+
59468+ ruserip = rolep->allowed_ips;
59469+
59470+ while (ruserip) {
59471+ rlast = rtmp;
59472+
59473+ if ((rtmp = (struct role_allowed_ip *)
59474+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
59475+ return -ENOMEM;
59476+
59477+ if (copy_from_user(rtmp, ruserip,
59478+ sizeof (struct role_allowed_ip)))
59479+ return -EFAULT;
59480+
59481+ ruserip = rtmp->prev;
59482+
59483+ if (!rlast) {
59484+ rtmp->prev = NULL;
59485+ rolep->allowed_ips = rtmp;
59486+ } else {
59487+ rlast->next = rtmp;
59488+ rtmp->prev = rlast;
59489+ }
59490+
59491+ if (!ruserip)
59492+ rtmp->next = NULL;
59493+ }
59494+
59495+ return 0;
59496+}
59497+
59498+static int
59499+copy_user_transitions(struct acl_role_label *rolep)
59500+{
59501+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
59502+
59503+ unsigned int len;
59504+ char *tmp;
59505+
59506+ rusertp = rolep->transitions;
59507+
59508+ while (rusertp) {
59509+ rlast = rtmp;
59510+
59511+ if ((rtmp = (struct role_transition *)
59512+ acl_alloc(sizeof (struct role_transition))) == NULL)
59513+ return -ENOMEM;
59514+
59515+ if (copy_from_user(rtmp, rusertp,
59516+ sizeof (struct role_transition)))
59517+ return -EFAULT;
59518+
59519+ rusertp = rtmp->prev;
59520+
59521+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
59522+
59523+ if (!len || len >= GR_SPROLE_LEN)
59524+ return -EINVAL;
59525+
59526+ if ((tmp = (char *) acl_alloc(len)) == NULL)
59527+ return -ENOMEM;
59528+
59529+ if (copy_from_user(tmp, rtmp->rolename, len))
59530+ return -EFAULT;
59531+ tmp[len-1] = '\0';
59532+ rtmp->rolename = tmp;
59533+
59534+ if (!rlast) {
59535+ rtmp->prev = NULL;
59536+ rolep->transitions = rtmp;
59537+ } else {
59538+ rlast->next = rtmp;
59539+ rtmp->prev = rlast;
59540+ }
59541+
59542+ if (!rusertp)
59543+ rtmp->next = NULL;
59544+ }
59545+
59546+ return 0;
59547+}
59548+
59549+static struct acl_subject_label *
59550+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
59551+{
59552+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
59553+ unsigned int len;
59554+ char *tmp;
59555+ __u32 num_objs;
59556+ struct acl_ip_label **i_tmp, *i_utmp2;
59557+ struct gr_hash_struct ghash;
59558+ struct subject_map *subjmap;
59559+ unsigned int i_num;
59560+ int err;
59561+
59562+ if (already_copied != NULL)
59563+ *already_copied = 0;
59564+
59565+ s_tmp = lookup_subject_map(userp);
59566+
59567+ /* we've already copied this subject into the kernel, just return
59568+ the reference to it, and don't copy it over again
59569+ */
59570+ if (s_tmp) {
59571+ if (already_copied != NULL)
59572+ *already_copied = 1;
59573+ return(s_tmp);
59574+ }
59575+
59576+ if ((s_tmp = (struct acl_subject_label *)
59577+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
59578+ return ERR_PTR(-ENOMEM);
59579+
59580+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
59581+ if (subjmap == NULL)
59582+ return ERR_PTR(-ENOMEM);
59583+
59584+ subjmap->user = userp;
59585+ subjmap->kernel = s_tmp;
59586+ insert_subj_map_entry(subjmap);
59587+
59588+ if (copy_from_user(s_tmp, userp,
59589+ sizeof (struct acl_subject_label)))
59590+ return ERR_PTR(-EFAULT);
59591+
59592+ len = strnlen_user(s_tmp->filename, PATH_MAX);
59593+
59594+ if (!len || len >= PATH_MAX)
59595+ return ERR_PTR(-EINVAL);
59596+
59597+ if ((tmp = (char *) acl_alloc(len)) == NULL)
59598+ return ERR_PTR(-ENOMEM);
59599+
59600+ if (copy_from_user(tmp, s_tmp->filename, len))
59601+ return ERR_PTR(-EFAULT);
59602+ tmp[len-1] = '\0';
59603+ s_tmp->filename = tmp;
59604+
59605+ if (!strcmp(s_tmp->filename, "/"))
59606+ role->root_label = s_tmp;
59607+
59608+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
59609+ return ERR_PTR(-EFAULT);
59610+
59611+ /* copy user and group transition tables */
59612+
59613+ if (s_tmp->user_trans_num) {
59614+ uid_t *uidlist;
59615+
59616+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
59617+ if (uidlist == NULL)
59618+ return ERR_PTR(-ENOMEM);
59619+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
59620+ return ERR_PTR(-EFAULT);
59621+
59622+ s_tmp->user_transitions = uidlist;
59623+ }
59624+
59625+ if (s_tmp->group_trans_num) {
59626+ gid_t *gidlist;
59627+
59628+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
59629+ if (gidlist == NULL)
59630+ return ERR_PTR(-ENOMEM);
59631+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
59632+ return ERR_PTR(-EFAULT);
59633+
59634+ s_tmp->group_transitions = gidlist;
59635+ }
59636+
59637+ /* set up object hash table */
59638+ num_objs = count_user_objs(ghash.first);
59639+
59640+ s_tmp->obj_hash_size = num_objs;
59641+ s_tmp->obj_hash =
59642+ (struct acl_object_label **)
59643+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
59644+
59645+ if (!s_tmp->obj_hash)
59646+ return ERR_PTR(-ENOMEM);
59647+
59648+ memset(s_tmp->obj_hash, 0,
59649+ s_tmp->obj_hash_size *
59650+ sizeof (struct acl_object_label *));
59651+
59652+ /* add in objects */
59653+ err = copy_user_objs(ghash.first, s_tmp, role);
59654+
59655+ if (err)
59656+ return ERR_PTR(err);
59657+
59658+ /* set pointer for parent subject */
59659+ if (s_tmp->parent_subject) {
59660+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
59661+
59662+ if (IS_ERR(s_tmp2))
59663+ return s_tmp2;
59664+
59665+ s_tmp->parent_subject = s_tmp2;
59666+ }
59667+
59668+ /* add in ip acls */
59669+
59670+ if (!s_tmp->ip_num) {
59671+ s_tmp->ips = NULL;
59672+ goto insert;
59673+ }
59674+
59675+ i_tmp =
59676+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
59677+ sizeof (struct acl_ip_label *));
59678+
59679+ if (!i_tmp)
59680+ return ERR_PTR(-ENOMEM);
59681+
59682+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
59683+ *(i_tmp + i_num) =
59684+ (struct acl_ip_label *)
59685+ acl_alloc(sizeof (struct acl_ip_label));
59686+ if (!*(i_tmp + i_num))
59687+ return ERR_PTR(-ENOMEM);
59688+
59689+ if (copy_from_user
59690+ (&i_utmp2, s_tmp->ips + i_num,
59691+ sizeof (struct acl_ip_label *)))
59692+ return ERR_PTR(-EFAULT);
59693+
59694+ if (copy_from_user
59695+ (*(i_tmp + i_num), i_utmp2,
59696+ sizeof (struct acl_ip_label)))
59697+ return ERR_PTR(-EFAULT);
59698+
59699+ if ((*(i_tmp + i_num))->iface == NULL)
59700+ continue;
59701+
59702+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
59703+ if (!len || len >= IFNAMSIZ)
59704+ return ERR_PTR(-EINVAL);
59705+ tmp = acl_alloc(len);
59706+ if (tmp == NULL)
59707+ return ERR_PTR(-ENOMEM);
59708+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
59709+ return ERR_PTR(-EFAULT);
59710+ (*(i_tmp + i_num))->iface = tmp;
59711+ }
59712+
59713+ s_tmp->ips = i_tmp;
59714+
59715+insert:
59716+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
59717+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
59718+ return ERR_PTR(-ENOMEM);
59719+
59720+ return s_tmp;
59721+}
59722+
59723+static int
59724+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
59725+{
59726+ struct acl_subject_label s_pre;
59727+ struct acl_subject_label * ret;
59728+ int err;
59729+
59730+ while (userp) {
59731+ if (copy_from_user(&s_pre, userp,
59732+ sizeof (struct acl_subject_label)))
59733+ return -EFAULT;
59734+
59735+ ret = do_copy_user_subj(userp, role, NULL);
59736+
59737+ err = PTR_ERR(ret);
59738+ if (IS_ERR(ret))
59739+ return err;
59740+
59741+ insert_acl_subj_label(ret, role);
59742+
59743+ userp = s_pre.prev;
59744+ }
59745+
59746+ return 0;
59747+}
59748+
59749+static int
59750+copy_user_acl(struct gr_arg *arg)
59751+{
59752+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
59753+ struct acl_subject_label *subj_list;
59754+ struct sprole_pw *sptmp;
59755+ struct gr_hash_struct *ghash;
59756+ uid_t *domainlist;
59757+ unsigned int r_num;
59758+ unsigned int len;
59759+ char *tmp;
59760+ int err = 0;
59761+ __u16 i;
59762+ __u32 num_subjs;
59763+
59764+ /* we need a default and kernel role */
59765+ if (arg->role_db.num_roles < 2)
59766+ return -EINVAL;
59767+
59768+ /* copy special role authentication info from userspace */
59769+
59770+ num_sprole_pws = arg->num_sprole_pws;
59771+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
59772+
59773+ if (!acl_special_roles && num_sprole_pws)
59774+ return -ENOMEM;
59775+
59776+ for (i = 0; i < num_sprole_pws; i++) {
59777+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
59778+ if (!sptmp)
59779+ return -ENOMEM;
59780+ if (copy_from_user(sptmp, arg->sprole_pws + i,
59781+ sizeof (struct sprole_pw)))
59782+ return -EFAULT;
59783+
59784+ len = strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
59785+
59786+ if (!len || len >= GR_SPROLE_LEN)
59787+ return -EINVAL;
59788+
59789+ if ((tmp = (char *) acl_alloc(len)) == NULL)
59790+ return -ENOMEM;
59791+
59792+ if (copy_from_user(tmp, sptmp->rolename, len))
59793+ return -EFAULT;
59794+
59795+ tmp[len-1] = '\0';
59796+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
59797+ printk(KERN_ALERT "Copying special role %s\n", tmp);
59798+#endif
59799+ sptmp->rolename = tmp;
59800+ acl_special_roles[i] = sptmp;
59801+ }
59802+
59803+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
59804+
59805+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
59806+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
59807+
59808+ if (!r_tmp)
59809+ return -ENOMEM;
59810+
59811+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
59812+ sizeof (struct acl_role_label *)))
59813+ return -EFAULT;
59814+
59815+ if (copy_from_user(r_tmp, r_utmp2,
59816+ sizeof (struct acl_role_label)))
59817+ return -EFAULT;
59818+
59819+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
59820+
59821+ if (!len || len >= PATH_MAX)
59822+ return -EINVAL;
59823+
59824+ if ((tmp = (char *) acl_alloc(len)) == NULL)
59825+ return -ENOMEM;
59826+
59827+ if (copy_from_user(tmp, r_tmp->rolename, len))
59828+ return -EFAULT;
59829+
59830+ tmp[len-1] = '\0';
59831+ r_tmp->rolename = tmp;
59832+
59833+ if (!strcmp(r_tmp->rolename, "default")
59834+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
59835+ default_role = r_tmp;
59836+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
59837+ kernel_role = r_tmp;
59838+ }
59839+
59840+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
59841+ return -ENOMEM;
59842+
59843+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct)))
59844+ return -EFAULT;
59845+
59846+ r_tmp->hash = ghash;
59847+
59848+ num_subjs = count_user_subjs(r_tmp->hash->first);
59849+
59850+ r_tmp->subj_hash_size = num_subjs;
59851+ r_tmp->subj_hash =
59852+ (struct acl_subject_label **)
59853+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
59854+
59855+ if (!r_tmp->subj_hash)
59856+ return -ENOMEM;
59857+
59858+ err = copy_user_allowedips(r_tmp);
59859+ if (err)
59860+ return err;
59861+
59862+ /* copy domain info */
59863+ if (r_tmp->domain_children != NULL) {
59864+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
59865+ if (domainlist == NULL)
59866+ return -ENOMEM;
59867+
59868+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
59869+ return -EFAULT;
59870+
59871+ r_tmp->domain_children = domainlist;
59872+ }
59873+
59874+ err = copy_user_transitions(r_tmp);
59875+ if (err)
59876+ return err;
59877+
59878+ memset(r_tmp->subj_hash, 0,
59879+ r_tmp->subj_hash_size *
59880+ sizeof (struct acl_subject_label *));
59881+
59882+ /* acquire the list of subjects, then NULL out
59883+ the list prior to parsing the subjects for this role,
59884+ as during this parsing the list is replaced with a list
59885+ of *nested* subjects for the role
59886+ */
59887+ subj_list = r_tmp->hash->first;
59888+
59889+ /* set nested subject list to null */
59890+ r_tmp->hash->first = NULL;
59891+
59892+ err = copy_user_subjs(subj_list, r_tmp);
59893+
59894+ if (err)
59895+ return err;
59896+
59897+ insert_acl_role_label(r_tmp);
59898+ }
59899+
59900+ if (default_role == NULL || kernel_role == NULL)
59901+ return -EINVAL;
59902+
59903+ return err;
59904+}
59905+
59906+static int
59907+gracl_init(struct gr_arg *args)
59908+{
59909+ int error = 0;
59910+
59911+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
59912+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
59913+
59914+ if (init_variables(args)) {
59915+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
59916+ error = -ENOMEM;
59917+ free_variables();
59918+ goto out;
59919+ }
59920+
59921+ error = copy_user_acl(args);
59922+ free_init_variables();
59923+ if (error) {
59924+ free_variables();
59925+ goto out;
59926+ }
59927+
59928+ if ((error = gr_set_acls(0))) {
59929+ free_variables();
59930+ goto out;
59931+ }
59932+
59933+ pax_open_kernel();
59934+ gr_status |= GR_READY;
59935+ pax_close_kernel();
59936+
59937+ out:
59938+ return error;
59939+}
59940+
59941+/* derived from glibc fnmatch() 0: match, 1: no match*/
59942+
59943+static int
59944+glob_match(const char *p, const char *n)
59945+{
59946+ char c;
59947+
59948+ while ((c = *p++) != '\0') {
59949+ switch (c) {
59950+ case '?':
59951+ if (*n == '\0')
59952+ return 1;
59953+ else if (*n == '/')
59954+ return 1;
59955+ break;
59956+ case '\\':
59957+ if (*n != c)
59958+ return 1;
59959+ break;
59960+ case '*':
59961+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
59962+ if (*n == '/')
59963+ return 1;
59964+ else if (c == '?') {
59965+ if (*n == '\0')
59966+ return 1;
59967+ else
59968+ ++n;
59969+ }
59970+ }
59971+ if (c == '\0') {
59972+ return 0;
59973+ } else {
59974+ const char *endp;
59975+
59976+ if ((endp = strchr(n, '/')) == NULL)
59977+ endp = n + strlen(n);
59978+
59979+ if (c == '[') {
59980+ for (--p; n < endp; ++n)
59981+ if (!glob_match(p, n))
59982+ return 0;
59983+ } else if (c == '/') {
59984+ while (*n != '\0' && *n != '/')
59985+ ++n;
59986+ if (*n == '/' && !glob_match(p, n + 1))
59987+ return 0;
59988+ } else {
59989+ for (--p; n < endp; ++n)
59990+ if (*n == c && !glob_match(p, n))
59991+ return 0;
59992+ }
59993+
59994+ return 1;
59995+ }
59996+ case '[':
59997+ {
59998+ int not;
59999+ char cold;
60000+
60001+ if (*n == '\0' || *n == '/')
60002+ return 1;
60003+
60004+ not = (*p == '!' || *p == '^');
60005+ if (not)
60006+ ++p;
60007+
60008+ c = *p++;
60009+ for (;;) {
60010+ unsigned char fn = (unsigned char)*n;
60011+
60012+ if (c == '\0')
60013+ return 1;
60014+ else {
60015+ if (c == fn)
60016+ goto matched;
60017+ cold = c;
60018+ c = *p++;
60019+
60020+ if (c == '-' && *p != ']') {
60021+ unsigned char cend = *p++;
60022+
60023+ if (cend == '\0')
60024+ return 1;
60025+
60026+ if (cold <= fn && fn <= cend)
60027+ goto matched;
60028+
60029+ c = *p++;
60030+ }
60031+ }
60032+
60033+ if (c == ']')
60034+ break;
60035+ }
60036+ if (!not)
60037+ return 1;
60038+ break;
60039+ matched:
60040+ while (c != ']') {
60041+ if (c == '\0')
60042+ return 1;
60043+
60044+ c = *p++;
60045+ }
60046+ if (not)
60047+ return 1;
60048+ }
60049+ break;
60050+ default:
60051+ if (c != *n)
60052+ return 1;
60053+ }
60054+
60055+ ++n;
60056+ }
60057+
60058+ if (*n == '\0')
60059+ return 0;
60060+
60061+ if (*n == '/')
60062+ return 0;
60063+
60064+ return 1;
60065+}
60066+
60067+static struct acl_object_label *
60068+chk_glob_label(struct acl_object_label *globbed,
60069+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
60070+{
60071+ struct acl_object_label *tmp;
60072+
60073+ if (*path == NULL)
60074+ *path = gr_to_filename_nolock(dentry, mnt);
60075+
60076+ tmp = globbed;
60077+
60078+ while (tmp) {
60079+ if (!glob_match(tmp->filename, *path))
60080+ return tmp;
60081+ tmp = tmp->next;
60082+ }
60083+
60084+ return NULL;
60085+}
60086+
60087+static struct acl_object_label *
60088+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
60089+ const ino_t curr_ino, const dev_t curr_dev,
60090+ const struct acl_subject_label *subj, char **path, const int checkglob)
60091+{
60092+ struct acl_subject_label *tmpsubj;
60093+ struct acl_object_label *retval;
60094+ struct acl_object_label *retval2;
60095+
60096+ tmpsubj = (struct acl_subject_label *) subj;
60097+ read_lock(&gr_inode_lock);
60098+ do {
60099+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
60100+ if (retval) {
60101+ if (checkglob && retval->globbed) {
60102+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
60103+ if (retval2)
60104+ retval = retval2;
60105+ }
60106+ break;
60107+ }
60108+ } while ((tmpsubj = tmpsubj->parent_subject));
60109+ read_unlock(&gr_inode_lock);
60110+
60111+ return retval;
60112+}
60113+
60114+static __inline__ struct acl_object_label *
60115+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
60116+ struct dentry *curr_dentry,
60117+ const struct acl_subject_label *subj, char **path, const int checkglob)
60118+{
60119+ int newglob = checkglob;
60120+ ino_t inode;
60121+ dev_t device;
60122+
60123+ /* if we aren't checking a subdirectory of the original path yet, don't do glob checking
60124+ as we don't want a / * rule to match instead of the / object
60125+ don't do this for create lookups that call this function though, since they're looking up
60126+ on the parent and thus need globbing checks on all paths
60127+ */
60128+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
60129+ newglob = GR_NO_GLOB;
60130+
60131+ spin_lock(&curr_dentry->d_lock);
60132+ inode = curr_dentry->d_inode->i_ino;
60133+ device = __get_dev(curr_dentry);
60134+ spin_unlock(&curr_dentry->d_lock);
60135+
60136+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
60137+}
60138+
60139+#ifdef CONFIG_HUGETLBFS
60140+static inline bool
60141+is_hugetlbfs_mnt(const struct vfsmount *mnt)
60142+{
60143+ int i;
60144+ for (i = 0; i < HUGE_MAX_HSTATE; i++) {
60145+ if (unlikely(hugetlbfs_vfsmount[i] == mnt))
60146+ return true;
60147+ }
60148+
60149+ return false;
60150+}
60151+#endif
60152+
60153+static struct acl_object_label *
60154+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
60155+ const struct acl_subject_label *subj, char *path, const int checkglob)
60156+{
60157+ struct dentry *dentry = (struct dentry *) l_dentry;
60158+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
60159+ struct mount *real_mnt = real_mount(mnt);
60160+ struct acl_object_label *retval;
60161+ struct dentry *parent;
60162+
60163+ br_read_lock(&vfsmount_lock);
60164+ write_seqlock(&rename_lock);
60165+
60166+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
60167+#ifdef CONFIG_NET
60168+ mnt == sock_mnt ||
60169+#endif
60170+#ifdef CONFIG_HUGETLBFS
60171+ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
60172+#endif
60173+ /* ignore Eric Biederman */
60174+ IS_PRIVATE(l_dentry->d_inode))) {
60175+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
60176+ goto out;
60177+ }
60178+
60179+ for (;;) {
60180+ if (dentry == real_root.dentry && mnt == real_root.mnt)
60181+ break;
60182+
60183+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
60184+ if (!mnt_has_parent(real_mnt))
60185+ break;
60186+
60187+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
60188+ if (retval != NULL)
60189+ goto out;
60190+
60191+ dentry = real_mnt->mnt_mountpoint;
60192+ real_mnt = real_mnt->mnt_parent;
60193+ mnt = &real_mnt->mnt;
60194+ continue;
60195+ }
60196+
60197+ parent = dentry->d_parent;
60198+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
60199+ if (retval != NULL)
60200+ goto out;
60201+
60202+ dentry = parent;
60203+ }
60204+
60205+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
60206+
60207+ /* real_root is pinned so we don't have to hold a reference */
60208+ if (retval == NULL)
60209+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
60210+out:
60211+ write_sequnlock(&rename_lock);
60212+ br_read_unlock(&vfsmount_lock);
60213+
60214+ BUG_ON(retval == NULL);
60215+
60216+ return retval;
60217+}
60218+
60219+static __inline__ struct acl_object_label *
60220+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
60221+ const struct acl_subject_label *subj)
60222+{
60223+ char *path = NULL;
60224+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
60225+}
60226+
60227+static __inline__ struct acl_object_label *
60228+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
60229+ const struct acl_subject_label *subj)
60230+{
60231+ char *path = NULL;
60232+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
60233+}
60234+
60235+static __inline__ struct acl_object_label *
60236+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
60237+ const struct acl_subject_label *subj, char *path)
60238+{
60239+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
60240+}
60241+
60242+static struct acl_subject_label *
60243+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
60244+ const struct acl_role_label *role)
60245+{
60246+ struct dentry *dentry = (struct dentry *) l_dentry;
60247+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
60248+ struct mount *real_mnt = real_mount(mnt);
60249+ struct acl_subject_label *retval;
60250+ struct dentry *parent;
60251+
60252+ br_read_lock(&vfsmount_lock);
60253+ write_seqlock(&rename_lock);
60254+
60255+ for (;;) {
60256+ if (dentry == real_root.dentry && mnt == real_root.mnt)
60257+ break;
60258+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
60259+ if (!mnt_has_parent(real_mnt))
60260+ break;
60261+
60262+ spin_lock(&dentry->d_lock);
60263+ read_lock(&gr_inode_lock);
60264+ retval =
60265+ lookup_acl_subj_label(dentry->d_inode->i_ino,
60266+ __get_dev(dentry), role);
60267+ read_unlock(&gr_inode_lock);
60268+ spin_unlock(&dentry->d_lock);
60269+ if (retval != NULL)
60270+ goto out;
60271+
60272+ dentry = real_mnt->mnt_mountpoint;
60273+ real_mnt = real_mnt->mnt_parent;
60274+ mnt = &real_mnt->mnt;
60275+ continue;
60276+ }
60277+
60278+ spin_lock(&dentry->d_lock);
60279+ read_lock(&gr_inode_lock);
60280+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
60281+ __get_dev(dentry), role);
60282+ read_unlock(&gr_inode_lock);
60283+ parent = dentry->d_parent;
60284+ spin_unlock(&dentry->d_lock);
60285+
60286+ if (retval != NULL)
60287+ goto out;
60288+
60289+ dentry = parent;
60290+ }
60291+
60292+ spin_lock(&dentry->d_lock);
60293+ read_lock(&gr_inode_lock);
60294+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
60295+ __get_dev(dentry), role);
60296+ read_unlock(&gr_inode_lock);
60297+ spin_unlock(&dentry->d_lock);
60298+
60299+ if (unlikely(retval == NULL)) {
60300+ /* real_root is pinned, we don't need to hold a reference */
60301+ read_lock(&gr_inode_lock);
60302+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
60303+ __get_dev(real_root.dentry), role);
60304+ read_unlock(&gr_inode_lock);
60305+ }
60306+out:
60307+ write_sequnlock(&rename_lock);
60308+ br_read_unlock(&vfsmount_lock);
60309+
60310+ BUG_ON(retval == NULL);
60311+
60312+ return retval;
60313+}
60314+
60315+static void
60316+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
60317+{
60318+ struct task_struct *task = current;
60319+ const struct cred *cred = current_cred();
60320+
60321+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
60322+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
60323+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
60324+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
60325+
60326+ return;
60327+}
60328+
60329+static void
60330+gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
60331+{
60332+ struct task_struct *task = current;
60333+ const struct cred *cred = current_cred();
60334+
60335+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
60336+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
60337+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
60338+ 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
60339+
60340+ return;
60341+}
60342+
60343+static void
60344+gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
60345+{
60346+ struct task_struct *task = current;
60347+ const struct cred *cred = current_cred();
60348+
60349+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
60350+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
60351+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
60352+ 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
60353+
60354+ return;
60355+}
60356+
60357+__u32
60358+gr_search_file(const struct dentry * dentry, const __u32 mode,
60359+ const struct vfsmount * mnt)
60360+{
60361+ __u32 retval = mode;
60362+ struct acl_subject_label *curracl;
60363+ struct acl_object_label *currobj;
60364+
60365+ if (unlikely(!(gr_status & GR_READY)))
60366+ return (mode & ~GR_AUDITS);
60367+
60368+ curracl = current->acl;
60369+
60370+ currobj = chk_obj_label(dentry, mnt, curracl);
60371+ retval = currobj->mode & mode;
60372+
60373+ /* if we're opening a specified transfer file for writing
60374+ (e.g. /dev/initctl), then transfer our role to init
60375+ */
60376+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
60377+ current->role->roletype & GR_ROLE_PERSIST)) {
60378+ struct task_struct *task = init_pid_ns.child_reaper;
60379+
60380+ if (task->role != current->role) {
60381+ task->acl_sp_role = 0;
60382+ task->acl_role_id = current->acl_role_id;
60383+ task->role = current->role;
60384+ rcu_read_lock();
60385+ read_lock(&grsec_exec_file_lock);
60386+ gr_apply_subject_to_task(task);
60387+ read_unlock(&grsec_exec_file_lock);
60388+ rcu_read_unlock();
60389+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
60390+ }
60391+ }
60392+
60393+ if (unlikely
60394+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
60395+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
60396+ __u32 new_mode = mode;
60397+
60398+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
60399+
60400+ retval = new_mode;
60401+
60402+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
60403+ new_mode |= GR_INHERIT;
60404+
60405+ if (!(mode & GR_NOLEARN))
60406+ gr_log_learn(dentry, mnt, new_mode);
60407+ }
60408+
60409+ return retval;
60410+}
60411+
60412+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
60413+ const struct dentry *parent,
60414+ const struct vfsmount *mnt)
60415+{
60416+ struct name_entry *match;
60417+ struct acl_object_label *matchpo;
60418+ struct acl_subject_label *curracl;
60419+ char *path;
60420+
60421+ if (unlikely(!(gr_status & GR_READY)))
60422+ return NULL;
60423+
60424+ preempt_disable();
60425+ path = gr_to_filename_rbac(new_dentry, mnt);
60426+ match = lookup_name_entry_create(path);
60427+
60428+ curracl = current->acl;
60429+
60430+ if (match) {
60431+ read_lock(&gr_inode_lock);
60432+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
60433+ read_unlock(&gr_inode_lock);
60434+
60435+ if (matchpo) {
60436+ preempt_enable();
60437+ return matchpo;
60438+ }
60439+ }
60440+
60441+ // lookup parent
60442+
60443+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
60444+
60445+ preempt_enable();
60446+ return matchpo;
60447+}
60448+
60449+__u32
60450+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
60451+ const struct vfsmount * mnt, const __u32 mode)
60452+{
60453+ struct acl_object_label *matchpo;
60454+ __u32 retval;
60455+
60456+ if (unlikely(!(gr_status & GR_READY)))
60457+ return (mode & ~GR_AUDITS);
60458+
60459+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
60460+
60461+ retval = matchpo->mode & mode;
60462+
60463+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
60464+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
60465+ __u32 new_mode = mode;
60466+
60467+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
60468+
60469+ gr_log_learn(new_dentry, mnt, new_mode);
60470+ return new_mode;
60471+ }
60472+
60473+ return retval;
60474+}
60475+
60476+__u32
60477+gr_check_link(const struct dentry * new_dentry,
60478+ const struct dentry * parent_dentry,
60479+ const struct vfsmount * parent_mnt,
60480+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
60481+{
60482+ struct acl_object_label *obj;
60483+ __u32 oldmode, newmode;
60484+ __u32 needmode;
60485+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
60486+ GR_DELETE | GR_INHERIT;
60487+
60488+ if (unlikely(!(gr_status & GR_READY)))
60489+ return (GR_CREATE | GR_LINK);
60490+
60491+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
60492+ oldmode = obj->mode;
60493+
60494+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
60495+ newmode = obj->mode;
60496+
60497+ needmode = newmode & checkmodes;
60498+
60499+ // old name for hardlink must have at least the permissions of the new name
60500+ if ((oldmode & needmode) != needmode)
60501+ goto bad;
60502+
60503+ // if old name had restrictions/auditing, make sure the new name does as well
60504+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
60505+
60506+ // don't allow hardlinking of suid/sgid/fcapped files without permission
60507+ if (is_privileged_binary(old_dentry))
60508+ needmode |= GR_SETID;
60509+
60510+ if ((newmode & needmode) != needmode)
60511+ goto bad;
60512+
60513+ // enforce minimum permissions
60514+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
60515+ return newmode;
60516+bad:
60517+ needmode = oldmode;
60518+ if (is_privileged_binary(old_dentry))
60519+ needmode |= GR_SETID;
60520+
60521+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
60522+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
60523+ return (GR_CREATE | GR_LINK);
60524+ } else if (newmode & GR_SUPPRESS)
60525+ return GR_SUPPRESS;
60526+ else
60527+ return 0;
60528+}
60529+
60530+int
60531+gr_check_hidden_task(const struct task_struct *task)
60532+{
60533+ if (unlikely(!(gr_status & GR_READY)))
60534+ return 0;
60535+
60536+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
60537+ return 1;
60538+
60539+ return 0;
60540+}
60541+
60542+int
60543+gr_check_protected_task(const struct task_struct *task)
60544+{
60545+ if (unlikely(!(gr_status & GR_READY) || !task))
60546+ return 0;
60547+
60548+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
60549+ task->acl != current->acl)
60550+ return 1;
60551+
60552+ return 0;
60553+}
60554+
60555+int
60556+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
60557+{
60558+ struct task_struct *p;
60559+ int ret = 0;
60560+
60561+ if (unlikely(!(gr_status & GR_READY) || !pid))
60562+ return ret;
60563+
60564+ read_lock(&tasklist_lock);
60565+ do_each_pid_task(pid, type, p) {
60566+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
60567+ p->acl != current->acl) {
60568+ ret = 1;
60569+ goto out;
60570+ }
60571+ } while_each_pid_task(pid, type, p);
60572+out:
60573+ read_unlock(&tasklist_lock);
60574+
60575+ return ret;
60576+}
60577+
60578+void
60579+gr_copy_label(struct task_struct *tsk)
60580+{
60581+ tsk->signal->used_accept = 0;
60582+ tsk->acl_sp_role = 0;
60583+ tsk->acl_role_id = current->acl_role_id;
60584+ tsk->acl = current->acl;
60585+ tsk->role = current->role;
60586+ tsk->signal->curr_ip = current->signal->curr_ip;
60587+ tsk->signal->saved_ip = current->signal->saved_ip;
60588+ if (current->exec_file)
60589+ get_file(current->exec_file);
60590+ tsk->exec_file = current->exec_file;
60591+ tsk->is_writable = current->is_writable;
60592+ if (unlikely(current->signal->used_accept)) {
60593+ current->signal->curr_ip = 0;
60594+ current->signal->saved_ip = 0;
60595+ }
60596+
60597+ return;
60598+}
60599+
60600+static void
60601+gr_set_proc_res(struct task_struct *task)
60602+{
60603+ struct acl_subject_label *proc;
60604+ unsigned short i;
60605+
60606+ proc = task->acl;
60607+
60608+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
60609+ return;
60610+
60611+ for (i = 0; i < RLIM_NLIMITS; i++) {
60612+ if (!(proc->resmask & (1U << i)))
60613+ continue;
60614+
60615+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
60616+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
60617+
60618+ if (i == RLIMIT_CPU)
60619+ update_rlimit_cpu(task, proc->res[i].rlim_cur);
60620+ }
60621+
60622+ return;
60623+}
60624+
60625+extern int gr_process_kernel_setuid_ban(struct user_struct *user);
60626+
60627+int
60628+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
60629+{
60630+ unsigned int i;
60631+ __u16 num;
60632+ uid_t *uidlist;
60633+ uid_t curuid;
60634+ int realok = 0;
60635+ int effectiveok = 0;
60636+ int fsok = 0;
60637+ uid_t globalreal, globaleffective, globalfs;
60638+
60639+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT)
60640+ struct user_struct *user;
60641+
60642+ if (!uid_valid(real))
60643+ goto skipit;
60644+
60645+ /* find user based on global namespace */
60646+
60647+ globalreal = GR_GLOBAL_UID(real);
60648+
60649+ user = find_user(make_kuid(&init_user_ns, globalreal));
60650+ if (user == NULL)
60651+ goto skipit;
60652+
60653+ if (gr_process_kernel_setuid_ban(user)) {
60654+ /* for find_user */
60655+ free_uid(user);
60656+ return 1;
60657+ }
60658+
60659+ /* for find_user */
60660+ free_uid(user);
60661+
60662+skipit:
60663+#endif
60664+
60665+ if (unlikely(!(gr_status & GR_READY)))
60666+ return 0;
60667+
60668+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
60669+ gr_log_learn_uid_change(real, effective, fs);
60670+
60671+ num = current->acl->user_trans_num;
60672+ uidlist = current->acl->user_transitions;
60673+
60674+ if (uidlist == NULL)
60675+ return 0;
60676+
60677+ if (!uid_valid(real)) {
60678+ realok = 1;
60679+ globalreal = (uid_t)-1;
60680+ } else {
60681+ globalreal = GR_GLOBAL_UID(real);
60682+ }
60683+ if (!uid_valid(effective)) {
60684+ effectiveok = 1;
60685+ globaleffective = (uid_t)-1;
60686+ } else {
60687+ globaleffective = GR_GLOBAL_UID(effective);
60688+ }
60689+ if (!uid_valid(fs)) {
60690+ fsok = 1;
60691+ globalfs = (uid_t)-1;
60692+ } else {
60693+ globalfs = GR_GLOBAL_UID(fs);
60694+ }
60695+
60696+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
60697+ for (i = 0; i < num; i++) {
60698+ curuid = uidlist[i];
60699+ if (globalreal == curuid)
60700+ realok = 1;
60701+ if (globaleffective == curuid)
60702+ effectiveok = 1;
60703+ if (globalfs == curuid)
60704+ fsok = 1;
60705+ }
60706+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
60707+ for (i = 0; i < num; i++) {
60708+ curuid = uidlist[i];
60709+ if (globalreal == curuid)
60710+ break;
60711+ if (globaleffective == curuid)
60712+ break;
60713+ if (globalfs == curuid)
60714+ break;
60715+ }
60716+ /* not in deny list */
60717+ if (i == num) {
60718+ realok = 1;
60719+ effectiveok = 1;
60720+ fsok = 1;
60721+ }
60722+ }
60723+
60724+ if (realok && effectiveok && fsok)
60725+ return 0;
60726+ else {
60727+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
60728+ return 1;
60729+ }
60730+}
60731+
60732+int
60733+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
60734+{
60735+ unsigned int i;
60736+ __u16 num;
60737+ gid_t *gidlist;
60738+ gid_t curgid;
60739+ int realok = 0;
60740+ int effectiveok = 0;
60741+ int fsok = 0;
60742+ gid_t globalreal, globaleffective, globalfs;
60743+
60744+ if (unlikely(!(gr_status & GR_READY)))
60745+ return 0;
60746+
60747+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
60748+ gr_log_learn_gid_change(real, effective, fs);
60749+
60750+ num = current->acl->group_trans_num;
60751+ gidlist = current->acl->group_transitions;
60752+
60753+ if (gidlist == NULL)
60754+ return 0;
60755+
60756+ if (!gid_valid(real)) {
60757+ realok = 1;
60758+ globalreal = (gid_t)-1;
60759+ } else {
60760+ globalreal = GR_GLOBAL_GID(real);
60761+ }
60762+ if (!gid_valid(effective)) {
60763+ effectiveok = 1;
60764+ globaleffective = (gid_t)-1;
60765+ } else {
60766+ globaleffective = GR_GLOBAL_GID(effective);
60767+ }
60768+ if (!gid_valid(fs)) {
60769+ fsok = 1;
60770+ globalfs = (gid_t)-1;
60771+ } else {
60772+ globalfs = GR_GLOBAL_GID(fs);
60773+ }
60774+
60775+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
60776+ for (i = 0; i < num; i++) {
60777+ curgid = gidlist[i];
60778+ if (globalreal == curgid)
60779+ realok = 1;
60780+ if (globaleffective == curgid)
60781+ effectiveok = 1;
60782+ if (globalfs == curgid)
60783+ fsok = 1;
60784+ }
60785+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
60786+ for (i = 0; i < num; i++) {
60787+ curgid = gidlist[i];
60788+ if (globalreal == curgid)
60789+ break;
60790+ if (globaleffective == curgid)
60791+ break;
60792+ if (globalfs == curgid)
60793+ break;
60794+ }
60795+ /* not in deny list */
60796+ if (i == num) {
60797+ realok = 1;
60798+ effectiveok = 1;
60799+ fsok = 1;
60800+ }
60801+ }
60802+
60803+ if (realok && effectiveok && fsok)
60804+ return 0;
60805+ else {
60806+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
60807+ return 1;
60808+ }
60809+}
60810+
60811+extern int gr_acl_is_capable(const int cap);
60812+
60813+void
60814+gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
60815+{
60816+ struct acl_role_label *role = task->role;
60817+ struct acl_subject_label *subj = NULL;
60818+ struct acl_object_label *obj;
60819+ struct file *filp;
60820+ uid_t uid;
60821+ gid_t gid;
60822+
60823+ if (unlikely(!(gr_status & GR_READY)))
60824+ return;
60825+
60826+ uid = GR_GLOBAL_UID(kuid);
60827+ gid = GR_GLOBAL_GID(kgid);
60828+
60829+ filp = task->exec_file;
60830+
60831+ /* kernel process, we'll give them the kernel role */
60832+ if (unlikely(!filp)) {
60833+ task->role = kernel_role;
60834+ task->acl = kernel_role->root_label;
60835+ return;
60836+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
60837+ role = lookup_acl_role_label(task, uid, gid);
60838+
60839+ /* don't change the role if we're not a privileged process */
60840+ if (role && task->role != role &&
60841+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
60842+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
60843+ return;
60844+
60845+ /* perform subject lookup in possibly new role
60846+ we can use this result below in the case where role == task->role
60847+ */
60848+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
60849+
60850+ /* if we changed uid/gid, but result in the same role
60851+ and are using inheritance, don't lose the inherited subject
60852+ if current subject is other than what normal lookup
60853+ would result in, we arrived via inheritance, don't
60854+ lose subject
60855+ */
60856+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
60857+ (subj == task->acl)))
60858+ task->acl = subj;
60859+
60860+ task->role = role;
60861+
60862+ task->is_writable = 0;
60863+
60864+ /* ignore additional mmap checks for processes that are writable
60865+ by the default ACL */
60866+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
60867+ if (unlikely(obj->mode & GR_WRITE))
60868+ task->is_writable = 1;
60869+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
60870+ if (unlikely(obj->mode & GR_WRITE))
60871+ task->is_writable = 1;
60872+
60873+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
60874+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
60875+#endif
60876+
60877+ gr_set_proc_res(task);
60878+
60879+ return;
60880+}
60881+
60882+int
60883+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
60884+ const int unsafe_flags)
60885+{
60886+ struct task_struct *task = current;
60887+ struct acl_subject_label *newacl;
60888+ struct acl_object_label *obj;
60889+ __u32 retmode;
60890+
60891+ if (unlikely(!(gr_status & GR_READY)))
60892+ return 0;
60893+
60894+ newacl = chk_subj_label(dentry, mnt, task->role);
60895+
60896+ /* special handling for if we did an strace -f -p <pid> from an admin role, where pid then
60897+ did an exec
60898+ */
60899+ rcu_read_lock();
60900+ read_lock(&tasklist_lock);
60901+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
60902+ (task->parent->acl->mode & GR_POVERRIDE))) {
60903+ read_unlock(&tasklist_lock);
60904+ rcu_read_unlock();
60905+ goto skip_check;
60906+ }
60907+ read_unlock(&tasklist_lock);
60908+ rcu_read_unlock();
60909+
60910+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
60911+ !(task->role->roletype & GR_ROLE_GOD) &&
60912+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
60913+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
60914+ if (unsafe_flags & LSM_UNSAFE_SHARE)
60915+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
60916+ else
60917+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
60918+ return -EACCES;
60919+ }
60920+
60921+skip_check:
60922+
60923+ obj = chk_obj_label(dentry, mnt, task->acl);
60924+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
60925+
60926+ if (!(task->acl->mode & GR_INHERITLEARN) &&
60927+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
60928+ if (obj->nested)
60929+ task->acl = obj->nested;
60930+ else
60931+ task->acl = newacl;
60932+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
60933+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
60934+
60935+ task->is_writable = 0;
60936+
60937+ /* ignore additional mmap checks for processes that are writable
60938+ by the default ACL */
60939+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
60940+ if (unlikely(obj->mode & GR_WRITE))
60941+ task->is_writable = 1;
60942+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
60943+ if (unlikely(obj->mode & GR_WRITE))
60944+ task->is_writable = 1;
60945+
60946+ gr_set_proc_res(task);
60947+
60948+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
60949+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
60950+#endif
60951+ return 0;
60952+}
60953+
60954+/* always called with valid inodev ptr */
60955+static void
60956+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
60957+{
60958+ struct acl_object_label *matchpo;
60959+ struct acl_subject_label *matchps;
60960+ struct acl_subject_label *subj;
60961+ struct acl_role_label *role;
60962+ unsigned int x;
60963+
60964+ FOR_EACH_ROLE_START(role)
60965+ FOR_EACH_SUBJECT_START(role, subj, x)
60966+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
60967+ matchpo->mode |= GR_DELETED;
60968+ FOR_EACH_SUBJECT_END(subj,x)
60969+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
60970+ /* nested subjects aren't in the role's subj_hash table */
60971+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
60972+ matchpo->mode |= GR_DELETED;
60973+ FOR_EACH_NESTED_SUBJECT_END(subj)
60974+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
60975+ matchps->mode |= GR_DELETED;
60976+ FOR_EACH_ROLE_END(role)
60977+
60978+ inodev->nentry->deleted = 1;
60979+
60980+ return;
60981+}
60982+
60983+void
60984+gr_handle_delete(const ino_t ino, const dev_t dev)
60985+{
60986+ struct inodev_entry *inodev;
60987+
60988+ if (unlikely(!(gr_status & GR_READY)))
60989+ return;
60990+
60991+ write_lock(&gr_inode_lock);
60992+ inodev = lookup_inodev_entry(ino, dev);
60993+ if (inodev != NULL)
60994+ do_handle_delete(inodev, ino, dev);
60995+ write_unlock(&gr_inode_lock);
60996+
60997+ return;
60998+}
60999+
61000+static void
61001+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
61002+ const ino_t newinode, const dev_t newdevice,
61003+ struct acl_subject_label *subj)
61004+{
61005+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
61006+ struct acl_object_label *match;
61007+
61008+ match = subj->obj_hash[index];
61009+
61010+ while (match && (match->inode != oldinode ||
61011+ match->device != olddevice ||
61012+ !(match->mode & GR_DELETED)))
61013+ match = match->next;
61014+
61015+ if (match && (match->inode == oldinode)
61016+ && (match->device == olddevice)
61017+ && (match->mode & GR_DELETED)) {
61018+ if (match->prev == NULL) {
61019+ subj->obj_hash[index] = match->next;
61020+ if (match->next != NULL)
61021+ match->next->prev = NULL;
61022+ } else {
61023+ match->prev->next = match->next;
61024+ if (match->next != NULL)
61025+ match->next->prev = match->prev;
61026+ }
61027+ match->prev = NULL;
61028+ match->next = NULL;
61029+ match->inode = newinode;
61030+ match->device = newdevice;
61031+ match->mode &= ~GR_DELETED;
61032+
61033+ insert_acl_obj_label(match, subj);
61034+ }
61035+
61036+ return;
61037+}
61038+
61039+static void
61040+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
61041+ const ino_t newinode, const dev_t newdevice,
61042+ struct acl_role_label *role)
61043+{
61044+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
61045+ struct acl_subject_label *match;
61046+
61047+ match = role->subj_hash[index];
61048+
61049+ while (match && (match->inode != oldinode ||
61050+ match->device != olddevice ||
61051+ !(match->mode & GR_DELETED)))
61052+ match = match->next;
61053+
61054+ if (match && (match->inode == oldinode)
61055+ && (match->device == olddevice)
61056+ && (match->mode & GR_DELETED)) {
61057+ if (match->prev == NULL) {
61058+ role->subj_hash[index] = match->next;
61059+ if (match->next != NULL)
61060+ match->next->prev = NULL;
61061+ } else {
61062+ match->prev->next = match->next;
61063+ if (match->next != NULL)
61064+ match->next->prev = match->prev;
61065+ }
61066+ match->prev = NULL;
61067+ match->next = NULL;
61068+ match->inode = newinode;
61069+ match->device = newdevice;
61070+ match->mode &= ~GR_DELETED;
61071+
61072+ insert_acl_subj_label(match, role);
61073+ }
61074+
61075+ return;
61076+}
61077+
61078+static void
61079+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
61080+ const ino_t newinode, const dev_t newdevice)
61081+{
61082+ unsigned int index = gr_fhash(oldinode, olddevice, inodev_set.i_size);
61083+ struct inodev_entry *match;
61084+
61085+ match = inodev_set.i_hash[index];
61086+
61087+ while (match && (match->nentry->inode != oldinode ||
61088+ match->nentry->device != olddevice || !match->nentry->deleted))
61089+ match = match->next;
61090+
61091+ if (match && (match->nentry->inode == oldinode)
61092+ && (match->nentry->device == olddevice) &&
61093+ match->nentry->deleted) {
61094+ if (match->prev == NULL) {
61095+ inodev_set.i_hash[index] = match->next;
61096+ if (match->next != NULL)
61097+ match->next->prev = NULL;
61098+ } else {
61099+ match->prev->next = match->next;
61100+ if (match->next != NULL)
61101+ match->next->prev = match->prev;
61102+ }
61103+ match->prev = NULL;
61104+ match->next = NULL;
61105+ match->nentry->inode = newinode;
61106+ match->nentry->device = newdevice;
61107+ match->nentry->deleted = 0;
61108+
61109+ insert_inodev_entry(match);
61110+ }
61111+
61112+ return;
61113+}
61114+
61115+static void
61116+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
61117+{
61118+ struct acl_subject_label *subj;
61119+ struct acl_role_label *role;
61120+ unsigned int x;
61121+
61122+ FOR_EACH_ROLE_START(role)
61123+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
61124+
61125+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
61126+ if ((subj->inode == ino) && (subj->device == dev)) {
61127+ subj->inode = ino;
61128+ subj->device = dev;
61129+ }
61130+ /* nested subjects aren't in the role's subj_hash table */
61131+ update_acl_obj_label(matchn->inode, matchn->device,
61132+ ino, dev, subj);
61133+ FOR_EACH_NESTED_SUBJECT_END(subj)
61134+ FOR_EACH_SUBJECT_START(role, subj, x)
61135+ update_acl_obj_label(matchn->inode, matchn->device,
61136+ ino, dev, subj);
61137+ FOR_EACH_SUBJECT_END(subj,x)
61138+ FOR_EACH_ROLE_END(role)
61139+
61140+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
61141+
61142+ return;
61143+}
61144+
61145+static void
61146+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
61147+ const struct vfsmount *mnt)
61148+{
61149+ ino_t ino = dentry->d_inode->i_ino;
61150+ dev_t dev = __get_dev(dentry);
61151+
61152+ __do_handle_create(matchn, ino, dev);
61153+
61154+ return;
61155+}
61156+
61157+void
61158+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
61159+{
61160+ struct name_entry *matchn;
61161+
61162+ if (unlikely(!(gr_status & GR_READY)))
61163+ return;
61164+
61165+ preempt_disable();
61166+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
61167+
61168+ if (unlikely((unsigned long)matchn)) {
61169+ write_lock(&gr_inode_lock);
61170+ do_handle_create(matchn, dentry, mnt);
61171+ write_unlock(&gr_inode_lock);
61172+ }
61173+ preempt_enable();
61174+
61175+ return;
61176+}
61177+
61178+void
61179+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
61180+{
61181+ struct name_entry *matchn;
61182+
61183+ if (unlikely(!(gr_status & GR_READY)))
61184+ return;
61185+
61186+ preempt_disable();
61187+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
61188+
61189+ if (unlikely((unsigned long)matchn)) {
61190+ write_lock(&gr_inode_lock);
61191+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
61192+ write_unlock(&gr_inode_lock);
61193+ }
61194+ preempt_enable();
61195+
61196+ return;
61197+}
61198+
61199+void
61200+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
61201+ struct dentry *old_dentry,
61202+ struct dentry *new_dentry,
61203+ struct vfsmount *mnt, const __u8 replace)
61204+{
61205+ struct name_entry *matchn;
61206+ struct inodev_entry *inodev;
61207+ struct inode *inode = new_dentry->d_inode;
61208+ ino_t old_ino = old_dentry->d_inode->i_ino;
61209+ dev_t old_dev = __get_dev(old_dentry);
61210+
61211+ /* vfs_rename swaps the name and parent link for old_dentry and
61212+ new_dentry
61213+ at this point, old_dentry has the new name, parent link, and inode
61214+ for the renamed file
61215+ if a file is being replaced by a rename, new_dentry has the inode
61216+ and name for the replaced file
61217+ */
61218+
61219+ if (unlikely(!(gr_status & GR_READY)))
61220+ return;
61221+
61222+ preempt_disable();
61223+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
61224+
61225+ /* we wouldn't have to check d_inode if it weren't for
61226+ NFS silly-renaming
61227+ */
61228+
61229+ write_lock(&gr_inode_lock);
61230+ if (unlikely(replace && inode)) {
61231+ ino_t new_ino = inode->i_ino;
61232+ dev_t new_dev = __get_dev(new_dentry);
61233+
61234+ inodev = lookup_inodev_entry(new_ino, new_dev);
61235+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
61236+ do_handle_delete(inodev, new_ino, new_dev);
61237+ }
61238+
61239+ inodev = lookup_inodev_entry(old_ino, old_dev);
61240+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
61241+ do_handle_delete(inodev, old_ino, old_dev);
61242+
61243+ if (unlikely((unsigned long)matchn))
61244+ do_handle_create(matchn, old_dentry, mnt);
61245+
61246+ write_unlock(&gr_inode_lock);
61247+ preempt_enable();
61248+
61249+ return;
61250+}
61251+
61252+static int
61253+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
61254+ unsigned char **sum)
61255+{
61256+ struct acl_role_label *r;
61257+ struct role_allowed_ip *ipp;
61258+ struct role_transition *trans;
61259+ unsigned int i;
61260+ int found = 0;
61261+ u32 curr_ip = current->signal->curr_ip;
61262+
61263+ current->signal->saved_ip = curr_ip;
61264+
61265+ /* check transition table */
61266+
61267+ for (trans = current->role->transitions; trans; trans = trans->next) {
61268+ if (!strcmp(rolename, trans->rolename)) {
61269+ found = 1;
61270+ break;
61271+ }
61272+ }
61273+
61274+ if (!found)
61275+ return 0;
61276+
61277+ /* handle special roles that do not require authentication
61278+ and check ip */
61279+
61280+ FOR_EACH_ROLE_START(r)
61281+ if (!strcmp(rolename, r->rolename) &&
61282+ (r->roletype & GR_ROLE_SPECIAL)) {
61283+ found = 0;
61284+ if (r->allowed_ips != NULL) {
61285+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
61286+ if ((ntohl(curr_ip) & ipp->netmask) ==
61287+ (ntohl(ipp->addr) & ipp->netmask))
61288+ found = 1;
61289+ }
61290+ } else
61291+ found = 2;
61292+ if (!found)
61293+ return 0;
61294+
61295+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
61296+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
61297+ *salt = NULL;
61298+ *sum = NULL;
61299+ return 1;
61300+ }
61301+ }
61302+ FOR_EACH_ROLE_END(r)
61303+
61304+ for (i = 0; i < num_sprole_pws; i++) {
61305+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
61306+ *salt = acl_special_roles[i]->salt;
61307+ *sum = acl_special_roles[i]->sum;
61308+ return 1;
61309+ }
61310+ }
61311+
61312+ return 0;
61313+}
61314+
61315+static void
61316+assign_special_role(char *rolename)
61317+{
61318+ struct acl_object_label *obj;
61319+ struct acl_role_label *r;
61320+ struct acl_role_label *assigned = NULL;
61321+ struct task_struct *tsk;
61322+ struct file *filp;
61323+
61324+ FOR_EACH_ROLE_START(r)
61325+ if (!strcmp(rolename, r->rolename) &&
61326+ (r->roletype & GR_ROLE_SPECIAL)) {
61327+ assigned = r;
61328+ break;
61329+ }
61330+ FOR_EACH_ROLE_END(r)
61331+
61332+ if (!assigned)
61333+ return;
61334+
61335+ read_lock(&tasklist_lock);
61336+ read_lock(&grsec_exec_file_lock);
61337+
61338+ tsk = current->real_parent;
61339+ if (tsk == NULL)
61340+ goto out_unlock;
61341+
61342+ filp = tsk->exec_file;
61343+ if (filp == NULL)
61344+ goto out_unlock;
61345+
61346+ tsk->is_writable = 0;
61347+
61348+ tsk->acl_sp_role = 1;
61349+ tsk->acl_role_id = ++acl_sp_role_value;
61350+ tsk->role = assigned;
61351+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
61352+
61353+ /* ignore additional mmap checks for processes that are writable
61354+ by the default ACL */
61355+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
61356+ if (unlikely(obj->mode & GR_WRITE))
61357+ tsk->is_writable = 1;
61358+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
61359+ if (unlikely(obj->mode & GR_WRITE))
61360+ tsk->is_writable = 1;
61361+
61362+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
61363+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
61364+#endif
61365+
61366+out_unlock:
61367+ read_unlock(&grsec_exec_file_lock);
61368+ read_unlock(&tasklist_lock);
61369+ return;
61370+}
61371+
61372+int gr_check_secure_terminal(struct task_struct *task)
61373+{
61374+ struct task_struct *p, *p2, *p3;
61375+ struct files_struct *files;
61376+ struct fdtable *fdt;
61377+ struct file *our_file = NULL, *file;
61378+ int i;
61379+
61380+ if (task->signal->tty == NULL)
61381+ return 1;
61382+
61383+ files = get_files_struct(task);
61384+ if (files != NULL) {
61385+ rcu_read_lock();
61386+ fdt = files_fdtable(files);
61387+ for (i=0; i < fdt->max_fds; i++) {
61388+ file = fcheck_files(files, i);
61389+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
61390+ get_file(file);
61391+ our_file = file;
61392+ }
61393+ }
61394+ rcu_read_unlock();
61395+ put_files_struct(files);
61396+ }
61397+
61398+ if (our_file == NULL)
61399+ return 1;
61400+
61401+ read_lock(&tasklist_lock);
61402+ do_each_thread(p2, p) {
61403+ files = get_files_struct(p);
61404+ if (files == NULL ||
61405+ (p->signal && p->signal->tty == task->signal->tty)) {
61406+ if (files != NULL)
61407+ put_files_struct(files);
61408+ continue;
61409+ }
61410+ rcu_read_lock();
61411+ fdt = files_fdtable(files);
61412+ for (i=0; i < fdt->max_fds; i++) {
61413+ file = fcheck_files(files, i);
61414+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
61415+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
61416+ p3 = task;
61417+ while (task_pid_nr(p3) > 0) {
61418+ if (p3 == p)
61419+ break;
61420+ p3 = p3->real_parent;
61421+ }
61422+ if (p3 == p)
61423+ break;
61424+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
61425+ gr_handle_alertkill(p);
61426+ rcu_read_unlock();
61427+ put_files_struct(files);
61428+ read_unlock(&tasklist_lock);
61429+ fput(our_file);
61430+ return 0;
61431+ }
61432+ }
61433+ rcu_read_unlock();
61434+ put_files_struct(files);
61435+ } while_each_thread(p2, p);
61436+ read_unlock(&tasklist_lock);
61437+
61438+ fput(our_file);
61439+ return 1;
61440+}
61441+
61442+static int gr_rbac_disable(void *unused)
61443+{
61444+ pax_open_kernel();
61445+ gr_status &= ~GR_READY;
61446+ pax_close_kernel();
61447+
61448+ return 0;
61449+}
61450+
61451+ssize_t
61452+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
61453+{
61454+ struct gr_arg_wrapper uwrap;
61455+ unsigned char *sprole_salt = NULL;
61456+ unsigned char *sprole_sum = NULL;
61457+ int error = sizeof (struct gr_arg_wrapper);
61458+ int error2 = 0;
61459+
61460+ mutex_lock(&gr_dev_mutex);
61461+
61462+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
61463+ error = -EPERM;
61464+ goto out;
61465+ }
61466+
61467+ if (count != sizeof (struct gr_arg_wrapper)) {
61468+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
61469+ error = -EINVAL;
61470+ goto out;
61471+ }
61472+
61473+
61474+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
61475+ gr_auth_expires = 0;
61476+ gr_auth_attempts = 0;
61477+ }
61478+
61479+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
61480+ error = -EFAULT;
61481+ goto out;
61482+ }
61483+
61484+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
61485+ error = -EINVAL;
61486+ goto out;
61487+ }
61488+
61489+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
61490+ error = -EFAULT;
61491+ goto out;
61492+ }
61493+
61494+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
61495+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
61496+ time_after(gr_auth_expires, get_seconds())) {
61497+ error = -EBUSY;
61498+ goto out;
61499+ }
61500+
61501+ /* if non-root trying to do anything other than use a special role,
61502+ do not attempt authentication, do not count towards authentication
61503+ locking
61504+ */
61505+
61506+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
61507+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
61508+ gr_is_global_nonroot(current_uid())) {
61509+ error = -EPERM;
61510+ goto out;
61511+ }
61512+
61513+ /* ensure pw and special role name are null terminated */
61514+
61515+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
61516+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
61517+
61518+ /* Okay.
61519+ * We have our enough of the argument structure..(we have yet
61520+ * to copy_from_user the tables themselves) . Copy the tables
61521+ * only if we need them, i.e. for loading operations. */
61522+
61523+ switch (gr_usermode->mode) {
61524+ case GR_STATUS:
61525+ if (gr_status & GR_READY) {
61526+ error = 1;
61527+ if (!gr_check_secure_terminal(current))
61528+ error = 3;
61529+ } else
61530+ error = 2;
61531+ goto out;
61532+ case GR_SHUTDOWN:
61533+ if ((gr_status & GR_READY)
61534+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
61535+ stop_machine(gr_rbac_disable, NULL, NULL);
61536+ free_variables();
61537+ memset(gr_usermode, 0, sizeof (struct gr_arg));
61538+ memset(gr_system_salt, 0, GR_SALT_LEN);
61539+ memset(gr_system_sum, 0, GR_SHA_LEN);
61540+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
61541+ } else if (gr_status & GR_READY) {
61542+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
61543+ error = -EPERM;
61544+ } else {
61545+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
61546+ error = -EAGAIN;
61547+ }
61548+ break;
61549+ case GR_ENABLE:
61550+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
61551+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
61552+ else {
61553+ if (gr_status & GR_READY)
61554+ error = -EAGAIN;
61555+ else
61556+ error = error2;
61557+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
61558+ }
61559+ break;
61560+ case GR_RELOAD:
61561+ if (!(gr_status & GR_READY)) {
61562+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
61563+ error = -EAGAIN;
61564+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
61565+ stop_machine(gr_rbac_disable, NULL, NULL);
61566+ free_variables();
61567+ error2 = gracl_init(gr_usermode);
61568+ if (!error2)
61569+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
61570+ else {
61571+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
61572+ error = error2;
61573+ }
61574+ } else {
61575+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
61576+ error = -EPERM;
61577+ }
61578+ break;
61579+ case GR_SEGVMOD:
61580+ if (unlikely(!(gr_status & GR_READY))) {
61581+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
61582+ error = -EAGAIN;
61583+ break;
61584+ }
61585+
61586+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
61587+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
61588+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
61589+ struct acl_subject_label *segvacl;
61590+ segvacl =
61591+ lookup_acl_subj_label(gr_usermode->segv_inode,
61592+ gr_usermode->segv_device,
61593+ current->role);
61594+ if (segvacl) {
61595+ segvacl->crashes = 0;
61596+ segvacl->expires = 0;
61597+ }
61598+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
61599+ gr_remove_uid(gr_usermode->segv_uid);
61600+ }
61601+ } else {
61602+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
61603+ error = -EPERM;
61604+ }
61605+ break;
61606+ case GR_SPROLE:
61607+ case GR_SPROLEPAM:
61608+ if (unlikely(!(gr_status & GR_READY))) {
61609+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
61610+ error = -EAGAIN;
61611+ break;
61612+ }
61613+
61614+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
61615+ current->role->expires = 0;
61616+ current->role->auth_attempts = 0;
61617+ }
61618+
61619+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
61620+ time_after(current->role->expires, get_seconds())) {
61621+ error = -EBUSY;
61622+ goto out;
61623+ }
61624+
61625+ if (lookup_special_role_auth
61626+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
61627+ && ((!sprole_salt && !sprole_sum)
61628+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
61629+ char *p = "";
61630+ assign_special_role(gr_usermode->sp_role);
61631+ read_lock(&tasklist_lock);
61632+ if (current->real_parent)
61633+ p = current->real_parent->role->rolename;
61634+ read_unlock(&tasklist_lock);
61635+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
61636+ p, acl_sp_role_value);
61637+ } else {
61638+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
61639+ error = -EPERM;
61640+ if(!(current->role->auth_attempts++))
61641+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
61642+
61643+ goto out;
61644+ }
61645+ break;
61646+ case GR_UNSPROLE:
61647+ if (unlikely(!(gr_status & GR_READY))) {
61648+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
61649+ error = -EAGAIN;
61650+ break;
61651+ }
61652+
61653+ if (current->role->roletype & GR_ROLE_SPECIAL) {
61654+ char *p = "";
61655+ int i = 0;
61656+
61657+ read_lock(&tasklist_lock);
61658+ if (current->real_parent) {
61659+ p = current->real_parent->role->rolename;
61660+ i = current->real_parent->acl_role_id;
61661+ }
61662+ read_unlock(&tasklist_lock);
61663+
61664+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
61665+ gr_set_acls(1);
61666+ } else {
61667+ error = -EPERM;
61668+ goto out;
61669+ }
61670+ break;
61671+ default:
61672+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
61673+ error = -EINVAL;
61674+ break;
61675+ }
61676+
61677+ if (error != -EPERM)
61678+ goto out;
61679+
61680+ if(!(gr_auth_attempts++))
61681+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
61682+
61683+ out:
61684+ mutex_unlock(&gr_dev_mutex);
61685+ return error;
61686+}
61687+
61688+/* must be called with
61689+ rcu_read_lock();
61690+ read_lock(&tasklist_lock);
61691+ read_lock(&grsec_exec_file_lock);
61692+*/
61693+int gr_apply_subject_to_task(struct task_struct *task)
61694+{
61695+ struct acl_object_label *obj;
61696+ char *tmpname;
61697+ struct acl_subject_label *tmpsubj;
61698+ struct file *filp;
61699+ struct name_entry *nmatch;
61700+
61701+ filp = task->exec_file;
61702+ if (filp == NULL)
61703+ return 0;
61704+
61705+ /* the following is to apply the correct subject
61706+ on binaries running when the RBAC system
61707+ is enabled, when the binaries have been
61708+ replaced or deleted since their execution
61709+ -----
61710+ when the RBAC system starts, the inode/dev
61711+ from exec_file will be one the RBAC system
61712+ is unaware of. It only knows the inode/dev
61713+ of the present file on disk, or the absence
61714+ of it.
61715+ */
61716+ preempt_disable();
61717+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
61718+
61719+ nmatch = lookup_name_entry(tmpname);
61720+ preempt_enable();
61721+ tmpsubj = NULL;
61722+ if (nmatch) {
61723+ if (nmatch->deleted)
61724+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
61725+ else
61726+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
61727+ if (tmpsubj != NULL)
61728+ task->acl = tmpsubj;
61729+ }
61730+ if (tmpsubj == NULL)
61731+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
61732+ task->role);
61733+ if (task->acl) {
61734+ task->is_writable = 0;
61735+ /* ignore additional mmap checks for processes that are writable
61736+ by the default ACL */
61737+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
61738+ if (unlikely(obj->mode & GR_WRITE))
61739+ task->is_writable = 1;
61740+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
61741+ if (unlikely(obj->mode & GR_WRITE))
61742+ task->is_writable = 1;
61743+
61744+ gr_set_proc_res(task);
61745+
61746+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
61747+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
61748+#endif
61749+ } else {
61750+ return 1;
61751+ }
61752+
61753+ return 0;
61754+}
61755+
61756+int
61757+gr_set_acls(const int type)
61758+{
61759+ struct task_struct *task, *task2;
61760+ struct acl_role_label *role = current->role;
61761+ __u16 acl_role_id = current->acl_role_id;
61762+ const struct cred *cred;
61763+ int ret;
61764+
61765+ rcu_read_lock();
61766+ read_lock(&tasklist_lock);
61767+ read_lock(&grsec_exec_file_lock);
61768+ do_each_thread(task2, task) {
61769+ /* check to see if we're called from the exit handler,
61770+ if so, only replace ACLs that have inherited the admin
61771+ ACL */
61772+
61773+ if (type && (task->role != role ||
61774+ task->acl_role_id != acl_role_id))
61775+ continue;
61776+
61777+ task->acl_role_id = 0;
61778+ task->acl_sp_role = 0;
61779+
61780+ if (task->exec_file) {
61781+ cred = __task_cred(task);
61782+ task->role = lookup_acl_role_label(task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
61783+ ret = gr_apply_subject_to_task(task);
61784+ if (ret) {
61785+ read_unlock(&grsec_exec_file_lock);
61786+ read_unlock(&tasklist_lock);
61787+ rcu_read_unlock();
61788+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
61789+ return ret;
61790+ }
61791+ } else {
61792+ // it's a kernel process
61793+ task->role = kernel_role;
61794+ task->acl = kernel_role->root_label;
61795+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
61796+ task->acl->mode &= ~GR_PROCFIND;
61797+#endif
61798+ }
61799+ } while_each_thread(task2, task);
61800+ read_unlock(&grsec_exec_file_lock);
61801+ read_unlock(&tasklist_lock);
61802+ rcu_read_unlock();
61803+
61804+ return 0;
61805+}
61806+
61807+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
61808+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
61809+ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
61810+ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
61811+ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
61812+ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
61813+ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
61814+ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
61815+ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
61816+ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
61817+ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
61818+ [RLIMIT_AS] = GR_RLIM_AS_BUMP,
61819+ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
61820+ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
61821+ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
61822+ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
61823+ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
61824+ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
61825+};
61826+
61827+void
61828+gr_learn_resource(const struct task_struct *task,
61829+ const int res, const unsigned long wanted, const int gt)
61830+{
61831+ struct acl_subject_label *acl;
61832+ const struct cred *cred;
61833+
61834+ if (unlikely((gr_status & GR_READY) &&
61835+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
61836+ goto skip_reslog;
61837+
61838+ gr_log_resource(task, res, wanted, gt);
61839+skip_reslog:
61840+
61841+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
61842+ return;
61843+
61844+ acl = task->acl;
61845+
61846+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
61847+ !(acl->resmask & (1U << (unsigned short) res))))
61848+ return;
61849+
61850+ if (wanted >= acl->res[res].rlim_cur) {
61851+ unsigned long res_add;
61852+
61853+ res_add = wanted + res_learn_bumps[res];
61854+
61855+ acl->res[res].rlim_cur = res_add;
61856+
61857+ if (wanted > acl->res[res].rlim_max)
61858+ acl->res[res].rlim_max = res_add;
61859+
61860+ /* only log the subject filename, since resource logging is supported for
61861+ single-subject learning only */
61862+ rcu_read_lock();
61863+ cred = __task_cred(task);
61864+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
61865+ task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
61866+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
61867+ "", (unsigned long) res, &task->signal->saved_ip);
61868+ rcu_read_unlock();
61869+ }
61870+
61871+ return;
61872+}
61873+EXPORT_SYMBOL(gr_learn_resource);
61874+#endif
61875+
61876+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
61877+void
61878+pax_set_initial_flags(struct linux_binprm *bprm)
61879+{
61880+ struct task_struct *task = current;
61881+ struct acl_subject_label *proc;
61882+ unsigned long flags;
61883+
61884+ if (unlikely(!(gr_status & GR_READY)))
61885+ return;
61886+
61887+ flags = pax_get_flags(task);
61888+
61889+ proc = task->acl;
61890+
61891+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
61892+ flags &= ~MF_PAX_PAGEEXEC;
61893+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
61894+ flags &= ~MF_PAX_SEGMEXEC;
61895+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
61896+ flags &= ~MF_PAX_RANDMMAP;
61897+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
61898+ flags &= ~MF_PAX_EMUTRAMP;
61899+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
61900+ flags &= ~MF_PAX_MPROTECT;
61901+
61902+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
61903+ flags |= MF_PAX_PAGEEXEC;
61904+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
61905+ flags |= MF_PAX_SEGMEXEC;
61906+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
61907+ flags |= MF_PAX_RANDMMAP;
61908+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
61909+ flags |= MF_PAX_EMUTRAMP;
61910+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
61911+ flags |= MF_PAX_MPROTECT;
61912+
61913+ pax_set_flags(task, flags);
61914+
61915+ return;
61916+}
61917+#endif
61918+
61919+int
61920+gr_handle_proc_ptrace(struct task_struct *task)
61921+{
61922+ struct file *filp;
61923+ struct task_struct *tmp = task;
61924+ struct task_struct *curtemp = current;
61925+ __u32 retmode;
61926+
61927+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
61928+ if (unlikely(!(gr_status & GR_READY)))
61929+ return 0;
61930+#endif
61931+
61932+ read_lock(&tasklist_lock);
61933+ read_lock(&grsec_exec_file_lock);
61934+ filp = task->exec_file;
61935+
61936+ while (task_pid_nr(tmp) > 0) {
61937+ if (tmp == curtemp)
61938+ break;
61939+ tmp = tmp->real_parent;
61940+ }
61941+
61942+ if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
61943+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
61944+ read_unlock(&grsec_exec_file_lock);
61945+ read_unlock(&tasklist_lock);
61946+ return 1;
61947+ }
61948+
61949+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
61950+ if (!(gr_status & GR_READY)) {
61951+ read_unlock(&grsec_exec_file_lock);
61952+ read_unlock(&tasklist_lock);
61953+ return 0;
61954+ }
61955+#endif
61956+
61957+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
61958+ read_unlock(&grsec_exec_file_lock);
61959+ read_unlock(&tasklist_lock);
61960+
61961+ if (retmode & GR_NOPTRACE)
61962+ return 1;
61963+
61964+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
61965+ && (current->acl != task->acl || (current->acl != current->role->root_label
61966+ && task_pid_nr(current) != task_pid_nr(task))))
61967+ return 1;
61968+
61969+ return 0;
61970+}
61971+
61972+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
61973+{
61974+ if (unlikely(!(gr_status & GR_READY)))
61975+ return;
61976+
61977+ if (!(current->role->roletype & GR_ROLE_GOD))
61978+ return;
61979+
61980+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
61981+ p->role->rolename, gr_task_roletype_to_char(p),
61982+ p->acl->filename);
61983+}
61984+
61985+int
61986+gr_handle_ptrace(struct task_struct *task, const long request)
61987+{
61988+ struct task_struct *tmp = task;
61989+ struct task_struct *curtemp = current;
61990+ __u32 retmode;
61991+
61992+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
61993+ if (unlikely(!(gr_status & GR_READY)))
61994+ return 0;
61995+#endif
61996+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
61997+ read_lock(&tasklist_lock);
61998+ while (task_pid_nr(tmp) > 0) {
61999+ if (tmp == curtemp)
62000+ break;
62001+ tmp = tmp->real_parent;
62002+ }
62003+
62004+ if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
62005+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
62006+ read_unlock(&tasklist_lock);
62007+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
62008+ return 1;
62009+ }
62010+ read_unlock(&tasklist_lock);
62011+ }
62012+
62013+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
62014+ if (!(gr_status & GR_READY))
62015+ return 0;
62016+#endif
62017+
62018+ read_lock(&grsec_exec_file_lock);
62019+ if (unlikely(!task->exec_file)) {
62020+ read_unlock(&grsec_exec_file_lock);
62021+ return 0;
62022+ }
62023+
62024+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
62025+ read_unlock(&grsec_exec_file_lock);
62026+
62027+ if (retmode & GR_NOPTRACE) {
62028+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
62029+ return 1;
62030+ }
62031+
62032+ if (retmode & GR_PTRACERD) {
62033+ switch (request) {
62034+ case PTRACE_SEIZE:
62035+ case PTRACE_POKETEXT:
62036+ case PTRACE_POKEDATA:
62037+ case PTRACE_POKEUSR:
62038+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
62039+ case PTRACE_SETREGS:
62040+ case PTRACE_SETFPREGS:
62041+#endif
62042+#ifdef CONFIG_X86
62043+ case PTRACE_SETFPXREGS:
62044+#endif
62045+#ifdef CONFIG_ALTIVEC
62046+ case PTRACE_SETVRREGS:
62047+#endif
62048+ return 1;
62049+ default:
62050+ return 0;
62051+ }
62052+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
62053+ !(current->role->roletype & GR_ROLE_GOD) &&
62054+ (current->acl != task->acl)) {
62055+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
62056+ return 1;
62057+ }
62058+
62059+ return 0;
62060+}
62061+
62062+static int is_writable_mmap(const struct file *filp)
62063+{
62064+ struct task_struct *task = current;
62065+ struct acl_object_label *obj, *obj2;
62066+
62067+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
62068+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
62069+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
62070+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
62071+ task->role->root_label);
62072+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
62073+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
62074+ return 1;
62075+ }
62076+ }
62077+ return 0;
62078+}
62079+
62080+int
62081+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
62082+{
62083+ __u32 mode;
62084+
62085+ if (unlikely(!file || !(prot & PROT_EXEC)))
62086+ return 1;
62087+
62088+ if (is_writable_mmap(file))
62089+ return 0;
62090+
62091+ mode =
62092+ gr_search_file(file->f_path.dentry,
62093+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
62094+ file->f_path.mnt);
62095+
62096+ if (!gr_tpe_allow(file))
62097+ return 0;
62098+
62099+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
62100+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
62101+ return 0;
62102+ } else if (unlikely(!(mode & GR_EXEC))) {
62103+ return 0;
62104+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
62105+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
62106+ return 1;
62107+ }
62108+
62109+ return 1;
62110+}
62111+
62112+int
62113+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
62114+{
62115+ __u32 mode;
62116+
62117+ if (unlikely(!file || !(prot & PROT_EXEC)))
62118+ return 1;
62119+
62120+ if (is_writable_mmap(file))
62121+ return 0;
62122+
62123+ mode =
62124+ gr_search_file(file->f_path.dentry,
62125+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
62126+ file->f_path.mnt);
62127+
62128+ if (!gr_tpe_allow(file))
62129+ return 0;
62130+
62131+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
62132+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
62133+ return 0;
62134+ } else if (unlikely(!(mode & GR_EXEC))) {
62135+ return 0;
62136+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
62137+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
62138+ return 1;
62139+ }
62140+
62141+ return 1;
62142+}
62143+
62144+void
62145+gr_acl_handle_psacct(struct task_struct *task, const long code)
62146+{
62147+ unsigned long runtime;
62148+ unsigned long cputime;
62149+ unsigned int wday, cday;
62150+ __u8 whr, chr;
62151+ __u8 wmin, cmin;
62152+ __u8 wsec, csec;
62153+ struct timespec timeval;
62154+
62155+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
62156+ !(task->acl->mode & GR_PROCACCT)))
62157+ return;
62158+
62159+ do_posix_clock_monotonic_gettime(&timeval);
62160+ runtime = timeval.tv_sec - task->start_time.tv_sec;
62161+ wday = runtime / (3600 * 24);
62162+ runtime -= wday * (3600 * 24);
62163+ whr = runtime / 3600;
62164+ runtime -= whr * 3600;
62165+ wmin = runtime / 60;
62166+ runtime -= wmin * 60;
62167+ wsec = runtime;
62168+
62169+ cputime = (task->utime + task->stime) / HZ;
62170+ cday = cputime / (3600 * 24);
62171+ cputime -= cday * (3600 * 24);
62172+ chr = cputime / 3600;
62173+ cputime -= chr * 3600;
62174+ cmin = cputime / 60;
62175+ cputime -= cmin * 60;
62176+ csec = cputime;
62177+
62178+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
62179+
62180+ return;
62181+}
62182+
62183+void gr_set_kernel_label(struct task_struct *task)
62184+{
62185+ if (gr_status & GR_READY) {
62186+ task->role = kernel_role;
62187+ task->acl = kernel_role->root_label;
62188+ }
62189+ return;
62190+}
62191+
62192+#ifdef CONFIG_TASKSTATS
62193+int gr_is_taskstats_denied(int pid)
62194+{
62195+ struct task_struct *task;
62196+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62197+ const struct cred *cred;
62198+#endif
62199+ int ret = 0;
62200+
62201+ /* restrict taskstats viewing to un-chrooted root users
62202+ who have the 'view' subject flag if the RBAC system is enabled
62203+ */
62204+
62205+ rcu_read_lock();
62206+ read_lock(&tasklist_lock);
62207+ task = find_task_by_vpid(pid);
62208+ if (task) {
62209+#ifdef CONFIG_GRKERNSEC_CHROOT
62210+ if (proc_is_chrooted(task))
62211+ ret = -EACCES;
62212+#endif
62213+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62214+ cred = __task_cred(task);
62215+#ifdef CONFIG_GRKERNSEC_PROC_USER
62216+ if (gr_is_global_nonroot(cred->uid))
62217+ ret = -EACCES;
62218+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62219+ if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
62220+ ret = -EACCES;
62221+#endif
62222+#endif
62223+ if (gr_status & GR_READY) {
62224+ if (!(task->acl->mode & GR_VIEW))
62225+ ret = -EACCES;
62226+ }
62227+ } else
62228+ ret = -ENOENT;
62229+
62230+ read_unlock(&tasklist_lock);
62231+ rcu_read_unlock();
62232+
62233+ return ret;
62234+}
62235+#endif
62236+
62237+/* AUXV entries are filled via a descendant of search_binary_handler
62238+ after we've already applied the subject for the target
62239+*/
62240+int gr_acl_enable_at_secure(void)
62241+{
62242+ if (unlikely(!(gr_status & GR_READY)))
62243+ return 0;
62244+
62245+ if (current->acl->mode & GR_ATSECURE)
62246+ return 1;
62247+
62248+ return 0;
62249+}
62250+
62251+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
62252+{
62253+ struct task_struct *task = current;
62254+ struct dentry *dentry = file->f_path.dentry;
62255+ struct vfsmount *mnt = file->f_path.mnt;
62256+ struct acl_object_label *obj, *tmp;
62257+ struct acl_subject_label *subj;
62258+ unsigned int bufsize;
62259+ int is_not_root;
62260+ char *path;
62261+ dev_t dev = __get_dev(dentry);
62262+
62263+ if (unlikely(!(gr_status & GR_READY)))
62264+ return 1;
62265+
62266+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
62267+ return 1;
62268+
62269+ /* ignore Eric Biederman */
62270+ if (IS_PRIVATE(dentry->d_inode))
62271+ return 1;
62272+
62273+ subj = task->acl;
62274+ read_lock(&gr_inode_lock);
62275+ do {
62276+ obj = lookup_acl_obj_label(ino, dev, subj);
62277+ if (obj != NULL) {
62278+ read_unlock(&gr_inode_lock);
62279+ return (obj->mode & GR_FIND) ? 1 : 0;
62280+ }
62281+ } while ((subj = subj->parent_subject));
62282+ read_unlock(&gr_inode_lock);
62283+
62284+ /* this is purely an optimization since we're looking for an object
62285+ for the directory we're doing a readdir on
62286+ if it's possible for any globbed object to match the entry we're
62287+ filling into the directory, then the object we find here will be
62288+ an anchor point with attached globbed objects
62289+ */
62290+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
62291+ if (obj->globbed == NULL)
62292+ return (obj->mode & GR_FIND) ? 1 : 0;
62293+
62294+ is_not_root = ((obj->filename[0] == '/') &&
62295+ (obj->filename[1] == '\0')) ? 0 : 1;
62296+ bufsize = PAGE_SIZE - namelen - is_not_root;
62297+
62298+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
62299+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
62300+ return 1;
62301+
62302+ preempt_disable();
62303+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
62304+ bufsize);
62305+
62306+ bufsize = strlen(path);
62307+
62308+ /* if base is "/", don't append an additional slash */
62309+ if (is_not_root)
62310+ *(path + bufsize) = '/';
62311+ memcpy(path + bufsize + is_not_root, name, namelen);
62312+ *(path + bufsize + namelen + is_not_root) = '\0';
62313+
62314+ tmp = obj->globbed;
62315+ while (tmp) {
62316+ if (!glob_match(tmp->filename, path)) {
62317+ preempt_enable();
62318+ return (tmp->mode & GR_FIND) ? 1 : 0;
62319+ }
62320+ tmp = tmp->next;
62321+ }
62322+ preempt_enable();
62323+ return (obj->mode & GR_FIND) ? 1 : 0;
62324+}
62325+
62326+void gr_put_exec_file(struct task_struct *task)
62327+{
62328+ struct file *filp;
62329+
62330+ write_lock(&grsec_exec_file_lock);
62331+ filp = task->exec_file;
62332+ task->exec_file = NULL;
62333+ write_unlock(&grsec_exec_file_lock);
62334+
62335+ if (filp)
62336+ fput(filp);
62337+
62338+ return;
62339+}
62340+
62341+
62342+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
62343+EXPORT_SYMBOL(gr_acl_is_enabled);
62344+#endif
62345+EXPORT_SYMBOL(gr_set_kernel_label);
62346+#ifdef CONFIG_SECURITY
62347+EXPORT_SYMBOL(gr_check_user_change);
62348+EXPORT_SYMBOL(gr_check_group_change);
62349+#endif
62350+
62351diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
62352new file mode 100644
62353index 0000000..34fefda
62354--- /dev/null
62355+++ b/grsecurity/gracl_alloc.c
62356@@ -0,0 +1,105 @@
62357+#include <linux/kernel.h>
62358+#include <linux/mm.h>
62359+#include <linux/slab.h>
62360+#include <linux/vmalloc.h>
62361+#include <linux/gracl.h>
62362+#include <linux/grsecurity.h>
62363+
62364+static unsigned long alloc_stack_next = 1;
62365+static unsigned long alloc_stack_size = 1;
62366+static void **alloc_stack;
62367+
62368+static __inline__ int
62369+alloc_pop(void)
62370+{
62371+ if (alloc_stack_next == 1)
62372+ return 0;
62373+
62374+ kfree(alloc_stack[alloc_stack_next - 2]);
62375+
62376+ alloc_stack_next--;
62377+
62378+ return 1;
62379+}
62380+
62381+static __inline__ int
62382+alloc_push(void *buf)
62383+{
62384+ if (alloc_stack_next >= alloc_stack_size)
62385+ return 1;
62386+
62387+ alloc_stack[alloc_stack_next - 1] = buf;
62388+
62389+ alloc_stack_next++;
62390+
62391+ return 0;
62392+}
62393+
62394+void *
62395+acl_alloc(unsigned long len)
62396+{
62397+ void *ret = NULL;
62398+
62399+ if (!len || len > PAGE_SIZE)
62400+ goto out;
62401+
62402+ ret = kmalloc(len, GFP_KERNEL);
62403+
62404+ if (ret) {
62405+ if (alloc_push(ret)) {
62406+ kfree(ret);
62407+ ret = NULL;
62408+ }
62409+ }
62410+
62411+out:
62412+ return ret;
62413+}
62414+
62415+void *
62416+acl_alloc_num(unsigned long num, unsigned long len)
62417+{
62418+ if (!len || (num > (PAGE_SIZE / len)))
62419+ return NULL;
62420+
62421+ return acl_alloc(num * len);
62422+}
62423+
62424+void
62425+acl_free_all(void)
62426+{
62427+ if (gr_acl_is_enabled() || !alloc_stack)
62428+ return;
62429+
62430+ while (alloc_pop()) ;
62431+
62432+ if (alloc_stack) {
62433+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
62434+ kfree(alloc_stack);
62435+ else
62436+ vfree(alloc_stack);
62437+ }
62438+
62439+ alloc_stack = NULL;
62440+ alloc_stack_size = 1;
62441+ alloc_stack_next = 1;
62442+
62443+ return;
62444+}
62445+
62446+int
62447+acl_alloc_stack_init(unsigned long size)
62448+{
62449+ if ((size * sizeof (void *)) <= PAGE_SIZE)
62450+ alloc_stack =
62451+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
62452+ else
62453+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
62454+
62455+ alloc_stack_size = size;
62456+
62457+ if (!alloc_stack)
62458+ return 0;
62459+ else
62460+ return 1;
62461+}
62462diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
62463new file mode 100644
62464index 0000000..bdd51ea
62465--- /dev/null
62466+++ b/grsecurity/gracl_cap.c
62467@@ -0,0 +1,110 @@
62468+#include <linux/kernel.h>
62469+#include <linux/module.h>
62470+#include <linux/sched.h>
62471+#include <linux/gracl.h>
62472+#include <linux/grsecurity.h>
62473+#include <linux/grinternal.h>
62474+
62475+extern const char *captab_log[];
62476+extern int captab_log_entries;
62477+
62478+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
62479+{
62480+ struct acl_subject_label *curracl;
62481+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
62482+ kernel_cap_t cap_audit = __cap_empty_set;
62483+
62484+ if (!gr_acl_is_enabled())
62485+ return 1;
62486+
62487+ curracl = task->acl;
62488+
62489+ cap_drop = curracl->cap_lower;
62490+ cap_mask = curracl->cap_mask;
62491+ cap_audit = curracl->cap_invert_audit;
62492+
62493+ while ((curracl = curracl->parent_subject)) {
62494+ /* if the cap isn't specified in the current computed mask but is specified in the
62495+ current level subject, and is lowered in the current level subject, then add
62496+ it to the set of dropped capabilities
62497+ otherwise, add the current level subject's mask to the current computed mask
62498+ */
62499+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
62500+ cap_raise(cap_mask, cap);
62501+ if (cap_raised(curracl->cap_lower, cap))
62502+ cap_raise(cap_drop, cap);
62503+ if (cap_raised(curracl->cap_invert_audit, cap))
62504+ cap_raise(cap_audit, cap);
62505+ }
62506+ }
62507+
62508+ if (!cap_raised(cap_drop, cap)) {
62509+ if (cap_raised(cap_audit, cap))
62510+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
62511+ return 1;
62512+ }
62513+
62514+ curracl = task->acl;
62515+
62516+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
62517+ && cap_raised(cred->cap_effective, cap)) {
62518+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
62519+ task->role->roletype, GR_GLOBAL_UID(cred->uid),
62520+ GR_GLOBAL_GID(cred->gid), task->exec_file ?
62521+ gr_to_filename(task->exec_file->f_path.dentry,
62522+ task->exec_file->f_path.mnt) : curracl->filename,
62523+ curracl->filename, 0UL,
62524+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
62525+ return 1;
62526+ }
62527+
62528+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
62529+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
62530+
62531+ return 0;
62532+}
62533+
62534+int
62535+gr_acl_is_capable(const int cap)
62536+{
62537+ return gr_task_acl_is_capable(current, current_cred(), cap);
62538+}
62539+
62540+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
62541+{
62542+ struct acl_subject_label *curracl;
62543+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
62544+
62545+ if (!gr_acl_is_enabled())
62546+ return 1;
62547+
62548+ curracl = task->acl;
62549+
62550+ cap_drop = curracl->cap_lower;
62551+ cap_mask = curracl->cap_mask;
62552+
62553+ while ((curracl = curracl->parent_subject)) {
62554+ /* if the cap isn't specified in the current computed mask but is specified in the
62555+ current level subject, and is lowered in the current level subject, then add
62556+ it to the set of dropped capabilities
62557+ otherwise, add the current level subject's mask to the current computed mask
62558+ */
62559+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
62560+ cap_raise(cap_mask, cap);
62561+ if (cap_raised(curracl->cap_lower, cap))
62562+ cap_raise(cap_drop, cap);
62563+ }
62564+ }
62565+
62566+ if (!cap_raised(cap_drop, cap))
62567+ return 1;
62568+
62569+ return 0;
62570+}
62571+
62572+int
62573+gr_acl_is_capable_nolog(const int cap)
62574+{
62575+ return gr_task_acl_is_capable_nolog(current, cap);
62576+}
62577+
62578diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
62579new file mode 100644
62580index 0000000..a340c17
62581--- /dev/null
62582+++ b/grsecurity/gracl_fs.c
62583@@ -0,0 +1,431 @@
62584+#include <linux/kernel.h>
62585+#include <linux/sched.h>
62586+#include <linux/types.h>
62587+#include <linux/fs.h>
62588+#include <linux/file.h>
62589+#include <linux/stat.h>
62590+#include <linux/grsecurity.h>
62591+#include <linux/grinternal.h>
62592+#include <linux/gracl.h>
62593+
62594+umode_t
62595+gr_acl_umask(void)
62596+{
62597+ if (unlikely(!gr_acl_is_enabled()))
62598+ return 0;
62599+
62600+ return current->role->umask;
62601+}
62602+
62603+__u32
62604+gr_acl_handle_hidden_file(const struct dentry * dentry,
62605+ const struct vfsmount * mnt)
62606+{
62607+ __u32 mode;
62608+
62609+ if (unlikely(!dentry->d_inode))
62610+ return GR_FIND;
62611+
62612+ mode =
62613+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
62614+
62615+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
62616+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
62617+ return mode;
62618+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
62619+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
62620+ return 0;
62621+ } else if (unlikely(!(mode & GR_FIND)))
62622+ return 0;
62623+
62624+ return GR_FIND;
62625+}
62626+
62627+__u32
62628+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
62629+ int acc_mode)
62630+{
62631+ __u32 reqmode = GR_FIND;
62632+ __u32 mode;
62633+
62634+ if (unlikely(!dentry->d_inode))
62635+ return reqmode;
62636+
62637+ if (acc_mode & MAY_APPEND)
62638+ reqmode |= GR_APPEND;
62639+ else if (acc_mode & MAY_WRITE)
62640+ reqmode |= GR_WRITE;
62641+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
62642+ reqmode |= GR_READ;
62643+
62644+ mode =
62645+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
62646+ mnt);
62647+
62648+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
62649+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
62650+ reqmode & GR_READ ? " reading" : "",
62651+ reqmode & GR_WRITE ? " writing" : reqmode &
62652+ GR_APPEND ? " appending" : "");
62653+ return reqmode;
62654+ } else
62655+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
62656+ {
62657+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
62658+ reqmode & GR_READ ? " reading" : "",
62659+ reqmode & GR_WRITE ? " writing" : reqmode &
62660+ GR_APPEND ? " appending" : "");
62661+ return 0;
62662+ } else if (unlikely((mode & reqmode) != reqmode))
62663+ return 0;
62664+
62665+ return reqmode;
62666+}
62667+
62668+__u32
62669+gr_acl_handle_creat(const struct dentry * dentry,
62670+ const struct dentry * p_dentry,
62671+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
62672+ const int imode)
62673+{
62674+ __u32 reqmode = GR_WRITE | GR_CREATE;
62675+ __u32 mode;
62676+
62677+ if (acc_mode & MAY_APPEND)
62678+ reqmode |= GR_APPEND;
62679+ // if a directory was required or the directory already exists, then
62680+ // don't count this open as a read
62681+ if ((acc_mode & MAY_READ) &&
62682+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
62683+ reqmode |= GR_READ;
62684+ if ((open_flags & O_CREAT) &&
62685+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
62686+ reqmode |= GR_SETID;
62687+
62688+ mode =
62689+ gr_check_create(dentry, p_dentry, p_mnt,
62690+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
62691+
62692+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
62693+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
62694+ reqmode & GR_READ ? " reading" : "",
62695+ reqmode & GR_WRITE ? " writing" : reqmode &
62696+ GR_APPEND ? " appending" : "");
62697+ return reqmode;
62698+ } else
62699+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
62700+ {
62701+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
62702+ reqmode & GR_READ ? " reading" : "",
62703+ reqmode & GR_WRITE ? " writing" : reqmode &
62704+ GR_APPEND ? " appending" : "");
62705+ return 0;
62706+ } else if (unlikely((mode & reqmode) != reqmode))
62707+ return 0;
62708+
62709+ return reqmode;
62710+}
62711+
62712+__u32
62713+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
62714+ const int fmode)
62715+{
62716+ __u32 mode, reqmode = GR_FIND;
62717+
62718+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
62719+ reqmode |= GR_EXEC;
62720+ if (fmode & S_IWOTH)
62721+ reqmode |= GR_WRITE;
62722+ if (fmode & S_IROTH)
62723+ reqmode |= GR_READ;
62724+
62725+ mode =
62726+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
62727+ mnt);
62728+
62729+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
62730+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
62731+ reqmode & GR_READ ? " reading" : "",
62732+ reqmode & GR_WRITE ? " writing" : "",
62733+ reqmode & GR_EXEC ? " executing" : "");
62734+ return reqmode;
62735+ } else
62736+ if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS)))
62737+ {
62738+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
62739+ reqmode & GR_READ ? " reading" : "",
62740+ reqmode & GR_WRITE ? " writing" : "",
62741+ reqmode & GR_EXEC ? " executing" : "");
62742+ return 0;
62743+ } else if (unlikely((mode & reqmode) != reqmode))
62744+ return 0;
62745+
62746+ return reqmode;
62747+}
62748+
62749+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
62750+{
62751+ __u32 mode;
62752+
62753+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
62754+
62755+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
62756+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
62757+ return mode;
62758+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
62759+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
62760+ return 0;
62761+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
62762+ return 0;
62763+
62764+ return (reqmode);
62765+}
62766+
62767+__u32
62768+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
62769+{
62770+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
62771+}
62772+
62773+__u32
62774+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
62775+{
62776+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
62777+}
62778+
62779+__u32
62780+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
62781+{
62782+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
62783+}
62784+
62785+__u32
62786+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
62787+{
62788+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
62789+}
62790+
62791+__u32
62792+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
62793+ umode_t *modeptr)
62794+{
62795+ umode_t mode;
62796+
62797+ *modeptr &= ~gr_acl_umask();
62798+ mode = *modeptr;
62799+
62800+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
62801+ return 1;
62802+
62803+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
62804+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
62805+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
62806+ GR_CHMOD_ACL_MSG);
62807+ } else {
62808+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
62809+ }
62810+}
62811+
62812+__u32
62813+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
62814+{
62815+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
62816+}
62817+
62818+__u32
62819+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
62820+{
62821+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
62822+}
62823+
62824+__u32
62825+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
62826+{
62827+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
62828+}
62829+
62830+__u32
62831+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
62832+{
62833+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
62834+ GR_UNIXCONNECT_ACL_MSG);
62835+}
62836+
62837+/* hardlinks require at minimum create and link permission,
62838+ any additional privilege required is based on the
62839+ privilege of the file being linked to
62840+*/
62841+__u32
62842+gr_acl_handle_link(const struct dentry * new_dentry,
62843+ const struct dentry * parent_dentry,
62844+ const struct vfsmount * parent_mnt,
62845+ const struct dentry * old_dentry,
62846+ const struct vfsmount * old_mnt, const struct filename *to)
62847+{
62848+ __u32 mode;
62849+ __u32 needmode = GR_CREATE | GR_LINK;
62850+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
62851+
62852+ mode =
62853+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
62854+ old_mnt);
62855+
62856+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
62857+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
62858+ return mode;
62859+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
62860+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
62861+ return 0;
62862+ } else if (unlikely((mode & needmode) != needmode))
62863+ return 0;
62864+
62865+ return 1;
62866+}
62867+
62868+__u32
62869+gr_acl_handle_symlink(const struct dentry * new_dentry,
62870+ const struct dentry * parent_dentry,
62871+ const struct vfsmount * parent_mnt, const struct filename *from)
62872+{
62873+ __u32 needmode = GR_WRITE | GR_CREATE;
62874+ __u32 mode;
62875+
62876+ mode =
62877+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
62878+ GR_CREATE | GR_AUDIT_CREATE |
62879+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
62880+
62881+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
62882+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
62883+ return mode;
62884+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
62885+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
62886+ return 0;
62887+ } else if (unlikely((mode & needmode) != needmode))
62888+ return 0;
62889+
62890+ return (GR_WRITE | GR_CREATE);
62891+}
62892+
62893+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
62894+{
62895+ __u32 mode;
62896+
62897+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
62898+
62899+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
62900+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
62901+ return mode;
62902+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
62903+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
62904+ return 0;
62905+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
62906+ return 0;
62907+
62908+ return (reqmode);
62909+}
62910+
62911+__u32
62912+gr_acl_handle_mknod(const struct dentry * new_dentry,
62913+ const struct dentry * parent_dentry,
62914+ const struct vfsmount * parent_mnt,
62915+ const int mode)
62916+{
62917+ __u32 reqmode = GR_WRITE | GR_CREATE;
62918+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
62919+ reqmode |= GR_SETID;
62920+
62921+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
62922+ reqmode, GR_MKNOD_ACL_MSG);
62923+}
62924+
62925+__u32
62926+gr_acl_handle_mkdir(const struct dentry *new_dentry,
62927+ const struct dentry *parent_dentry,
62928+ const struct vfsmount *parent_mnt)
62929+{
62930+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
62931+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
62932+}
62933+
62934+#define RENAME_CHECK_SUCCESS(old, new) \
62935+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
62936+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
62937+
62938+int
62939+gr_acl_handle_rename(struct dentry *new_dentry,
62940+ struct dentry *parent_dentry,
62941+ const struct vfsmount *parent_mnt,
62942+ struct dentry *old_dentry,
62943+ struct inode *old_parent_inode,
62944+ struct vfsmount *old_mnt, const struct filename *newname)
62945+{
62946+ __u32 comp1, comp2;
62947+ int error = 0;
62948+
62949+ if (unlikely(!gr_acl_is_enabled()))
62950+ return 0;
62951+
62952+ if (!new_dentry->d_inode) {
62953+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
62954+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
62955+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
62956+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
62957+ GR_DELETE | GR_AUDIT_DELETE |
62958+ GR_AUDIT_READ | GR_AUDIT_WRITE |
62959+ GR_SUPPRESS, old_mnt);
62960+ } else {
62961+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
62962+ GR_CREATE | GR_DELETE |
62963+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
62964+ GR_AUDIT_READ | GR_AUDIT_WRITE |
62965+ GR_SUPPRESS, parent_mnt);
62966+ comp2 =
62967+ gr_search_file(old_dentry,
62968+ GR_READ | GR_WRITE | GR_AUDIT_READ |
62969+ GR_DELETE | GR_AUDIT_DELETE |
62970+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
62971+ }
62972+
62973+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
62974+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
62975+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
62976+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
62977+ && !(comp2 & GR_SUPPRESS)) {
62978+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
62979+ error = -EACCES;
62980+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
62981+ error = -EACCES;
62982+
62983+ return error;
62984+}
62985+
62986+void
62987+gr_acl_handle_exit(void)
62988+{
62989+ u16 id;
62990+ char *rolename;
62991+
62992+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
62993+ !(current->role->roletype & GR_ROLE_PERSIST))) {
62994+ id = current->acl_role_id;
62995+ rolename = current->role->rolename;
62996+ gr_set_acls(1);
62997+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
62998+ }
62999+
63000+ gr_put_exec_file(current);
63001+ return;
63002+}
63003+
63004+int
63005+gr_acl_handle_procpidmem(const struct task_struct *task)
63006+{
63007+ if (unlikely(!gr_acl_is_enabled()))
63008+ return 0;
63009+
63010+ if (task != current && task->acl->mode & GR_PROTPROCFD)
63011+ return -EACCES;
63012+
63013+ return 0;
63014+}
63015diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
63016new file mode 100644
63017index 0000000..8132048
63018--- /dev/null
63019+++ b/grsecurity/gracl_ip.c
63020@@ -0,0 +1,387 @@
63021+#include <linux/kernel.h>
63022+#include <asm/uaccess.h>
63023+#include <asm/errno.h>
63024+#include <net/sock.h>
63025+#include <linux/file.h>
63026+#include <linux/fs.h>
63027+#include <linux/net.h>
63028+#include <linux/in.h>
63029+#include <linux/skbuff.h>
63030+#include <linux/ip.h>
63031+#include <linux/udp.h>
63032+#include <linux/types.h>
63033+#include <linux/sched.h>
63034+#include <linux/netdevice.h>
63035+#include <linux/inetdevice.h>
63036+#include <linux/gracl.h>
63037+#include <linux/grsecurity.h>
63038+#include <linux/grinternal.h>
63039+
63040+#define GR_BIND 0x01
63041+#define GR_CONNECT 0x02
63042+#define GR_INVERT 0x04
63043+#define GR_BINDOVERRIDE 0x08
63044+#define GR_CONNECTOVERRIDE 0x10
63045+#define GR_SOCK_FAMILY 0x20
63046+
63047+static const char * gr_protocols[IPPROTO_MAX] = {
63048+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
63049+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
63050+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
63051+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
63052+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
63053+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
63054+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
63055+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
63056+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
63057+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
63058+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
63059+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
63060+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
63061+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
63062+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
63063+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
63064+ "sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unkown:134", "unknown:135",
63065+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
63066+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
63067+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
63068+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
63069+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
63070+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
63071+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
63072+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
63073+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
63074+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
63075+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
63076+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
63077+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
63078+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
63079+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
63080+ };
63081+
63082+static const char * gr_socktypes[SOCK_MAX] = {
63083+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
63084+ "unknown:7", "unknown:8", "unknown:9", "packet"
63085+ };
63086+
63087+static const char * gr_sockfamilies[AF_MAX+1] = {
63088+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
63089+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
63090+ "econet", "atmsvc", "rds", "sna", "irda", "ppox", "wanpipe", "llc", "fam_27", "fam_28",
63091+ "tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "ciaf"
63092+ };
63093+
63094+const char *
63095+gr_proto_to_name(unsigned char proto)
63096+{
63097+ return gr_protocols[proto];
63098+}
63099+
63100+const char *
63101+gr_socktype_to_name(unsigned char type)
63102+{
63103+ return gr_socktypes[type];
63104+}
63105+
63106+const char *
63107+gr_sockfamily_to_name(unsigned char family)
63108+{
63109+ return gr_sockfamilies[family];
63110+}
63111+
63112+int
63113+gr_search_socket(const int domain, const int type, const int protocol)
63114+{
63115+ struct acl_subject_label *curr;
63116+ const struct cred *cred = current_cred();
63117+
63118+ if (unlikely(!gr_acl_is_enabled()))
63119+ goto exit;
63120+
63121+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
63122+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
63123+ goto exit; // let the kernel handle it
63124+
63125+ curr = current->acl;
63126+
63127+ if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
63128+ /* the family is allowed, if this is PF_INET allow it only if
63129+ the extra sock type/protocol checks pass */
63130+ if (domain == PF_INET)
63131+ goto inet_check;
63132+ goto exit;
63133+ } else {
63134+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
63135+ __u32 fakeip = 0;
63136+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
63137+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
63138+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
63139+ gr_to_filename(current->exec_file->f_path.dentry,
63140+ current->exec_file->f_path.mnt) :
63141+ curr->filename, curr->filename,
63142+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
63143+ &current->signal->saved_ip);
63144+ goto exit;
63145+ }
63146+ goto exit_fail;
63147+ }
63148+
63149+inet_check:
63150+ /* the rest of this checking is for IPv4 only */
63151+ if (!curr->ips)
63152+ goto exit;
63153+
63154+ if ((curr->ip_type & (1U << type)) &&
63155+ (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
63156+ goto exit;
63157+
63158+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
63159+ /* we don't place acls on raw sockets , and sometimes
63160+ dgram/ip sockets are opened for ioctl and not
63161+ bind/connect, so we'll fake a bind learn log */
63162+ if (type == SOCK_RAW || type == SOCK_PACKET) {
63163+ __u32 fakeip = 0;
63164+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
63165+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
63166+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
63167+ gr_to_filename(current->exec_file->f_path.dentry,
63168+ current->exec_file->f_path.mnt) :
63169+ curr->filename, curr->filename,
63170+ &fakeip, 0, type,
63171+ protocol, GR_CONNECT, &current->signal->saved_ip);
63172+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
63173+ __u32 fakeip = 0;
63174+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
63175+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
63176+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
63177+ gr_to_filename(current->exec_file->f_path.dentry,
63178+ current->exec_file->f_path.mnt) :
63179+ curr->filename, curr->filename,
63180+ &fakeip, 0, type,
63181+ protocol, GR_BIND, &current->signal->saved_ip);
63182+ }
63183+ /* we'll log when they use connect or bind */
63184+ goto exit;
63185+ }
63186+
63187+exit_fail:
63188+ if (domain == PF_INET)
63189+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
63190+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
63191+ else
63192+#ifndef CONFIG_IPV6
63193+ if (domain != PF_INET6)
63194+#endif
63195+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
63196+ gr_socktype_to_name(type), protocol);
63197+
63198+ return 0;
63199+exit:
63200+ return 1;
63201+}
63202+
63203+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
63204+{
63205+ if ((ip->mode & mode) &&
63206+ (ip_port >= ip->low) &&
63207+ (ip_port <= ip->high) &&
63208+ ((ntohl(ip_addr) & our_netmask) ==
63209+ (ntohl(our_addr) & our_netmask))
63210+ && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
63211+ && (ip->type & (1U << type))) {
63212+ if (ip->mode & GR_INVERT)
63213+ return 2; // specifically denied
63214+ else
63215+ return 1; // allowed
63216+ }
63217+
63218+ return 0; // not specifically allowed, may continue parsing
63219+}
63220+
63221+static int
63222+gr_search_connectbind(const int full_mode, struct sock *sk,
63223+ struct sockaddr_in *addr, const int type)
63224+{
63225+ char iface[IFNAMSIZ] = {0};
63226+ struct acl_subject_label *curr;
63227+ struct acl_ip_label *ip;
63228+ struct inet_sock *isk;
63229+ struct net_device *dev;
63230+ struct in_device *idev;
63231+ unsigned long i;
63232+ int ret;
63233+ int mode = full_mode & (GR_BIND | GR_CONNECT);
63234+ __u32 ip_addr = 0;
63235+ __u32 our_addr;
63236+ __u32 our_netmask;
63237+ char *p;
63238+ __u16 ip_port = 0;
63239+ const struct cred *cred = current_cred();
63240+
63241+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
63242+ return 0;
63243+
63244+ curr = current->acl;
63245+ isk = inet_sk(sk);
63246+
63247+ /* INADDR_ANY overriding for binds, inaddr_any_override is already in network order */
63248+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
63249+ addr->sin_addr.s_addr = curr->inaddr_any_override;
63250+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
63251+ struct sockaddr_in saddr;
63252+ int err;
63253+
63254+ saddr.sin_family = AF_INET;
63255+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
63256+ saddr.sin_port = isk->inet_sport;
63257+
63258+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
63259+ if (err)
63260+ return err;
63261+
63262+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
63263+ if (err)
63264+ return err;
63265+ }
63266+
63267+ if (!curr->ips)
63268+ return 0;
63269+
63270+ ip_addr = addr->sin_addr.s_addr;
63271+ ip_port = ntohs(addr->sin_port);
63272+
63273+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
63274+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
63275+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
63276+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
63277+ gr_to_filename(current->exec_file->f_path.dentry,
63278+ current->exec_file->f_path.mnt) :
63279+ curr->filename, curr->filename,
63280+ &ip_addr, ip_port, type,
63281+ sk->sk_protocol, mode, &current->signal->saved_ip);
63282+ return 0;
63283+ }
63284+
63285+ for (i = 0; i < curr->ip_num; i++) {
63286+ ip = *(curr->ips + i);
63287+ if (ip->iface != NULL) {
63288+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
63289+ p = strchr(iface, ':');
63290+ if (p != NULL)
63291+ *p = '\0';
63292+ dev = dev_get_by_name(sock_net(sk), iface);
63293+ if (dev == NULL)
63294+ continue;
63295+ idev = in_dev_get(dev);
63296+ if (idev == NULL) {
63297+ dev_put(dev);
63298+ continue;
63299+ }
63300+ rcu_read_lock();
63301+ for_ifa(idev) {
63302+ if (!strcmp(ip->iface, ifa->ifa_label)) {
63303+ our_addr = ifa->ifa_address;
63304+ our_netmask = 0xffffffff;
63305+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
63306+ if (ret == 1) {
63307+ rcu_read_unlock();
63308+ in_dev_put(idev);
63309+ dev_put(dev);
63310+ return 0;
63311+ } else if (ret == 2) {
63312+ rcu_read_unlock();
63313+ in_dev_put(idev);
63314+ dev_put(dev);
63315+ goto denied;
63316+ }
63317+ }
63318+ } endfor_ifa(idev);
63319+ rcu_read_unlock();
63320+ in_dev_put(idev);
63321+ dev_put(dev);
63322+ } else {
63323+ our_addr = ip->addr;
63324+ our_netmask = ip->netmask;
63325+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
63326+ if (ret == 1)
63327+ return 0;
63328+ else if (ret == 2)
63329+ goto denied;
63330+ }
63331+ }
63332+
63333+denied:
63334+ if (mode == GR_BIND)
63335+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
63336+ else if (mode == GR_CONNECT)
63337+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
63338+
63339+ return -EACCES;
63340+}
63341+
63342+int
63343+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
63344+{
63345+ /* always allow disconnection of dgram sockets with connect */
63346+ if (addr->sin_family == AF_UNSPEC)
63347+ return 0;
63348+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
63349+}
63350+
63351+int
63352+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
63353+{
63354+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
63355+}
63356+
63357+int gr_search_listen(struct socket *sock)
63358+{
63359+ struct sock *sk = sock->sk;
63360+ struct sockaddr_in addr;
63361+
63362+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
63363+ addr.sin_port = inet_sk(sk)->inet_sport;
63364+
63365+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
63366+}
63367+
63368+int gr_search_accept(struct socket *sock)
63369+{
63370+ struct sock *sk = sock->sk;
63371+ struct sockaddr_in addr;
63372+
63373+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
63374+ addr.sin_port = inet_sk(sk)->inet_sport;
63375+
63376+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
63377+}
63378+
63379+int
63380+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
63381+{
63382+ if (addr)
63383+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
63384+ else {
63385+ struct sockaddr_in sin;
63386+ const struct inet_sock *inet = inet_sk(sk);
63387+
63388+ sin.sin_addr.s_addr = inet->inet_daddr;
63389+ sin.sin_port = inet->inet_dport;
63390+
63391+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
63392+ }
63393+}
63394+
63395+int
63396+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
63397+{
63398+ struct sockaddr_in sin;
63399+
63400+ if (unlikely(skb->len < sizeof (struct udphdr)))
63401+ return 0; // skip this packet
63402+
63403+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
63404+ sin.sin_port = udp_hdr(skb)->source;
63405+
63406+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
63407+}
63408diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
63409new file mode 100644
63410index 0000000..25f54ef
63411--- /dev/null
63412+++ b/grsecurity/gracl_learn.c
63413@@ -0,0 +1,207 @@
63414+#include <linux/kernel.h>
63415+#include <linux/mm.h>
63416+#include <linux/sched.h>
63417+#include <linux/poll.h>
63418+#include <linux/string.h>
63419+#include <linux/file.h>
63420+#include <linux/types.h>
63421+#include <linux/vmalloc.h>
63422+#include <linux/grinternal.h>
63423+
63424+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
63425+ size_t count, loff_t *ppos);
63426+extern int gr_acl_is_enabled(void);
63427+
63428+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
63429+static int gr_learn_attached;
63430+
63431+/* use a 512k buffer */
63432+#define LEARN_BUFFER_SIZE (512 * 1024)
63433+
63434+static DEFINE_SPINLOCK(gr_learn_lock);
63435+static DEFINE_MUTEX(gr_learn_user_mutex);
63436+
63437+/* we need to maintain two buffers, so that the kernel context of grlearn
63438+ uses a semaphore around the userspace copying, and the other kernel contexts
63439+ use a spinlock when copying into the buffer, since they cannot sleep
63440+*/
63441+static char *learn_buffer;
63442+static char *learn_buffer_user;
63443+static int learn_buffer_len;
63444+static int learn_buffer_user_len;
63445+
63446+static ssize_t
63447+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
63448+{
63449+ DECLARE_WAITQUEUE(wait, current);
63450+ ssize_t retval = 0;
63451+
63452+ add_wait_queue(&learn_wait, &wait);
63453+ set_current_state(TASK_INTERRUPTIBLE);
63454+ do {
63455+ mutex_lock(&gr_learn_user_mutex);
63456+ spin_lock(&gr_learn_lock);
63457+ if (learn_buffer_len)
63458+ break;
63459+ spin_unlock(&gr_learn_lock);
63460+ mutex_unlock(&gr_learn_user_mutex);
63461+ if (file->f_flags & O_NONBLOCK) {
63462+ retval = -EAGAIN;
63463+ goto out;
63464+ }
63465+ if (signal_pending(current)) {
63466+ retval = -ERESTARTSYS;
63467+ goto out;
63468+ }
63469+
63470+ schedule();
63471+ } while (1);
63472+
63473+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
63474+ learn_buffer_user_len = learn_buffer_len;
63475+ retval = learn_buffer_len;
63476+ learn_buffer_len = 0;
63477+
63478+ spin_unlock(&gr_learn_lock);
63479+
63480+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
63481+ retval = -EFAULT;
63482+
63483+ mutex_unlock(&gr_learn_user_mutex);
63484+out:
63485+ set_current_state(TASK_RUNNING);
63486+ remove_wait_queue(&learn_wait, &wait);
63487+ return retval;
63488+}
63489+
63490+static unsigned int
63491+poll_learn(struct file * file, poll_table * wait)
63492+{
63493+ poll_wait(file, &learn_wait, wait);
63494+
63495+ if (learn_buffer_len)
63496+ return (POLLIN | POLLRDNORM);
63497+
63498+ return 0;
63499+}
63500+
63501+void
63502+gr_clear_learn_entries(void)
63503+{
63504+ char *tmp;
63505+
63506+ mutex_lock(&gr_learn_user_mutex);
63507+ spin_lock(&gr_learn_lock);
63508+ tmp = learn_buffer;
63509+ learn_buffer = NULL;
63510+ spin_unlock(&gr_learn_lock);
63511+ if (tmp)
63512+ vfree(tmp);
63513+ if (learn_buffer_user != NULL) {
63514+ vfree(learn_buffer_user);
63515+ learn_buffer_user = NULL;
63516+ }
63517+ learn_buffer_len = 0;
63518+ mutex_unlock(&gr_learn_user_mutex);
63519+
63520+ return;
63521+}
63522+
63523+void
63524+gr_add_learn_entry(const char *fmt, ...)
63525+{
63526+ va_list args;
63527+ unsigned int len;
63528+
63529+ if (!gr_learn_attached)
63530+ return;
63531+
63532+ spin_lock(&gr_learn_lock);
63533+
63534+ /* leave a gap at the end so we know when it's "full" but don't have to
63535+ compute the exact length of the string we're trying to append
63536+ */
63537+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
63538+ spin_unlock(&gr_learn_lock);
63539+ wake_up_interruptible(&learn_wait);
63540+ return;
63541+ }
63542+ if (learn_buffer == NULL) {
63543+ spin_unlock(&gr_learn_lock);
63544+ return;
63545+ }
63546+
63547+ va_start(args, fmt);
63548+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
63549+ va_end(args);
63550+
63551+ learn_buffer_len += len + 1;
63552+
63553+ spin_unlock(&gr_learn_lock);
63554+ wake_up_interruptible(&learn_wait);
63555+
63556+ return;
63557+}
63558+
63559+static int
63560+open_learn(struct inode *inode, struct file *file)
63561+{
63562+ if (file->f_mode & FMODE_READ && gr_learn_attached)
63563+ return -EBUSY;
63564+ if (file->f_mode & FMODE_READ) {
63565+ int retval = 0;
63566+ mutex_lock(&gr_learn_user_mutex);
63567+ if (learn_buffer == NULL)
63568+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
63569+ if (learn_buffer_user == NULL)
63570+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
63571+ if (learn_buffer == NULL) {
63572+ retval = -ENOMEM;
63573+ goto out_error;
63574+ }
63575+ if (learn_buffer_user == NULL) {
63576+ retval = -ENOMEM;
63577+ goto out_error;
63578+ }
63579+ learn_buffer_len = 0;
63580+ learn_buffer_user_len = 0;
63581+ gr_learn_attached = 1;
63582+out_error:
63583+ mutex_unlock(&gr_learn_user_mutex);
63584+ return retval;
63585+ }
63586+ return 0;
63587+}
63588+
63589+static int
63590+close_learn(struct inode *inode, struct file *file)
63591+{
63592+ if (file->f_mode & FMODE_READ) {
63593+ char *tmp = NULL;
63594+ mutex_lock(&gr_learn_user_mutex);
63595+ spin_lock(&gr_learn_lock);
63596+ tmp = learn_buffer;
63597+ learn_buffer = NULL;
63598+ spin_unlock(&gr_learn_lock);
63599+ if (tmp)
63600+ vfree(tmp);
63601+ if (learn_buffer_user != NULL) {
63602+ vfree(learn_buffer_user);
63603+ learn_buffer_user = NULL;
63604+ }
63605+ learn_buffer_len = 0;
63606+ learn_buffer_user_len = 0;
63607+ gr_learn_attached = 0;
63608+ mutex_unlock(&gr_learn_user_mutex);
63609+ }
63610+
63611+ return 0;
63612+}
63613+
63614+const struct file_operations grsec_fops = {
63615+ .read = read_learn,
63616+ .write = write_grsec_handler,
63617+ .open = open_learn,
63618+ .release = close_learn,
63619+ .poll = poll_learn,
63620+};
63621diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
63622new file mode 100644
63623index 0000000..39645c9
63624--- /dev/null
63625+++ b/grsecurity/gracl_res.c
63626@@ -0,0 +1,68 @@
63627+#include <linux/kernel.h>
63628+#include <linux/sched.h>
63629+#include <linux/gracl.h>
63630+#include <linux/grinternal.h>
63631+
63632+static const char *restab_log[] = {
63633+ [RLIMIT_CPU] = "RLIMIT_CPU",
63634+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
63635+ [RLIMIT_DATA] = "RLIMIT_DATA",
63636+ [RLIMIT_STACK] = "RLIMIT_STACK",
63637+ [RLIMIT_CORE] = "RLIMIT_CORE",
63638+ [RLIMIT_RSS] = "RLIMIT_RSS",
63639+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
63640+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
63641+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
63642+ [RLIMIT_AS] = "RLIMIT_AS",
63643+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
63644+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
63645+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
63646+ [RLIMIT_NICE] = "RLIMIT_NICE",
63647+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
63648+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
63649+ [GR_CRASH_RES] = "RLIMIT_CRASH"
63650+};
63651+
63652+void
63653+gr_log_resource(const struct task_struct *task,
63654+ const int res, const unsigned long wanted, const int gt)
63655+{
63656+ const struct cred *cred;
63657+ unsigned long rlim;
63658+
63659+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
63660+ return;
63661+
63662+ // not yet supported resource
63663+ if (unlikely(!restab_log[res]))
63664+ return;
63665+
63666+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
63667+ rlim = task_rlimit_max(task, res);
63668+ else
63669+ rlim = task_rlimit(task, res);
63670+
63671+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
63672+ return;
63673+
63674+ rcu_read_lock();
63675+ cred = __task_cred(task);
63676+
63677+ if (res == RLIMIT_NPROC &&
63678+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
63679+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
63680+ goto out_rcu_unlock;
63681+ else if (res == RLIMIT_MEMLOCK &&
63682+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
63683+ goto out_rcu_unlock;
63684+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
63685+ goto out_rcu_unlock;
63686+ rcu_read_unlock();
63687+
63688+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
63689+
63690+ return;
63691+out_rcu_unlock:
63692+ rcu_read_unlock();
63693+ return;
63694+}
63695diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
63696new file mode 100644
63697index 0000000..3c38bfe
63698--- /dev/null
63699+++ b/grsecurity/gracl_segv.c
63700@@ -0,0 +1,305 @@
63701+#include <linux/kernel.h>
63702+#include <linux/mm.h>
63703+#include <asm/uaccess.h>
63704+#include <asm/errno.h>
63705+#include <asm/mman.h>
63706+#include <net/sock.h>
63707+#include <linux/file.h>
63708+#include <linux/fs.h>
63709+#include <linux/net.h>
63710+#include <linux/in.h>
63711+#include <linux/slab.h>
63712+#include <linux/types.h>
63713+#include <linux/sched.h>
63714+#include <linux/timer.h>
63715+#include <linux/gracl.h>
63716+#include <linux/grsecurity.h>
63717+#include <linux/grinternal.h>
63718+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
63719+#include <linux/magic.h>
63720+#include <linux/pagemap.h>
63721+#include "../fs/btrfs/async-thread.h"
63722+#include "../fs/btrfs/ctree.h"
63723+#include "../fs/btrfs/btrfs_inode.h"
63724+#endif
63725+
63726+static struct crash_uid *uid_set;
63727+static unsigned short uid_used;
63728+static DEFINE_SPINLOCK(gr_uid_lock);
63729+extern rwlock_t gr_inode_lock;
63730+extern struct acl_subject_label *
63731+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
63732+ struct acl_role_label *role);
63733+
63734+static inline dev_t __get_dev(const struct dentry *dentry)
63735+{
63736+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
63737+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
63738+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
63739+ else
63740+#endif
63741+ return dentry->d_sb->s_dev;
63742+}
63743+
63744+int
63745+gr_init_uidset(void)
63746+{
63747+ uid_set =
63748+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
63749+ uid_used = 0;
63750+
63751+ return uid_set ? 1 : 0;
63752+}
63753+
63754+void
63755+gr_free_uidset(void)
63756+{
63757+ if (uid_set)
63758+ kfree(uid_set);
63759+
63760+ return;
63761+}
63762+
63763+int
63764+gr_find_uid(const uid_t uid)
63765+{
63766+ struct crash_uid *tmp = uid_set;
63767+ uid_t buid;
63768+ int low = 0, high = uid_used - 1, mid;
63769+
63770+ while (high >= low) {
63771+ mid = (low + high) >> 1;
63772+ buid = tmp[mid].uid;
63773+ if (buid == uid)
63774+ return mid;
63775+ if (buid > uid)
63776+ high = mid - 1;
63777+ if (buid < uid)
63778+ low = mid + 1;
63779+ }
63780+
63781+ return -1;
63782+}
63783+
63784+static __inline__ void
63785+gr_insertsort(void)
63786+{
63787+ unsigned short i, j;
63788+ struct crash_uid index;
63789+
63790+ for (i = 1; i < uid_used; i++) {
63791+ index = uid_set[i];
63792+ j = i;
63793+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
63794+ uid_set[j] = uid_set[j - 1];
63795+ j--;
63796+ }
63797+ uid_set[j] = index;
63798+ }
63799+
63800+ return;
63801+}
63802+
63803+static __inline__ void
63804+gr_insert_uid(const kuid_t kuid, const unsigned long expires)
63805+{
63806+ int loc;
63807+ uid_t uid = GR_GLOBAL_UID(kuid);
63808+
63809+ if (uid_used == GR_UIDTABLE_MAX)
63810+ return;
63811+
63812+ loc = gr_find_uid(uid);
63813+
63814+ if (loc >= 0) {
63815+ uid_set[loc].expires = expires;
63816+ return;
63817+ }
63818+
63819+ uid_set[uid_used].uid = uid;
63820+ uid_set[uid_used].expires = expires;
63821+ uid_used++;
63822+
63823+ gr_insertsort();
63824+
63825+ return;
63826+}
63827+
63828+void
63829+gr_remove_uid(const unsigned short loc)
63830+{
63831+ unsigned short i;
63832+
63833+ for (i = loc + 1; i < uid_used; i++)
63834+ uid_set[i - 1] = uid_set[i];
63835+
63836+ uid_used--;
63837+
63838+ return;
63839+}
63840+
63841+int
63842+gr_check_crash_uid(const kuid_t kuid)
63843+{
63844+ int loc;
63845+ int ret = 0;
63846+ uid_t uid;
63847+
63848+ if (unlikely(!gr_acl_is_enabled()))
63849+ return 0;
63850+
63851+ uid = GR_GLOBAL_UID(kuid);
63852+
63853+ spin_lock(&gr_uid_lock);
63854+ loc = gr_find_uid(uid);
63855+
63856+ if (loc < 0)
63857+ goto out_unlock;
63858+
63859+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
63860+ gr_remove_uid(loc);
63861+ else
63862+ ret = 1;
63863+
63864+out_unlock:
63865+ spin_unlock(&gr_uid_lock);
63866+ return ret;
63867+}
63868+
63869+static __inline__ int
63870+proc_is_setxid(const struct cred *cred)
63871+{
63872+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
63873+ !uid_eq(cred->uid, cred->fsuid))
63874+ return 1;
63875+ if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
63876+ !gid_eq(cred->gid, cred->fsgid))
63877+ return 1;
63878+
63879+ return 0;
63880+}
63881+
63882+extern int gr_fake_force_sig(int sig, struct task_struct *t);
63883+
63884+void
63885+gr_handle_crash(struct task_struct *task, const int sig)
63886+{
63887+ struct acl_subject_label *curr;
63888+ struct task_struct *tsk, *tsk2;
63889+ const struct cred *cred;
63890+ const struct cred *cred2;
63891+
63892+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
63893+ return;
63894+
63895+ if (unlikely(!gr_acl_is_enabled()))
63896+ return;
63897+
63898+ curr = task->acl;
63899+
63900+ if (!(curr->resmask & (1U << GR_CRASH_RES)))
63901+ return;
63902+
63903+ if (time_before_eq(curr->expires, get_seconds())) {
63904+ curr->expires = 0;
63905+ curr->crashes = 0;
63906+ }
63907+
63908+ curr->crashes++;
63909+
63910+ if (!curr->expires)
63911+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
63912+
63913+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
63914+ time_after(curr->expires, get_seconds())) {
63915+ rcu_read_lock();
63916+ cred = __task_cred(task);
63917+ if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
63918+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
63919+ spin_lock(&gr_uid_lock);
63920+ gr_insert_uid(cred->uid, curr->expires);
63921+ spin_unlock(&gr_uid_lock);
63922+ curr->expires = 0;
63923+ curr->crashes = 0;
63924+ read_lock(&tasklist_lock);
63925+ do_each_thread(tsk2, tsk) {
63926+ cred2 = __task_cred(tsk);
63927+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
63928+ gr_fake_force_sig(SIGKILL, tsk);
63929+ } while_each_thread(tsk2, tsk);
63930+ read_unlock(&tasklist_lock);
63931+ } else {
63932+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
63933+ read_lock(&tasklist_lock);
63934+ read_lock(&grsec_exec_file_lock);
63935+ do_each_thread(tsk2, tsk) {
63936+ if (likely(tsk != task)) {
63937+ // if this thread has the same subject as the one that triggered
63938+ // RES_CRASH and it's the same binary, kill it
63939+ if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file))
63940+ gr_fake_force_sig(SIGKILL, tsk);
63941+ }
63942+ } while_each_thread(tsk2, tsk);
63943+ read_unlock(&grsec_exec_file_lock);
63944+ read_unlock(&tasklist_lock);
63945+ }
63946+ rcu_read_unlock();
63947+ }
63948+
63949+ return;
63950+}
63951+
63952+int
63953+gr_check_crash_exec(const struct file *filp)
63954+{
63955+ struct acl_subject_label *curr;
63956+
63957+ if (unlikely(!gr_acl_is_enabled()))
63958+ return 0;
63959+
63960+ read_lock(&gr_inode_lock);
63961+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
63962+ __get_dev(filp->f_path.dentry),
63963+ current->role);
63964+ read_unlock(&gr_inode_lock);
63965+
63966+ if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
63967+ (!curr->crashes && !curr->expires))
63968+ return 0;
63969+
63970+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
63971+ time_after(curr->expires, get_seconds()))
63972+ return 1;
63973+ else if (time_before_eq(curr->expires, get_seconds())) {
63974+ curr->crashes = 0;
63975+ curr->expires = 0;
63976+ }
63977+
63978+ return 0;
63979+}
63980+
63981+void
63982+gr_handle_alertkill(struct task_struct *task)
63983+{
63984+ struct acl_subject_label *curracl;
63985+ __u32 curr_ip;
63986+ struct task_struct *p, *p2;
63987+
63988+ if (unlikely(!gr_acl_is_enabled()))
63989+ return;
63990+
63991+ curracl = task->acl;
63992+ curr_ip = task->signal->curr_ip;
63993+
63994+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
63995+ read_lock(&tasklist_lock);
63996+ do_each_thread(p2, p) {
63997+ if (p->signal->curr_ip == curr_ip)
63998+ gr_fake_force_sig(SIGKILL, p);
63999+ } while_each_thread(p2, p);
64000+ read_unlock(&tasklist_lock);
64001+ } else if (curracl->mode & GR_KILLPROC)
64002+ gr_fake_force_sig(SIGKILL, task);
64003+
64004+ return;
64005+}
64006diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
64007new file mode 100644
64008index 0000000..98011b0
64009--- /dev/null
64010+++ b/grsecurity/gracl_shm.c
64011@@ -0,0 +1,40 @@
64012+#include <linux/kernel.h>
64013+#include <linux/mm.h>
64014+#include <linux/sched.h>
64015+#include <linux/file.h>
64016+#include <linux/ipc.h>
64017+#include <linux/gracl.h>
64018+#include <linux/grsecurity.h>
64019+#include <linux/grinternal.h>
64020+
64021+int
64022+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
64023+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
64024+{
64025+ struct task_struct *task;
64026+
64027+ if (!gr_acl_is_enabled())
64028+ return 1;
64029+
64030+ rcu_read_lock();
64031+ read_lock(&tasklist_lock);
64032+
64033+ task = find_task_by_vpid(shm_cprid);
64034+
64035+ if (unlikely(!task))
64036+ task = find_task_by_vpid(shm_lapid);
64037+
64038+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
64039+ (task_pid_nr(task) == shm_lapid)) &&
64040+ (task->acl->mode & GR_PROTSHM) &&
64041+ (task->acl != current->acl))) {
64042+ read_unlock(&tasklist_lock);
64043+ rcu_read_unlock();
64044+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
64045+ return 0;
64046+ }
64047+ read_unlock(&tasklist_lock);
64048+ rcu_read_unlock();
64049+
64050+ return 1;
64051+}
64052diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
64053new file mode 100644
64054index 0000000..bc0be01
64055--- /dev/null
64056+++ b/grsecurity/grsec_chdir.c
64057@@ -0,0 +1,19 @@
64058+#include <linux/kernel.h>
64059+#include <linux/sched.h>
64060+#include <linux/fs.h>
64061+#include <linux/file.h>
64062+#include <linux/grsecurity.h>
64063+#include <linux/grinternal.h>
64064+
64065+void
64066+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
64067+{
64068+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
64069+ if ((grsec_enable_chdir && grsec_enable_group &&
64070+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
64071+ !grsec_enable_group)) {
64072+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
64073+ }
64074+#endif
64075+ return;
64076+}
64077diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
64078new file mode 100644
64079index 0000000..bd6e105
64080--- /dev/null
64081+++ b/grsecurity/grsec_chroot.c
64082@@ -0,0 +1,370 @@
64083+#include <linux/kernel.h>
64084+#include <linux/module.h>
64085+#include <linux/sched.h>
64086+#include <linux/file.h>
64087+#include <linux/fs.h>
64088+#include <linux/mount.h>
64089+#include <linux/types.h>
64090+#include "../fs/mount.h"
64091+#include <linux/grsecurity.h>
64092+#include <linux/grinternal.h>
64093+
64094+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
64095+static int gr_init_ran;
64096+#endif
64097+
64098+void gr_set_chroot_entries(struct task_struct *task, const struct path *path)
64099+{
64100+#ifdef CONFIG_GRKERNSEC
64101+ if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
64102+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root
64103+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
64104+ && gr_init_ran
64105+#endif
64106+ )
64107+ task->gr_is_chrooted = 1;
64108+ else {
64109+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
64110+ if (task_pid_nr(task) == 1 && !gr_init_ran)
64111+ gr_init_ran = 1;
64112+#endif
64113+ task->gr_is_chrooted = 0;
64114+ }
64115+
64116+ task->gr_chroot_dentry = path->dentry;
64117+#endif
64118+ return;
64119+}
64120+
64121+void gr_clear_chroot_entries(struct task_struct *task)
64122+{
64123+#ifdef CONFIG_GRKERNSEC
64124+ task->gr_is_chrooted = 0;
64125+ task->gr_chroot_dentry = NULL;
64126+#endif
64127+ return;
64128+}
64129+
64130+int
64131+gr_handle_chroot_unix(const pid_t pid)
64132+{
64133+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
64134+ struct task_struct *p;
64135+
64136+ if (unlikely(!grsec_enable_chroot_unix))
64137+ return 1;
64138+
64139+ if (likely(!proc_is_chrooted(current)))
64140+ return 1;
64141+
64142+ rcu_read_lock();
64143+ read_lock(&tasklist_lock);
64144+ p = find_task_by_vpid_unrestricted(pid);
64145+ if (unlikely(p && !have_same_root(current, p))) {
64146+ read_unlock(&tasklist_lock);
64147+ rcu_read_unlock();
64148+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
64149+ return 0;
64150+ }
64151+ read_unlock(&tasklist_lock);
64152+ rcu_read_unlock();
64153+#endif
64154+ return 1;
64155+}
64156+
64157+int
64158+gr_handle_chroot_nice(void)
64159+{
64160+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
64161+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
64162+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
64163+ return -EPERM;
64164+ }
64165+#endif
64166+ return 0;
64167+}
64168+
64169+int
64170+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
64171+{
64172+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
64173+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
64174+ && proc_is_chrooted(current)) {
64175+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
64176+ return -EACCES;
64177+ }
64178+#endif
64179+ return 0;
64180+}
64181+
64182+int
64183+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
64184+{
64185+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
64186+ struct task_struct *p;
64187+ int ret = 0;
64188+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
64189+ return ret;
64190+
64191+ read_lock(&tasklist_lock);
64192+ do_each_pid_task(pid, type, p) {
64193+ if (!have_same_root(current, p)) {
64194+ ret = 1;
64195+ goto out;
64196+ }
64197+ } while_each_pid_task(pid, type, p);
64198+out:
64199+ read_unlock(&tasklist_lock);
64200+ return ret;
64201+#endif
64202+ return 0;
64203+}
64204+
64205+int
64206+gr_pid_is_chrooted(struct task_struct *p)
64207+{
64208+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
64209+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
64210+ return 0;
64211+
64212+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
64213+ !have_same_root(current, p)) {
64214+ return 1;
64215+ }
64216+#endif
64217+ return 0;
64218+}
64219+
64220+EXPORT_SYMBOL(gr_pid_is_chrooted);
64221+
64222+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
64223+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
64224+{
64225+ struct path path, currentroot;
64226+ int ret = 0;
64227+
64228+ path.dentry = (struct dentry *)u_dentry;
64229+ path.mnt = (struct vfsmount *)u_mnt;
64230+ get_fs_root(current->fs, &currentroot);
64231+ if (path_is_under(&path, &currentroot))
64232+ ret = 1;
64233+ path_put(&currentroot);
64234+
64235+ return ret;
64236+}
64237+#endif
64238+
64239+int
64240+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
64241+{
64242+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
64243+ if (!grsec_enable_chroot_fchdir)
64244+ return 1;
64245+
64246+ if (!proc_is_chrooted(current))
64247+ return 1;
64248+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
64249+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
64250+ return 0;
64251+ }
64252+#endif
64253+ return 1;
64254+}
64255+
64256+int
64257+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
64258+ const time_t shm_createtime)
64259+{
64260+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
64261+ struct task_struct *p;
64262+ time_t starttime;
64263+
64264+ if (unlikely(!grsec_enable_chroot_shmat))
64265+ return 1;
64266+
64267+ if (likely(!proc_is_chrooted(current)))
64268+ return 1;
64269+
64270+ rcu_read_lock();
64271+ read_lock(&tasklist_lock);
64272+
64273+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
64274+ starttime = p->start_time.tv_sec;
64275+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
64276+ if (have_same_root(current, p)) {
64277+ goto allow;
64278+ } else {
64279+ read_unlock(&tasklist_lock);
64280+ rcu_read_unlock();
64281+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
64282+ return 0;
64283+ }
64284+ }
64285+ /* creator exited, pid reuse, fall through to next check */
64286+ }
64287+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
64288+ if (unlikely(!have_same_root(current, p))) {
64289+ read_unlock(&tasklist_lock);
64290+ rcu_read_unlock();
64291+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
64292+ return 0;
64293+ }
64294+ }
64295+
64296+allow:
64297+ read_unlock(&tasklist_lock);
64298+ rcu_read_unlock();
64299+#endif
64300+ return 1;
64301+}
64302+
64303+void
64304+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
64305+{
64306+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
64307+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
64308+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
64309+#endif
64310+ return;
64311+}
64312+
64313+int
64314+gr_handle_chroot_mknod(const struct dentry *dentry,
64315+ const struct vfsmount *mnt, const int mode)
64316+{
64317+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
64318+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
64319+ proc_is_chrooted(current)) {
64320+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
64321+ return -EPERM;
64322+ }
64323+#endif
64324+ return 0;
64325+}
64326+
64327+int
64328+gr_handle_chroot_mount(const struct dentry *dentry,
64329+ const struct vfsmount *mnt, const char *dev_name)
64330+{
64331+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
64332+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
64333+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
64334+ return -EPERM;
64335+ }
64336+#endif
64337+ return 0;
64338+}
64339+
64340+int
64341+gr_handle_chroot_pivot(void)
64342+{
64343+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
64344+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
64345+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
64346+ return -EPERM;
64347+ }
64348+#endif
64349+ return 0;
64350+}
64351+
64352+int
64353+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
64354+{
64355+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
64356+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
64357+ !gr_is_outside_chroot(dentry, mnt)) {
64358+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
64359+ return -EPERM;
64360+ }
64361+#endif
64362+ return 0;
64363+}
64364+
64365+extern const char *captab_log[];
64366+extern int captab_log_entries;
64367+
64368+int
64369+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
64370+{
64371+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
64372+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
64373+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
64374+ if (cap_raised(chroot_caps, cap)) {
64375+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
64376+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
64377+ }
64378+ return 0;
64379+ }
64380+ }
64381+#endif
64382+ return 1;
64383+}
64384+
64385+int
64386+gr_chroot_is_capable(const int cap)
64387+{
64388+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
64389+ return gr_task_chroot_is_capable(current, current_cred(), cap);
64390+#endif
64391+ return 1;
64392+}
64393+
64394+int
64395+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
64396+{
64397+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
64398+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
64399+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
64400+ if (cap_raised(chroot_caps, cap)) {
64401+ return 0;
64402+ }
64403+ }
64404+#endif
64405+ return 1;
64406+}
64407+
64408+int
64409+gr_chroot_is_capable_nolog(const int cap)
64410+{
64411+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
64412+ return gr_task_chroot_is_capable_nolog(current, cap);
64413+#endif
64414+ return 1;
64415+}
64416+
64417+int
64418+gr_handle_chroot_sysctl(const int op)
64419+{
64420+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
64421+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
64422+ proc_is_chrooted(current))
64423+ return -EACCES;
64424+#endif
64425+ return 0;
64426+}
64427+
64428+void
64429+gr_handle_chroot_chdir(const struct path *path)
64430+{
64431+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
64432+ if (grsec_enable_chroot_chdir)
64433+ set_fs_pwd(current->fs, path);
64434+#endif
64435+ return;
64436+}
64437+
64438+int
64439+gr_handle_chroot_chmod(const struct dentry *dentry,
64440+ const struct vfsmount *mnt, const int mode)
64441+{
64442+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
64443+ /* allow chmod +s on directories, but not files */
64444+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
64445+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
64446+ proc_is_chrooted(current)) {
64447+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
64448+ return -EPERM;
64449+ }
64450+#endif
64451+ return 0;
64452+}
64453diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
64454new file mode 100644
64455index 0000000..ce65ceb
64456--- /dev/null
64457+++ b/grsecurity/grsec_disabled.c
64458@@ -0,0 +1,434 @@
64459+#include <linux/kernel.h>
64460+#include <linux/module.h>
64461+#include <linux/sched.h>
64462+#include <linux/file.h>
64463+#include <linux/fs.h>
64464+#include <linux/kdev_t.h>
64465+#include <linux/net.h>
64466+#include <linux/in.h>
64467+#include <linux/ip.h>
64468+#include <linux/skbuff.h>
64469+#include <linux/sysctl.h>
64470+
64471+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
64472+void
64473+pax_set_initial_flags(struct linux_binprm *bprm)
64474+{
64475+ return;
64476+}
64477+#endif
64478+
64479+#ifdef CONFIG_SYSCTL
64480+__u32
64481+gr_handle_sysctl(const struct ctl_table * table, const int op)
64482+{
64483+ return 0;
64484+}
64485+#endif
64486+
64487+#ifdef CONFIG_TASKSTATS
64488+int gr_is_taskstats_denied(int pid)
64489+{
64490+ return 0;
64491+}
64492+#endif
64493+
64494+int
64495+gr_acl_is_enabled(void)
64496+{
64497+ return 0;
64498+}
64499+
64500+void
64501+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
64502+{
64503+ return;
64504+}
64505+
64506+int
64507+gr_handle_rawio(const struct inode *inode)
64508+{
64509+ return 0;
64510+}
64511+
64512+void
64513+gr_acl_handle_psacct(struct task_struct *task, const long code)
64514+{
64515+ return;
64516+}
64517+
64518+int
64519+gr_handle_ptrace(struct task_struct *task, const long request)
64520+{
64521+ return 0;
64522+}
64523+
64524+int
64525+gr_handle_proc_ptrace(struct task_struct *task)
64526+{
64527+ return 0;
64528+}
64529+
64530+int
64531+gr_set_acls(const int type)
64532+{
64533+ return 0;
64534+}
64535+
64536+int
64537+gr_check_hidden_task(const struct task_struct *tsk)
64538+{
64539+ return 0;
64540+}
64541+
64542+int
64543+gr_check_protected_task(const struct task_struct *task)
64544+{
64545+ return 0;
64546+}
64547+
64548+int
64549+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
64550+{
64551+ return 0;
64552+}
64553+
64554+void
64555+gr_copy_label(struct task_struct *tsk)
64556+{
64557+ return;
64558+}
64559+
64560+void
64561+gr_set_pax_flags(struct task_struct *task)
64562+{
64563+ return;
64564+}
64565+
64566+int
64567+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
64568+ const int unsafe_share)
64569+{
64570+ return 0;
64571+}
64572+
64573+void
64574+gr_handle_delete(const ino_t ino, const dev_t dev)
64575+{
64576+ return;
64577+}
64578+
64579+void
64580+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
64581+{
64582+ return;
64583+}
64584+
64585+void
64586+gr_handle_crash(struct task_struct *task, const int sig)
64587+{
64588+ return;
64589+}
64590+
64591+int
64592+gr_check_crash_exec(const struct file *filp)
64593+{
64594+ return 0;
64595+}
64596+
64597+int
64598+gr_check_crash_uid(const kuid_t uid)
64599+{
64600+ return 0;
64601+}
64602+
64603+void
64604+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
64605+ struct dentry *old_dentry,
64606+ struct dentry *new_dentry,
64607+ struct vfsmount *mnt, const __u8 replace)
64608+{
64609+ return;
64610+}
64611+
64612+int
64613+gr_search_socket(const int family, const int type, const int protocol)
64614+{
64615+ return 1;
64616+}
64617+
64618+int
64619+gr_search_connectbind(const int mode, const struct socket *sock,
64620+ const struct sockaddr_in *addr)
64621+{
64622+ return 0;
64623+}
64624+
64625+void
64626+gr_handle_alertkill(struct task_struct *task)
64627+{
64628+ return;
64629+}
64630+
64631+__u32
64632+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
64633+{
64634+ return 1;
64635+}
64636+
64637+__u32
64638+gr_acl_handle_hidden_file(const struct dentry * dentry,
64639+ const struct vfsmount * mnt)
64640+{
64641+ return 1;
64642+}
64643+
64644+__u32
64645+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
64646+ int acc_mode)
64647+{
64648+ return 1;
64649+}
64650+
64651+__u32
64652+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
64653+{
64654+ return 1;
64655+}
64656+
64657+__u32
64658+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
64659+{
64660+ return 1;
64661+}
64662+
64663+int
64664+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
64665+ unsigned int *vm_flags)
64666+{
64667+ return 1;
64668+}
64669+
64670+__u32
64671+gr_acl_handle_truncate(const struct dentry * dentry,
64672+ const struct vfsmount * mnt)
64673+{
64674+ return 1;
64675+}
64676+
64677+__u32
64678+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
64679+{
64680+ return 1;
64681+}
64682+
64683+__u32
64684+gr_acl_handle_access(const struct dentry * dentry,
64685+ const struct vfsmount * mnt, const int fmode)
64686+{
64687+ return 1;
64688+}
64689+
64690+__u32
64691+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
64692+ umode_t *mode)
64693+{
64694+ return 1;
64695+}
64696+
64697+__u32
64698+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
64699+{
64700+ return 1;
64701+}
64702+
64703+__u32
64704+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
64705+{
64706+ return 1;
64707+}
64708+
64709+void
64710+grsecurity_init(void)
64711+{
64712+ return;
64713+}
64714+
64715+umode_t gr_acl_umask(void)
64716+{
64717+ return 0;
64718+}
64719+
64720+__u32
64721+gr_acl_handle_mknod(const struct dentry * new_dentry,
64722+ const struct dentry * parent_dentry,
64723+ const struct vfsmount * parent_mnt,
64724+ const int mode)
64725+{
64726+ return 1;
64727+}
64728+
64729+__u32
64730+gr_acl_handle_mkdir(const struct dentry * new_dentry,
64731+ const struct dentry * parent_dentry,
64732+ const struct vfsmount * parent_mnt)
64733+{
64734+ return 1;
64735+}
64736+
64737+__u32
64738+gr_acl_handle_symlink(const struct dentry * new_dentry,
64739+ const struct dentry * parent_dentry,
64740+ const struct vfsmount * parent_mnt, const struct filename *from)
64741+{
64742+ return 1;
64743+}
64744+
64745+__u32
64746+gr_acl_handle_link(const struct dentry * new_dentry,
64747+ const struct dentry * parent_dentry,
64748+ const struct vfsmount * parent_mnt,
64749+ const struct dentry * old_dentry,
64750+ const struct vfsmount * old_mnt, const struct filename *to)
64751+{
64752+ return 1;
64753+}
64754+
64755+int
64756+gr_acl_handle_rename(const struct dentry *new_dentry,
64757+ const struct dentry *parent_dentry,
64758+ const struct vfsmount *parent_mnt,
64759+ const struct dentry *old_dentry,
64760+ const struct inode *old_parent_inode,
64761+ const struct vfsmount *old_mnt, const struct filename *newname)
64762+{
64763+ return 0;
64764+}
64765+
64766+int
64767+gr_acl_handle_filldir(const struct file *file, const char *name,
64768+ const int namelen, const ino_t ino)
64769+{
64770+ return 1;
64771+}
64772+
64773+int
64774+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
64775+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
64776+{
64777+ return 1;
64778+}
64779+
64780+int
64781+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
64782+{
64783+ return 0;
64784+}
64785+
64786+int
64787+gr_search_accept(const struct socket *sock)
64788+{
64789+ return 0;
64790+}
64791+
64792+int
64793+gr_search_listen(const struct socket *sock)
64794+{
64795+ return 0;
64796+}
64797+
64798+int
64799+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
64800+{
64801+ return 0;
64802+}
64803+
64804+__u32
64805+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
64806+{
64807+ return 1;
64808+}
64809+
64810+__u32
64811+gr_acl_handle_creat(const struct dentry * dentry,
64812+ const struct dentry * p_dentry,
64813+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
64814+ const int imode)
64815+{
64816+ return 1;
64817+}
64818+
64819+void
64820+gr_acl_handle_exit(void)
64821+{
64822+ return;
64823+}
64824+
64825+int
64826+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
64827+{
64828+ return 1;
64829+}
64830+
64831+void
64832+gr_set_role_label(const kuid_t uid, const kgid_t gid)
64833+{
64834+ return;
64835+}
64836+
64837+int
64838+gr_acl_handle_procpidmem(const struct task_struct *task)
64839+{
64840+ return 0;
64841+}
64842+
64843+int
64844+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
64845+{
64846+ return 0;
64847+}
64848+
64849+int
64850+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
64851+{
64852+ return 0;
64853+}
64854+
64855+void
64856+gr_set_kernel_label(struct task_struct *task)
64857+{
64858+ return;
64859+}
64860+
64861+int
64862+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
64863+{
64864+ return 0;
64865+}
64866+
64867+int
64868+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
64869+{
64870+ return 0;
64871+}
64872+
64873+int gr_acl_enable_at_secure(void)
64874+{
64875+ return 0;
64876+}
64877+
64878+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
64879+{
64880+ return dentry->d_sb->s_dev;
64881+}
64882+
64883+void gr_put_exec_file(struct task_struct *task)
64884+{
64885+ return;
64886+}
64887+
64888+EXPORT_SYMBOL(gr_set_kernel_label);
64889+#ifdef CONFIG_SECURITY
64890+EXPORT_SYMBOL(gr_check_user_change);
64891+EXPORT_SYMBOL(gr_check_group_change);
64892+#endif
64893diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
64894new file mode 100644
64895index 0000000..387032b
64896--- /dev/null
64897+++ b/grsecurity/grsec_exec.c
64898@@ -0,0 +1,187 @@
64899+#include <linux/kernel.h>
64900+#include <linux/sched.h>
64901+#include <linux/file.h>
64902+#include <linux/binfmts.h>
64903+#include <linux/fs.h>
64904+#include <linux/types.h>
64905+#include <linux/grdefs.h>
64906+#include <linux/grsecurity.h>
64907+#include <linux/grinternal.h>
64908+#include <linux/capability.h>
64909+#include <linux/module.h>
64910+#include <linux/compat.h>
64911+
64912+#include <asm/uaccess.h>
64913+
64914+#ifdef CONFIG_GRKERNSEC_EXECLOG
64915+static char gr_exec_arg_buf[132];
64916+static DEFINE_MUTEX(gr_exec_arg_mutex);
64917+#endif
64918+
64919+struct user_arg_ptr {
64920+#ifdef CONFIG_COMPAT
64921+ bool is_compat;
64922+#endif
64923+ union {
64924+ const char __user *const __user *native;
64925+#ifdef CONFIG_COMPAT
64926+ const compat_uptr_t __user *compat;
64927+#endif
64928+ } ptr;
64929+};
64930+
64931+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
64932+
64933+void
64934+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
64935+{
64936+#ifdef CONFIG_GRKERNSEC_EXECLOG
64937+ char *grarg = gr_exec_arg_buf;
64938+ unsigned int i, x, execlen = 0;
64939+ char c;
64940+
64941+ if (!((grsec_enable_execlog && grsec_enable_group &&
64942+ in_group_p(grsec_audit_gid))
64943+ || (grsec_enable_execlog && !grsec_enable_group)))
64944+ return;
64945+
64946+ mutex_lock(&gr_exec_arg_mutex);
64947+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
64948+
64949+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
64950+ const char __user *p;
64951+ unsigned int len;
64952+
64953+ p = get_user_arg_ptr(argv, i);
64954+ if (IS_ERR(p))
64955+ goto log;
64956+
64957+ len = strnlen_user(p, 128 - execlen);
64958+ if (len > 128 - execlen)
64959+ len = 128 - execlen;
64960+ else if (len > 0)
64961+ len--;
64962+ if (copy_from_user(grarg + execlen, p, len))
64963+ goto log;
64964+
64965+ /* rewrite unprintable characters */
64966+ for (x = 0; x < len; x++) {
64967+ c = *(grarg + execlen + x);
64968+ if (c < 32 || c > 126)
64969+ *(grarg + execlen + x) = ' ';
64970+ }
64971+
64972+ execlen += len;
64973+ *(grarg + execlen) = ' ';
64974+ *(grarg + execlen + 1) = '\0';
64975+ execlen++;
64976+ }
64977+
64978+ log:
64979+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
64980+ bprm->file->f_path.mnt, grarg);
64981+ mutex_unlock(&gr_exec_arg_mutex);
64982+#endif
64983+ return;
64984+}
64985+
64986+#ifdef CONFIG_GRKERNSEC
64987+extern int gr_acl_is_capable(const int cap);
64988+extern int gr_acl_is_capable_nolog(const int cap);
64989+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
64990+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
64991+extern int gr_chroot_is_capable(const int cap);
64992+extern int gr_chroot_is_capable_nolog(const int cap);
64993+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
64994+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
64995+#endif
64996+
64997+const char *captab_log[] = {
64998+ "CAP_CHOWN",
64999+ "CAP_DAC_OVERRIDE",
65000+ "CAP_DAC_READ_SEARCH",
65001+ "CAP_FOWNER",
65002+ "CAP_FSETID",
65003+ "CAP_KILL",
65004+ "CAP_SETGID",
65005+ "CAP_SETUID",
65006+ "CAP_SETPCAP",
65007+ "CAP_LINUX_IMMUTABLE",
65008+ "CAP_NET_BIND_SERVICE",
65009+ "CAP_NET_BROADCAST",
65010+ "CAP_NET_ADMIN",
65011+ "CAP_NET_RAW",
65012+ "CAP_IPC_LOCK",
65013+ "CAP_IPC_OWNER",
65014+ "CAP_SYS_MODULE",
65015+ "CAP_SYS_RAWIO",
65016+ "CAP_SYS_CHROOT",
65017+ "CAP_SYS_PTRACE",
65018+ "CAP_SYS_PACCT",
65019+ "CAP_SYS_ADMIN",
65020+ "CAP_SYS_BOOT",
65021+ "CAP_SYS_NICE",
65022+ "CAP_SYS_RESOURCE",
65023+ "CAP_SYS_TIME",
65024+ "CAP_SYS_TTY_CONFIG",
65025+ "CAP_MKNOD",
65026+ "CAP_LEASE",
65027+ "CAP_AUDIT_WRITE",
65028+ "CAP_AUDIT_CONTROL",
65029+ "CAP_SETFCAP",
65030+ "CAP_MAC_OVERRIDE",
65031+ "CAP_MAC_ADMIN",
65032+ "CAP_SYSLOG",
65033+ "CAP_WAKE_ALARM"
65034+};
65035+
65036+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
65037+
65038+int gr_is_capable(const int cap)
65039+{
65040+#ifdef CONFIG_GRKERNSEC
65041+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
65042+ return 1;
65043+ return 0;
65044+#else
65045+ return 1;
65046+#endif
65047+}
65048+
65049+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
65050+{
65051+#ifdef CONFIG_GRKERNSEC
65052+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
65053+ return 1;
65054+ return 0;
65055+#else
65056+ return 1;
65057+#endif
65058+}
65059+
65060+int gr_is_capable_nolog(const int cap)
65061+{
65062+#ifdef CONFIG_GRKERNSEC
65063+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
65064+ return 1;
65065+ return 0;
65066+#else
65067+ return 1;
65068+#endif
65069+}
65070+
65071+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
65072+{
65073+#ifdef CONFIG_GRKERNSEC
65074+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
65075+ return 1;
65076+ return 0;
65077+#else
65078+ return 1;
65079+#endif
65080+}
65081+
65082+EXPORT_SYMBOL(gr_is_capable);
65083+EXPORT_SYMBOL(gr_is_capable_nolog);
65084+EXPORT_SYMBOL(gr_task_is_capable);
65085+EXPORT_SYMBOL(gr_task_is_capable_nolog);
65086diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
65087new file mode 100644
65088index 0000000..06cc6ea
65089--- /dev/null
65090+++ b/grsecurity/grsec_fifo.c
65091@@ -0,0 +1,24 @@
65092+#include <linux/kernel.h>
65093+#include <linux/sched.h>
65094+#include <linux/fs.h>
65095+#include <linux/file.h>
65096+#include <linux/grinternal.h>
65097+
65098+int
65099+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
65100+ const struct dentry *dir, const int flag, const int acc_mode)
65101+{
65102+#ifdef CONFIG_GRKERNSEC_FIFO
65103+ const struct cred *cred = current_cred();
65104+
65105+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
65106+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
65107+ !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
65108+ !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
65109+ if (!inode_permission(dentry->d_inode, acc_mode))
65110+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
65111+ return -EACCES;
65112+ }
65113+#endif
65114+ return 0;
65115+}
65116diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
65117new file mode 100644
65118index 0000000..8ca18bf
65119--- /dev/null
65120+++ b/grsecurity/grsec_fork.c
65121@@ -0,0 +1,23 @@
65122+#include <linux/kernel.h>
65123+#include <linux/sched.h>
65124+#include <linux/grsecurity.h>
65125+#include <linux/grinternal.h>
65126+#include <linux/errno.h>
65127+
65128+void
65129+gr_log_forkfail(const int retval)
65130+{
65131+#ifdef CONFIG_GRKERNSEC_FORKFAIL
65132+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
65133+ switch (retval) {
65134+ case -EAGAIN:
65135+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
65136+ break;
65137+ case -ENOMEM:
65138+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
65139+ break;
65140+ }
65141+ }
65142+#endif
65143+ return;
65144+}
65145diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
65146new file mode 100644
65147index 0000000..a862e9f
65148--- /dev/null
65149+++ b/grsecurity/grsec_init.c
65150@@ -0,0 +1,283 @@
65151+#include <linux/kernel.h>
65152+#include <linux/sched.h>
65153+#include <linux/mm.h>
65154+#include <linux/gracl.h>
65155+#include <linux/slab.h>
65156+#include <linux/vmalloc.h>
65157+#include <linux/percpu.h>
65158+#include <linux/module.h>
65159+
65160+int grsec_enable_ptrace_readexec;
65161+int grsec_enable_setxid;
65162+int grsec_enable_symlinkown;
65163+kgid_t grsec_symlinkown_gid;
65164+int grsec_enable_brute;
65165+int grsec_enable_link;
65166+int grsec_enable_dmesg;
65167+int grsec_enable_harden_ptrace;
65168+int grsec_enable_fifo;
65169+int grsec_enable_execlog;
65170+int grsec_enable_signal;
65171+int grsec_enable_forkfail;
65172+int grsec_enable_audit_ptrace;
65173+int grsec_enable_time;
65174+int grsec_enable_audit_textrel;
65175+int grsec_enable_group;
65176+kgid_t grsec_audit_gid;
65177+int grsec_enable_chdir;
65178+int grsec_enable_mount;
65179+int grsec_enable_rofs;
65180+int grsec_enable_chroot_findtask;
65181+int grsec_enable_chroot_mount;
65182+int grsec_enable_chroot_shmat;
65183+int grsec_enable_chroot_fchdir;
65184+int grsec_enable_chroot_double;
65185+int grsec_enable_chroot_pivot;
65186+int grsec_enable_chroot_chdir;
65187+int grsec_enable_chroot_chmod;
65188+int grsec_enable_chroot_mknod;
65189+int grsec_enable_chroot_nice;
65190+int grsec_enable_chroot_execlog;
65191+int grsec_enable_chroot_caps;
65192+int grsec_enable_chroot_sysctl;
65193+int grsec_enable_chroot_unix;
65194+int grsec_enable_tpe;
65195+kgid_t grsec_tpe_gid;
65196+int grsec_enable_blackhole;
65197+#ifdef CONFIG_IPV6_MODULE
65198+EXPORT_SYMBOL(grsec_enable_blackhole);
65199+#endif
65200+int grsec_lastack_retries;
65201+int grsec_enable_tpe_all;
65202+int grsec_enable_tpe_invert;
65203+int grsec_enable_socket_all;
65204+kgid_t grsec_socket_all_gid;
65205+int grsec_enable_socket_client;
65206+kgid_t grsec_socket_client_gid;
65207+int grsec_enable_socket_server;
65208+kgid_t grsec_socket_server_gid;
65209+int grsec_resource_logging;
65210+int grsec_disable_privio;
65211+int grsec_enable_log_rwxmaps;
65212+int grsec_lock;
65213+
65214+DEFINE_SPINLOCK(grsec_alert_lock);
65215+unsigned long grsec_alert_wtime = 0;
65216+unsigned long grsec_alert_fyet = 0;
65217+
65218+DEFINE_SPINLOCK(grsec_audit_lock);
65219+
65220+DEFINE_RWLOCK(grsec_exec_file_lock);
65221+
65222+char *gr_shared_page[4];
65223+
65224+char *gr_alert_log_fmt;
65225+char *gr_audit_log_fmt;
65226+char *gr_alert_log_buf;
65227+char *gr_audit_log_buf;
65228+
65229+extern struct gr_arg *gr_usermode;
65230+extern unsigned char *gr_system_salt;
65231+extern unsigned char *gr_system_sum;
65232+
65233+void __init
65234+grsecurity_init(void)
65235+{
65236+ int j;
65237+ /* create the per-cpu shared pages */
65238+
65239+#ifdef CONFIG_X86
65240+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
65241+#endif
65242+
65243+ for (j = 0; j < 4; j++) {
65244+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
65245+ if (gr_shared_page[j] == NULL) {
65246+ panic("Unable to allocate grsecurity shared page");
65247+ return;
65248+ }
65249+ }
65250+
65251+ /* allocate log buffers */
65252+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
65253+ if (!gr_alert_log_fmt) {
65254+ panic("Unable to allocate grsecurity alert log format buffer");
65255+ return;
65256+ }
65257+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
65258+ if (!gr_audit_log_fmt) {
65259+ panic("Unable to allocate grsecurity audit log format buffer");
65260+ return;
65261+ }
65262+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
65263+ if (!gr_alert_log_buf) {
65264+ panic("Unable to allocate grsecurity alert log buffer");
65265+ return;
65266+ }
65267+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
65268+ if (!gr_audit_log_buf) {
65269+ panic("Unable to allocate grsecurity audit log buffer");
65270+ return;
65271+ }
65272+
65273+ /* allocate memory for authentication structure */
65274+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
65275+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
65276+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
65277+
65278+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
65279+ panic("Unable to allocate grsecurity authentication structure");
65280+ return;
65281+ }
65282+
65283+
65284+#ifdef CONFIG_GRKERNSEC_IO
65285+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
65286+ grsec_disable_privio = 1;
65287+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
65288+ grsec_disable_privio = 1;
65289+#else
65290+ grsec_disable_privio = 0;
65291+#endif
65292+#endif
65293+
65294+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
65295+ /* for backward compatibility, tpe_invert always defaults to on if
65296+ enabled in the kernel
65297+ */
65298+ grsec_enable_tpe_invert = 1;
65299+#endif
65300+
65301+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
65302+#ifndef CONFIG_GRKERNSEC_SYSCTL
65303+ grsec_lock = 1;
65304+#endif
65305+
65306+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
65307+ grsec_enable_audit_textrel = 1;
65308+#endif
65309+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
65310+ grsec_enable_log_rwxmaps = 1;
65311+#endif
65312+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
65313+ grsec_enable_group = 1;
65314+ grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
65315+#endif
65316+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
65317+ grsec_enable_ptrace_readexec = 1;
65318+#endif
65319+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
65320+ grsec_enable_chdir = 1;
65321+#endif
65322+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
65323+ grsec_enable_harden_ptrace = 1;
65324+#endif
65325+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
65326+ grsec_enable_mount = 1;
65327+#endif
65328+#ifdef CONFIG_GRKERNSEC_LINK
65329+ grsec_enable_link = 1;
65330+#endif
65331+#ifdef CONFIG_GRKERNSEC_BRUTE
65332+ grsec_enable_brute = 1;
65333+#endif
65334+#ifdef CONFIG_GRKERNSEC_DMESG
65335+ grsec_enable_dmesg = 1;
65336+#endif
65337+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65338+ grsec_enable_blackhole = 1;
65339+ grsec_lastack_retries = 4;
65340+#endif
65341+#ifdef CONFIG_GRKERNSEC_FIFO
65342+ grsec_enable_fifo = 1;
65343+#endif
65344+#ifdef CONFIG_GRKERNSEC_EXECLOG
65345+ grsec_enable_execlog = 1;
65346+#endif
65347+#ifdef CONFIG_GRKERNSEC_SETXID
65348+ grsec_enable_setxid = 1;
65349+#endif
65350+#ifdef CONFIG_GRKERNSEC_SIGNAL
65351+ grsec_enable_signal = 1;
65352+#endif
65353+#ifdef CONFIG_GRKERNSEC_FORKFAIL
65354+ grsec_enable_forkfail = 1;
65355+#endif
65356+#ifdef CONFIG_GRKERNSEC_TIME
65357+ grsec_enable_time = 1;
65358+#endif
65359+#ifdef CONFIG_GRKERNSEC_RESLOG
65360+ grsec_resource_logging = 1;
65361+#endif
65362+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
65363+ grsec_enable_chroot_findtask = 1;
65364+#endif
65365+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
65366+ grsec_enable_chroot_unix = 1;
65367+#endif
65368+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
65369+ grsec_enable_chroot_mount = 1;
65370+#endif
65371+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
65372+ grsec_enable_chroot_fchdir = 1;
65373+#endif
65374+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
65375+ grsec_enable_chroot_shmat = 1;
65376+#endif
65377+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
65378+ grsec_enable_audit_ptrace = 1;
65379+#endif
65380+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
65381+ grsec_enable_chroot_double = 1;
65382+#endif
65383+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
65384+ grsec_enable_chroot_pivot = 1;
65385+#endif
65386+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
65387+ grsec_enable_chroot_chdir = 1;
65388+#endif
65389+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
65390+ grsec_enable_chroot_chmod = 1;
65391+#endif
65392+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
65393+ grsec_enable_chroot_mknod = 1;
65394+#endif
65395+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
65396+ grsec_enable_chroot_nice = 1;
65397+#endif
65398+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
65399+ grsec_enable_chroot_execlog = 1;
65400+#endif
65401+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
65402+ grsec_enable_chroot_caps = 1;
65403+#endif
65404+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
65405+ grsec_enable_chroot_sysctl = 1;
65406+#endif
65407+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
65408+ grsec_enable_symlinkown = 1;
65409+ grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
65410+#endif
65411+#ifdef CONFIG_GRKERNSEC_TPE
65412+ grsec_enable_tpe = 1;
65413+ grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
65414+#ifdef CONFIG_GRKERNSEC_TPE_ALL
65415+ grsec_enable_tpe_all = 1;
65416+#endif
65417+#endif
65418+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
65419+ grsec_enable_socket_all = 1;
65420+ grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
65421+#endif
65422+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
65423+ grsec_enable_socket_client = 1;
65424+ grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
65425+#endif
65426+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
65427+ grsec_enable_socket_server = 1;
65428+ grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
65429+#endif
65430+#endif
65431+
65432+ return;
65433+}
65434diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
65435new file mode 100644
65436index 0000000..5e05e20
65437--- /dev/null
65438+++ b/grsecurity/grsec_link.c
65439@@ -0,0 +1,58 @@
65440+#include <linux/kernel.h>
65441+#include <linux/sched.h>
65442+#include <linux/fs.h>
65443+#include <linux/file.h>
65444+#include <linux/grinternal.h>
65445+
65446+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
65447+{
65448+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
65449+ const struct inode *link_inode = link->dentry->d_inode;
65450+
65451+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
65452+ /* ignore root-owned links, e.g. /proc/self */
65453+ gr_is_global_nonroot(link_inode->i_uid) && target &&
65454+ !uid_eq(link_inode->i_uid, target->i_uid)) {
65455+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
65456+ return 1;
65457+ }
65458+#endif
65459+ return 0;
65460+}
65461+
65462+int
65463+gr_handle_follow_link(const struct inode *parent,
65464+ const struct inode *inode,
65465+ const struct dentry *dentry, const struct vfsmount *mnt)
65466+{
65467+#ifdef CONFIG_GRKERNSEC_LINK
65468+ const struct cred *cred = current_cred();
65469+
65470+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
65471+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
65472+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
65473+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
65474+ return -EACCES;
65475+ }
65476+#endif
65477+ return 0;
65478+}
65479+
65480+int
65481+gr_handle_hardlink(const struct dentry *dentry,
65482+ const struct vfsmount *mnt,
65483+ struct inode *inode, const int mode, const struct filename *to)
65484+{
65485+#ifdef CONFIG_GRKERNSEC_LINK
65486+ const struct cred *cred = current_cred();
65487+
65488+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
65489+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
65490+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
65491+ !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
65492+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
65493+ return -EPERM;
65494+ }
65495+#endif
65496+ return 0;
65497+}
65498diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
65499new file mode 100644
65500index 0000000..7c06085
65501--- /dev/null
65502+++ b/grsecurity/grsec_log.c
65503@@ -0,0 +1,326 @@
65504+#include <linux/kernel.h>
65505+#include <linux/sched.h>
65506+#include <linux/file.h>
65507+#include <linux/tty.h>
65508+#include <linux/fs.h>
65509+#include <linux/grinternal.h>
65510+
65511+#ifdef CONFIG_TREE_PREEMPT_RCU
65512+#define DISABLE_PREEMPT() preempt_disable()
65513+#define ENABLE_PREEMPT() preempt_enable()
65514+#else
65515+#define DISABLE_PREEMPT()
65516+#define ENABLE_PREEMPT()
65517+#endif
65518+
65519+#define BEGIN_LOCKS(x) \
65520+ DISABLE_PREEMPT(); \
65521+ rcu_read_lock(); \
65522+ read_lock(&tasklist_lock); \
65523+ read_lock(&grsec_exec_file_lock); \
65524+ if (x != GR_DO_AUDIT) \
65525+ spin_lock(&grsec_alert_lock); \
65526+ else \
65527+ spin_lock(&grsec_audit_lock)
65528+
65529+#define END_LOCKS(x) \
65530+ if (x != GR_DO_AUDIT) \
65531+ spin_unlock(&grsec_alert_lock); \
65532+ else \
65533+ spin_unlock(&grsec_audit_lock); \
65534+ read_unlock(&grsec_exec_file_lock); \
65535+ read_unlock(&tasklist_lock); \
65536+ rcu_read_unlock(); \
65537+ ENABLE_PREEMPT(); \
65538+ if (x == GR_DONT_AUDIT) \
65539+ gr_handle_alertkill(current)
65540+
65541+enum {
65542+ FLOODING,
65543+ NO_FLOODING
65544+};
65545+
65546+extern char *gr_alert_log_fmt;
65547+extern char *gr_audit_log_fmt;
65548+extern char *gr_alert_log_buf;
65549+extern char *gr_audit_log_buf;
65550+
65551+static int gr_log_start(int audit)
65552+{
65553+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
65554+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
65555+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
65556+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
65557+ unsigned long curr_secs = get_seconds();
65558+
65559+ if (audit == GR_DO_AUDIT)
65560+ goto set_fmt;
65561+
65562+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
65563+ grsec_alert_wtime = curr_secs;
65564+ grsec_alert_fyet = 0;
65565+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
65566+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
65567+ grsec_alert_fyet++;
65568+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
65569+ grsec_alert_wtime = curr_secs;
65570+ grsec_alert_fyet++;
65571+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
65572+ return FLOODING;
65573+ }
65574+ else return FLOODING;
65575+
65576+set_fmt:
65577+#endif
65578+ memset(buf, 0, PAGE_SIZE);
65579+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
65580+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
65581+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
65582+ } else if (current->signal->curr_ip) {
65583+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
65584+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
65585+ } else if (gr_acl_is_enabled()) {
65586+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
65587+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
65588+ } else {
65589+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
65590+ strcpy(buf, fmt);
65591+ }
65592+
65593+ return NO_FLOODING;
65594+}
65595+
65596+static void gr_log_middle(int audit, const char *msg, va_list ap)
65597+ __attribute__ ((format (printf, 2, 0)));
65598+
65599+static void gr_log_middle(int audit, const char *msg, va_list ap)
65600+{
65601+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
65602+ unsigned int len = strlen(buf);
65603+
65604+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
65605+
65606+ return;
65607+}
65608+
65609+static void gr_log_middle_varargs(int audit, const char *msg, ...)
65610+ __attribute__ ((format (printf, 2, 3)));
65611+
65612+static void gr_log_middle_varargs(int audit, const char *msg, ...)
65613+{
65614+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
65615+ unsigned int len = strlen(buf);
65616+ va_list ap;
65617+
65618+ va_start(ap, msg);
65619+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
65620+ va_end(ap);
65621+
65622+ return;
65623+}
65624+
65625+static void gr_log_end(int audit, int append_default)
65626+{
65627+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
65628+ if (append_default) {
65629+ struct task_struct *task = current;
65630+ struct task_struct *parent = task->real_parent;
65631+ const struct cred *cred = __task_cred(task);
65632+ const struct cred *pcred = __task_cred(parent);
65633+ unsigned int len = strlen(buf);
65634+
65635+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
65636+ }
65637+
65638+ printk("%s\n", buf);
65639+
65640+ return;
65641+}
65642+
65643+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
65644+{
65645+ int logtype;
65646+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
65647+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
65648+ void *voidptr = NULL;
65649+ int num1 = 0, num2 = 0;
65650+ unsigned long ulong1 = 0, ulong2 = 0;
65651+ struct dentry *dentry = NULL;
65652+ struct vfsmount *mnt = NULL;
65653+ struct file *file = NULL;
65654+ struct task_struct *task = NULL;
65655+ const struct cred *cred, *pcred;
65656+ va_list ap;
65657+
65658+ BEGIN_LOCKS(audit);
65659+ logtype = gr_log_start(audit);
65660+ if (logtype == FLOODING) {
65661+ END_LOCKS(audit);
65662+ return;
65663+ }
65664+ va_start(ap, argtypes);
65665+ switch (argtypes) {
65666+ case GR_TTYSNIFF:
65667+ task = va_arg(ap, struct task_struct *);
65668+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
65669+ break;
65670+ case GR_SYSCTL_HIDDEN:
65671+ str1 = va_arg(ap, char *);
65672+ gr_log_middle_varargs(audit, msg, result, str1);
65673+ break;
65674+ case GR_RBAC:
65675+ dentry = va_arg(ap, struct dentry *);
65676+ mnt = va_arg(ap, struct vfsmount *);
65677+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
65678+ break;
65679+ case GR_RBAC_STR:
65680+ dentry = va_arg(ap, struct dentry *);
65681+ mnt = va_arg(ap, struct vfsmount *);
65682+ str1 = va_arg(ap, char *);
65683+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
65684+ break;
65685+ case GR_STR_RBAC:
65686+ str1 = va_arg(ap, char *);
65687+ dentry = va_arg(ap, struct dentry *);
65688+ mnt = va_arg(ap, struct vfsmount *);
65689+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
65690+ break;
65691+ case GR_RBAC_MODE2:
65692+ dentry = va_arg(ap, struct dentry *);
65693+ mnt = va_arg(ap, struct vfsmount *);
65694+ str1 = va_arg(ap, char *);
65695+ str2 = va_arg(ap, char *);
65696+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
65697+ break;
65698+ case GR_RBAC_MODE3:
65699+ dentry = va_arg(ap, struct dentry *);
65700+ mnt = va_arg(ap, struct vfsmount *);
65701+ str1 = va_arg(ap, char *);
65702+ str2 = va_arg(ap, char *);
65703+ str3 = va_arg(ap, char *);
65704+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
65705+ break;
65706+ case GR_FILENAME:
65707+ dentry = va_arg(ap, struct dentry *);
65708+ mnt = va_arg(ap, struct vfsmount *);
65709+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
65710+ break;
65711+ case GR_STR_FILENAME:
65712+ str1 = va_arg(ap, char *);
65713+ dentry = va_arg(ap, struct dentry *);
65714+ mnt = va_arg(ap, struct vfsmount *);
65715+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
65716+ break;
65717+ case GR_FILENAME_STR:
65718+ dentry = va_arg(ap, struct dentry *);
65719+ mnt = va_arg(ap, struct vfsmount *);
65720+ str1 = va_arg(ap, char *);
65721+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
65722+ break;
65723+ case GR_FILENAME_TWO_INT:
65724+ dentry = va_arg(ap, struct dentry *);
65725+ mnt = va_arg(ap, struct vfsmount *);
65726+ num1 = va_arg(ap, int);
65727+ num2 = va_arg(ap, int);
65728+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
65729+ break;
65730+ case GR_FILENAME_TWO_INT_STR:
65731+ dentry = va_arg(ap, struct dentry *);
65732+ mnt = va_arg(ap, struct vfsmount *);
65733+ num1 = va_arg(ap, int);
65734+ num2 = va_arg(ap, int);
65735+ str1 = va_arg(ap, char *);
65736+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
65737+ break;
65738+ case GR_TEXTREL:
65739+ file = va_arg(ap, struct file *);
65740+ ulong1 = va_arg(ap, unsigned long);
65741+ ulong2 = va_arg(ap, unsigned long);
65742+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
65743+ break;
65744+ case GR_PTRACE:
65745+ task = va_arg(ap, struct task_struct *);
65746+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
65747+ break;
65748+ case GR_RESOURCE:
65749+ task = va_arg(ap, struct task_struct *);
65750+ cred = __task_cred(task);
65751+ pcred = __task_cred(task->real_parent);
65752+ ulong1 = va_arg(ap, unsigned long);
65753+ str1 = va_arg(ap, char *);
65754+ ulong2 = va_arg(ap, unsigned long);
65755+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
65756+ break;
65757+ case GR_CAP:
65758+ task = va_arg(ap, struct task_struct *);
65759+ cred = __task_cred(task);
65760+ pcred = __task_cred(task->real_parent);
65761+ str1 = va_arg(ap, char *);
65762+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
65763+ break;
65764+ case GR_SIG:
65765+ str1 = va_arg(ap, char *);
65766+ voidptr = va_arg(ap, void *);
65767+ gr_log_middle_varargs(audit, msg, str1, voidptr);
65768+ break;
65769+ case GR_SIG2:
65770+ task = va_arg(ap, struct task_struct *);
65771+ cred = __task_cred(task);
65772+ pcred = __task_cred(task->real_parent);
65773+ num1 = va_arg(ap, int);
65774+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
65775+ break;
65776+ case GR_CRASH1:
65777+ task = va_arg(ap, struct task_struct *);
65778+ cred = __task_cred(task);
65779+ pcred = __task_cred(task->real_parent);
65780+ ulong1 = va_arg(ap, unsigned long);
65781+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
65782+ break;
65783+ case GR_CRASH2:
65784+ task = va_arg(ap, struct task_struct *);
65785+ cred = __task_cred(task);
65786+ pcred = __task_cred(task->real_parent);
65787+ ulong1 = va_arg(ap, unsigned long);
65788+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
65789+ break;
65790+ case GR_RWXMAP:
65791+ file = va_arg(ap, struct file *);
65792+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
65793+ break;
65794+ case GR_PSACCT:
65795+ {
65796+ unsigned int wday, cday;
65797+ __u8 whr, chr;
65798+ __u8 wmin, cmin;
65799+ __u8 wsec, csec;
65800+ char cur_tty[64] = { 0 };
65801+ char parent_tty[64] = { 0 };
65802+
65803+ task = va_arg(ap, struct task_struct *);
65804+ wday = va_arg(ap, unsigned int);
65805+ cday = va_arg(ap, unsigned int);
65806+ whr = va_arg(ap, int);
65807+ chr = va_arg(ap, int);
65808+ wmin = va_arg(ap, int);
65809+ cmin = va_arg(ap, int);
65810+ wsec = va_arg(ap, int);
65811+ csec = va_arg(ap, int);
65812+ ulong1 = va_arg(ap, unsigned long);
65813+ cred = __task_cred(task);
65814+ pcred = __task_cred(task->real_parent);
65815+
65816+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
65817+ }
65818+ break;
65819+ default:
65820+ gr_log_middle(audit, msg, ap);
65821+ }
65822+ va_end(ap);
65823+ // these don't need DEFAULTSECARGS printed on the end
65824+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
65825+ gr_log_end(audit, 0);
65826+ else
65827+ gr_log_end(audit, 1);
65828+ END_LOCKS(audit);
65829+}
65830diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
65831new file mode 100644
65832index 0000000..f536303
65833--- /dev/null
65834+++ b/grsecurity/grsec_mem.c
65835@@ -0,0 +1,40 @@
65836+#include <linux/kernel.h>
65837+#include <linux/sched.h>
65838+#include <linux/mm.h>
65839+#include <linux/mman.h>
65840+#include <linux/grinternal.h>
65841+
65842+void
65843+gr_handle_ioperm(void)
65844+{
65845+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
65846+ return;
65847+}
65848+
65849+void
65850+gr_handle_iopl(void)
65851+{
65852+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
65853+ return;
65854+}
65855+
65856+void
65857+gr_handle_mem_readwrite(u64 from, u64 to)
65858+{
65859+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
65860+ return;
65861+}
65862+
65863+void
65864+gr_handle_vm86(void)
65865+{
65866+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
65867+ return;
65868+}
65869+
65870+void
65871+gr_log_badprocpid(const char *entry)
65872+{
65873+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
65874+ return;
65875+}
65876diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
65877new file mode 100644
65878index 0000000..2131422
65879--- /dev/null
65880+++ b/grsecurity/grsec_mount.c
65881@@ -0,0 +1,62 @@
65882+#include <linux/kernel.h>
65883+#include <linux/sched.h>
65884+#include <linux/mount.h>
65885+#include <linux/grsecurity.h>
65886+#include <linux/grinternal.h>
65887+
65888+void
65889+gr_log_remount(const char *devname, const int retval)
65890+{
65891+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
65892+ if (grsec_enable_mount && (retval >= 0))
65893+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
65894+#endif
65895+ return;
65896+}
65897+
65898+void
65899+gr_log_unmount(const char *devname, const int retval)
65900+{
65901+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
65902+ if (grsec_enable_mount && (retval >= 0))
65903+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
65904+#endif
65905+ return;
65906+}
65907+
65908+void
65909+gr_log_mount(const char *from, const char *to, const int retval)
65910+{
65911+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
65912+ if (grsec_enable_mount && (retval >= 0))
65913+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
65914+#endif
65915+ return;
65916+}
65917+
65918+int
65919+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
65920+{
65921+#ifdef CONFIG_GRKERNSEC_ROFS
65922+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
65923+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
65924+ return -EPERM;
65925+ } else
65926+ return 0;
65927+#endif
65928+ return 0;
65929+}
65930+
65931+int
65932+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
65933+{
65934+#ifdef CONFIG_GRKERNSEC_ROFS
65935+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
65936+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
65937+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
65938+ return -EPERM;
65939+ } else
65940+ return 0;
65941+#endif
65942+ return 0;
65943+}
65944diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
65945new file mode 100644
65946index 0000000..a3b12a0
65947--- /dev/null
65948+++ b/grsecurity/grsec_pax.c
65949@@ -0,0 +1,36 @@
65950+#include <linux/kernel.h>
65951+#include <linux/sched.h>
65952+#include <linux/mm.h>
65953+#include <linux/file.h>
65954+#include <linux/grinternal.h>
65955+#include <linux/grsecurity.h>
65956+
65957+void
65958+gr_log_textrel(struct vm_area_struct * vma)
65959+{
65960+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
65961+ if (grsec_enable_audit_textrel)
65962+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
65963+#endif
65964+ return;
65965+}
65966+
65967+void
65968+gr_log_rwxmmap(struct file *file)
65969+{
65970+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
65971+ if (grsec_enable_log_rwxmaps)
65972+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
65973+#endif
65974+ return;
65975+}
65976+
65977+void
65978+gr_log_rwxmprotect(struct file *file)
65979+{
65980+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
65981+ if (grsec_enable_log_rwxmaps)
65982+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
65983+#endif
65984+ return;
65985+}
65986diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
65987new file mode 100644
65988index 0000000..f7f29aa
65989--- /dev/null
65990+++ b/grsecurity/grsec_ptrace.c
65991@@ -0,0 +1,30 @@
65992+#include <linux/kernel.h>
65993+#include <linux/sched.h>
65994+#include <linux/grinternal.h>
65995+#include <linux/security.h>
65996+
65997+void
65998+gr_audit_ptrace(struct task_struct *task)
65999+{
66000+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
66001+ if (grsec_enable_audit_ptrace)
66002+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
66003+#endif
66004+ return;
66005+}
66006+
66007+int
66008+gr_ptrace_readexec(struct file *file, int unsafe_flags)
66009+{
66010+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
66011+ const struct dentry *dentry = file->f_path.dentry;
66012+ const struct vfsmount *mnt = file->f_path.mnt;
66013+
66014+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
66015+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
66016+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
66017+ return -EACCES;
66018+ }
66019+#endif
66020+ return 0;
66021+}
66022diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
66023new file mode 100644
66024index 0000000..4e29cc7
66025--- /dev/null
66026+++ b/grsecurity/grsec_sig.c
66027@@ -0,0 +1,246 @@
66028+#include <linux/kernel.h>
66029+#include <linux/sched.h>
66030+#include <linux/fs.h>
66031+#include <linux/delay.h>
66032+#include <linux/grsecurity.h>
66033+#include <linux/grinternal.h>
66034+#include <linux/hardirq.h>
66035+
66036+char *signames[] = {
66037+ [SIGSEGV] = "Segmentation fault",
66038+ [SIGILL] = "Illegal instruction",
66039+ [SIGABRT] = "Abort",
66040+ [SIGBUS] = "Invalid alignment/Bus error"
66041+};
66042+
66043+void
66044+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
66045+{
66046+#ifdef CONFIG_GRKERNSEC_SIGNAL
66047+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
66048+ (sig == SIGABRT) || (sig == SIGBUS))) {
66049+ if (task_pid_nr(t) == task_pid_nr(current)) {
66050+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
66051+ } else {
66052+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
66053+ }
66054+ }
66055+#endif
66056+ return;
66057+}
66058+
66059+int
66060+gr_handle_signal(const struct task_struct *p, const int sig)
66061+{
66062+#ifdef CONFIG_GRKERNSEC
66063+ /* ignore the 0 signal for protected task checks */
66064+ if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
66065+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
66066+ return -EPERM;
66067+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
66068+ return -EPERM;
66069+ }
66070+#endif
66071+ return 0;
66072+}
66073+
66074+#ifdef CONFIG_GRKERNSEC
66075+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
66076+
66077+int gr_fake_force_sig(int sig, struct task_struct *t)
66078+{
66079+ unsigned long int flags;
66080+ int ret, blocked, ignored;
66081+ struct k_sigaction *action;
66082+
66083+ spin_lock_irqsave(&t->sighand->siglock, flags);
66084+ action = &t->sighand->action[sig-1];
66085+ ignored = action->sa.sa_handler == SIG_IGN;
66086+ blocked = sigismember(&t->blocked, sig);
66087+ if (blocked || ignored) {
66088+ action->sa.sa_handler = SIG_DFL;
66089+ if (blocked) {
66090+ sigdelset(&t->blocked, sig);
66091+ recalc_sigpending_and_wake(t);
66092+ }
66093+ }
66094+ if (action->sa.sa_handler == SIG_DFL)
66095+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
66096+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
66097+
66098+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
66099+
66100+ return ret;
66101+}
66102+#endif
66103+
66104+#ifdef CONFIG_GRKERNSEC_BRUTE
66105+#define GR_USER_BAN_TIME (15 * 60)
66106+#define GR_DAEMON_BRUTE_TIME (30 * 60)
66107+
66108+static int __get_dumpable(unsigned long mm_flags)
66109+{
66110+ int ret;
66111+
66112+ ret = mm_flags & MMF_DUMPABLE_MASK;
66113+ return (ret >= 2) ? 2 : ret;
66114+}
66115+#endif
66116+
66117+void gr_handle_brute_attach(unsigned long mm_flags)
66118+{
66119+#ifdef CONFIG_GRKERNSEC_BRUTE
66120+ struct task_struct *p = current;
66121+ kuid_t uid = GLOBAL_ROOT_UID;
66122+ int daemon = 0;
66123+
66124+ if (!grsec_enable_brute)
66125+ return;
66126+
66127+ rcu_read_lock();
66128+ read_lock(&tasklist_lock);
66129+ read_lock(&grsec_exec_file_lock);
66130+ if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) {
66131+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
66132+ p->real_parent->brute = 1;
66133+ daemon = 1;
66134+ } else {
66135+ const struct cred *cred = __task_cred(p), *cred2;
66136+ struct task_struct *tsk, *tsk2;
66137+
66138+ if (!__get_dumpable(mm_flags) && gr_is_global_nonroot(cred->uid)) {
66139+ struct user_struct *user;
66140+
66141+ uid = cred->uid;
66142+
66143+ /* this is put upon execution past expiration */
66144+ user = find_user(uid);
66145+ if (user == NULL)
66146+ goto unlock;
66147+ user->suid_banned = 1;
66148+ user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME;
66149+ if (user->suid_ban_expires == ~0UL)
66150+ user->suid_ban_expires--;
66151+
66152+ /* only kill other threads of the same binary, from the same user */
66153+ do_each_thread(tsk2, tsk) {
66154+ cred2 = __task_cred(tsk);
66155+ if (tsk != p && uid_eq(cred2->uid, uid) && gr_is_same_file(tsk->exec_file, p->exec_file))
66156+ gr_fake_force_sig(SIGKILL, tsk);
66157+ } while_each_thread(tsk2, tsk);
66158+ }
66159+ }
66160+unlock:
66161+ read_unlock(&grsec_exec_file_lock);
66162+ read_unlock(&tasklist_lock);
66163+ rcu_read_unlock();
66164+
66165+ if (gr_is_global_nonroot(uid))
66166+ gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
66167+ else if (daemon)
66168+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
66169+
66170+#endif
66171+ return;
66172+}
66173+
66174+void gr_handle_brute_check(void)
66175+{
66176+#ifdef CONFIG_GRKERNSEC_BRUTE
66177+ struct task_struct *p = current;
66178+
66179+ if (unlikely(p->brute)) {
66180+ if (!grsec_enable_brute)
66181+ p->brute = 0;
66182+ else if (time_before(get_seconds(), p->brute_expires))
66183+ msleep(30 * 1000);
66184+ }
66185+#endif
66186+ return;
66187+}
66188+
66189+void gr_handle_kernel_exploit(void)
66190+{
66191+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
66192+ const struct cred *cred;
66193+ struct task_struct *tsk, *tsk2;
66194+ struct user_struct *user;
66195+ kuid_t uid;
66196+
66197+ if (in_irq() || in_serving_softirq() || in_nmi())
66198+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
66199+
66200+ uid = current_uid();
66201+
66202+ if (gr_is_global_root(uid))
66203+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
66204+ else {
66205+ /* kill all the processes of this user, hold a reference
66206+ to their creds struct, and prevent them from creating
66207+ another process until system reset
66208+ */
66209+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
66210+ GR_GLOBAL_UID(uid));
66211+ /* we intentionally leak this ref */
66212+ user = get_uid(current->cred->user);
66213+ if (user)
66214+ user->kernel_banned = 1;
66215+
66216+ /* kill all processes of this user */
66217+ read_lock(&tasklist_lock);
66218+ do_each_thread(tsk2, tsk) {
66219+ cred = __task_cred(tsk);
66220+ if (uid_eq(cred->uid, uid))
66221+ gr_fake_force_sig(SIGKILL, tsk);
66222+ } while_each_thread(tsk2, tsk);
66223+ read_unlock(&tasklist_lock);
66224+ }
66225+#endif
66226+}
66227+
66228+#ifdef CONFIG_GRKERNSEC_BRUTE
66229+static bool suid_ban_expired(struct user_struct *user)
66230+{
66231+ if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) {
66232+ user->suid_banned = 0;
66233+ user->suid_ban_expires = 0;
66234+ free_uid(user);
66235+ return true;
66236+ }
66237+
66238+ return false;
66239+}
66240+#endif
66241+
66242+int gr_process_kernel_exec_ban(void)
66243+{
66244+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
66245+ if (unlikely(current->cred->user->kernel_banned))
66246+ return -EPERM;
66247+#endif
66248+ return 0;
66249+}
66250+
66251+int gr_process_kernel_setuid_ban(struct user_struct *user)
66252+{
66253+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
66254+ if (unlikely(user->kernel_banned))
66255+ gr_fake_force_sig(SIGKILL, current);
66256+#endif
66257+ return 0;
66258+}
66259+
66260+int gr_process_suid_exec_ban(const struct linux_binprm *bprm)
66261+{
66262+#ifdef CONFIG_GRKERNSEC_BRUTE
66263+ struct user_struct *user = current->cred->user;
66264+ if (unlikely(user->suid_banned)) {
66265+ if (suid_ban_expired(user))
66266+ return 0;
66267+ /* disallow execution of suid binaries only */
66268+ else if (!uid_eq(bprm->cred->euid, current->cred->uid))
66269+ return -EPERM;
66270+ }
66271+#endif
66272+ return 0;
66273+}
66274diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
66275new file mode 100644
66276index 0000000..4030d57
66277--- /dev/null
66278+++ b/grsecurity/grsec_sock.c
66279@@ -0,0 +1,244 @@
66280+#include <linux/kernel.h>
66281+#include <linux/module.h>
66282+#include <linux/sched.h>
66283+#include <linux/file.h>
66284+#include <linux/net.h>
66285+#include <linux/in.h>
66286+#include <linux/ip.h>
66287+#include <net/sock.h>
66288+#include <net/inet_sock.h>
66289+#include <linux/grsecurity.h>
66290+#include <linux/grinternal.h>
66291+#include <linux/gracl.h>
66292+
66293+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
66294+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
66295+
66296+EXPORT_SYMBOL(gr_search_udp_recvmsg);
66297+EXPORT_SYMBOL(gr_search_udp_sendmsg);
66298+
66299+#ifdef CONFIG_UNIX_MODULE
66300+EXPORT_SYMBOL(gr_acl_handle_unix);
66301+EXPORT_SYMBOL(gr_acl_handle_mknod);
66302+EXPORT_SYMBOL(gr_handle_chroot_unix);
66303+EXPORT_SYMBOL(gr_handle_create);
66304+#endif
66305+
66306+#ifdef CONFIG_GRKERNSEC
66307+#define gr_conn_table_size 32749
66308+struct conn_table_entry {
66309+ struct conn_table_entry *next;
66310+ struct signal_struct *sig;
66311+};
66312+
66313+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
66314+DEFINE_SPINLOCK(gr_conn_table_lock);
66315+
66316+extern const char * gr_socktype_to_name(unsigned char type);
66317+extern const char * gr_proto_to_name(unsigned char proto);
66318+extern const char * gr_sockfamily_to_name(unsigned char family);
66319+
66320+static __inline__ int
66321+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
66322+{
66323+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
66324+}
66325+
66326+static __inline__ int
66327+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
66328+ __u16 sport, __u16 dport)
66329+{
66330+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
66331+ sig->gr_sport == sport && sig->gr_dport == dport))
66332+ return 1;
66333+ else
66334+ return 0;
66335+}
66336+
66337+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
66338+{
66339+ struct conn_table_entry **match;
66340+ unsigned int index;
66341+
66342+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
66343+ sig->gr_sport, sig->gr_dport,
66344+ gr_conn_table_size);
66345+
66346+ newent->sig = sig;
66347+
66348+ match = &gr_conn_table[index];
66349+ newent->next = *match;
66350+ *match = newent;
66351+
66352+ return;
66353+}
66354+
66355+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
66356+{
66357+ struct conn_table_entry *match, *last = NULL;
66358+ unsigned int index;
66359+
66360+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
66361+ sig->gr_sport, sig->gr_dport,
66362+ gr_conn_table_size);
66363+
66364+ match = gr_conn_table[index];
66365+ while (match && !conn_match(match->sig,
66366+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
66367+ sig->gr_dport)) {
66368+ last = match;
66369+ match = match->next;
66370+ }
66371+
66372+ if (match) {
66373+ if (last)
66374+ last->next = match->next;
66375+ else
66376+ gr_conn_table[index] = NULL;
66377+ kfree(match);
66378+ }
66379+
66380+ return;
66381+}
66382+
66383+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
66384+ __u16 sport, __u16 dport)
66385+{
66386+ struct conn_table_entry *match;
66387+ unsigned int index;
66388+
66389+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
66390+
66391+ match = gr_conn_table[index];
66392+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
66393+ match = match->next;
66394+
66395+ if (match)
66396+ return match->sig;
66397+ else
66398+ return NULL;
66399+}
66400+
66401+#endif
66402+
66403+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
66404+{
66405+#ifdef CONFIG_GRKERNSEC
66406+ struct signal_struct *sig = task->signal;
66407+ struct conn_table_entry *newent;
66408+
66409+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
66410+ if (newent == NULL)
66411+ return;
66412+ /* no bh lock needed since we are called with bh disabled */
66413+ spin_lock(&gr_conn_table_lock);
66414+ gr_del_task_from_ip_table_nolock(sig);
66415+ sig->gr_saddr = inet->inet_rcv_saddr;
66416+ sig->gr_daddr = inet->inet_daddr;
66417+ sig->gr_sport = inet->inet_sport;
66418+ sig->gr_dport = inet->inet_dport;
66419+ gr_add_to_task_ip_table_nolock(sig, newent);
66420+ spin_unlock(&gr_conn_table_lock);
66421+#endif
66422+ return;
66423+}
66424+
66425+void gr_del_task_from_ip_table(struct task_struct *task)
66426+{
66427+#ifdef CONFIG_GRKERNSEC
66428+ spin_lock_bh(&gr_conn_table_lock);
66429+ gr_del_task_from_ip_table_nolock(task->signal);
66430+ spin_unlock_bh(&gr_conn_table_lock);
66431+#endif
66432+ return;
66433+}
66434+
66435+void
66436+gr_attach_curr_ip(const struct sock *sk)
66437+{
66438+#ifdef CONFIG_GRKERNSEC
66439+ struct signal_struct *p, *set;
66440+ const struct inet_sock *inet = inet_sk(sk);
66441+
66442+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
66443+ return;
66444+
66445+ set = current->signal;
66446+
66447+ spin_lock_bh(&gr_conn_table_lock);
66448+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
66449+ inet->inet_dport, inet->inet_sport);
66450+ if (unlikely(p != NULL)) {
66451+ set->curr_ip = p->curr_ip;
66452+ set->used_accept = 1;
66453+ gr_del_task_from_ip_table_nolock(p);
66454+ spin_unlock_bh(&gr_conn_table_lock);
66455+ return;
66456+ }
66457+ spin_unlock_bh(&gr_conn_table_lock);
66458+
66459+ set->curr_ip = inet->inet_daddr;
66460+ set->used_accept = 1;
66461+#endif
66462+ return;
66463+}
66464+
66465+int
66466+gr_handle_sock_all(const int family, const int type, const int protocol)
66467+{
66468+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
66469+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
66470+ (family != AF_UNIX)) {
66471+ if (family == AF_INET)
66472+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
66473+ else
66474+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
66475+ return -EACCES;
66476+ }
66477+#endif
66478+ return 0;
66479+}
66480+
66481+int
66482+gr_handle_sock_server(const struct sockaddr *sck)
66483+{
66484+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
66485+ if (grsec_enable_socket_server &&
66486+ in_group_p(grsec_socket_server_gid) &&
66487+ sck && (sck->sa_family != AF_UNIX) &&
66488+ (sck->sa_family != AF_LOCAL)) {
66489+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
66490+ return -EACCES;
66491+ }
66492+#endif
66493+ return 0;
66494+}
66495+
66496+int
66497+gr_handle_sock_server_other(const struct sock *sck)
66498+{
66499+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
66500+ if (grsec_enable_socket_server &&
66501+ in_group_p(grsec_socket_server_gid) &&
66502+ sck && (sck->sk_family != AF_UNIX) &&
66503+ (sck->sk_family != AF_LOCAL)) {
66504+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
66505+ return -EACCES;
66506+ }
66507+#endif
66508+ return 0;
66509+}
66510+
66511+int
66512+gr_handle_sock_client(const struct sockaddr *sck)
66513+{
66514+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
66515+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
66516+ sck && (sck->sa_family != AF_UNIX) &&
66517+ (sck->sa_family != AF_LOCAL)) {
66518+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
66519+ return -EACCES;
66520+ }
66521+#endif
66522+ return 0;
66523+}
66524diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
66525new file mode 100644
66526index 0000000..f55ef0f
66527--- /dev/null
66528+++ b/grsecurity/grsec_sysctl.c
66529@@ -0,0 +1,469 @@
66530+#include <linux/kernel.h>
66531+#include <linux/sched.h>
66532+#include <linux/sysctl.h>
66533+#include <linux/grsecurity.h>
66534+#include <linux/grinternal.h>
66535+
66536+int
66537+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
66538+{
66539+#ifdef CONFIG_GRKERNSEC_SYSCTL
66540+ if (dirname == NULL || name == NULL)
66541+ return 0;
66542+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
66543+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
66544+ return -EACCES;
66545+ }
66546+#endif
66547+ return 0;
66548+}
66549+
66550+#ifdef CONFIG_GRKERNSEC_ROFS
66551+static int __maybe_unused one = 1;
66552+#endif
66553+
66554+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
66555+struct ctl_table grsecurity_table[] = {
66556+#ifdef CONFIG_GRKERNSEC_SYSCTL
66557+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
66558+#ifdef CONFIG_GRKERNSEC_IO
66559+ {
66560+ .procname = "disable_priv_io",
66561+ .data = &grsec_disable_privio,
66562+ .maxlen = sizeof(int),
66563+ .mode = 0600,
66564+ .proc_handler = &proc_dointvec,
66565+ },
66566+#endif
66567+#endif
66568+#ifdef CONFIG_GRKERNSEC_LINK
66569+ {
66570+ .procname = "linking_restrictions",
66571+ .data = &grsec_enable_link,
66572+ .maxlen = sizeof(int),
66573+ .mode = 0600,
66574+ .proc_handler = &proc_dointvec,
66575+ },
66576+#endif
66577+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
66578+ {
66579+ .procname = "enforce_symlinksifowner",
66580+ .data = &grsec_enable_symlinkown,
66581+ .maxlen = sizeof(int),
66582+ .mode = 0600,
66583+ .proc_handler = &proc_dointvec,
66584+ },
66585+ {
66586+ .procname = "symlinkown_gid",
66587+ .data = &grsec_symlinkown_gid,
66588+ .maxlen = sizeof(int),
66589+ .mode = 0600,
66590+ .proc_handler = &proc_dointvec,
66591+ },
66592+#endif
66593+#ifdef CONFIG_GRKERNSEC_BRUTE
66594+ {
66595+ .procname = "deter_bruteforce",
66596+ .data = &grsec_enable_brute,
66597+ .maxlen = sizeof(int),
66598+ .mode = 0600,
66599+ .proc_handler = &proc_dointvec,
66600+ },
66601+#endif
66602+#ifdef CONFIG_GRKERNSEC_FIFO
66603+ {
66604+ .procname = "fifo_restrictions",
66605+ .data = &grsec_enable_fifo,
66606+ .maxlen = sizeof(int),
66607+ .mode = 0600,
66608+ .proc_handler = &proc_dointvec,
66609+ },
66610+#endif
66611+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
66612+ {
66613+ .procname = "ptrace_readexec",
66614+ .data = &grsec_enable_ptrace_readexec,
66615+ .maxlen = sizeof(int),
66616+ .mode = 0600,
66617+ .proc_handler = &proc_dointvec,
66618+ },
66619+#endif
66620+#ifdef CONFIG_GRKERNSEC_SETXID
66621+ {
66622+ .procname = "consistent_setxid",
66623+ .data = &grsec_enable_setxid,
66624+ .maxlen = sizeof(int),
66625+ .mode = 0600,
66626+ .proc_handler = &proc_dointvec,
66627+ },
66628+#endif
66629+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66630+ {
66631+ .procname = "ip_blackhole",
66632+ .data = &grsec_enable_blackhole,
66633+ .maxlen = sizeof(int),
66634+ .mode = 0600,
66635+ .proc_handler = &proc_dointvec,
66636+ },
66637+ {
66638+ .procname = "lastack_retries",
66639+ .data = &grsec_lastack_retries,
66640+ .maxlen = sizeof(int),
66641+ .mode = 0600,
66642+ .proc_handler = &proc_dointvec,
66643+ },
66644+#endif
66645+#ifdef CONFIG_GRKERNSEC_EXECLOG
66646+ {
66647+ .procname = "exec_logging",
66648+ .data = &grsec_enable_execlog,
66649+ .maxlen = sizeof(int),
66650+ .mode = 0600,
66651+ .proc_handler = &proc_dointvec,
66652+ },
66653+#endif
66654+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
66655+ {
66656+ .procname = "rwxmap_logging",
66657+ .data = &grsec_enable_log_rwxmaps,
66658+ .maxlen = sizeof(int),
66659+ .mode = 0600,
66660+ .proc_handler = &proc_dointvec,
66661+ },
66662+#endif
66663+#ifdef CONFIG_GRKERNSEC_SIGNAL
66664+ {
66665+ .procname = "signal_logging",
66666+ .data = &grsec_enable_signal,
66667+ .maxlen = sizeof(int),
66668+ .mode = 0600,
66669+ .proc_handler = &proc_dointvec,
66670+ },
66671+#endif
66672+#ifdef CONFIG_GRKERNSEC_FORKFAIL
66673+ {
66674+ .procname = "forkfail_logging",
66675+ .data = &grsec_enable_forkfail,
66676+ .maxlen = sizeof(int),
66677+ .mode = 0600,
66678+ .proc_handler = &proc_dointvec,
66679+ },
66680+#endif
66681+#ifdef CONFIG_GRKERNSEC_TIME
66682+ {
66683+ .procname = "timechange_logging",
66684+ .data = &grsec_enable_time,
66685+ .maxlen = sizeof(int),
66686+ .mode = 0600,
66687+ .proc_handler = &proc_dointvec,
66688+ },
66689+#endif
66690+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
66691+ {
66692+ .procname = "chroot_deny_shmat",
66693+ .data = &grsec_enable_chroot_shmat,
66694+ .maxlen = sizeof(int),
66695+ .mode = 0600,
66696+ .proc_handler = &proc_dointvec,
66697+ },
66698+#endif
66699+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
66700+ {
66701+ .procname = "chroot_deny_unix",
66702+ .data = &grsec_enable_chroot_unix,
66703+ .maxlen = sizeof(int),
66704+ .mode = 0600,
66705+ .proc_handler = &proc_dointvec,
66706+ },
66707+#endif
66708+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
66709+ {
66710+ .procname = "chroot_deny_mount",
66711+ .data = &grsec_enable_chroot_mount,
66712+ .maxlen = sizeof(int),
66713+ .mode = 0600,
66714+ .proc_handler = &proc_dointvec,
66715+ },
66716+#endif
66717+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
66718+ {
66719+ .procname = "chroot_deny_fchdir",
66720+ .data = &grsec_enable_chroot_fchdir,
66721+ .maxlen = sizeof(int),
66722+ .mode = 0600,
66723+ .proc_handler = &proc_dointvec,
66724+ },
66725+#endif
66726+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
66727+ {
66728+ .procname = "chroot_deny_chroot",
66729+ .data = &grsec_enable_chroot_double,
66730+ .maxlen = sizeof(int),
66731+ .mode = 0600,
66732+ .proc_handler = &proc_dointvec,
66733+ },
66734+#endif
66735+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
66736+ {
66737+ .procname = "chroot_deny_pivot",
66738+ .data = &grsec_enable_chroot_pivot,
66739+ .maxlen = sizeof(int),
66740+ .mode = 0600,
66741+ .proc_handler = &proc_dointvec,
66742+ },
66743+#endif
66744+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
66745+ {
66746+ .procname = "chroot_enforce_chdir",
66747+ .data = &grsec_enable_chroot_chdir,
66748+ .maxlen = sizeof(int),
66749+ .mode = 0600,
66750+ .proc_handler = &proc_dointvec,
66751+ },
66752+#endif
66753+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
66754+ {
66755+ .procname = "chroot_deny_chmod",
66756+ .data = &grsec_enable_chroot_chmod,
66757+ .maxlen = sizeof(int),
66758+ .mode = 0600,
66759+ .proc_handler = &proc_dointvec,
66760+ },
66761+#endif
66762+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
66763+ {
66764+ .procname = "chroot_deny_mknod",
66765+ .data = &grsec_enable_chroot_mknod,
66766+ .maxlen = sizeof(int),
66767+ .mode = 0600,
66768+ .proc_handler = &proc_dointvec,
66769+ },
66770+#endif
66771+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
66772+ {
66773+ .procname = "chroot_restrict_nice",
66774+ .data = &grsec_enable_chroot_nice,
66775+ .maxlen = sizeof(int),
66776+ .mode = 0600,
66777+ .proc_handler = &proc_dointvec,
66778+ },
66779+#endif
66780+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
66781+ {
66782+ .procname = "chroot_execlog",
66783+ .data = &grsec_enable_chroot_execlog,
66784+ .maxlen = sizeof(int),
66785+ .mode = 0600,
66786+ .proc_handler = &proc_dointvec,
66787+ },
66788+#endif
66789+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
66790+ {
66791+ .procname = "chroot_caps",
66792+ .data = &grsec_enable_chroot_caps,
66793+ .maxlen = sizeof(int),
66794+ .mode = 0600,
66795+ .proc_handler = &proc_dointvec,
66796+ },
66797+#endif
66798+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
66799+ {
66800+ .procname = "chroot_deny_sysctl",
66801+ .data = &grsec_enable_chroot_sysctl,
66802+ .maxlen = sizeof(int),
66803+ .mode = 0600,
66804+ .proc_handler = &proc_dointvec,
66805+ },
66806+#endif
66807+#ifdef CONFIG_GRKERNSEC_TPE
66808+ {
66809+ .procname = "tpe",
66810+ .data = &grsec_enable_tpe,
66811+ .maxlen = sizeof(int),
66812+ .mode = 0600,
66813+ .proc_handler = &proc_dointvec,
66814+ },
66815+ {
66816+ .procname = "tpe_gid",
66817+ .data = &grsec_tpe_gid,
66818+ .maxlen = sizeof(int),
66819+ .mode = 0600,
66820+ .proc_handler = &proc_dointvec,
66821+ },
66822+#endif
66823+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
66824+ {
66825+ .procname = "tpe_invert",
66826+ .data = &grsec_enable_tpe_invert,
66827+ .maxlen = sizeof(int),
66828+ .mode = 0600,
66829+ .proc_handler = &proc_dointvec,
66830+ },
66831+#endif
66832+#ifdef CONFIG_GRKERNSEC_TPE_ALL
66833+ {
66834+ .procname = "tpe_restrict_all",
66835+ .data = &grsec_enable_tpe_all,
66836+ .maxlen = sizeof(int),
66837+ .mode = 0600,
66838+ .proc_handler = &proc_dointvec,
66839+ },
66840+#endif
66841+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
66842+ {
66843+ .procname = "socket_all",
66844+ .data = &grsec_enable_socket_all,
66845+ .maxlen = sizeof(int),
66846+ .mode = 0600,
66847+ .proc_handler = &proc_dointvec,
66848+ },
66849+ {
66850+ .procname = "socket_all_gid",
66851+ .data = &grsec_socket_all_gid,
66852+ .maxlen = sizeof(int),
66853+ .mode = 0600,
66854+ .proc_handler = &proc_dointvec,
66855+ },
66856+#endif
66857+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
66858+ {
66859+ .procname = "socket_client",
66860+ .data = &grsec_enable_socket_client,
66861+ .maxlen = sizeof(int),
66862+ .mode = 0600,
66863+ .proc_handler = &proc_dointvec,
66864+ },
66865+ {
66866+ .procname = "socket_client_gid",
66867+ .data = &grsec_socket_client_gid,
66868+ .maxlen = sizeof(int),
66869+ .mode = 0600,
66870+ .proc_handler = &proc_dointvec,
66871+ },
66872+#endif
66873+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
66874+ {
66875+ .procname = "socket_server",
66876+ .data = &grsec_enable_socket_server,
66877+ .maxlen = sizeof(int),
66878+ .mode = 0600,
66879+ .proc_handler = &proc_dointvec,
66880+ },
66881+ {
66882+ .procname = "socket_server_gid",
66883+ .data = &grsec_socket_server_gid,
66884+ .maxlen = sizeof(int),
66885+ .mode = 0600,
66886+ .proc_handler = &proc_dointvec,
66887+ },
66888+#endif
66889+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
66890+ {
66891+ .procname = "audit_group",
66892+ .data = &grsec_enable_group,
66893+ .maxlen = sizeof(int),
66894+ .mode = 0600,
66895+ .proc_handler = &proc_dointvec,
66896+ },
66897+ {
66898+ .procname = "audit_gid",
66899+ .data = &grsec_audit_gid,
66900+ .maxlen = sizeof(int),
66901+ .mode = 0600,
66902+ .proc_handler = &proc_dointvec,
66903+ },
66904+#endif
66905+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
66906+ {
66907+ .procname = "audit_chdir",
66908+ .data = &grsec_enable_chdir,
66909+ .maxlen = sizeof(int),
66910+ .mode = 0600,
66911+ .proc_handler = &proc_dointvec,
66912+ },
66913+#endif
66914+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
66915+ {
66916+ .procname = "audit_mount",
66917+ .data = &grsec_enable_mount,
66918+ .maxlen = sizeof(int),
66919+ .mode = 0600,
66920+ .proc_handler = &proc_dointvec,
66921+ },
66922+#endif
66923+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
66924+ {
66925+ .procname = "audit_textrel",
66926+ .data = &grsec_enable_audit_textrel,
66927+ .maxlen = sizeof(int),
66928+ .mode = 0600,
66929+ .proc_handler = &proc_dointvec,
66930+ },
66931+#endif
66932+#ifdef CONFIG_GRKERNSEC_DMESG
66933+ {
66934+ .procname = "dmesg",
66935+ .data = &grsec_enable_dmesg,
66936+ .maxlen = sizeof(int),
66937+ .mode = 0600,
66938+ .proc_handler = &proc_dointvec,
66939+ },
66940+#endif
66941+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
66942+ {
66943+ .procname = "chroot_findtask",
66944+ .data = &grsec_enable_chroot_findtask,
66945+ .maxlen = sizeof(int),
66946+ .mode = 0600,
66947+ .proc_handler = &proc_dointvec,
66948+ },
66949+#endif
66950+#ifdef CONFIG_GRKERNSEC_RESLOG
66951+ {
66952+ .procname = "resource_logging",
66953+ .data = &grsec_resource_logging,
66954+ .maxlen = sizeof(int),
66955+ .mode = 0600,
66956+ .proc_handler = &proc_dointvec,
66957+ },
66958+#endif
66959+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
66960+ {
66961+ .procname = "audit_ptrace",
66962+ .data = &grsec_enable_audit_ptrace,
66963+ .maxlen = sizeof(int),
66964+ .mode = 0600,
66965+ .proc_handler = &proc_dointvec,
66966+ },
66967+#endif
66968+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
66969+ {
66970+ .procname = "harden_ptrace",
66971+ .data = &grsec_enable_harden_ptrace,
66972+ .maxlen = sizeof(int),
66973+ .mode = 0600,
66974+ .proc_handler = &proc_dointvec,
66975+ },
66976+#endif
66977+ {
66978+ .procname = "grsec_lock",
66979+ .data = &grsec_lock,
66980+ .maxlen = sizeof(int),
66981+ .mode = 0600,
66982+ .proc_handler = &proc_dointvec,
66983+ },
66984+#endif
66985+#ifdef CONFIG_GRKERNSEC_ROFS
66986+ {
66987+ .procname = "romount_protect",
66988+ .data = &grsec_enable_rofs,
66989+ .maxlen = sizeof(int),
66990+ .mode = 0600,
66991+ .proc_handler = &proc_dointvec_minmax,
66992+ .extra1 = &one,
66993+ .extra2 = &one,
66994+ },
66995+#endif
66996+ { }
66997+};
66998+#endif
66999diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
67000new file mode 100644
67001index 0000000..0dc13c3
67002--- /dev/null
67003+++ b/grsecurity/grsec_time.c
67004@@ -0,0 +1,16 @@
67005+#include <linux/kernel.h>
67006+#include <linux/sched.h>
67007+#include <linux/grinternal.h>
67008+#include <linux/module.h>
67009+
67010+void
67011+gr_log_timechange(void)
67012+{
67013+#ifdef CONFIG_GRKERNSEC_TIME
67014+ if (grsec_enable_time)
67015+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
67016+#endif
67017+ return;
67018+}
67019+
67020+EXPORT_SYMBOL(gr_log_timechange);
67021diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
67022new file mode 100644
67023index 0000000..ee57dcf
67024--- /dev/null
67025+++ b/grsecurity/grsec_tpe.c
67026@@ -0,0 +1,73 @@
67027+#include <linux/kernel.h>
67028+#include <linux/sched.h>
67029+#include <linux/file.h>
67030+#include <linux/fs.h>
67031+#include <linux/grinternal.h>
67032+
67033+extern int gr_acl_tpe_check(void);
67034+
67035+int
67036+gr_tpe_allow(const struct file *file)
67037+{
67038+#ifdef CONFIG_GRKERNSEC
67039+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
67040+ const struct cred *cred = current_cred();
67041+ char *msg = NULL;
67042+ char *msg2 = NULL;
67043+
67044+ // never restrict root
67045+ if (gr_is_global_root(cred->uid))
67046+ return 1;
67047+
67048+ if (grsec_enable_tpe) {
67049+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
67050+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
67051+ msg = "not being in trusted group";
67052+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
67053+ msg = "being in untrusted group";
67054+#else
67055+ if (in_group_p(grsec_tpe_gid))
67056+ msg = "being in untrusted group";
67057+#endif
67058+ }
67059+ if (!msg && gr_acl_tpe_check())
67060+ msg = "being in untrusted role";
67061+
67062+ // not in any affected group/role
67063+ if (!msg)
67064+ goto next_check;
67065+
67066+ if (gr_is_global_nonroot(inode->i_uid))
67067+ msg2 = "file in non-root-owned directory";
67068+ else if (inode->i_mode & S_IWOTH)
67069+ msg2 = "file in world-writable directory";
67070+ else if (inode->i_mode & S_IWGRP)
67071+ msg2 = "file in group-writable directory";
67072+
67073+ if (msg && msg2) {
67074+ char fullmsg[70] = {0};
67075+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
67076+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
67077+ return 0;
67078+ }
67079+ msg = NULL;
67080+next_check:
67081+#ifdef CONFIG_GRKERNSEC_TPE_ALL
67082+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
67083+ return 1;
67084+
67085+ if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
67086+ msg = "directory not owned by user";
67087+ else if (inode->i_mode & S_IWOTH)
67088+ msg = "file in world-writable directory";
67089+ else if (inode->i_mode & S_IWGRP)
67090+ msg = "file in group-writable directory";
67091+
67092+ if (msg) {
67093+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
67094+ return 0;
67095+ }
67096+#endif
67097+#endif
67098+ return 1;
67099+}
67100diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
67101new file mode 100644
67102index 0000000..9f7b1ac
67103--- /dev/null
67104+++ b/grsecurity/grsum.c
67105@@ -0,0 +1,61 @@
67106+#include <linux/err.h>
67107+#include <linux/kernel.h>
67108+#include <linux/sched.h>
67109+#include <linux/mm.h>
67110+#include <linux/scatterlist.h>
67111+#include <linux/crypto.h>
67112+#include <linux/gracl.h>
67113+
67114+
67115+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
67116+#error "crypto and sha256 must be built into the kernel"
67117+#endif
67118+
67119+int
67120+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
67121+{
67122+ char *p;
67123+ struct crypto_hash *tfm;
67124+ struct hash_desc desc;
67125+ struct scatterlist sg;
67126+ unsigned char temp_sum[GR_SHA_LEN];
67127+ volatile int retval = 0;
67128+ volatile int dummy = 0;
67129+ unsigned int i;
67130+
67131+ sg_init_table(&sg, 1);
67132+
67133+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
67134+ if (IS_ERR(tfm)) {
67135+ /* should never happen, since sha256 should be built in */
67136+ return 1;
67137+ }
67138+
67139+ desc.tfm = tfm;
67140+ desc.flags = 0;
67141+
67142+ crypto_hash_init(&desc);
67143+
67144+ p = salt;
67145+ sg_set_buf(&sg, p, GR_SALT_LEN);
67146+ crypto_hash_update(&desc, &sg, sg.length);
67147+
67148+ p = entry->pw;
67149+ sg_set_buf(&sg, p, strlen(p));
67150+
67151+ crypto_hash_update(&desc, &sg, sg.length);
67152+
67153+ crypto_hash_final(&desc, temp_sum);
67154+
67155+ memset(entry->pw, 0, GR_PW_LEN);
67156+
67157+ for (i = 0; i < GR_SHA_LEN; i++)
67158+ if (sum[i] != temp_sum[i])
67159+ retval = 1;
67160+ else
67161+ dummy = 1; // waste a cycle
67162+
67163+ crypto_free_hash(tfm);
67164+
67165+ return retval;
67166+}
67167diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
67168index 77ff547..181834f 100644
67169--- a/include/asm-generic/4level-fixup.h
67170+++ b/include/asm-generic/4level-fixup.h
67171@@ -13,8 +13,10 @@
67172 #define pmd_alloc(mm, pud, address) \
67173 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
67174 NULL: pmd_offset(pud, address))
67175+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
67176
67177 #define pud_alloc(mm, pgd, address) (pgd)
67178+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
67179 #define pud_offset(pgd, start) (pgd)
67180 #define pud_none(pud) 0
67181 #define pud_bad(pud) 0
67182diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
67183index b7babf0..04ad282 100644
67184--- a/include/asm-generic/atomic-long.h
67185+++ b/include/asm-generic/atomic-long.h
67186@@ -22,6 +22,12 @@
67187
67188 typedef atomic64_t atomic_long_t;
67189
67190+#ifdef CONFIG_PAX_REFCOUNT
67191+typedef atomic64_unchecked_t atomic_long_unchecked_t;
67192+#else
67193+typedef atomic64_t atomic_long_unchecked_t;
67194+#endif
67195+
67196 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
67197
67198 static inline long atomic_long_read(atomic_long_t *l)
67199@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
67200 return (long)atomic64_read(v);
67201 }
67202
67203+#ifdef CONFIG_PAX_REFCOUNT
67204+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
67205+{
67206+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
67207+
67208+ return (long)atomic64_read_unchecked(v);
67209+}
67210+#endif
67211+
67212 static inline void atomic_long_set(atomic_long_t *l, long i)
67213 {
67214 atomic64_t *v = (atomic64_t *)l;
67215@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
67216 atomic64_set(v, i);
67217 }
67218
67219+#ifdef CONFIG_PAX_REFCOUNT
67220+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
67221+{
67222+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
67223+
67224+ atomic64_set_unchecked(v, i);
67225+}
67226+#endif
67227+
67228 static inline void atomic_long_inc(atomic_long_t *l)
67229 {
67230 atomic64_t *v = (atomic64_t *)l;
67231@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
67232 atomic64_inc(v);
67233 }
67234
67235+#ifdef CONFIG_PAX_REFCOUNT
67236+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
67237+{
67238+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
67239+
67240+ atomic64_inc_unchecked(v);
67241+}
67242+#endif
67243+
67244 static inline void atomic_long_dec(atomic_long_t *l)
67245 {
67246 atomic64_t *v = (atomic64_t *)l;
67247@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
67248 atomic64_dec(v);
67249 }
67250
67251+#ifdef CONFIG_PAX_REFCOUNT
67252+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
67253+{
67254+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
67255+
67256+ atomic64_dec_unchecked(v);
67257+}
67258+#endif
67259+
67260 static inline void atomic_long_add(long i, atomic_long_t *l)
67261 {
67262 atomic64_t *v = (atomic64_t *)l;
67263@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
67264 atomic64_add(i, v);
67265 }
67266
67267+#ifdef CONFIG_PAX_REFCOUNT
67268+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
67269+{
67270+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
67271+
67272+ atomic64_add_unchecked(i, v);
67273+}
67274+#endif
67275+
67276 static inline void atomic_long_sub(long i, atomic_long_t *l)
67277 {
67278 atomic64_t *v = (atomic64_t *)l;
67279@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
67280 atomic64_sub(i, v);
67281 }
67282
67283+#ifdef CONFIG_PAX_REFCOUNT
67284+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
67285+{
67286+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
67287+
67288+ atomic64_sub_unchecked(i, v);
67289+}
67290+#endif
67291+
67292 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
67293 {
67294 atomic64_t *v = (atomic64_t *)l;
67295@@ -101,6 +161,15 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
67296 return (long)atomic64_add_return(i, v);
67297 }
67298
67299+#ifdef CONFIG_PAX_REFCOUNT
67300+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
67301+{
67302+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
67303+
67304+ return (long)atomic64_add_return_unchecked(i, v);
67305+}
67306+#endif
67307+
67308 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
67309 {
67310 atomic64_t *v = (atomic64_t *)l;
67311@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
67312 return (long)atomic64_inc_return(v);
67313 }
67314
67315+#ifdef CONFIG_PAX_REFCOUNT
67316+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
67317+{
67318+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
67319+
67320+ return (long)atomic64_inc_return_unchecked(v);
67321+}
67322+#endif
67323+
67324 static inline long atomic_long_dec_return(atomic_long_t *l)
67325 {
67326 atomic64_t *v = (atomic64_t *)l;
67327@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
67328
67329 typedef atomic_t atomic_long_t;
67330
67331+#ifdef CONFIG_PAX_REFCOUNT
67332+typedef atomic_unchecked_t atomic_long_unchecked_t;
67333+#else
67334+typedef atomic_t atomic_long_unchecked_t;
67335+#endif
67336+
67337 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
67338 static inline long atomic_long_read(atomic_long_t *l)
67339 {
67340@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
67341 return (long)atomic_read(v);
67342 }
67343
67344+#ifdef CONFIG_PAX_REFCOUNT
67345+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
67346+{
67347+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
67348+
67349+ return (long)atomic_read_unchecked(v);
67350+}
67351+#endif
67352+
67353 static inline void atomic_long_set(atomic_long_t *l, long i)
67354 {
67355 atomic_t *v = (atomic_t *)l;
67356@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
67357 atomic_set(v, i);
67358 }
67359
67360+#ifdef CONFIG_PAX_REFCOUNT
67361+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
67362+{
67363+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
67364+
67365+ atomic_set_unchecked(v, i);
67366+}
67367+#endif
67368+
67369 static inline void atomic_long_inc(atomic_long_t *l)
67370 {
67371 atomic_t *v = (atomic_t *)l;
67372@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
67373 atomic_inc(v);
67374 }
67375
67376+#ifdef CONFIG_PAX_REFCOUNT
67377+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
67378+{
67379+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
67380+
67381+ atomic_inc_unchecked(v);
67382+}
67383+#endif
67384+
67385 static inline void atomic_long_dec(atomic_long_t *l)
67386 {
67387 atomic_t *v = (atomic_t *)l;
67388@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
67389 atomic_dec(v);
67390 }
67391
67392+#ifdef CONFIG_PAX_REFCOUNT
67393+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
67394+{
67395+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
67396+
67397+ atomic_dec_unchecked(v);
67398+}
67399+#endif
67400+
67401 static inline void atomic_long_add(long i, atomic_long_t *l)
67402 {
67403 atomic_t *v = (atomic_t *)l;
67404@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
67405 atomic_add(i, v);
67406 }
67407
67408+#ifdef CONFIG_PAX_REFCOUNT
67409+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
67410+{
67411+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
67412+
67413+ atomic_add_unchecked(i, v);
67414+}
67415+#endif
67416+
67417 static inline void atomic_long_sub(long i, atomic_long_t *l)
67418 {
67419 atomic_t *v = (atomic_t *)l;
67420@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
67421 atomic_sub(i, v);
67422 }
67423
67424+#ifdef CONFIG_PAX_REFCOUNT
67425+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
67426+{
67427+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
67428+
67429+ atomic_sub_unchecked(i, v);
67430+}
67431+#endif
67432+
67433 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
67434 {
67435 atomic_t *v = (atomic_t *)l;
67436@@ -218,6 +356,16 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
67437 return (long)atomic_add_return(i, v);
67438 }
67439
67440+#ifdef CONFIG_PAX_REFCOUNT
67441+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
67442+{
67443+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
67444+
67445+ return (long)atomic_add_return_unchecked(i, v);
67446+}
67447+
67448+#endif
67449+
67450 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
67451 {
67452 atomic_t *v = (atomic_t *)l;
67453@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
67454 return (long)atomic_inc_return(v);
67455 }
67456
67457+#ifdef CONFIG_PAX_REFCOUNT
67458+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
67459+{
67460+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
67461+
67462+ return (long)atomic_inc_return_unchecked(v);
67463+}
67464+#endif
67465+
67466 static inline long atomic_long_dec_return(atomic_long_t *l)
67467 {
67468 atomic_t *v = (atomic_t *)l;
67469@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
67470
67471 #endif /* BITS_PER_LONG == 64 */
67472
67473+#ifdef CONFIG_PAX_REFCOUNT
67474+static inline void pax_refcount_needs_these_functions(void)
67475+{
67476+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
67477+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
67478+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
67479+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
67480+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
67481+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
67482+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
67483+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
67484+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
67485+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
67486+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
67487+#ifdef CONFIG_X86
67488+ atomic_clear_mask_unchecked(0, NULL);
67489+ atomic_set_mask_unchecked(0, NULL);
67490+#endif
67491+
67492+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
67493+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
67494+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
67495+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
67496+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
67497+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
67498+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
67499+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
67500+}
67501+#else
67502+#define atomic_read_unchecked(v) atomic_read(v)
67503+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
67504+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
67505+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
67506+#define atomic_inc_unchecked(v) atomic_inc(v)
67507+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
67508+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
67509+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
67510+#define atomic_dec_unchecked(v) atomic_dec(v)
67511+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
67512+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
67513+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
67514+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
67515+
67516+#define atomic_long_read_unchecked(v) atomic_long_read(v)
67517+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
67518+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
67519+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
67520+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
67521+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
67522+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
67523+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
67524+#endif
67525+
67526 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
67527diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
67528index 33bd2de..f31bff97 100644
67529--- a/include/asm-generic/atomic.h
67530+++ b/include/asm-generic/atomic.h
67531@@ -153,7 +153,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
67532 * Atomically clears the bits set in @mask from @v
67533 */
67534 #ifndef atomic_clear_mask
67535-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
67536+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
67537 {
67538 unsigned long flags;
67539
67540diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
67541index b18ce4f..2ee2843 100644
67542--- a/include/asm-generic/atomic64.h
67543+++ b/include/asm-generic/atomic64.h
67544@@ -16,6 +16,8 @@ typedef struct {
67545 long long counter;
67546 } atomic64_t;
67547
67548+typedef atomic64_t atomic64_unchecked_t;
67549+
67550 #define ATOMIC64_INIT(i) { (i) }
67551
67552 extern long long atomic64_read(const atomic64_t *v);
67553@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
67554 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
67555 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
67556
67557+#define atomic64_read_unchecked(v) atomic64_read(v)
67558+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
67559+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
67560+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
67561+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
67562+#define atomic64_inc_unchecked(v) atomic64_inc(v)
67563+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
67564+#define atomic64_dec_unchecked(v) atomic64_dec(v)
67565+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
67566+
67567 #endif /* _ASM_GENERIC_ATOMIC64_H */
67568diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
67569index 1bfcfe5..e04c5c9 100644
67570--- a/include/asm-generic/cache.h
67571+++ b/include/asm-generic/cache.h
67572@@ -6,7 +6,7 @@
67573 * cache lines need to provide their own cache.h.
67574 */
67575
67576-#define L1_CACHE_SHIFT 5
67577-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
67578+#define L1_CACHE_SHIFT 5UL
67579+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
67580
67581 #endif /* __ASM_GENERIC_CACHE_H */
67582diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
67583index 0d68a1e..b74a761 100644
67584--- a/include/asm-generic/emergency-restart.h
67585+++ b/include/asm-generic/emergency-restart.h
67586@@ -1,7 +1,7 @@
67587 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
67588 #define _ASM_GENERIC_EMERGENCY_RESTART_H
67589
67590-static inline void machine_emergency_restart(void)
67591+static inline __noreturn void machine_emergency_restart(void)
67592 {
67593 machine_restart(NULL);
67594 }
67595diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
67596index 90f99c7..00ce236 100644
67597--- a/include/asm-generic/kmap_types.h
67598+++ b/include/asm-generic/kmap_types.h
67599@@ -2,9 +2,9 @@
67600 #define _ASM_GENERIC_KMAP_TYPES_H
67601
67602 #ifdef __WITH_KM_FENCE
67603-# define KM_TYPE_NR 41
67604+# define KM_TYPE_NR 42
67605 #else
67606-# define KM_TYPE_NR 20
67607+# define KM_TYPE_NR 21
67608 #endif
67609
67610 #endif
67611diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
67612index 9ceb03b..62b0b8f 100644
67613--- a/include/asm-generic/local.h
67614+++ b/include/asm-generic/local.h
67615@@ -23,24 +23,37 @@ typedef struct
67616 atomic_long_t a;
67617 } local_t;
67618
67619+typedef struct {
67620+ atomic_long_unchecked_t a;
67621+} local_unchecked_t;
67622+
67623 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
67624
67625 #define local_read(l) atomic_long_read(&(l)->a)
67626+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
67627 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
67628+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
67629 #define local_inc(l) atomic_long_inc(&(l)->a)
67630+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
67631 #define local_dec(l) atomic_long_dec(&(l)->a)
67632+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
67633 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
67634+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
67635 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
67636+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
67637
67638 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
67639 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
67640 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
67641 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
67642 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
67643+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
67644 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
67645 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
67646+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
67647
67648 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
67649+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
67650 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
67651 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
67652 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
67653diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
67654index 725612b..9cc513a 100644
67655--- a/include/asm-generic/pgtable-nopmd.h
67656+++ b/include/asm-generic/pgtable-nopmd.h
67657@@ -1,14 +1,19 @@
67658 #ifndef _PGTABLE_NOPMD_H
67659 #define _PGTABLE_NOPMD_H
67660
67661-#ifndef __ASSEMBLY__
67662-
67663 #include <asm-generic/pgtable-nopud.h>
67664
67665-struct mm_struct;
67666-
67667 #define __PAGETABLE_PMD_FOLDED
67668
67669+#define PMD_SHIFT PUD_SHIFT
67670+#define PTRS_PER_PMD 1
67671+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
67672+#define PMD_MASK (~(PMD_SIZE-1))
67673+
67674+#ifndef __ASSEMBLY__
67675+
67676+struct mm_struct;
67677+
67678 /*
67679 * Having the pmd type consist of a pud gets the size right, and allows
67680 * us to conceptually access the pud entry that this pmd is folded into
67681@@ -16,11 +21,6 @@ struct mm_struct;
67682 */
67683 typedef struct { pud_t pud; } pmd_t;
67684
67685-#define PMD_SHIFT PUD_SHIFT
67686-#define PTRS_PER_PMD 1
67687-#define PMD_SIZE (1UL << PMD_SHIFT)
67688-#define PMD_MASK (~(PMD_SIZE-1))
67689-
67690 /*
67691 * The "pud_xxx()" functions here are trivial for a folded two-level
67692 * setup: the pmd is never bad, and a pmd always exists (as it's folded
67693diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
67694index 810431d..0ec4804f 100644
67695--- a/include/asm-generic/pgtable-nopud.h
67696+++ b/include/asm-generic/pgtable-nopud.h
67697@@ -1,10 +1,15 @@
67698 #ifndef _PGTABLE_NOPUD_H
67699 #define _PGTABLE_NOPUD_H
67700
67701-#ifndef __ASSEMBLY__
67702-
67703 #define __PAGETABLE_PUD_FOLDED
67704
67705+#define PUD_SHIFT PGDIR_SHIFT
67706+#define PTRS_PER_PUD 1
67707+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
67708+#define PUD_MASK (~(PUD_SIZE-1))
67709+
67710+#ifndef __ASSEMBLY__
67711+
67712 /*
67713 * Having the pud type consist of a pgd gets the size right, and allows
67714 * us to conceptually access the pgd entry that this pud is folded into
67715@@ -12,11 +17,6 @@
67716 */
67717 typedef struct { pgd_t pgd; } pud_t;
67718
67719-#define PUD_SHIFT PGDIR_SHIFT
67720-#define PTRS_PER_PUD 1
67721-#define PUD_SIZE (1UL << PUD_SHIFT)
67722-#define PUD_MASK (~(PUD_SIZE-1))
67723-
67724 /*
67725 * The "pgd_xxx()" functions here are trivial for a folded two-level
67726 * setup: the pud is never bad, and a pud always exists (as it's folded
67727@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
67728 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
67729
67730 #define pgd_populate(mm, pgd, pud) do { } while (0)
67731+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
67732 /*
67733 * (puds are folded into pgds so this doesn't get actually called,
67734 * but the define is needed for a generic inline function.)
67735diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
67736index a59ff51..2594a70 100644
67737--- a/include/asm-generic/pgtable.h
67738+++ b/include/asm-generic/pgtable.h
67739@@ -688,6 +688,14 @@ static inline pmd_t pmd_mknuma(pmd_t pmd)
67740 }
67741 #endif /* CONFIG_NUMA_BALANCING */
67742
67743+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
67744+static inline unsigned long pax_open_kernel(void) { return 0; }
67745+#endif
67746+
67747+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
67748+static inline unsigned long pax_close_kernel(void) { return 0; }
67749+#endif
67750+
67751 #endif /* CONFIG_MMU */
67752
67753 #endif /* !__ASSEMBLY__ */
67754diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
67755index eb58d2d..df131bf 100644
67756--- a/include/asm-generic/vmlinux.lds.h
67757+++ b/include/asm-generic/vmlinux.lds.h
67758@@ -239,6 +239,7 @@
67759 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
67760 VMLINUX_SYMBOL(__start_rodata) = .; \
67761 *(.rodata) *(.rodata.*) \
67762+ *(.data..read_only) \
67763 *(__vermagic) /* Kernel version magic */ \
67764 . = ALIGN(8); \
67765 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
67766@@ -749,17 +750,18 @@
67767 * section in the linker script will go there too. @phdr should have
67768 * a leading colon.
67769 *
67770- * Note that this macros defines __per_cpu_load as an absolute symbol.
67771+ * Note that this macros defines per_cpu_load as an absolute symbol.
67772 * If there is no need to put the percpu section at a predetermined
67773 * address, use PERCPU_SECTION.
67774 */
67775 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
67776- VMLINUX_SYMBOL(__per_cpu_load) = .; \
67777- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
67778+ per_cpu_load = .; \
67779+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
67780 - LOAD_OFFSET) { \
67781+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
67782 PERCPU_INPUT(cacheline) \
67783 } phdr \
67784- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
67785+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
67786
67787 /**
67788 * PERCPU_SECTION - define output section for percpu area, simple version
67789diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
67790index 418d270..bfd2794 100644
67791--- a/include/crypto/algapi.h
67792+++ b/include/crypto/algapi.h
67793@@ -34,7 +34,7 @@ struct crypto_type {
67794 unsigned int maskclear;
67795 unsigned int maskset;
67796 unsigned int tfmsize;
67797-};
67798+} __do_const;
67799
67800 struct crypto_instance {
67801 struct crypto_alg alg;
67802diff --git a/include/drm/drmP.h b/include/drm/drmP.h
67803index 63d17ee..716de2b 100644
67804--- a/include/drm/drmP.h
67805+++ b/include/drm/drmP.h
67806@@ -72,6 +72,7 @@
67807 #include <linux/workqueue.h>
67808 #include <linux/poll.h>
67809 #include <asm/pgalloc.h>
67810+#include <asm/local.h>
67811 #include <drm/drm.h>
67812 #include <drm/drm_sarea.h>
67813
67814@@ -296,10 +297,12 @@ do { \
67815 * \param cmd command.
67816 * \param arg argument.
67817 */
67818-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
67819+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
67820+ struct drm_file *file_priv);
67821+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
67822 struct drm_file *file_priv);
67823
67824-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
67825+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
67826 unsigned long arg);
67827
67828 #define DRM_IOCTL_NR(n) _IOC_NR(n)
67829@@ -314,10 +317,10 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
67830 struct drm_ioctl_desc {
67831 unsigned int cmd;
67832 int flags;
67833- drm_ioctl_t *func;
67834+ drm_ioctl_t func;
67835 unsigned int cmd_drv;
67836 const char *name;
67837-};
67838+} __do_const;
67839
67840 /**
67841 * Creates a driver or general drm_ioctl_desc array entry for the given
67842@@ -1015,7 +1018,7 @@ struct drm_info_list {
67843 int (*show)(struct seq_file*, void*); /** show callback */
67844 u32 driver_features; /**< Required driver features for this entry */
67845 void *data;
67846-};
67847+} __do_const;
67848
67849 /**
67850 * debugfs node structure. This structure represents a debugfs file.
67851@@ -1088,7 +1091,7 @@ struct drm_device {
67852
67853 /** \name Usage Counters */
67854 /*@{ */
67855- int open_count; /**< Outstanding files open */
67856+ local_t open_count; /**< Outstanding files open */
67857 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
67858 atomic_t vma_count; /**< Outstanding vma areas open */
67859 int buf_use; /**< Buffers in use -- cannot alloc */
67860@@ -1099,7 +1102,7 @@ struct drm_device {
67861 /*@{ */
67862 unsigned long counters;
67863 enum drm_stat_type types[15];
67864- atomic_t counts[15];
67865+ atomic_unchecked_t counts[15];
67866 /*@} */
67867
67868 struct list_head filelist;
67869diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
67870index f43d556..94d9343 100644
67871--- a/include/drm/drm_crtc_helper.h
67872+++ b/include/drm/drm_crtc_helper.h
67873@@ -109,7 +109,7 @@ struct drm_encoder_helper_funcs {
67874 struct drm_connector *connector);
67875 /* disable encoder when not in use - more explicit than dpms off */
67876 void (*disable)(struct drm_encoder *encoder);
67877-};
67878+} __no_const;
67879
67880 /**
67881 * drm_connector_helper_funcs - helper operations for connectors
67882diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
67883index 72dcbe8..8db58d7 100644
67884--- a/include/drm/ttm/ttm_memory.h
67885+++ b/include/drm/ttm/ttm_memory.h
67886@@ -48,7 +48,7 @@
67887
67888 struct ttm_mem_shrink {
67889 int (*do_shrink) (struct ttm_mem_shrink *);
67890-};
67891+} __no_const;
67892
67893 /**
67894 * struct ttm_mem_global - Global memory accounting structure.
67895diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
67896index 4b840e8..155d235 100644
67897--- a/include/keys/asymmetric-subtype.h
67898+++ b/include/keys/asymmetric-subtype.h
67899@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
67900 /* Verify the signature on a key of this subtype (optional) */
67901 int (*verify_signature)(const struct key *key,
67902 const struct public_key_signature *sig);
67903-};
67904+} __do_const;
67905
67906 /**
67907 * asymmetric_key_subtype - Get the subtype from an asymmetric key
67908diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
67909index c1da539..1dcec55 100644
67910--- a/include/linux/atmdev.h
67911+++ b/include/linux/atmdev.h
67912@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
67913 #endif
67914
67915 struct k_atm_aal_stats {
67916-#define __HANDLE_ITEM(i) atomic_t i
67917+#define __HANDLE_ITEM(i) atomic_unchecked_t i
67918 __AAL_STAT_ITEMS
67919 #undef __HANDLE_ITEM
67920 };
67921@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
67922 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
67923 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
67924 struct module *owner;
67925-};
67926+} __do_const ;
67927
67928 struct atmphy_ops {
67929 int (*start)(struct atm_dev *dev);
67930diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
67931index 70cf138..cabb82e 100644
67932--- a/include/linux/binfmts.h
67933+++ b/include/linux/binfmts.h
67934@@ -73,8 +73,9 @@ struct linux_binfmt {
67935 int (*load_binary)(struct linux_binprm *);
67936 int (*load_shlib)(struct file *);
67937 int (*core_dump)(struct coredump_params *cprm);
67938+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
67939 unsigned long min_coredump; /* minimal dump size */
67940-};
67941+} __do_const;
67942
67943 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
67944
67945diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
67946index 2fdb4a4..54aad7e 100644
67947--- a/include/linux/blkdev.h
67948+++ b/include/linux/blkdev.h
67949@@ -1526,7 +1526,7 @@ struct block_device_operations {
67950 /* this callback is with swap_lock and sometimes page table lock held */
67951 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
67952 struct module *owner;
67953-};
67954+} __do_const;
67955
67956 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
67957 unsigned long);
67958diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
67959index 7c2e030..b72475d 100644
67960--- a/include/linux/blktrace_api.h
67961+++ b/include/linux/blktrace_api.h
67962@@ -23,7 +23,7 @@ struct blk_trace {
67963 struct dentry *dir;
67964 struct dentry *dropped_file;
67965 struct dentry *msg_file;
67966- atomic_t dropped;
67967+ atomic_unchecked_t dropped;
67968 };
67969
67970 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
67971diff --git a/include/linux/cache.h b/include/linux/cache.h
67972index 4c57065..4307975 100644
67973--- a/include/linux/cache.h
67974+++ b/include/linux/cache.h
67975@@ -16,6 +16,10 @@
67976 #define __read_mostly
67977 #endif
67978
67979+#ifndef __read_only
67980+#define __read_only __read_mostly
67981+#endif
67982+
67983 #ifndef ____cacheline_aligned
67984 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
67985 #endif
67986diff --git a/include/linux/capability.h b/include/linux/capability.h
67987index d9a4f7f4..19f77d6 100644
67988--- a/include/linux/capability.h
67989+++ b/include/linux/capability.h
67990@@ -213,8 +213,13 @@ extern bool ns_capable(struct user_namespace *ns, int cap);
67991 extern bool nsown_capable(int cap);
67992 extern bool inode_capable(const struct inode *inode, int cap);
67993 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
67994+extern bool capable_nolog(int cap);
67995+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
67996+extern bool inode_capable_nolog(const struct inode *inode, int cap);
67997
67998 /* audit system wants to get cap info from files as well */
67999 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
68000
68001+extern int is_privileged_binary(const struct dentry *dentry);
68002+
68003 #endif /* !_LINUX_CAPABILITY_H */
68004diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
68005index 8609d57..86e4d79 100644
68006--- a/include/linux/cdrom.h
68007+++ b/include/linux/cdrom.h
68008@@ -87,7 +87,6 @@ struct cdrom_device_ops {
68009
68010 /* driver specifications */
68011 const int capability; /* capability flags */
68012- int n_minors; /* number of active minor devices */
68013 /* handle uniform packets for scsi type devices (scsi,atapi) */
68014 int (*generic_packet) (struct cdrom_device_info *,
68015 struct packet_command *);
68016diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
68017index 4ce9056..86caac6 100644
68018--- a/include/linux/cleancache.h
68019+++ b/include/linux/cleancache.h
68020@@ -31,7 +31,7 @@ struct cleancache_ops {
68021 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
68022 void (*invalidate_inode)(int, struct cleancache_filekey);
68023 void (*invalidate_fs)(int);
68024-};
68025+} __no_const;
68026
68027 extern struct cleancache_ops *
68028 cleancache_register_ops(struct cleancache_ops *ops);
68029diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
68030index 1186098..f87e53d 100644
68031--- a/include/linux/clk-provider.h
68032+++ b/include/linux/clk-provider.h
68033@@ -132,6 +132,7 @@ struct clk_ops {
68034 unsigned long);
68035 void (*init)(struct clk_hw *hw);
68036 };
68037+typedef struct clk_ops __no_const clk_ops_no_const;
68038
68039 /**
68040 * struct clk_init_data - holds init data that's common to all clocks and is
68041diff --git a/include/linux/compat.h b/include/linux/compat.h
68042index 7f0c1dd..b5729c6 100644
68043--- a/include/linux/compat.h
68044+++ b/include/linux/compat.h
68045@@ -312,7 +312,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
68046 compat_size_t __user *len_ptr);
68047
68048 asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
68049-asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
68050+asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
68051 asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
68052 asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
68053 compat_ssize_t msgsz, int msgflg);
68054@@ -419,7 +419,7 @@ extern int compat_ptrace_request(struct task_struct *child,
68055 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
68056 compat_ulong_t addr, compat_ulong_t data);
68057 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
68058- compat_long_t addr, compat_long_t data);
68059+ compat_ulong_t addr, compat_ulong_t data);
68060
68061 asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, size_t);
68062 /*
68063diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
68064index 842de22..7f3a41f 100644
68065--- a/include/linux/compiler-gcc4.h
68066+++ b/include/linux/compiler-gcc4.h
68067@@ -39,9 +39,29 @@
68068 # define __compiletime_warning(message) __attribute__((warning(message)))
68069 # define __compiletime_error(message) __attribute__((error(message)))
68070 #endif /* __CHECKER__ */
68071+
68072+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
68073+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
68074+#define __bos0(ptr) __bos((ptr), 0)
68075+#define __bos1(ptr) __bos((ptr), 1)
68076 #endif /* GCC_VERSION >= 40300 */
68077
68078 #if GCC_VERSION >= 40500
68079+
68080+#ifdef CONSTIFY_PLUGIN
68081+#define __no_const __attribute__((no_const))
68082+#define __do_const __attribute__((do_const))
68083+#endif
68084+
68085+#ifdef SIZE_OVERFLOW_PLUGIN
68086+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
68087+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
68088+#endif
68089+
68090+#ifdef LATENT_ENTROPY_PLUGIN
68091+#define __latent_entropy __attribute__((latent_entropy))
68092+#endif
68093+
68094 /*
68095 * Mark a position in code as unreachable. This can be used to
68096 * suppress control flow warnings after asm blocks that transfer
68097diff --git a/include/linux/compiler.h b/include/linux/compiler.h
68098index 92669cd..1771a15 100644
68099--- a/include/linux/compiler.h
68100+++ b/include/linux/compiler.h
68101@@ -5,11 +5,14 @@
68102
68103 #ifdef __CHECKER__
68104 # define __user __attribute__((noderef, address_space(1)))
68105+# define __force_user __force __user
68106 # define __kernel __attribute__((address_space(0)))
68107+# define __force_kernel __force __kernel
68108 # define __safe __attribute__((safe))
68109 # define __force __attribute__((force))
68110 # define __nocast __attribute__((nocast))
68111 # define __iomem __attribute__((noderef, address_space(2)))
68112+# define __force_iomem __force __iomem
68113 # define __must_hold(x) __attribute__((context(x,1,1)))
68114 # define __acquires(x) __attribute__((context(x,0,1)))
68115 # define __releases(x) __attribute__((context(x,1,0)))
68116@@ -17,20 +20,37 @@
68117 # define __release(x) __context__(x,-1)
68118 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
68119 # define __percpu __attribute__((noderef, address_space(3)))
68120+# define __force_percpu __force __percpu
68121 #ifdef CONFIG_SPARSE_RCU_POINTER
68122 # define __rcu __attribute__((noderef, address_space(4)))
68123+# define __force_rcu __force __rcu
68124 #else
68125 # define __rcu
68126+# define __force_rcu
68127 #endif
68128 extern void __chk_user_ptr(const volatile void __user *);
68129 extern void __chk_io_ptr(const volatile void __iomem *);
68130 #else
68131-# define __user
68132-# define __kernel
68133+# ifdef CHECKER_PLUGIN
68134+//# define __user
68135+//# define __force_user
68136+//# define __kernel
68137+//# define __force_kernel
68138+# else
68139+# ifdef STRUCTLEAK_PLUGIN
68140+# define __user __attribute__((user))
68141+# else
68142+# define __user
68143+# endif
68144+# define __force_user
68145+# define __kernel
68146+# define __force_kernel
68147+# endif
68148 # define __safe
68149 # define __force
68150 # define __nocast
68151 # define __iomem
68152+# define __force_iomem
68153 # define __chk_user_ptr(x) (void)0
68154 # define __chk_io_ptr(x) (void)0
68155 # define __builtin_warning(x, y...) (1)
68156@@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
68157 # define __release(x) (void)0
68158 # define __cond_lock(x,c) (c)
68159 # define __percpu
68160+# define __force_percpu
68161 # define __rcu
68162+# define __force_rcu
68163 #endif
68164
68165 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
68166@@ -275,6 +297,26 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
68167 # define __attribute_const__ /* unimplemented */
68168 #endif
68169
68170+#ifndef __no_const
68171+# define __no_const
68172+#endif
68173+
68174+#ifndef __do_const
68175+# define __do_const
68176+#endif
68177+
68178+#ifndef __size_overflow
68179+# define __size_overflow(...)
68180+#endif
68181+
68182+#ifndef __intentional_overflow
68183+# define __intentional_overflow(...)
68184+#endif
68185+
68186+#ifndef __latent_entropy
68187+# define __latent_entropy
68188+#endif
68189+
68190 /*
68191 * Tell gcc if a function is cold. The compiler will assume any path
68192 * directly leading to the call is unlikely.
68193@@ -284,6 +326,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
68194 #define __cold
68195 #endif
68196
68197+#ifndef __alloc_size
68198+#define __alloc_size(...)
68199+#endif
68200+
68201+#ifndef __bos
68202+#define __bos(ptr, arg)
68203+#endif
68204+
68205+#ifndef __bos0
68206+#define __bos0(ptr)
68207+#endif
68208+
68209+#ifndef __bos1
68210+#define __bos1(ptr)
68211+#endif
68212+
68213 /* Simple shorthand for a section definition */
68214 #ifndef __section
68215 # define __section(S) __attribute__ ((__section__(#S)))
68216@@ -349,7 +407,8 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
68217 * use is to mediate communication between process-level code and irq/NMI
68218 * handlers, all running on the same CPU.
68219 */
68220-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
68221+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
68222+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
68223
68224 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
68225 #ifdef CONFIG_KPROBES
68226diff --git a/include/linux/completion.h b/include/linux/completion.h
68227index 33f0280..35c6568 100644
68228--- a/include/linux/completion.h
68229+++ b/include/linux/completion.h
68230@@ -79,15 +79,15 @@ static inline void init_completion(struct completion *x)
68231 extern void wait_for_completion(struct completion *);
68232 extern void wait_for_completion_io(struct completion *);
68233 extern int wait_for_completion_interruptible(struct completion *x);
68234-extern int wait_for_completion_killable(struct completion *x);
68235+extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
68236 extern unsigned long wait_for_completion_timeout(struct completion *x,
68237 unsigned long timeout);
68238 extern unsigned long wait_for_completion_io_timeout(struct completion *x,
68239 unsigned long timeout);
68240 extern long wait_for_completion_interruptible_timeout(
68241- struct completion *x, unsigned long timeout);
68242+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
68243 extern long wait_for_completion_killable_timeout(
68244- struct completion *x, unsigned long timeout);
68245+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
68246 extern bool try_wait_for_completion(struct completion *x);
68247 extern bool completion_done(struct completion *x);
68248
68249diff --git a/include/linux/configfs.h b/include/linux/configfs.h
68250index 34025df..d94bbbc 100644
68251--- a/include/linux/configfs.h
68252+++ b/include/linux/configfs.h
68253@@ -125,7 +125,7 @@ struct configfs_attribute {
68254 const char *ca_name;
68255 struct module *ca_owner;
68256 umode_t ca_mode;
68257-};
68258+} __do_const;
68259
68260 /*
68261 * Users often need to create attribute structures for their configurable
68262diff --git a/include/linux/cpu.h b/include/linux/cpu.h
68263index 9f3c7e8..a18c7b6 100644
68264--- a/include/linux/cpu.h
68265+++ b/include/linux/cpu.h
68266@@ -115,7 +115,7 @@ enum {
68267 /* Need to know about CPUs going up/down? */
68268 #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
68269 #define cpu_notifier(fn, pri) { \
68270- static struct notifier_block fn##_nb __cpuinitdata = \
68271+ static struct notifier_block fn##_nb = \
68272 { .notifier_call = fn, .priority = pri }; \
68273 register_cpu_notifier(&fn##_nb); \
68274 }
68275diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
68276index 037d36a..ca5fe6e 100644
68277--- a/include/linux/cpufreq.h
68278+++ b/include/linux/cpufreq.h
68279@@ -262,7 +262,7 @@ struct cpufreq_driver {
68280 int (*suspend) (struct cpufreq_policy *policy);
68281 int (*resume) (struct cpufreq_policy *policy);
68282 struct freq_attr **attr;
68283-};
68284+} __do_const;
68285
68286 /* flags */
68287
68288@@ -321,6 +321,7 @@ struct global_attr {
68289 ssize_t (*store)(struct kobject *a, struct attribute *b,
68290 const char *c, size_t count);
68291 };
68292+typedef struct global_attr __no_const global_attr_no_const;
68293
68294 #define define_one_global_ro(_name) \
68295 static struct global_attr _name = \
68296diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
68297index 8f04062..900239a 100644
68298--- a/include/linux/cpuidle.h
68299+++ b/include/linux/cpuidle.h
68300@@ -52,7 +52,8 @@ struct cpuidle_state {
68301 int index);
68302
68303 int (*enter_dead) (struct cpuidle_device *dev, int index);
68304-};
68305+} __do_const;
68306+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
68307
68308 /* Idle State Flags */
68309 #define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
68310@@ -191,7 +192,7 @@ struct cpuidle_governor {
68311 void (*reflect) (struct cpuidle_device *dev, int index);
68312
68313 struct module *owner;
68314-};
68315+} __do_const;
68316
68317 #ifdef CONFIG_CPU_IDLE
68318
68319diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
68320index d08e4d2..95fad61 100644
68321--- a/include/linux/cpumask.h
68322+++ b/include/linux/cpumask.h
68323@@ -118,17 +118,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
68324 }
68325
68326 /* Valid inputs for n are -1 and 0. */
68327-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
68328+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
68329 {
68330 return n+1;
68331 }
68332
68333-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
68334+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
68335 {
68336 return n+1;
68337 }
68338
68339-static inline unsigned int cpumask_next_and(int n,
68340+static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
68341 const struct cpumask *srcp,
68342 const struct cpumask *andp)
68343 {
68344@@ -167,7 +167,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
68345 *
68346 * Returns >= nr_cpu_ids if no further cpus set.
68347 */
68348-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
68349+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
68350 {
68351 /* -1 is a legal arg here. */
68352 if (n != -1)
68353@@ -182,7 +182,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
68354 *
68355 * Returns >= nr_cpu_ids if no further cpus unset.
68356 */
68357-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
68358+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
68359 {
68360 /* -1 is a legal arg here. */
68361 if (n != -1)
68362@@ -190,7 +190,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
68363 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
68364 }
68365
68366-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
68367+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
68368 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
68369
68370 /**
68371diff --git a/include/linux/cred.h b/include/linux/cred.h
68372index 04421e8..6bce4ef 100644
68373--- a/include/linux/cred.h
68374+++ b/include/linux/cred.h
68375@@ -194,6 +194,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
68376 static inline void validate_process_creds(void)
68377 {
68378 }
68379+static inline void validate_task_creds(struct task_struct *task)
68380+{
68381+}
68382 #endif
68383
68384 /**
68385diff --git a/include/linux/crypto.h b/include/linux/crypto.h
68386index b92eadf..b4ecdc1 100644
68387--- a/include/linux/crypto.h
68388+++ b/include/linux/crypto.h
68389@@ -373,7 +373,7 @@ struct cipher_tfm {
68390 const u8 *key, unsigned int keylen);
68391 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
68392 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
68393-};
68394+} __no_const;
68395
68396 struct hash_tfm {
68397 int (*init)(struct hash_desc *desc);
68398@@ -394,13 +394,13 @@ struct compress_tfm {
68399 int (*cot_decompress)(struct crypto_tfm *tfm,
68400 const u8 *src, unsigned int slen,
68401 u8 *dst, unsigned int *dlen);
68402-};
68403+} __no_const;
68404
68405 struct rng_tfm {
68406 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
68407 unsigned int dlen);
68408 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
68409-};
68410+} __no_const;
68411
68412 #define crt_ablkcipher crt_u.ablkcipher
68413 #define crt_aead crt_u.aead
68414diff --git a/include/linux/ctype.h b/include/linux/ctype.h
68415index 653589e..4ef254a 100644
68416--- a/include/linux/ctype.h
68417+++ b/include/linux/ctype.h
68418@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
68419 * Fast implementation of tolower() for internal usage. Do not use in your
68420 * code.
68421 */
68422-static inline char _tolower(const char c)
68423+static inline unsigned char _tolower(const unsigned char c)
68424 {
68425 return c | 0x20;
68426 }
68427diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
68428index 7925bf0..d5143d2 100644
68429--- a/include/linux/decompress/mm.h
68430+++ b/include/linux/decompress/mm.h
68431@@ -77,7 +77,7 @@ static void free(void *where)
68432 * warnings when not needed (indeed large_malloc / large_free are not
68433 * needed by inflate */
68434
68435-#define malloc(a) kmalloc(a, GFP_KERNEL)
68436+#define malloc(a) kmalloc((a), GFP_KERNEL)
68437 #define free(a) kfree(a)
68438
68439 #define large_malloc(a) vmalloc(a)
68440diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
68441index fe8c447..bdc1f33 100644
68442--- a/include/linux/devfreq.h
68443+++ b/include/linux/devfreq.h
68444@@ -114,7 +114,7 @@ struct devfreq_governor {
68445 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
68446 int (*event_handler)(struct devfreq *devfreq,
68447 unsigned int event, void *data);
68448-};
68449+} __do_const;
68450
68451 /**
68452 * struct devfreq - Device devfreq structure
68453diff --git a/include/linux/device.h b/include/linux/device.h
68454index c0a1261..dba7569 100644
68455--- a/include/linux/device.h
68456+++ b/include/linux/device.h
68457@@ -290,7 +290,7 @@ struct subsys_interface {
68458 struct list_head node;
68459 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
68460 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
68461-};
68462+} __do_const;
68463
68464 int subsys_interface_register(struct subsys_interface *sif);
68465 void subsys_interface_unregister(struct subsys_interface *sif);
68466@@ -473,7 +473,7 @@ struct device_type {
68467 void (*release)(struct device *dev);
68468
68469 const struct dev_pm_ops *pm;
68470-};
68471+} __do_const;
68472
68473 /* interface for exporting device attributes */
68474 struct device_attribute {
68475@@ -483,11 +483,12 @@ struct device_attribute {
68476 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
68477 const char *buf, size_t count);
68478 };
68479+typedef struct device_attribute __no_const device_attribute_no_const;
68480
68481 struct dev_ext_attribute {
68482 struct device_attribute attr;
68483 void *var;
68484-};
68485+} __do_const;
68486
68487 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
68488 char *buf);
68489diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
68490index 94af418..b1ca7a2 100644
68491--- a/include/linux/dma-mapping.h
68492+++ b/include/linux/dma-mapping.h
68493@@ -54,7 +54,7 @@ struct dma_map_ops {
68494 u64 (*get_required_mask)(struct device *dev);
68495 #endif
68496 int is_phys;
68497-};
68498+} __do_const;
68499
68500 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
68501
68502diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
68503index 96d3e4a..dc36433 100644
68504--- a/include/linux/dmaengine.h
68505+++ b/include/linux/dmaengine.h
68506@@ -1035,9 +1035,9 @@ struct dma_pinned_list {
68507 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
68508 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
68509
68510-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
68511+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
68512 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
68513-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
68514+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
68515 struct dma_pinned_list *pinned_list, struct page *page,
68516 unsigned int offset, size_t len);
68517
68518diff --git a/include/linux/efi.h b/include/linux/efi.h
68519index 2bc0ad7..3f7b006 100644
68520--- a/include/linux/efi.h
68521+++ b/include/linux/efi.h
68522@@ -745,6 +745,7 @@ struct efivar_operations {
68523 efi_set_variable_t *set_variable;
68524 efi_query_variable_store_t *query_variable_store;
68525 };
68526+typedef struct efivar_operations __no_const efivar_operations_no_const;
68527
68528 struct efivars {
68529 /*
68530diff --git a/include/linux/elf.h b/include/linux/elf.h
68531index 40a3c0e..4c45a38 100644
68532--- a/include/linux/elf.h
68533+++ b/include/linux/elf.h
68534@@ -24,6 +24,7 @@ extern Elf32_Dyn _DYNAMIC [];
68535 #define elf_note elf32_note
68536 #define elf_addr_t Elf32_Off
68537 #define Elf_Half Elf32_Half
68538+#define elf_dyn Elf32_Dyn
68539
68540 #else
68541
68542@@ -34,6 +35,7 @@ extern Elf64_Dyn _DYNAMIC [];
68543 #define elf_note elf64_note
68544 #define elf_addr_t Elf64_Off
68545 #define Elf_Half Elf64_Half
68546+#define elf_dyn Elf64_Dyn
68547
68548 #endif
68549
68550diff --git a/include/linux/err.h b/include/linux/err.h
68551index f2edce2..cc2082c 100644
68552--- a/include/linux/err.h
68553+++ b/include/linux/err.h
68554@@ -19,12 +19,12 @@
68555
68556 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
68557
68558-static inline void * __must_check ERR_PTR(long error)
68559+static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
68560 {
68561 return (void *) error;
68562 }
68563
68564-static inline long __must_check PTR_ERR(const void *ptr)
68565+static inline long __must_check __intentional_overflow(-1) PTR_ERR(const void *ptr)
68566 {
68567 return (long) ptr;
68568 }
68569diff --git a/include/linux/extcon.h b/include/linux/extcon.h
68570index fcb51c8..bdafcf6 100644
68571--- a/include/linux/extcon.h
68572+++ b/include/linux/extcon.h
68573@@ -134,7 +134,7 @@ struct extcon_dev {
68574 /* /sys/class/extcon/.../mutually_exclusive/... */
68575 struct attribute_group attr_g_muex;
68576 struct attribute **attrs_muex;
68577- struct device_attribute *d_attrs_muex;
68578+ device_attribute_no_const *d_attrs_muex;
68579 };
68580
68581 /**
68582diff --git a/include/linux/fb.h b/include/linux/fb.h
68583index d49c60f..2834fbe 100644
68584--- a/include/linux/fb.h
68585+++ b/include/linux/fb.h
68586@@ -304,7 +304,7 @@ struct fb_ops {
68587 /* called at KDB enter and leave time to prepare the console */
68588 int (*fb_debug_enter)(struct fb_info *info);
68589 int (*fb_debug_leave)(struct fb_info *info);
68590-};
68591+} __do_const;
68592
68593 #ifdef CONFIG_FB_TILEBLITTING
68594 #define FB_TILE_CURSOR_NONE 0
68595diff --git a/include/linux/filter.h b/include/linux/filter.h
68596index f65f5a6..2f4f93a 100644
68597--- a/include/linux/filter.h
68598+++ b/include/linux/filter.h
68599@@ -20,6 +20,7 @@ struct compat_sock_fprog {
68600
68601 struct sk_buff;
68602 struct sock;
68603+struct bpf_jit_work;
68604
68605 struct sk_filter
68606 {
68607@@ -27,6 +28,9 @@ struct sk_filter
68608 unsigned int len; /* Number of filter blocks */
68609 unsigned int (*bpf_func)(const struct sk_buff *skb,
68610 const struct sock_filter *filter);
68611+#ifdef CONFIG_BPF_JIT
68612+ struct bpf_jit_work *work;
68613+#endif
68614 struct rcu_head rcu;
68615 struct sock_filter insns[0];
68616 };
68617diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
68618index 8293262..2b3b8bd 100644
68619--- a/include/linux/frontswap.h
68620+++ b/include/linux/frontswap.h
68621@@ -11,7 +11,7 @@ struct frontswap_ops {
68622 int (*load)(unsigned, pgoff_t, struct page *);
68623 void (*invalidate_page)(unsigned, pgoff_t);
68624 void (*invalidate_area)(unsigned);
68625-};
68626+} __no_const;
68627
68628 extern bool frontswap_enabled;
68629 extern struct frontswap_ops *
68630diff --git a/include/linux/fs.h b/include/linux/fs.h
68631index 65c2be2..4c53f6e 100644
68632--- a/include/linux/fs.h
68633+++ b/include/linux/fs.h
68634@@ -1543,7 +1543,8 @@ struct file_operations {
68635 long (*fallocate)(struct file *file, int mode, loff_t offset,
68636 loff_t len);
68637 int (*show_fdinfo)(struct seq_file *m, struct file *f);
68638-};
68639+} __do_const;
68640+typedef struct file_operations __no_const file_operations_no_const;
68641
68642 struct inode_operations {
68643 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
68644@@ -2688,4 +2689,14 @@ static inline void inode_has_no_xattr(struct inode *inode)
68645 inode->i_flags |= S_NOSEC;
68646 }
68647
68648+static inline bool is_sidechannel_device(const struct inode *inode)
68649+{
68650+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
68651+ umode_t mode = inode->i_mode;
68652+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
68653+#else
68654+ return false;
68655+#endif
68656+}
68657+
68658 #endif /* _LINUX_FS_H */
68659diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
68660index 2b93a9a..855d94a 100644
68661--- a/include/linux/fs_struct.h
68662+++ b/include/linux/fs_struct.h
68663@@ -6,7 +6,7 @@
68664 #include <linux/seqlock.h>
68665
68666 struct fs_struct {
68667- int users;
68668+ atomic_t users;
68669 spinlock_t lock;
68670 seqcount_t seq;
68671 int umask;
68672diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
68673index 5dfa0aa..6acf322 100644
68674--- a/include/linux/fscache-cache.h
68675+++ b/include/linux/fscache-cache.h
68676@@ -112,7 +112,7 @@ struct fscache_operation {
68677 fscache_operation_release_t release;
68678 };
68679
68680-extern atomic_t fscache_op_debug_id;
68681+extern atomic_unchecked_t fscache_op_debug_id;
68682 extern void fscache_op_work_func(struct work_struct *work);
68683
68684 extern void fscache_enqueue_operation(struct fscache_operation *);
68685@@ -134,7 +134,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
68686 INIT_WORK(&op->work, fscache_op_work_func);
68687 atomic_set(&op->usage, 1);
68688 op->state = FSCACHE_OP_ST_INITIALISED;
68689- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
68690+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
68691 op->processor = processor;
68692 op->release = release;
68693 INIT_LIST_HEAD(&op->pend_link);
68694diff --git a/include/linux/fscache.h b/include/linux/fscache.h
68695index 7a08623..4c07b0f 100644
68696--- a/include/linux/fscache.h
68697+++ b/include/linux/fscache.h
68698@@ -152,7 +152,7 @@ struct fscache_cookie_def {
68699 * - this is mandatory for any object that may have data
68700 */
68701 void (*now_uncached)(void *cookie_netfs_data);
68702-};
68703+} __do_const;
68704
68705 /*
68706 * fscache cached network filesystem type
68707diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
68708index a78680a..87bd73e 100644
68709--- a/include/linux/fsnotify.h
68710+++ b/include/linux/fsnotify.h
68711@@ -195,6 +195,9 @@ static inline void fsnotify_access(struct file *file)
68712 struct inode *inode = path->dentry->d_inode;
68713 __u32 mask = FS_ACCESS;
68714
68715+ if (is_sidechannel_device(inode))
68716+ return;
68717+
68718 if (S_ISDIR(inode->i_mode))
68719 mask |= FS_ISDIR;
68720
68721@@ -213,6 +216,9 @@ static inline void fsnotify_modify(struct file *file)
68722 struct inode *inode = path->dentry->d_inode;
68723 __u32 mask = FS_MODIFY;
68724
68725+ if (is_sidechannel_device(inode))
68726+ return;
68727+
68728 if (S_ISDIR(inode->i_mode))
68729 mask |= FS_ISDIR;
68730
68731@@ -315,7 +321,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
68732 */
68733 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
68734 {
68735- return kstrdup(name, GFP_KERNEL);
68736+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
68737 }
68738
68739 /*
68740diff --git a/include/linux/genhd.h b/include/linux/genhd.h
68741index 9f3c275..911b591 100644
68742--- a/include/linux/genhd.h
68743+++ b/include/linux/genhd.h
68744@@ -194,7 +194,7 @@ struct gendisk {
68745 struct kobject *slave_dir;
68746
68747 struct timer_rand_state *random;
68748- atomic_t sync_io; /* RAID */
68749+ atomic_unchecked_t sync_io; /* RAID */
68750 struct disk_events *ev;
68751 #ifdef CONFIG_BLK_DEV_INTEGRITY
68752 struct blk_integrity *integrity;
68753diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
68754index 023bc34..b02b46a 100644
68755--- a/include/linux/genl_magic_func.h
68756+++ b/include/linux/genl_magic_func.h
68757@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
68758 },
68759
68760 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
68761-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
68762+static struct genl_ops ZZZ_genl_ops[] = {
68763 #include GENL_MAGIC_INCLUDE_FILE
68764 };
68765
68766diff --git a/include/linux/gfp.h b/include/linux/gfp.h
68767index 0f615eb..5c3832f 100644
68768--- a/include/linux/gfp.h
68769+++ b/include/linux/gfp.h
68770@@ -35,6 +35,13 @@ struct vm_area_struct;
68771 #define ___GFP_NO_KSWAPD 0x400000u
68772 #define ___GFP_OTHER_NODE 0x800000u
68773 #define ___GFP_WRITE 0x1000000u
68774+
68775+#ifdef CONFIG_PAX_USERCOPY_SLABS
68776+#define ___GFP_USERCOPY 0x2000000u
68777+#else
68778+#define ___GFP_USERCOPY 0
68779+#endif
68780+
68781 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
68782
68783 /*
68784@@ -92,6 +99,7 @@ struct vm_area_struct;
68785 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
68786 #define __GFP_KMEMCG ((__force gfp_t)___GFP_KMEMCG) /* Allocation comes from a memcg-accounted resource */
68787 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
68788+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
68789
68790 /*
68791 * This may seem redundant, but it's a way of annotating false positives vs.
68792@@ -99,7 +107,7 @@ struct vm_area_struct;
68793 */
68794 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
68795
68796-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
68797+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
68798 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
68799
68800 /* This equals 0, but use constants in case they ever change */
68801@@ -153,6 +161,8 @@ struct vm_area_struct;
68802 /* 4GB DMA on some platforms */
68803 #define GFP_DMA32 __GFP_DMA32
68804
68805+#define GFP_USERCOPY __GFP_USERCOPY
68806+
68807 /* Convert GFP flags to their corresponding migrate type */
68808 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
68809 {
68810diff --git a/include/linux/gracl.h b/include/linux/gracl.h
68811new file mode 100644
68812index 0000000..ebe6d72
68813--- /dev/null
68814+++ b/include/linux/gracl.h
68815@@ -0,0 +1,319 @@
68816+#ifndef GR_ACL_H
68817+#define GR_ACL_H
68818+
68819+#include <linux/grdefs.h>
68820+#include <linux/resource.h>
68821+#include <linux/capability.h>
68822+#include <linux/dcache.h>
68823+#include <asm/resource.h>
68824+
68825+/* Major status information */
68826+
68827+#define GR_VERSION "grsecurity 2.9.1"
68828+#define GRSECURITY_VERSION 0x2901
68829+
68830+enum {
68831+ GR_SHUTDOWN = 0,
68832+ GR_ENABLE = 1,
68833+ GR_SPROLE = 2,
68834+ GR_RELOAD = 3,
68835+ GR_SEGVMOD = 4,
68836+ GR_STATUS = 5,
68837+ GR_UNSPROLE = 6,
68838+ GR_PASSSET = 7,
68839+ GR_SPROLEPAM = 8,
68840+};
68841+
68842+/* Password setup definitions
68843+ * kernel/grhash.c */
68844+enum {
68845+ GR_PW_LEN = 128,
68846+ GR_SALT_LEN = 16,
68847+ GR_SHA_LEN = 32,
68848+};
68849+
68850+enum {
68851+ GR_SPROLE_LEN = 64,
68852+};
68853+
68854+enum {
68855+ GR_NO_GLOB = 0,
68856+ GR_REG_GLOB,
68857+ GR_CREATE_GLOB
68858+};
68859+
68860+#define GR_NLIMITS 32
68861+
68862+/* Begin Data Structures */
68863+
68864+struct sprole_pw {
68865+ unsigned char *rolename;
68866+ unsigned char salt[GR_SALT_LEN];
68867+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
68868+};
68869+
68870+struct name_entry {
68871+ __u32 key;
68872+ ino_t inode;
68873+ dev_t device;
68874+ char *name;
68875+ __u16 len;
68876+ __u8 deleted;
68877+ struct name_entry *prev;
68878+ struct name_entry *next;
68879+};
68880+
68881+struct inodev_entry {
68882+ struct name_entry *nentry;
68883+ struct inodev_entry *prev;
68884+ struct inodev_entry *next;
68885+};
68886+
68887+struct acl_role_db {
68888+ struct acl_role_label **r_hash;
68889+ __u32 r_size;
68890+};
68891+
68892+struct inodev_db {
68893+ struct inodev_entry **i_hash;
68894+ __u32 i_size;
68895+};
68896+
68897+struct name_db {
68898+ struct name_entry **n_hash;
68899+ __u32 n_size;
68900+};
68901+
68902+struct crash_uid {
68903+ uid_t uid;
68904+ unsigned long expires;
68905+};
68906+
68907+struct gr_hash_struct {
68908+ void **table;
68909+ void **nametable;
68910+ void *first;
68911+ __u32 table_size;
68912+ __u32 used_size;
68913+ int type;
68914+};
68915+
68916+/* Userspace Grsecurity ACL data structures */
68917+
68918+struct acl_subject_label {
68919+ char *filename;
68920+ ino_t inode;
68921+ dev_t device;
68922+ __u32 mode;
68923+ kernel_cap_t cap_mask;
68924+ kernel_cap_t cap_lower;
68925+ kernel_cap_t cap_invert_audit;
68926+
68927+ struct rlimit res[GR_NLIMITS];
68928+ __u32 resmask;
68929+
68930+ __u8 user_trans_type;
68931+ __u8 group_trans_type;
68932+ uid_t *user_transitions;
68933+ gid_t *group_transitions;
68934+ __u16 user_trans_num;
68935+ __u16 group_trans_num;
68936+
68937+ __u32 sock_families[2];
68938+ __u32 ip_proto[8];
68939+ __u32 ip_type;
68940+ struct acl_ip_label **ips;
68941+ __u32 ip_num;
68942+ __u32 inaddr_any_override;
68943+
68944+ __u32 crashes;
68945+ unsigned long expires;
68946+
68947+ struct acl_subject_label *parent_subject;
68948+ struct gr_hash_struct *hash;
68949+ struct acl_subject_label *prev;
68950+ struct acl_subject_label *next;
68951+
68952+ struct acl_object_label **obj_hash;
68953+ __u32 obj_hash_size;
68954+ __u16 pax_flags;
68955+};
68956+
68957+struct role_allowed_ip {
68958+ __u32 addr;
68959+ __u32 netmask;
68960+
68961+ struct role_allowed_ip *prev;
68962+ struct role_allowed_ip *next;
68963+};
68964+
68965+struct role_transition {
68966+ char *rolename;
68967+
68968+ struct role_transition *prev;
68969+ struct role_transition *next;
68970+};
68971+
68972+struct acl_role_label {
68973+ char *rolename;
68974+ uid_t uidgid;
68975+ __u16 roletype;
68976+
68977+ __u16 auth_attempts;
68978+ unsigned long expires;
68979+
68980+ struct acl_subject_label *root_label;
68981+ struct gr_hash_struct *hash;
68982+
68983+ struct acl_role_label *prev;
68984+ struct acl_role_label *next;
68985+
68986+ struct role_transition *transitions;
68987+ struct role_allowed_ip *allowed_ips;
68988+ uid_t *domain_children;
68989+ __u16 domain_child_num;
68990+
68991+ umode_t umask;
68992+
68993+ struct acl_subject_label **subj_hash;
68994+ __u32 subj_hash_size;
68995+};
68996+
68997+struct user_acl_role_db {
68998+ struct acl_role_label **r_table;
68999+ __u32 num_pointers; /* Number of allocations to track */
69000+ __u32 num_roles; /* Number of roles */
69001+ __u32 num_domain_children; /* Number of domain children */
69002+ __u32 num_subjects; /* Number of subjects */
69003+ __u32 num_objects; /* Number of objects */
69004+};
69005+
69006+struct acl_object_label {
69007+ char *filename;
69008+ ino_t inode;
69009+ dev_t device;
69010+ __u32 mode;
69011+
69012+ struct acl_subject_label *nested;
69013+ struct acl_object_label *globbed;
69014+
69015+ /* next two structures not used */
69016+
69017+ struct acl_object_label *prev;
69018+ struct acl_object_label *next;
69019+};
69020+
69021+struct acl_ip_label {
69022+ char *iface;
69023+ __u32 addr;
69024+ __u32 netmask;
69025+ __u16 low, high;
69026+ __u8 mode;
69027+ __u32 type;
69028+ __u32 proto[8];
69029+
69030+ /* next two structures not used */
69031+
69032+ struct acl_ip_label *prev;
69033+ struct acl_ip_label *next;
69034+};
69035+
69036+struct gr_arg {
69037+ struct user_acl_role_db role_db;
69038+ unsigned char pw[GR_PW_LEN];
69039+ unsigned char salt[GR_SALT_LEN];
69040+ unsigned char sum[GR_SHA_LEN];
69041+ unsigned char sp_role[GR_SPROLE_LEN];
69042+ struct sprole_pw *sprole_pws;
69043+ dev_t segv_device;
69044+ ino_t segv_inode;
69045+ uid_t segv_uid;
69046+ __u16 num_sprole_pws;
69047+ __u16 mode;
69048+};
69049+
69050+struct gr_arg_wrapper {
69051+ struct gr_arg *arg;
69052+ __u32 version;
69053+ __u32 size;
69054+};
69055+
69056+struct subject_map {
69057+ struct acl_subject_label *user;
69058+ struct acl_subject_label *kernel;
69059+ struct subject_map *prev;
69060+ struct subject_map *next;
69061+};
69062+
69063+struct acl_subj_map_db {
69064+ struct subject_map **s_hash;
69065+ __u32 s_size;
69066+};
69067+
69068+/* End Data Structures Section */
69069+
69070+/* Hash functions generated by empirical testing by Brad Spengler
69071+ Makes good use of the low bits of the inode. Generally 0-1 times
69072+ in loop for successful match. 0-3 for unsuccessful match.
69073+ Shift/add algorithm with modulus of table size and an XOR*/
69074+
69075+static __inline__ unsigned int
69076+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
69077+{
69078+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
69079+}
69080+
69081+ static __inline__ unsigned int
69082+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
69083+{
69084+ return ((const unsigned long)userp % sz);
69085+}
69086+
69087+static __inline__ unsigned int
69088+gr_fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
69089+{
69090+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
69091+}
69092+
69093+static __inline__ unsigned int
69094+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
69095+{
69096+ return full_name_hash((const unsigned char *)name, len) % sz;
69097+}
69098+
69099+#define FOR_EACH_ROLE_START(role) \
69100+ role = role_list; \
69101+ while (role) {
69102+
69103+#define FOR_EACH_ROLE_END(role) \
69104+ role = role->prev; \
69105+ }
69106+
69107+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
69108+ subj = NULL; \
69109+ iter = 0; \
69110+ while (iter < role->subj_hash_size) { \
69111+ if (subj == NULL) \
69112+ subj = role->subj_hash[iter]; \
69113+ if (subj == NULL) { \
69114+ iter++; \
69115+ continue; \
69116+ }
69117+
69118+#define FOR_EACH_SUBJECT_END(subj,iter) \
69119+ subj = subj->next; \
69120+ if (subj == NULL) \
69121+ iter++; \
69122+ }
69123+
69124+
69125+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
69126+ subj = role->hash->first; \
69127+ while (subj != NULL) {
69128+
69129+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
69130+ subj = subj->next; \
69131+ }
69132+
69133+#endif
69134+
69135diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
69136new file mode 100644
69137index 0000000..323ecf2
69138--- /dev/null
69139+++ b/include/linux/gralloc.h
69140@@ -0,0 +1,9 @@
69141+#ifndef __GRALLOC_H
69142+#define __GRALLOC_H
69143+
69144+void acl_free_all(void);
69145+int acl_alloc_stack_init(unsigned long size);
69146+void *acl_alloc(unsigned long len);
69147+void *acl_alloc_num(unsigned long num, unsigned long len);
69148+
69149+#endif
69150diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
69151new file mode 100644
69152index 0000000..be66033
69153--- /dev/null
69154+++ b/include/linux/grdefs.h
69155@@ -0,0 +1,140 @@
69156+#ifndef GRDEFS_H
69157+#define GRDEFS_H
69158+
69159+/* Begin grsecurity status declarations */
69160+
69161+enum {
69162+ GR_READY = 0x01,
69163+ GR_STATUS_INIT = 0x00 // disabled state
69164+};
69165+
69166+/* Begin ACL declarations */
69167+
69168+/* Role flags */
69169+
69170+enum {
69171+ GR_ROLE_USER = 0x0001,
69172+ GR_ROLE_GROUP = 0x0002,
69173+ GR_ROLE_DEFAULT = 0x0004,
69174+ GR_ROLE_SPECIAL = 0x0008,
69175+ GR_ROLE_AUTH = 0x0010,
69176+ GR_ROLE_NOPW = 0x0020,
69177+ GR_ROLE_GOD = 0x0040,
69178+ GR_ROLE_LEARN = 0x0080,
69179+ GR_ROLE_TPE = 0x0100,
69180+ GR_ROLE_DOMAIN = 0x0200,
69181+ GR_ROLE_PAM = 0x0400,
69182+ GR_ROLE_PERSIST = 0x0800
69183+};
69184+
69185+/* ACL Subject and Object mode flags */
69186+enum {
69187+ GR_DELETED = 0x80000000
69188+};
69189+
69190+/* ACL Object-only mode flags */
69191+enum {
69192+ GR_READ = 0x00000001,
69193+ GR_APPEND = 0x00000002,
69194+ GR_WRITE = 0x00000004,
69195+ GR_EXEC = 0x00000008,
69196+ GR_FIND = 0x00000010,
69197+ GR_INHERIT = 0x00000020,
69198+ GR_SETID = 0x00000040,
69199+ GR_CREATE = 0x00000080,
69200+ GR_DELETE = 0x00000100,
69201+ GR_LINK = 0x00000200,
69202+ GR_AUDIT_READ = 0x00000400,
69203+ GR_AUDIT_APPEND = 0x00000800,
69204+ GR_AUDIT_WRITE = 0x00001000,
69205+ GR_AUDIT_EXEC = 0x00002000,
69206+ GR_AUDIT_FIND = 0x00004000,
69207+ GR_AUDIT_INHERIT= 0x00008000,
69208+ GR_AUDIT_SETID = 0x00010000,
69209+ GR_AUDIT_CREATE = 0x00020000,
69210+ GR_AUDIT_DELETE = 0x00040000,
69211+ GR_AUDIT_LINK = 0x00080000,
69212+ GR_PTRACERD = 0x00100000,
69213+ GR_NOPTRACE = 0x00200000,
69214+ GR_SUPPRESS = 0x00400000,
69215+ GR_NOLEARN = 0x00800000,
69216+ GR_INIT_TRANSFER= 0x01000000
69217+};
69218+
69219+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
69220+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
69221+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
69222+
69223+/* ACL subject-only mode flags */
69224+enum {
69225+ GR_KILL = 0x00000001,
69226+ GR_VIEW = 0x00000002,
69227+ GR_PROTECTED = 0x00000004,
69228+ GR_LEARN = 0x00000008,
69229+ GR_OVERRIDE = 0x00000010,
69230+ /* just a placeholder, this mode is only used in userspace */
69231+ GR_DUMMY = 0x00000020,
69232+ GR_PROTSHM = 0x00000040,
69233+ GR_KILLPROC = 0x00000080,
69234+ GR_KILLIPPROC = 0x00000100,
69235+ /* just a placeholder, this mode is only used in userspace */
69236+ GR_NOTROJAN = 0x00000200,
69237+ GR_PROTPROCFD = 0x00000400,
69238+ GR_PROCACCT = 0x00000800,
69239+ GR_RELAXPTRACE = 0x00001000,
69240+ //GR_NESTED = 0x00002000,
69241+ GR_INHERITLEARN = 0x00004000,
69242+ GR_PROCFIND = 0x00008000,
69243+ GR_POVERRIDE = 0x00010000,
69244+ GR_KERNELAUTH = 0x00020000,
69245+ GR_ATSECURE = 0x00040000,
69246+ GR_SHMEXEC = 0x00080000
69247+};
69248+
69249+enum {
69250+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
69251+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
69252+ GR_PAX_ENABLE_MPROTECT = 0x0004,
69253+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
69254+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
69255+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
69256+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
69257+ GR_PAX_DISABLE_MPROTECT = 0x0400,
69258+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
69259+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
69260+};
69261+
69262+enum {
69263+ GR_ID_USER = 0x01,
69264+ GR_ID_GROUP = 0x02,
69265+};
69266+
69267+enum {
69268+ GR_ID_ALLOW = 0x01,
69269+ GR_ID_DENY = 0x02,
69270+};
69271+
69272+#define GR_CRASH_RES 31
69273+#define GR_UIDTABLE_MAX 500
69274+
69275+/* begin resource learning section */
69276+enum {
69277+ GR_RLIM_CPU_BUMP = 60,
69278+ GR_RLIM_FSIZE_BUMP = 50000,
69279+ GR_RLIM_DATA_BUMP = 10000,
69280+ GR_RLIM_STACK_BUMP = 1000,
69281+ GR_RLIM_CORE_BUMP = 10000,
69282+ GR_RLIM_RSS_BUMP = 500000,
69283+ GR_RLIM_NPROC_BUMP = 1,
69284+ GR_RLIM_NOFILE_BUMP = 5,
69285+ GR_RLIM_MEMLOCK_BUMP = 50000,
69286+ GR_RLIM_AS_BUMP = 500000,
69287+ GR_RLIM_LOCKS_BUMP = 2,
69288+ GR_RLIM_SIGPENDING_BUMP = 5,
69289+ GR_RLIM_MSGQUEUE_BUMP = 10000,
69290+ GR_RLIM_NICE_BUMP = 1,
69291+ GR_RLIM_RTPRIO_BUMP = 1,
69292+ GR_RLIM_RTTIME_BUMP = 1000000
69293+};
69294+
69295+#endif
69296diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
69297new file mode 100644
69298index 0000000..12994b5
69299--- /dev/null
69300+++ b/include/linux/grinternal.h
69301@@ -0,0 +1,227 @@
69302+#ifndef __GRINTERNAL_H
69303+#define __GRINTERNAL_H
69304+
69305+#ifdef CONFIG_GRKERNSEC
69306+
69307+#include <linux/fs.h>
69308+#include <linux/mnt_namespace.h>
69309+#include <linux/nsproxy.h>
69310+#include <linux/gracl.h>
69311+#include <linux/grdefs.h>
69312+#include <linux/grmsg.h>
69313+
69314+void gr_add_learn_entry(const char *fmt, ...)
69315+ __attribute__ ((format (printf, 1, 2)));
69316+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
69317+ const struct vfsmount *mnt);
69318+__u32 gr_check_create(const struct dentry *new_dentry,
69319+ const struct dentry *parent,
69320+ const struct vfsmount *mnt, const __u32 mode);
69321+int gr_check_protected_task(const struct task_struct *task);
69322+__u32 to_gr_audit(const __u32 reqmode);
69323+int gr_set_acls(const int type);
69324+int gr_apply_subject_to_task(struct task_struct *task);
69325+int gr_acl_is_enabled(void);
69326+char gr_roletype_to_char(void);
69327+
69328+void gr_handle_alertkill(struct task_struct *task);
69329+char *gr_to_filename(const struct dentry *dentry,
69330+ const struct vfsmount *mnt);
69331+char *gr_to_filename1(const struct dentry *dentry,
69332+ const struct vfsmount *mnt);
69333+char *gr_to_filename2(const struct dentry *dentry,
69334+ const struct vfsmount *mnt);
69335+char *gr_to_filename3(const struct dentry *dentry,
69336+ const struct vfsmount *mnt);
69337+
69338+extern int grsec_enable_ptrace_readexec;
69339+extern int grsec_enable_harden_ptrace;
69340+extern int grsec_enable_link;
69341+extern int grsec_enable_fifo;
69342+extern int grsec_enable_execve;
69343+extern int grsec_enable_shm;
69344+extern int grsec_enable_execlog;
69345+extern int grsec_enable_signal;
69346+extern int grsec_enable_audit_ptrace;
69347+extern int grsec_enable_forkfail;
69348+extern int grsec_enable_time;
69349+extern int grsec_enable_rofs;
69350+extern int grsec_enable_chroot_shmat;
69351+extern int grsec_enable_chroot_mount;
69352+extern int grsec_enable_chroot_double;
69353+extern int grsec_enable_chroot_pivot;
69354+extern int grsec_enable_chroot_chdir;
69355+extern int grsec_enable_chroot_chmod;
69356+extern int grsec_enable_chroot_mknod;
69357+extern int grsec_enable_chroot_fchdir;
69358+extern int grsec_enable_chroot_nice;
69359+extern int grsec_enable_chroot_execlog;
69360+extern int grsec_enable_chroot_caps;
69361+extern int grsec_enable_chroot_sysctl;
69362+extern int grsec_enable_chroot_unix;
69363+extern int grsec_enable_symlinkown;
69364+extern kgid_t grsec_symlinkown_gid;
69365+extern int grsec_enable_tpe;
69366+extern kgid_t grsec_tpe_gid;
69367+extern int grsec_enable_tpe_all;
69368+extern int grsec_enable_tpe_invert;
69369+extern int grsec_enable_socket_all;
69370+extern kgid_t grsec_socket_all_gid;
69371+extern int grsec_enable_socket_client;
69372+extern kgid_t grsec_socket_client_gid;
69373+extern int grsec_enable_socket_server;
69374+extern kgid_t grsec_socket_server_gid;
69375+extern kgid_t grsec_audit_gid;
69376+extern int grsec_enable_group;
69377+extern int grsec_enable_audit_textrel;
69378+extern int grsec_enable_log_rwxmaps;
69379+extern int grsec_enable_mount;
69380+extern int grsec_enable_chdir;
69381+extern int grsec_resource_logging;
69382+extern int grsec_enable_blackhole;
69383+extern int grsec_lastack_retries;
69384+extern int grsec_enable_brute;
69385+extern int grsec_lock;
69386+
69387+extern spinlock_t grsec_alert_lock;
69388+extern unsigned long grsec_alert_wtime;
69389+extern unsigned long grsec_alert_fyet;
69390+
69391+extern spinlock_t grsec_audit_lock;
69392+
69393+extern rwlock_t grsec_exec_file_lock;
69394+
69395+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
69396+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
69397+ (tsk)->exec_file->f_path.mnt) : "/")
69398+
69399+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
69400+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
69401+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
69402+
69403+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
69404+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
69405+ (tsk)->exec_file->f_path.mnt) : "/")
69406+
69407+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
69408+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
69409+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
69410+
69411+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
69412+
69413+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
69414+
69415+static inline bool gr_is_same_file(const struct file *file1, const struct file *file2)
69416+{
69417+ if (file1 && file2) {
69418+ const struct inode *inode1 = file1->f_path.dentry->d_inode;
69419+ const struct inode *inode2 = file2->f_path.dentry->d_inode;
69420+ if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev)
69421+ return true;
69422+ }
69423+
69424+ return false;
69425+}
69426+
69427+#define GR_CHROOT_CAPS {{ \
69428+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
69429+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
69430+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
69431+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
69432+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
69433+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
69434+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
69435+
69436+#define security_learn(normal_msg,args...) \
69437+({ \
69438+ read_lock(&grsec_exec_file_lock); \
69439+ gr_add_learn_entry(normal_msg "\n", ## args); \
69440+ read_unlock(&grsec_exec_file_lock); \
69441+})
69442+
69443+enum {
69444+ GR_DO_AUDIT,
69445+ GR_DONT_AUDIT,
69446+ /* used for non-audit messages that we shouldn't kill the task on */
69447+ GR_DONT_AUDIT_GOOD
69448+};
69449+
69450+enum {
69451+ GR_TTYSNIFF,
69452+ GR_RBAC,
69453+ GR_RBAC_STR,
69454+ GR_STR_RBAC,
69455+ GR_RBAC_MODE2,
69456+ GR_RBAC_MODE3,
69457+ GR_FILENAME,
69458+ GR_SYSCTL_HIDDEN,
69459+ GR_NOARGS,
69460+ GR_ONE_INT,
69461+ GR_ONE_INT_TWO_STR,
69462+ GR_ONE_STR,
69463+ GR_STR_INT,
69464+ GR_TWO_STR_INT,
69465+ GR_TWO_INT,
69466+ GR_TWO_U64,
69467+ GR_THREE_INT,
69468+ GR_FIVE_INT_TWO_STR,
69469+ GR_TWO_STR,
69470+ GR_THREE_STR,
69471+ GR_FOUR_STR,
69472+ GR_STR_FILENAME,
69473+ GR_FILENAME_STR,
69474+ GR_FILENAME_TWO_INT,
69475+ GR_FILENAME_TWO_INT_STR,
69476+ GR_TEXTREL,
69477+ GR_PTRACE,
69478+ GR_RESOURCE,
69479+ GR_CAP,
69480+ GR_SIG,
69481+ GR_SIG2,
69482+ GR_CRASH1,
69483+ GR_CRASH2,
69484+ GR_PSACCT,
69485+ GR_RWXMAP
69486+};
69487+
69488+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
69489+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
69490+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
69491+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
69492+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
69493+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
69494+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
69495+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
69496+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
69497+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
69498+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
69499+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
69500+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
69501+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
69502+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
69503+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
69504+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
69505+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
69506+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
69507+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
69508+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
69509+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
69510+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
69511+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
69512+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
69513+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
69514+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
69515+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
69516+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
69517+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
69518+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
69519+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
69520+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
69521+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
69522+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
69523+
69524+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
69525+
69526+#endif
69527+
69528+#endif
69529diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
69530new file mode 100644
69531index 0000000..2f159b5
69532--- /dev/null
69533+++ b/include/linux/grmsg.h
69534@@ -0,0 +1,112 @@
69535+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
69536+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
69537+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
69538+#define GR_STOPMOD_MSG "denied modification of module state by "
69539+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
69540+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
69541+#define GR_IOPERM_MSG "denied use of ioperm() by "
69542+#define GR_IOPL_MSG "denied use of iopl() by "
69543+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
69544+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
69545+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
69546+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
69547+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
69548+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
69549+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
69550+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
69551+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
69552+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
69553+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
69554+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
69555+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
69556+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
69557+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
69558+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
69559+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
69560+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
69561+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
69562+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
69563+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
69564+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
69565+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
69566+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
69567+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
69568+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
69569+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
69570+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
69571+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
69572+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
69573+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
69574+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
69575+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
69576+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
69577+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
69578+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
69579+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
69580+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
69581+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
69582+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
69583+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
69584+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
69585+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
69586+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
69587+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
69588+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
69589+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
69590+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
69591+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
69592+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
69593+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
69594+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
69595+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
69596+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
69597+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
69598+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
69599+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
69600+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
69601+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
69602+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
69603+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
69604+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
69605+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
69606+#define GR_FAILFORK_MSG "failed fork with errno %s by "
69607+#define GR_NICE_CHROOT_MSG "denied priority change by "
69608+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
69609+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
69610+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
69611+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
69612+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
69613+#define GR_TIME_MSG "time set by "
69614+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
69615+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
69616+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
69617+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
69618+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
69619+#define GR_BIND_MSG "denied bind() by "
69620+#define GR_CONNECT_MSG "denied connect() by "
69621+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
69622+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
69623+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
69624+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
69625+#define GR_CAP_ACL_MSG "use of %s denied for "
69626+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
69627+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
69628+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
69629+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
69630+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
69631+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
69632+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
69633+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
69634+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
69635+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
69636+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
69637+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
69638+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
69639+#define GR_VM86_MSG "denied use of vm86 by "
69640+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
69641+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
69642+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
69643+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
69644+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
69645+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
69646+#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for "
69647diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
69648new file mode 100644
69649index 0000000..d957f6d
69650--- /dev/null
69651+++ b/include/linux/grsecurity.h
69652@@ -0,0 +1,241 @@
69653+#ifndef GR_SECURITY_H
69654+#define GR_SECURITY_H
69655+#include <linux/fs.h>
69656+#include <linux/fs_struct.h>
69657+#include <linux/binfmts.h>
69658+#include <linux/gracl.h>
69659+
69660+/* notify of brain-dead configs */
69661+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
69662+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
69663+#endif
69664+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
69665+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
69666+#endif
69667+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
69668+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
69669+#endif
69670+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
69671+#error "CONFIG_PAX enabled, but no PaX options are enabled."
69672+#endif
69673+
69674+void gr_handle_brute_attach(unsigned long mm_flags);
69675+void gr_handle_brute_check(void);
69676+void gr_handle_kernel_exploit(void);
69677+
69678+char gr_roletype_to_char(void);
69679+
69680+int gr_acl_enable_at_secure(void);
69681+
69682+int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
69683+int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
69684+
69685+void gr_del_task_from_ip_table(struct task_struct *p);
69686+
69687+int gr_pid_is_chrooted(struct task_struct *p);
69688+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
69689+int gr_handle_chroot_nice(void);
69690+int gr_handle_chroot_sysctl(const int op);
69691+int gr_handle_chroot_setpriority(struct task_struct *p,
69692+ const int niceval);
69693+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
69694+int gr_handle_chroot_chroot(const struct dentry *dentry,
69695+ const struct vfsmount *mnt);
69696+void gr_handle_chroot_chdir(const struct path *path);
69697+int gr_handle_chroot_chmod(const struct dentry *dentry,
69698+ const struct vfsmount *mnt, const int mode);
69699+int gr_handle_chroot_mknod(const struct dentry *dentry,
69700+ const struct vfsmount *mnt, const int mode);
69701+int gr_handle_chroot_mount(const struct dentry *dentry,
69702+ const struct vfsmount *mnt,
69703+ const char *dev_name);
69704+int gr_handle_chroot_pivot(void);
69705+int gr_handle_chroot_unix(const pid_t pid);
69706+
69707+int gr_handle_rawio(const struct inode *inode);
69708+
69709+void gr_handle_ioperm(void);
69710+void gr_handle_iopl(void);
69711+
69712+umode_t gr_acl_umask(void);
69713+
69714+int gr_tpe_allow(const struct file *file);
69715+
69716+void gr_set_chroot_entries(struct task_struct *task, const struct path *path);
69717+void gr_clear_chroot_entries(struct task_struct *task);
69718+
69719+void gr_log_forkfail(const int retval);
69720+void gr_log_timechange(void);
69721+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
69722+void gr_log_chdir(const struct dentry *dentry,
69723+ const struct vfsmount *mnt);
69724+void gr_log_chroot_exec(const struct dentry *dentry,
69725+ const struct vfsmount *mnt);
69726+void gr_log_remount(const char *devname, const int retval);
69727+void gr_log_unmount(const char *devname, const int retval);
69728+void gr_log_mount(const char *from, const char *to, const int retval);
69729+void gr_log_textrel(struct vm_area_struct *vma);
69730+void gr_log_rwxmmap(struct file *file);
69731+void gr_log_rwxmprotect(struct file *file);
69732+
69733+int gr_handle_follow_link(const struct inode *parent,
69734+ const struct inode *inode,
69735+ const struct dentry *dentry,
69736+ const struct vfsmount *mnt);
69737+int gr_handle_fifo(const struct dentry *dentry,
69738+ const struct vfsmount *mnt,
69739+ const struct dentry *dir, const int flag,
69740+ const int acc_mode);
69741+int gr_handle_hardlink(const struct dentry *dentry,
69742+ const struct vfsmount *mnt,
69743+ struct inode *inode,
69744+ const int mode, const struct filename *to);
69745+
69746+int gr_is_capable(const int cap);
69747+int gr_is_capable_nolog(const int cap);
69748+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
69749+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
69750+
69751+void gr_copy_label(struct task_struct *tsk);
69752+void gr_handle_crash(struct task_struct *task, const int sig);
69753+int gr_handle_signal(const struct task_struct *p, const int sig);
69754+int gr_check_crash_uid(const kuid_t uid);
69755+int gr_check_protected_task(const struct task_struct *task);
69756+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
69757+int gr_acl_handle_mmap(const struct file *file,
69758+ const unsigned long prot);
69759+int gr_acl_handle_mprotect(const struct file *file,
69760+ const unsigned long prot);
69761+int gr_check_hidden_task(const struct task_struct *tsk);
69762+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
69763+ const struct vfsmount *mnt);
69764+__u32 gr_acl_handle_utime(const struct dentry *dentry,
69765+ const struct vfsmount *mnt);
69766+__u32 gr_acl_handle_access(const struct dentry *dentry,
69767+ const struct vfsmount *mnt, const int fmode);
69768+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
69769+ const struct vfsmount *mnt, umode_t *mode);
69770+__u32 gr_acl_handle_chown(const struct dentry *dentry,
69771+ const struct vfsmount *mnt);
69772+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
69773+ const struct vfsmount *mnt);
69774+int gr_handle_ptrace(struct task_struct *task, const long request);
69775+int gr_handle_proc_ptrace(struct task_struct *task);
69776+__u32 gr_acl_handle_execve(const struct dentry *dentry,
69777+ const struct vfsmount *mnt);
69778+int gr_check_crash_exec(const struct file *filp);
69779+int gr_acl_is_enabled(void);
69780+void gr_set_kernel_label(struct task_struct *task);
69781+void gr_set_role_label(struct task_struct *task, const kuid_t uid,
69782+ const kgid_t gid);
69783+int gr_set_proc_label(const struct dentry *dentry,
69784+ const struct vfsmount *mnt,
69785+ const int unsafe_flags);
69786+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
69787+ const struct vfsmount *mnt);
69788+__u32 gr_acl_handle_open(const struct dentry *dentry,
69789+ const struct vfsmount *mnt, int acc_mode);
69790+__u32 gr_acl_handle_creat(const struct dentry *dentry,
69791+ const struct dentry *p_dentry,
69792+ const struct vfsmount *p_mnt,
69793+ int open_flags, int acc_mode, const int imode);
69794+void gr_handle_create(const struct dentry *dentry,
69795+ const struct vfsmount *mnt);
69796+void gr_handle_proc_create(const struct dentry *dentry,
69797+ const struct inode *inode);
69798+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
69799+ const struct dentry *parent_dentry,
69800+ const struct vfsmount *parent_mnt,
69801+ const int mode);
69802+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
69803+ const struct dentry *parent_dentry,
69804+ const struct vfsmount *parent_mnt);
69805+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
69806+ const struct vfsmount *mnt);
69807+void gr_handle_delete(const ino_t ino, const dev_t dev);
69808+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
69809+ const struct vfsmount *mnt);
69810+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
69811+ const struct dentry *parent_dentry,
69812+ const struct vfsmount *parent_mnt,
69813+ const struct filename *from);
69814+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
69815+ const struct dentry *parent_dentry,
69816+ const struct vfsmount *parent_mnt,
69817+ const struct dentry *old_dentry,
69818+ const struct vfsmount *old_mnt, const struct filename *to);
69819+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
69820+int gr_acl_handle_rename(struct dentry *new_dentry,
69821+ struct dentry *parent_dentry,
69822+ const struct vfsmount *parent_mnt,
69823+ struct dentry *old_dentry,
69824+ struct inode *old_parent_inode,
69825+ struct vfsmount *old_mnt, const struct filename *newname);
69826+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
69827+ struct dentry *old_dentry,
69828+ struct dentry *new_dentry,
69829+ struct vfsmount *mnt, const __u8 replace);
69830+__u32 gr_check_link(const struct dentry *new_dentry,
69831+ const struct dentry *parent_dentry,
69832+ const struct vfsmount *parent_mnt,
69833+ const struct dentry *old_dentry,
69834+ const struct vfsmount *old_mnt);
69835+int gr_acl_handle_filldir(const struct file *file, const char *name,
69836+ const unsigned int namelen, const ino_t ino);
69837+
69838+__u32 gr_acl_handle_unix(const struct dentry *dentry,
69839+ const struct vfsmount *mnt);
69840+void gr_acl_handle_exit(void);
69841+void gr_acl_handle_psacct(struct task_struct *task, const long code);
69842+int gr_acl_handle_procpidmem(const struct task_struct *task);
69843+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
69844+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
69845+void gr_audit_ptrace(struct task_struct *task);
69846+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
69847+void gr_put_exec_file(struct task_struct *task);
69848+
69849+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
69850+
69851+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
69852+extern void gr_learn_resource(const struct task_struct *task, const int res,
69853+ const unsigned long wanted, const int gt);
69854+#else
69855+static inline void gr_learn_resource(const struct task_struct *task, const int res,
69856+ const unsigned long wanted, const int gt)
69857+{
69858+}
69859+#endif
69860+
69861+#ifdef CONFIG_GRKERNSEC_RESLOG
69862+extern void gr_log_resource(const struct task_struct *task, const int res,
69863+ const unsigned long wanted, const int gt);
69864+#else
69865+static inline void gr_log_resource(const struct task_struct *task, const int res,
69866+ const unsigned long wanted, const int gt)
69867+{
69868+}
69869+#endif
69870+
69871+#ifdef CONFIG_GRKERNSEC
69872+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
69873+void gr_handle_vm86(void);
69874+void gr_handle_mem_readwrite(u64 from, u64 to);
69875+
69876+void gr_log_badprocpid(const char *entry);
69877+
69878+extern int grsec_enable_dmesg;
69879+extern int grsec_disable_privio;
69880+
69881+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
69882+extern kgid_t grsec_proc_gid;
69883+#endif
69884+
69885+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
69886+extern int grsec_enable_chroot_findtask;
69887+#endif
69888+#ifdef CONFIG_GRKERNSEC_SETXID
69889+extern int grsec_enable_setxid;
69890+#endif
69891+#endif
69892+
69893+#endif
69894diff --git a/include/linux/grsock.h b/include/linux/grsock.h
69895new file mode 100644
69896index 0000000..e7ffaaf
69897--- /dev/null
69898+++ b/include/linux/grsock.h
69899@@ -0,0 +1,19 @@
69900+#ifndef __GRSOCK_H
69901+#define __GRSOCK_H
69902+
69903+extern void gr_attach_curr_ip(const struct sock *sk);
69904+extern int gr_handle_sock_all(const int family, const int type,
69905+ const int protocol);
69906+extern int gr_handle_sock_server(const struct sockaddr *sck);
69907+extern int gr_handle_sock_server_other(const struct sock *sck);
69908+extern int gr_handle_sock_client(const struct sockaddr *sck);
69909+extern int gr_search_connect(struct socket * sock,
69910+ struct sockaddr_in * addr);
69911+extern int gr_search_bind(struct socket * sock,
69912+ struct sockaddr_in * addr);
69913+extern int gr_search_listen(struct socket * sock);
69914+extern int gr_search_accept(struct socket * sock);
69915+extern int gr_search_socket(const int domain, const int type,
69916+ const int protocol);
69917+
69918+#endif
69919diff --git a/include/linux/highmem.h b/include/linux/highmem.h
69920index 7fb31da..08b5114 100644
69921--- a/include/linux/highmem.h
69922+++ b/include/linux/highmem.h
69923@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
69924 kunmap_atomic(kaddr);
69925 }
69926
69927+static inline void sanitize_highpage(struct page *page)
69928+{
69929+ void *kaddr;
69930+ unsigned long flags;
69931+
69932+ local_irq_save(flags);
69933+ kaddr = kmap_atomic(page);
69934+ clear_page(kaddr);
69935+ kunmap_atomic(kaddr);
69936+ local_irq_restore(flags);
69937+}
69938+
69939 static inline void zero_user_segments(struct page *page,
69940 unsigned start1, unsigned end1,
69941 unsigned start2, unsigned end2)
69942diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
69943index 1c7b89a..7f52502 100644
69944--- a/include/linux/hwmon-sysfs.h
69945+++ b/include/linux/hwmon-sysfs.h
69946@@ -25,7 +25,8 @@
69947 struct sensor_device_attribute{
69948 struct device_attribute dev_attr;
69949 int index;
69950-};
69951+} __do_const;
69952+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
69953 #define to_sensor_dev_attr(_dev_attr) \
69954 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
69955
69956@@ -41,7 +42,7 @@ struct sensor_device_attribute_2 {
69957 struct device_attribute dev_attr;
69958 u8 index;
69959 u8 nr;
69960-};
69961+} __do_const;
69962 #define to_sensor_dev_attr_2(_dev_attr) \
69963 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
69964
69965diff --git a/include/linux/i2c.h b/include/linux/i2c.h
69966index e988fa9..ff9f17e 100644
69967--- a/include/linux/i2c.h
69968+++ b/include/linux/i2c.h
69969@@ -366,6 +366,7 @@ struct i2c_algorithm {
69970 /* To determine what the adapter supports */
69971 u32 (*functionality) (struct i2c_adapter *);
69972 };
69973+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
69974
69975 /**
69976 * struct i2c_bus_recovery_info - I2C bus recovery information
69977diff --git a/include/linux/i2o.h b/include/linux/i2o.h
69978index d23c3c2..eb63c81 100644
69979--- a/include/linux/i2o.h
69980+++ b/include/linux/i2o.h
69981@@ -565,7 +565,7 @@ struct i2o_controller {
69982 struct i2o_device *exec; /* Executive */
69983 #if BITS_PER_LONG == 64
69984 spinlock_t context_list_lock; /* lock for context_list */
69985- atomic_t context_list_counter; /* needed for unique contexts */
69986+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
69987 struct list_head context_list; /* list of context id's
69988 and pointers */
69989 #endif
69990diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
69991index aff7ad8..3942bbd 100644
69992--- a/include/linux/if_pppox.h
69993+++ b/include/linux/if_pppox.h
69994@@ -76,7 +76,7 @@ struct pppox_proto {
69995 int (*ioctl)(struct socket *sock, unsigned int cmd,
69996 unsigned long arg);
69997 struct module *owner;
69998-};
69999+} __do_const;
70000
70001 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
70002 extern void unregister_pppox_proto(int proto_num);
70003diff --git a/include/linux/init.h b/include/linux/init.h
70004index 8618147..0821126 100644
70005--- a/include/linux/init.h
70006+++ b/include/linux/init.h
70007@@ -39,9 +39,36 @@
70008 * Also note, that this data cannot be "const".
70009 */
70010
70011+#ifdef MODULE
70012+#define add_init_latent_entropy
70013+#define add_devinit_latent_entropy
70014+#define add_cpuinit_latent_entropy
70015+#define add_meminit_latent_entropy
70016+#else
70017+#define add_init_latent_entropy __latent_entropy
70018+
70019+#ifdef CONFIG_HOTPLUG
70020+#define add_devinit_latent_entropy
70021+#else
70022+#define add_devinit_latent_entropy __latent_entropy
70023+#endif
70024+
70025+#ifdef CONFIG_HOTPLUG_CPU
70026+#define add_cpuinit_latent_entropy
70027+#else
70028+#define add_cpuinit_latent_entropy __latent_entropy
70029+#endif
70030+
70031+#ifdef CONFIG_MEMORY_HOTPLUG
70032+#define add_meminit_latent_entropy
70033+#else
70034+#define add_meminit_latent_entropy __latent_entropy
70035+#endif
70036+#endif
70037+
70038 /* These are for everybody (although not all archs will actually
70039 discard it in modules) */
70040-#define __init __section(.init.text) __cold notrace
70041+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
70042 #define __initdata __section(.init.data)
70043 #define __initconst __constsection(.init.rodata)
70044 #define __exitdata __section(.exit.data)
70045@@ -94,7 +121,7 @@
70046 #define __exit __section(.exit.text) __exitused __cold notrace
70047
70048 /* Used for HOTPLUG_CPU */
70049-#define __cpuinit __section(.cpuinit.text) __cold notrace
70050+#define __cpuinit __section(.cpuinit.text) __cold notrace add_cpuinit_latent_entropy
70051 #define __cpuinitdata __section(.cpuinit.data)
70052 #define __cpuinitconst __constsection(.cpuinit.rodata)
70053 #define __cpuexit __section(.cpuexit.text) __exitused __cold notrace
70054@@ -102,7 +129,7 @@
70055 #define __cpuexitconst __constsection(.cpuexit.rodata)
70056
70057 /* Used for MEMORY_HOTPLUG */
70058-#define __meminit __section(.meminit.text) __cold notrace
70059+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
70060 #define __meminitdata __section(.meminit.data)
70061 #define __meminitconst __constsection(.meminit.rodata)
70062 #define __memexit __section(.memexit.text) __exitused __cold notrace
70063diff --git a/include/linux/init_task.h b/include/linux/init_task.h
70064index 5cd0f09..c9f67cc 100644
70065--- a/include/linux/init_task.h
70066+++ b/include/linux/init_task.h
70067@@ -154,6 +154,12 @@ extern struct task_group root_task_group;
70068
70069 #define INIT_TASK_COMM "swapper"
70070
70071+#ifdef CONFIG_X86
70072+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
70073+#else
70074+#define INIT_TASK_THREAD_INFO
70075+#endif
70076+
70077 /*
70078 * INIT_TASK is used to set up the first task table, touch at
70079 * your own risk!. Base=0, limit=0x1fffff (=2MB)
70080@@ -193,6 +199,7 @@ extern struct task_group root_task_group;
70081 RCU_POINTER_INITIALIZER(cred, &init_cred), \
70082 .comm = INIT_TASK_COMM, \
70083 .thread = INIT_THREAD, \
70084+ INIT_TASK_THREAD_INFO \
70085 .fs = &init_fs, \
70086 .files = &init_files, \
70087 .signal = &init_signals, \
70088diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
70089index 5fa5afe..ac55b25 100644
70090--- a/include/linux/interrupt.h
70091+++ b/include/linux/interrupt.h
70092@@ -430,7 +430,7 @@ enum
70093 /* map softirq index to softirq name. update 'softirq_to_name' in
70094 * kernel/softirq.c when adding a new softirq.
70095 */
70096-extern char *softirq_to_name[NR_SOFTIRQS];
70097+extern const char * const softirq_to_name[NR_SOFTIRQS];
70098
70099 /* softirq mask and active fields moved to irq_cpustat_t in
70100 * asm/hardirq.h to get better cache usage. KAO
70101@@ -438,12 +438,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
70102
70103 struct softirq_action
70104 {
70105- void (*action)(struct softirq_action *);
70106-};
70107+ void (*action)(void);
70108+} __no_const;
70109
70110 asmlinkage void do_softirq(void);
70111 asmlinkage void __do_softirq(void);
70112-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
70113+extern void open_softirq(int nr, void (*action)(void));
70114 extern void softirq_init(void);
70115 extern void __raise_softirq_irqoff(unsigned int nr);
70116
70117diff --git a/include/linux/iommu.h b/include/linux/iommu.h
70118index 3aeb730..2177f39 100644
70119--- a/include/linux/iommu.h
70120+++ b/include/linux/iommu.h
70121@@ -113,7 +113,7 @@ struct iommu_ops {
70122 u32 (*domain_get_windows)(struct iommu_domain *domain);
70123
70124 unsigned long pgsize_bitmap;
70125-};
70126+} __do_const;
70127
70128 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
70129 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
70130diff --git a/include/linux/ioport.h b/include/linux/ioport.h
70131index 89b7c24..382af74 100644
70132--- a/include/linux/ioport.h
70133+++ b/include/linux/ioport.h
70134@@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
70135 int adjust_resource(struct resource *res, resource_size_t start,
70136 resource_size_t size);
70137 resource_size_t resource_alignment(struct resource *res);
70138-static inline resource_size_t resource_size(const struct resource *res)
70139+static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
70140 {
70141 return res->end - res->start + 1;
70142 }
70143diff --git a/include/linux/irq.h b/include/linux/irq.h
70144index bc4e066..50468a9 100644
70145--- a/include/linux/irq.h
70146+++ b/include/linux/irq.h
70147@@ -328,7 +328,8 @@ struct irq_chip {
70148 void (*irq_print_chip)(struct irq_data *data, struct seq_file *p);
70149
70150 unsigned long flags;
70151-};
70152+} __do_const;
70153+typedef struct irq_chip __no_const irq_chip_no_const;
70154
70155 /*
70156 * irq_chip specific flags
70157diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
70158index 3e203eb..3fe68d0 100644
70159--- a/include/linux/irqchip/arm-gic.h
70160+++ b/include/linux/irqchip/arm-gic.h
70161@@ -59,9 +59,11 @@
70162
70163 #ifndef __ASSEMBLY__
70164
70165+#include <linux/irq.h>
70166+
70167 struct device_node;
70168
70169-extern struct irq_chip gic_arch_extn;
70170+extern irq_chip_no_const gic_arch_extn;
70171
70172 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
70173 u32 offset, struct device_node *);
70174diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
70175index 6883e19..06992b1 100644
70176--- a/include/linux/kallsyms.h
70177+++ b/include/linux/kallsyms.h
70178@@ -15,7 +15,8 @@
70179
70180 struct module;
70181
70182-#ifdef CONFIG_KALLSYMS
70183+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
70184+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
70185 /* Lookup the address for a symbol. Returns 0 if not found. */
70186 unsigned long kallsyms_lookup_name(const char *name);
70187
70188@@ -106,6 +107,17 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
70189 /* Stupid that this does nothing, but I didn't create this mess. */
70190 #define __print_symbol(fmt, addr)
70191 #endif /*CONFIG_KALLSYMS*/
70192+#else /* when included by kallsyms.c, vsnprintf.c, or
70193+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
70194+extern void __print_symbol(const char *fmt, unsigned long address);
70195+extern int sprint_backtrace(char *buffer, unsigned long address);
70196+extern int sprint_symbol(char *buffer, unsigned long address);
70197+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
70198+const char *kallsyms_lookup(unsigned long addr,
70199+ unsigned long *symbolsize,
70200+ unsigned long *offset,
70201+ char **modname, char *namebuf);
70202+#endif
70203
70204 /* This macro allows us to keep printk typechecking */
70205 static __printf(1, 2)
70206diff --git a/include/linux/key-type.h b/include/linux/key-type.h
70207index 518a53a..5e28358 100644
70208--- a/include/linux/key-type.h
70209+++ b/include/linux/key-type.h
70210@@ -125,7 +125,7 @@ struct key_type {
70211 /* internal fields */
70212 struct list_head link; /* link in types list */
70213 struct lock_class_key lock_class; /* key->sem lock class */
70214-};
70215+} __do_const;
70216
70217 extern struct key_type key_type_keyring;
70218
70219diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
70220index c6e091b..a940adf 100644
70221--- a/include/linux/kgdb.h
70222+++ b/include/linux/kgdb.h
70223@@ -52,7 +52,7 @@ extern int kgdb_connected;
70224 extern int kgdb_io_module_registered;
70225
70226 extern atomic_t kgdb_setting_breakpoint;
70227-extern atomic_t kgdb_cpu_doing_single_step;
70228+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
70229
70230 extern struct task_struct *kgdb_usethread;
70231 extern struct task_struct *kgdb_contthread;
70232@@ -254,7 +254,7 @@ struct kgdb_arch {
70233 void (*correct_hw_break)(void);
70234
70235 void (*enable_nmi)(bool on);
70236-};
70237+} __do_const;
70238
70239 /**
70240 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
70241@@ -279,7 +279,7 @@ struct kgdb_io {
70242 void (*pre_exception) (void);
70243 void (*post_exception) (void);
70244 int is_console;
70245-};
70246+} __do_const;
70247
70248 extern struct kgdb_arch arch_kgdb_ops;
70249
70250diff --git a/include/linux/kmod.h b/include/linux/kmod.h
70251index 0555cc6..b16a7a4 100644
70252--- a/include/linux/kmod.h
70253+++ b/include/linux/kmod.h
70254@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
70255 * usually useless though. */
70256 extern __printf(2, 3)
70257 int __request_module(bool wait, const char *name, ...);
70258+extern __printf(3, 4)
70259+int ___request_module(bool wait, char *param_name, const char *name, ...);
70260 #define request_module(mod...) __request_module(true, mod)
70261 #define request_module_nowait(mod...) __request_module(false, mod)
70262 #define try_then_request_module(x, mod...) \
70263diff --git a/include/linux/kobject.h b/include/linux/kobject.h
70264index 939b112..ed6ed51 100644
70265--- a/include/linux/kobject.h
70266+++ b/include/linux/kobject.h
70267@@ -111,7 +111,7 @@ struct kobj_type {
70268 struct attribute **default_attrs;
70269 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
70270 const void *(*namespace)(struct kobject *kobj);
70271-};
70272+} __do_const;
70273
70274 struct kobj_uevent_env {
70275 char *envp[UEVENT_NUM_ENVP];
70276@@ -134,6 +134,7 @@ struct kobj_attribute {
70277 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
70278 const char *buf, size_t count);
70279 };
70280+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
70281
70282 extern const struct sysfs_ops kobj_sysfs_ops;
70283
70284diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
70285index f66b065..c2c29b4 100644
70286--- a/include/linux/kobject_ns.h
70287+++ b/include/linux/kobject_ns.h
70288@@ -43,7 +43,7 @@ struct kobj_ns_type_operations {
70289 const void *(*netlink_ns)(struct sock *sk);
70290 const void *(*initial_ns)(void);
70291 void (*drop_ns)(void *);
70292-};
70293+} __do_const;
70294
70295 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
70296 int kobj_ns_type_registered(enum kobj_ns_type type);
70297diff --git a/include/linux/kref.h b/include/linux/kref.h
70298index 484604d..0f6c5b6 100644
70299--- a/include/linux/kref.h
70300+++ b/include/linux/kref.h
70301@@ -68,7 +68,7 @@ static inline void kref_get(struct kref *kref)
70302 static inline int kref_sub(struct kref *kref, unsigned int count,
70303 void (*release)(struct kref *kref))
70304 {
70305- WARN_ON(release == NULL);
70306+ BUG_ON(release == NULL);
70307
70308 if (atomic_sub_and_test((int) count, &kref->refcount)) {
70309 release(kref);
70310diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
70311index 8db53cf..c21121d 100644
70312--- a/include/linux/kvm_host.h
70313+++ b/include/linux/kvm_host.h
70314@@ -444,7 +444,7 @@ static inline void kvm_irqfd_exit(void)
70315 {
70316 }
70317 #endif
70318-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
70319+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
70320 struct module *module);
70321 void kvm_exit(void);
70322
70323@@ -616,7 +616,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
70324 struct kvm_guest_debug *dbg);
70325 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
70326
70327-int kvm_arch_init(void *opaque);
70328+int kvm_arch_init(const void *opaque);
70329 void kvm_arch_exit(void);
70330
70331 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
70332diff --git a/include/linux/libata.h b/include/linux/libata.h
70333index eae7a05..2cdd875 100644
70334--- a/include/linux/libata.h
70335+++ b/include/linux/libata.h
70336@@ -919,7 +919,7 @@ struct ata_port_operations {
70337 * fields must be pointers.
70338 */
70339 const struct ata_port_operations *inherits;
70340-};
70341+} __do_const;
70342
70343 struct ata_port_info {
70344 unsigned long flags;
70345diff --git a/include/linux/list.h b/include/linux/list.h
70346index b83e565..baa6c1d 100644
70347--- a/include/linux/list.h
70348+++ b/include/linux/list.h
70349@@ -112,6 +112,19 @@ extern void __list_del_entry(struct list_head *entry);
70350 extern void list_del(struct list_head *entry);
70351 #endif
70352
70353+extern void __pax_list_add(struct list_head *new,
70354+ struct list_head *prev,
70355+ struct list_head *next);
70356+static inline void pax_list_add(struct list_head *new, struct list_head *head)
70357+{
70358+ __pax_list_add(new, head, head->next);
70359+}
70360+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
70361+{
70362+ __pax_list_add(new, head->prev, head);
70363+}
70364+extern void pax_list_del(struct list_head *entry);
70365+
70366 /**
70367 * list_replace - replace old entry by new one
70368 * @old : the element to be replaced
70369@@ -145,6 +158,8 @@ static inline void list_del_init(struct list_head *entry)
70370 INIT_LIST_HEAD(entry);
70371 }
70372
70373+extern void pax_list_del_init(struct list_head *entry);
70374+
70375 /**
70376 * list_move - delete from one list and add as another's head
70377 * @list: the entry to move
70378diff --git a/include/linux/math64.h b/include/linux/math64.h
70379index 2913b86..4209244 100644
70380--- a/include/linux/math64.h
70381+++ b/include/linux/math64.h
70382@@ -15,7 +15,7 @@
70383 * This is commonly provided by 32bit archs to provide an optimized 64bit
70384 * divide.
70385 */
70386-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
70387+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
70388 {
70389 *remainder = dividend % divisor;
70390 return dividend / divisor;
70391@@ -52,7 +52,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
70392 #define div64_ul(x, y) div_u64((x), (y))
70393
70394 #ifndef div_u64_rem
70395-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
70396+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
70397 {
70398 *remainder = do_div(dividend, divisor);
70399 return dividend;
70400@@ -81,7 +81,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
70401 * divide.
70402 */
70403 #ifndef div_u64
70404-static inline u64 div_u64(u64 dividend, u32 divisor)
70405+static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
70406 {
70407 u32 remainder;
70408 return div_u64_rem(dividend, divisor, &remainder);
70409diff --git a/include/linux/mm.h b/include/linux/mm.h
70410index e0c8528..bcf0c29 100644
70411--- a/include/linux/mm.h
70412+++ b/include/linux/mm.h
70413@@ -104,6 +104,11 @@ extern unsigned int kobjsize(const void *objp);
70414 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
70415 #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
70416 #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
70417+
70418+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
70419+#define VM_PAGEEXEC 0x02000000 /* vma->vm_page_prot needs special handling */
70420+#endif
70421+
70422 #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
70423
70424 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
70425@@ -205,8 +210,8 @@ struct vm_operations_struct {
70426 /* called by access_process_vm when get_user_pages() fails, typically
70427 * for use by special VMAs that can switch between memory and hardware
70428 */
70429- int (*access)(struct vm_area_struct *vma, unsigned long addr,
70430- void *buf, int len, int write);
70431+ ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
70432+ void *buf, size_t len, int write);
70433 #ifdef CONFIG_NUMA
70434 /*
70435 * set_policy() op must add a reference to any non-NULL @new mempolicy
70436@@ -236,6 +241,7 @@ struct vm_operations_struct {
70437 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
70438 unsigned long size, pgoff_t pgoff);
70439 };
70440+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
70441
70442 struct mmu_gather;
70443 struct inode;
70444@@ -980,8 +986,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
70445 unsigned long *pfn);
70446 int follow_phys(struct vm_area_struct *vma, unsigned long address,
70447 unsigned int flags, unsigned long *prot, resource_size_t *phys);
70448-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
70449- void *buf, int len, int write);
70450+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
70451+ void *buf, size_t len, int write);
70452
70453 static inline void unmap_shared_mapping_range(struct address_space *mapping,
70454 loff_t const holebegin, loff_t const holelen)
70455@@ -1020,9 +1026,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
70456 }
70457 #endif
70458
70459-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
70460-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
70461- void *buf, int len, int write);
70462+extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
70463+extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
70464+ void *buf, size_t len, int write);
70465
70466 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
70467 unsigned long start, unsigned long nr_pages,
70468@@ -1053,34 +1059,6 @@ int set_page_dirty(struct page *page);
70469 int set_page_dirty_lock(struct page *page);
70470 int clear_page_dirty_for_io(struct page *page);
70471
70472-/* Is the vma a continuation of the stack vma above it? */
70473-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
70474-{
70475- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
70476-}
70477-
70478-static inline int stack_guard_page_start(struct vm_area_struct *vma,
70479- unsigned long addr)
70480-{
70481- return (vma->vm_flags & VM_GROWSDOWN) &&
70482- (vma->vm_start == addr) &&
70483- !vma_growsdown(vma->vm_prev, addr);
70484-}
70485-
70486-/* Is the vma a continuation of the stack vma below it? */
70487-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
70488-{
70489- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
70490-}
70491-
70492-static inline int stack_guard_page_end(struct vm_area_struct *vma,
70493- unsigned long addr)
70494-{
70495- return (vma->vm_flags & VM_GROWSUP) &&
70496- (vma->vm_end == addr) &&
70497- !vma_growsup(vma->vm_next, addr);
70498-}
70499-
70500 extern pid_t
70501 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
70502
70503@@ -1180,6 +1158,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
70504 }
70505 #endif
70506
70507+#ifdef CONFIG_MMU
70508+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
70509+#else
70510+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
70511+{
70512+ return __pgprot(0);
70513+}
70514+#endif
70515+
70516 int vma_wants_writenotify(struct vm_area_struct *vma);
70517
70518 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
70519@@ -1198,8 +1185,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
70520 {
70521 return 0;
70522 }
70523+
70524+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
70525+ unsigned long address)
70526+{
70527+ return 0;
70528+}
70529 #else
70530 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
70531+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
70532 #endif
70533
70534 #ifdef __PAGETABLE_PMD_FOLDED
70535@@ -1208,8 +1202,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
70536 {
70537 return 0;
70538 }
70539+
70540+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
70541+ unsigned long address)
70542+{
70543+ return 0;
70544+}
70545 #else
70546 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
70547+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
70548 #endif
70549
70550 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
70551@@ -1227,11 +1228,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
70552 NULL: pud_offset(pgd, address);
70553 }
70554
70555+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
70556+{
70557+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
70558+ NULL: pud_offset(pgd, address);
70559+}
70560+
70561 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
70562 {
70563 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
70564 NULL: pmd_offset(pud, address);
70565 }
70566+
70567+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
70568+{
70569+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
70570+ NULL: pmd_offset(pud, address);
70571+}
70572 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
70573
70574 #if USE_SPLIT_PTLOCKS
70575@@ -1517,6 +1530,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70576 unsigned long len, unsigned long prot, unsigned long flags,
70577 unsigned long pgoff, unsigned long *populate);
70578 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
70579+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
70580
70581 #ifdef CONFIG_MMU
70582 extern int __mm_populate(unsigned long addr, unsigned long len,
70583@@ -1545,10 +1559,11 @@ struct vm_unmapped_area_info {
70584 unsigned long high_limit;
70585 unsigned long align_mask;
70586 unsigned long align_offset;
70587+ unsigned long threadstack_offset;
70588 };
70589
70590-extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
70591-extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
70592+extern unsigned long unmapped_area(const struct vm_unmapped_area_info *info);
70593+extern unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info);
70594
70595 /*
70596 * Search for an unmapped address range.
70597@@ -1560,7 +1575,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
70598 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
70599 */
70600 static inline unsigned long
70601-vm_unmapped_area(struct vm_unmapped_area_info *info)
70602+vm_unmapped_area(const struct vm_unmapped_area_info *info)
70603 {
70604 if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
70605 return unmapped_area(info);
70606@@ -1623,6 +1638,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
70607 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
70608 struct vm_area_struct **pprev);
70609
70610+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
70611+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
70612+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
70613+
70614 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
70615 NULL if none. Assume start_addr < end_addr. */
70616 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
70617@@ -1651,15 +1670,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
70618 return vma;
70619 }
70620
70621-#ifdef CONFIG_MMU
70622-pgprot_t vm_get_page_prot(unsigned long vm_flags);
70623-#else
70624-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
70625-{
70626- return __pgprot(0);
70627-}
70628-#endif
70629-
70630 #ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
70631 unsigned long change_prot_numa(struct vm_area_struct *vma,
70632 unsigned long start, unsigned long end);
70633@@ -1711,6 +1721,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
70634 static inline void vm_stat_account(struct mm_struct *mm,
70635 unsigned long flags, struct file *file, long pages)
70636 {
70637+
70638+#ifdef CONFIG_PAX_RANDMMAP
70639+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
70640+#endif
70641+
70642 mm->total_vm += pages;
70643 }
70644 #endif /* CONFIG_PROC_FS */
70645@@ -1791,7 +1806,7 @@ extern int unpoison_memory(unsigned long pfn);
70646 extern int sysctl_memory_failure_early_kill;
70647 extern int sysctl_memory_failure_recovery;
70648 extern void shake_page(struct page *p, int access);
70649-extern atomic_long_t num_poisoned_pages;
70650+extern atomic_long_unchecked_t num_poisoned_pages;
70651 extern int soft_offline_page(struct page *page, int flags);
70652
70653 extern void dump_page(struct page *page);
70654@@ -1828,5 +1843,11 @@ void __init setup_nr_node_ids(void);
70655 static inline void setup_nr_node_ids(void) {}
70656 #endif
70657
70658+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
70659+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
70660+#else
70661+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
70662+#endif
70663+
70664 #endif /* __KERNEL__ */
70665 #endif /* _LINUX_MM_H */
70666diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
70667index ace9a5f..81bdb59 100644
70668--- a/include/linux/mm_types.h
70669+++ b/include/linux/mm_types.h
70670@@ -289,6 +289,8 @@ struct vm_area_struct {
70671 #ifdef CONFIG_NUMA
70672 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
70673 #endif
70674+
70675+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
70676 };
70677
70678 struct core_thread {
70679@@ -437,6 +439,24 @@ struct mm_struct {
70680 int first_nid;
70681 #endif
70682 struct uprobes_state uprobes_state;
70683+
70684+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
70685+ unsigned long pax_flags;
70686+#endif
70687+
70688+#ifdef CONFIG_PAX_DLRESOLVE
70689+ unsigned long call_dl_resolve;
70690+#endif
70691+
70692+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
70693+ unsigned long call_syscall;
70694+#endif
70695+
70696+#ifdef CONFIG_PAX_ASLR
70697+ unsigned long delta_mmap; /* randomized offset */
70698+ unsigned long delta_stack; /* randomized offset */
70699+#endif
70700+
70701 };
70702
70703 /* first nid will either be a valid NID or one of these values */
70704diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
70705index c5d5278..f0b68c8 100644
70706--- a/include/linux/mmiotrace.h
70707+++ b/include/linux/mmiotrace.h
70708@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
70709 /* Called from ioremap.c */
70710 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
70711 void __iomem *addr);
70712-extern void mmiotrace_iounmap(volatile void __iomem *addr);
70713+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
70714
70715 /* For anyone to insert markers. Remember trailing newline. */
70716 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
70717@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
70718 {
70719 }
70720
70721-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
70722+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
70723 {
70724 }
70725
70726diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
70727index 5c76737..61f518e 100644
70728--- a/include/linux/mmzone.h
70729+++ b/include/linux/mmzone.h
70730@@ -396,7 +396,7 @@ struct zone {
70731 unsigned long flags; /* zone flags, see below */
70732
70733 /* Zone statistics */
70734- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70735+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70736
70737 /*
70738 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
70739diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
70740index b508016..237cfe5 100644
70741--- a/include/linux/mod_devicetable.h
70742+++ b/include/linux/mod_devicetable.h
70743@@ -13,7 +13,7 @@
70744 typedef unsigned long kernel_ulong_t;
70745 #endif
70746
70747-#define PCI_ANY_ID (~0)
70748+#define PCI_ANY_ID ((__u16)~0)
70749
70750 struct pci_device_id {
70751 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
70752@@ -139,7 +139,7 @@ struct usb_device_id {
70753 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
70754 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
70755
70756-#define HID_ANY_ID (~0)
70757+#define HID_ANY_ID (~0U)
70758 #define HID_BUS_ANY 0xffff
70759 #define HID_GROUP_ANY 0x0000
70760
70761@@ -465,7 +465,7 @@ struct dmi_system_id {
70762 const char *ident;
70763 struct dmi_strmatch matches[4];
70764 void *driver_data;
70765-};
70766+} __do_const;
70767 /*
70768 * struct dmi_device_id appears during expansion of
70769 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
70770diff --git a/include/linux/module.h b/include/linux/module.h
70771index 46f1ea0..a34ca37 100644
70772--- a/include/linux/module.h
70773+++ b/include/linux/module.h
70774@@ -17,9 +17,11 @@
70775 #include <linux/moduleparam.h>
70776 #include <linux/tracepoint.h>
70777 #include <linux/export.h>
70778+#include <linux/fs.h>
70779
70780 #include <linux/percpu.h>
70781 #include <asm/module.h>
70782+#include <asm/pgtable.h>
70783
70784 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
70785 #define MODULE_SIG_STRING "~Module signature appended~\n"
70786@@ -54,12 +56,13 @@ struct module_attribute {
70787 int (*test)(struct module *);
70788 void (*free)(struct module *);
70789 };
70790+typedef struct module_attribute __no_const module_attribute_no_const;
70791
70792 struct module_version_attribute {
70793 struct module_attribute mattr;
70794 const char *module_name;
70795 const char *version;
70796-} __attribute__ ((__aligned__(sizeof(void *))));
70797+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
70798
70799 extern ssize_t __modver_version_show(struct module_attribute *,
70800 struct module_kobject *, char *);
70801@@ -232,7 +235,7 @@ struct module
70802
70803 /* Sysfs stuff. */
70804 struct module_kobject mkobj;
70805- struct module_attribute *modinfo_attrs;
70806+ module_attribute_no_const *modinfo_attrs;
70807 const char *version;
70808 const char *srcversion;
70809 struct kobject *holders_dir;
70810@@ -281,19 +284,16 @@ struct module
70811 int (*init)(void);
70812
70813 /* If this is non-NULL, vfree after init() returns */
70814- void *module_init;
70815+ void *module_init_rx, *module_init_rw;
70816
70817 /* Here is the actual code + data, vfree'd on unload. */
70818- void *module_core;
70819+ void *module_core_rx, *module_core_rw;
70820
70821 /* Here are the sizes of the init and core sections */
70822- unsigned int init_size, core_size;
70823+ unsigned int init_size_rw, core_size_rw;
70824
70825 /* The size of the executable code in each section. */
70826- unsigned int init_text_size, core_text_size;
70827-
70828- /* Size of RO sections of the module (text+rodata) */
70829- unsigned int init_ro_size, core_ro_size;
70830+ unsigned int init_size_rx, core_size_rx;
70831
70832 /* Arch-specific module values */
70833 struct mod_arch_specific arch;
70834@@ -349,6 +349,10 @@ struct module
70835 #ifdef CONFIG_EVENT_TRACING
70836 struct ftrace_event_call **trace_events;
70837 unsigned int num_trace_events;
70838+ struct file_operations trace_id;
70839+ struct file_operations trace_enable;
70840+ struct file_operations trace_format;
70841+ struct file_operations trace_filter;
70842 #endif
70843 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
70844 unsigned int num_ftrace_callsites;
70845@@ -396,16 +400,46 @@ bool is_module_address(unsigned long addr);
70846 bool is_module_percpu_address(unsigned long addr);
70847 bool is_module_text_address(unsigned long addr);
70848
70849+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
70850+{
70851+
70852+#ifdef CONFIG_PAX_KERNEXEC
70853+ if (ktla_ktva(addr) >= (unsigned long)start &&
70854+ ktla_ktva(addr) < (unsigned long)start + size)
70855+ return 1;
70856+#endif
70857+
70858+ return ((void *)addr >= start && (void *)addr < start + size);
70859+}
70860+
70861+static inline int within_module_core_rx(unsigned long addr, const struct module *mod)
70862+{
70863+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
70864+}
70865+
70866+static inline int within_module_core_rw(unsigned long addr, const struct module *mod)
70867+{
70868+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
70869+}
70870+
70871+static inline int within_module_init_rx(unsigned long addr, const struct module *mod)
70872+{
70873+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
70874+}
70875+
70876+static inline int within_module_init_rw(unsigned long addr, const struct module *mod)
70877+{
70878+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
70879+}
70880+
70881 static inline int within_module_core(unsigned long addr, const struct module *mod)
70882 {
70883- return (unsigned long)mod->module_core <= addr &&
70884- addr < (unsigned long)mod->module_core + mod->core_size;
70885+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
70886 }
70887
70888 static inline int within_module_init(unsigned long addr, const struct module *mod)
70889 {
70890- return (unsigned long)mod->module_init <= addr &&
70891- addr < (unsigned long)mod->module_init + mod->init_size;
70892+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
70893 }
70894
70895 /* Search for module by name: must hold module_mutex. */
70896diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
70897index 560ca53..ef621ef 100644
70898--- a/include/linux/moduleloader.h
70899+++ b/include/linux/moduleloader.h
70900@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
70901 sections. Returns NULL on failure. */
70902 void *module_alloc(unsigned long size);
70903
70904+#ifdef CONFIG_PAX_KERNEXEC
70905+void *module_alloc_exec(unsigned long size);
70906+#else
70907+#define module_alloc_exec(x) module_alloc(x)
70908+#endif
70909+
70910 /* Free memory returned from module_alloc. */
70911 void module_free(struct module *mod, void *module_region);
70912
70913+#ifdef CONFIG_PAX_KERNEXEC
70914+void module_free_exec(struct module *mod, void *module_region);
70915+#else
70916+#define module_free_exec(x, y) module_free((x), (y))
70917+#endif
70918+
70919 /*
70920 * Apply the given relocation to the (simplified) ELF. Return -error
70921 * or 0.
70922@@ -45,7 +57,9 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
70923 unsigned int relsec,
70924 struct module *me)
70925 {
70926+#ifdef CONFIG_MODULES
70927 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
70928+#endif
70929 return -ENOEXEC;
70930 }
70931 #endif
70932@@ -67,7 +81,9 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
70933 unsigned int relsec,
70934 struct module *me)
70935 {
70936+#ifdef CONFIG_MODULES
70937 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
70938+#endif
70939 return -ENOEXEC;
70940 }
70941 #endif
70942diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
70943index 137b419..fe663ec 100644
70944--- a/include/linux/moduleparam.h
70945+++ b/include/linux/moduleparam.h
70946@@ -284,7 +284,7 @@ static inline void __kernel_param_unlock(void)
70947 * @len is usually just sizeof(string).
70948 */
70949 #define module_param_string(name, string, len, perm) \
70950- static const struct kparam_string __param_string_##name \
70951+ static const struct kparam_string __param_string_##name __used \
70952 = { len, string }; \
70953 __module_param_call(MODULE_PARAM_PREFIX, name, \
70954 &param_ops_string, \
70955@@ -423,7 +423,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
70956 */
70957 #define module_param_array_named(name, array, type, nump, perm) \
70958 param_check_##type(name, &(array)[0]); \
70959- static const struct kparam_array __param_arr_##name \
70960+ static const struct kparam_array __param_arr_##name __used \
70961 = { .max = ARRAY_SIZE(array), .num = nump, \
70962 .ops = &param_ops_##type, \
70963 .elemsize = sizeof(array[0]), .elem = array }; \
70964diff --git a/include/linux/namei.h b/include/linux/namei.h
70965index 5a5ff57..5ae5070 100644
70966--- a/include/linux/namei.h
70967+++ b/include/linux/namei.h
70968@@ -19,7 +19,7 @@ struct nameidata {
70969 unsigned seq;
70970 int last_type;
70971 unsigned depth;
70972- char *saved_names[MAX_NESTED_LINKS + 1];
70973+ const char *saved_names[MAX_NESTED_LINKS + 1];
70974 };
70975
70976 /*
70977@@ -84,12 +84,12 @@ extern void unlock_rename(struct dentry *, struct dentry *);
70978
70979 extern void nd_jump_link(struct nameidata *nd, struct path *path);
70980
70981-static inline void nd_set_link(struct nameidata *nd, char *path)
70982+static inline void nd_set_link(struct nameidata *nd, const char *path)
70983 {
70984 nd->saved_names[nd->depth] = path;
70985 }
70986
70987-static inline char *nd_get_link(struct nameidata *nd)
70988+static inline const char *nd_get_link(const struct nameidata *nd)
70989 {
70990 return nd->saved_names[nd->depth];
70991 }
70992diff --git a/include/linux/net.h b/include/linux/net.h
70993index 99c9f0c..e1cf296 100644
70994--- a/include/linux/net.h
70995+++ b/include/linux/net.h
70996@@ -183,7 +183,7 @@ struct net_proto_family {
70997 int (*create)(struct net *net, struct socket *sock,
70998 int protocol, int kern);
70999 struct module *owner;
71000-};
71001+} __do_const;
71002
71003 struct iovec;
71004 struct kvec;
71005diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
71006index 96e4c21..9cc8278 100644
71007--- a/include/linux/netdevice.h
71008+++ b/include/linux/netdevice.h
71009@@ -1026,6 +1026,7 @@ struct net_device_ops {
71010 int (*ndo_change_carrier)(struct net_device *dev,
71011 bool new_carrier);
71012 };
71013+typedef struct net_device_ops __no_const net_device_ops_no_const;
71014
71015 /*
71016 * The DEVICE structure.
71017@@ -1094,7 +1095,7 @@ struct net_device {
71018 int iflink;
71019
71020 struct net_device_stats stats;
71021- atomic_long_t rx_dropped; /* dropped packets by core network
71022+ atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
71023 * Do not use this in drivers.
71024 */
71025
71026diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
71027index 0060fde..481c6ae 100644
71028--- a/include/linux/netfilter.h
71029+++ b/include/linux/netfilter.h
71030@@ -82,7 +82,7 @@ struct nf_sockopt_ops {
71031 #endif
71032 /* Use the module struct to lock set/get code in place */
71033 struct module *owner;
71034-};
71035+} __do_const;
71036
71037 /* Function to register/unregister hook points. */
71038 int nf_register_hook(struct nf_hook_ops *reg);
71039diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
71040index d80e275..c3510b8 100644
71041--- a/include/linux/netfilter/ipset/ip_set.h
71042+++ b/include/linux/netfilter/ipset/ip_set.h
71043@@ -124,7 +124,7 @@ struct ip_set_type_variant {
71044 /* Return true if "b" set is the same as "a"
71045 * according to the create set parameters */
71046 bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
71047-};
71048+} __do_const;
71049
71050 /* The core set type structure */
71051 struct ip_set_type {
71052diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
71053index cadb740..d7c37c0 100644
71054--- a/include/linux/netfilter/nfnetlink.h
71055+++ b/include/linux/netfilter/nfnetlink.h
71056@@ -16,7 +16,7 @@ struct nfnl_callback {
71057 const struct nlattr * const cda[]);
71058 const struct nla_policy *policy; /* netlink attribute policy */
71059 const u_int16_t attr_count; /* number of nlattr's */
71060-};
71061+} __do_const;
71062
71063 struct nfnetlink_subsystem {
71064 const char *name;
71065diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
71066new file mode 100644
71067index 0000000..33f4af8
71068--- /dev/null
71069+++ b/include/linux/netfilter/xt_gradm.h
71070@@ -0,0 +1,9 @@
71071+#ifndef _LINUX_NETFILTER_XT_GRADM_H
71072+#define _LINUX_NETFILTER_XT_GRADM_H 1
71073+
71074+struct xt_gradm_mtinfo {
71075+ __u16 flags;
71076+ __u16 invflags;
71077+};
71078+
71079+#endif
71080diff --git a/include/linux/nls.h b/include/linux/nls.h
71081index 5dc635f..35f5e11 100644
71082--- a/include/linux/nls.h
71083+++ b/include/linux/nls.h
71084@@ -31,7 +31,7 @@ struct nls_table {
71085 const unsigned char *charset2upper;
71086 struct module *owner;
71087 struct nls_table *next;
71088-};
71089+} __do_const;
71090
71091 /* this value hold the maximum octet of charset */
71092 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
71093diff --git a/include/linux/notifier.h b/include/linux/notifier.h
71094index d14a4c3..a078786 100644
71095--- a/include/linux/notifier.h
71096+++ b/include/linux/notifier.h
71097@@ -54,7 +54,8 @@ struct notifier_block {
71098 notifier_fn_t notifier_call;
71099 struct notifier_block __rcu *next;
71100 int priority;
71101-};
71102+} __do_const;
71103+typedef struct notifier_block __no_const notifier_block_no_const;
71104
71105 struct atomic_notifier_head {
71106 spinlock_t lock;
71107diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
71108index a4c5624..79d6d88 100644
71109--- a/include/linux/oprofile.h
71110+++ b/include/linux/oprofile.h
71111@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
71112 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
71113 char const * name, ulong * val);
71114
71115-/** Create a file for read-only access to an atomic_t. */
71116+/** Create a file for read-only access to an atomic_unchecked_t. */
71117 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
71118- char const * name, atomic_t * val);
71119+ char const * name, atomic_unchecked_t * val);
71120
71121 /** create a directory */
71122 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
71123diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
71124index 8db71dc..a76bf2c 100644
71125--- a/include/linux/pci_hotplug.h
71126+++ b/include/linux/pci_hotplug.h
71127@@ -80,7 +80,8 @@ struct hotplug_slot_ops {
71128 int (*get_attention_status) (struct hotplug_slot *slot, u8 *value);
71129 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
71130 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
71131-};
71132+} __do_const;
71133+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
71134
71135 /**
71136 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
71137diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
71138index c5b6dbf..b124155 100644
71139--- a/include/linux/perf_event.h
71140+++ b/include/linux/perf_event.h
71141@@ -318,8 +318,8 @@ struct perf_event {
71142
71143 enum perf_event_active_state state;
71144 unsigned int attach_state;
71145- local64_t count;
71146- atomic64_t child_count;
71147+ local64_t count; /* PaX: fix it one day */
71148+ atomic64_unchecked_t child_count;
71149
71150 /*
71151 * These are the total time in nanoseconds that the event
71152@@ -370,8 +370,8 @@ struct perf_event {
71153 * These accumulate total time (in nanoseconds) that children
71154 * events have been enabled and running, respectively.
71155 */
71156- atomic64_t child_total_time_enabled;
71157- atomic64_t child_total_time_running;
71158+ atomic64_unchecked_t child_total_time_enabled;
71159+ atomic64_unchecked_t child_total_time_running;
71160
71161 /*
71162 * Protect attach/detach and child_list:
71163@@ -692,7 +692,7 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
71164 entry->ip[entry->nr++] = ip;
71165 }
71166
71167-extern int sysctl_perf_event_paranoid;
71168+extern int sysctl_perf_event_legitimately_concerned;
71169 extern int sysctl_perf_event_mlock;
71170 extern int sysctl_perf_event_sample_rate;
71171
71172@@ -700,19 +700,24 @@ extern int perf_proc_update_handler(struct ctl_table *table, int write,
71173 void __user *buffer, size_t *lenp,
71174 loff_t *ppos);
71175
71176+static inline bool perf_paranoid_any(void)
71177+{
71178+ return sysctl_perf_event_legitimately_concerned > 2;
71179+}
71180+
71181 static inline bool perf_paranoid_tracepoint_raw(void)
71182 {
71183- return sysctl_perf_event_paranoid > -1;
71184+ return sysctl_perf_event_legitimately_concerned > -1;
71185 }
71186
71187 static inline bool perf_paranoid_cpu(void)
71188 {
71189- return sysctl_perf_event_paranoid > 0;
71190+ return sysctl_perf_event_legitimately_concerned > 0;
71191 }
71192
71193 static inline bool perf_paranoid_kernel(void)
71194 {
71195- return sysctl_perf_event_paranoid > 1;
71196+ return sysctl_perf_event_legitimately_concerned > 1;
71197 }
71198
71199 extern void perf_event_init(void);
71200@@ -806,7 +811,7 @@ static inline void perf_restore_debug_store(void) { }
71201 */
71202 #define perf_cpu_notifier(fn) \
71203 do { \
71204- static struct notifier_block fn##_nb __cpuinitdata = \
71205+ static struct notifier_block fn##_nb = \
71206 { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
71207 unsigned long cpu = smp_processor_id(); \
71208 unsigned long flags; \
71209@@ -826,7 +831,7 @@ struct perf_pmu_events_attr {
71210 struct device_attribute attr;
71211 u64 id;
71212 const char *event_str;
71213-};
71214+} __do_const;
71215
71216 #define PMU_EVENT_ATTR(_name, _var, _id, _show) \
71217 static struct perf_pmu_events_attr _var = { \
71218diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
71219index b8809fe..ae4ccd0 100644
71220--- a/include/linux/pipe_fs_i.h
71221+++ b/include/linux/pipe_fs_i.h
71222@@ -47,10 +47,10 @@ struct pipe_inode_info {
71223 struct mutex mutex;
71224 wait_queue_head_t wait;
71225 unsigned int nrbufs, curbuf, buffers;
71226- unsigned int readers;
71227- unsigned int writers;
71228- unsigned int files;
71229- unsigned int waiting_writers;
71230+ atomic_t readers;
71231+ atomic_t writers;
71232+ atomic_t files;
71233+ atomic_t waiting_writers;
71234 unsigned int r_counter;
71235 unsigned int w_counter;
71236 struct page *tmp_page;
71237diff --git a/include/linux/platform_data/usb-ehci-s5p.h b/include/linux/platform_data/usb-ehci-s5p.h
71238index 5f28cae..3d23723 100644
71239--- a/include/linux/platform_data/usb-ehci-s5p.h
71240+++ b/include/linux/platform_data/usb-ehci-s5p.h
71241@@ -14,7 +14,7 @@
71242 struct s5p_ehci_platdata {
71243 int (*phy_init)(struct platform_device *pdev, int type);
71244 int (*phy_exit)(struct platform_device *pdev, int type);
71245-};
71246+} __no_const;
71247
71248 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
71249
71250diff --git a/include/linux/platform_data/usb-ohci-exynos.h b/include/linux/platform_data/usb-ohci-exynos.h
71251index c256c59..8ea94c7 100644
71252--- a/include/linux/platform_data/usb-ohci-exynos.h
71253+++ b/include/linux/platform_data/usb-ohci-exynos.h
71254@@ -14,7 +14,7 @@
71255 struct exynos4_ohci_platdata {
71256 int (*phy_init)(struct platform_device *pdev, int type);
71257 int (*phy_exit)(struct platform_device *pdev, int type);
71258-};
71259+} __no_const;
71260
71261 extern void exynos4_ohci_set_platdata(struct exynos4_ohci_platdata *pd);
71262
71263diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
71264index 7c1d252..c5c773e 100644
71265--- a/include/linux/pm_domain.h
71266+++ b/include/linux/pm_domain.h
71267@@ -48,7 +48,7 @@ struct gpd_dev_ops {
71268
71269 struct gpd_cpu_data {
71270 unsigned int saved_exit_latency;
71271- struct cpuidle_state *idle_state;
71272+ cpuidle_state_no_const *idle_state;
71273 };
71274
71275 struct generic_pm_domain {
71276diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
71277index 7d7e09e..8671ef8 100644
71278--- a/include/linux/pm_runtime.h
71279+++ b/include/linux/pm_runtime.h
71280@@ -104,7 +104,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
71281
71282 static inline void pm_runtime_mark_last_busy(struct device *dev)
71283 {
71284- ACCESS_ONCE(dev->power.last_busy) = jiffies;
71285+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
71286 }
71287
71288 #else /* !CONFIG_PM_RUNTIME */
71289diff --git a/include/linux/pnp.h b/include/linux/pnp.h
71290index 195aafc..49a7bc2 100644
71291--- a/include/linux/pnp.h
71292+++ b/include/linux/pnp.h
71293@@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
71294 struct pnp_fixup {
71295 char id[7];
71296 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
71297-};
71298+} __do_const;
71299
71300 /* config parameters */
71301 #define PNP_CONFIG_NORMAL 0x0001
71302diff --git a/include/linux/poison.h b/include/linux/poison.h
71303index 2110a81..13a11bb 100644
71304--- a/include/linux/poison.h
71305+++ b/include/linux/poison.h
71306@@ -19,8 +19,8 @@
71307 * under normal circumstances, used to verify that nobody uses
71308 * non-initialized list entries.
71309 */
71310-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
71311-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
71312+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
71313+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
71314
71315 /********** include/linux/timer.h **********/
71316 /*
71317diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
71318index c0f44c2..1572583 100644
71319--- a/include/linux/power/smartreflex.h
71320+++ b/include/linux/power/smartreflex.h
71321@@ -238,7 +238,7 @@ struct omap_sr_class_data {
71322 int (*notify)(struct omap_sr *sr, u32 status);
71323 u8 notify_flags;
71324 u8 class_type;
71325-};
71326+} __do_const;
71327
71328 /**
71329 * struct omap_sr_nvalue_table - Smartreflex n-target value info
71330diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
71331index 4ea1d37..80f4b33 100644
71332--- a/include/linux/ppp-comp.h
71333+++ b/include/linux/ppp-comp.h
71334@@ -84,7 +84,7 @@ struct compressor {
71335 struct module *owner;
71336 /* Extra skb space needed by the compressor algorithm */
71337 unsigned int comp_extra;
71338-};
71339+} __do_const;
71340
71341 /*
71342 * The return value from decompress routine is the length of the
71343diff --git a/include/linux/printk.h b/include/linux/printk.h
71344index 22c7052..ad3fa0a 100644
71345--- a/include/linux/printk.h
71346+++ b/include/linux/printk.h
71347@@ -106,6 +106,8 @@ static inline __printf(1, 2) __cold
71348 void early_printk(const char *s, ...) { }
71349 #endif
71350
71351+extern int kptr_restrict;
71352+
71353 #ifdef CONFIG_PRINTK
71354 asmlinkage __printf(5, 0)
71355 int vprintk_emit(int facility, int level,
71356@@ -140,7 +142,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
71357
71358 extern int printk_delay_msec;
71359 extern int dmesg_restrict;
71360-extern int kptr_restrict;
71361
71362 extern void wake_up_klogd(void);
71363
71364diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
71365index 608e60a..c26f864 100644
71366--- a/include/linux/proc_fs.h
71367+++ b/include/linux/proc_fs.h
71368@@ -34,6 +34,19 @@ static inline struct proc_dir_entry *proc_create(
71369 return proc_create_data(name, mode, parent, proc_fops, NULL);
71370 }
71371
71372+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
71373+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
71374+{
71375+#ifdef CONFIG_GRKERNSEC_PROC_USER
71376+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
71377+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71378+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
71379+#else
71380+ return proc_create_data(name, mode, parent, proc_fops, NULL);
71381+#endif
71382+}
71383+
71384+
71385 extern void proc_set_size(struct proc_dir_entry *, loff_t);
71386 extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
71387 extern void *PDE_DATA(const struct inode *);
71388diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
71389index 34a1e10..03a6d03 100644
71390--- a/include/linux/proc_ns.h
71391+++ b/include/linux/proc_ns.h
71392@@ -14,7 +14,7 @@ struct proc_ns_operations {
71393 void (*put)(void *ns);
71394 int (*install)(struct nsproxy *nsproxy, void *ns);
71395 unsigned int (*inum)(void *ns);
71396-};
71397+} __do_const;
71398
71399 struct proc_ns {
71400 void *ns;
71401diff --git a/include/linux/random.h b/include/linux/random.h
71402index 3b9377d..61b506a 100644
71403--- a/include/linux/random.h
71404+++ b/include/linux/random.h
71405@@ -32,6 +32,11 @@ void prandom_seed(u32 seed);
71406 u32 prandom_u32_state(struct rnd_state *);
71407 void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
71408
71409+static inline unsigned long pax_get_random_long(void)
71410+{
71411+ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
71412+}
71413+
71414 /*
71415 * Handle minimum values for seeds
71416 */
71417diff --git a/include/linux/rculist.h b/include/linux/rculist.h
71418index f4b1001..8ddb2b6 100644
71419--- a/include/linux/rculist.h
71420+++ b/include/linux/rculist.h
71421@@ -44,6 +44,9 @@ extern void __list_add_rcu(struct list_head *new,
71422 struct list_head *prev, struct list_head *next);
71423 #endif
71424
71425+extern void __pax_list_add_rcu(struct list_head *new,
71426+ struct list_head *prev, struct list_head *next);
71427+
71428 /**
71429 * list_add_rcu - add a new entry to rcu-protected list
71430 * @new: new entry to be added
71431@@ -65,6 +68,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
71432 __list_add_rcu(new, head, head->next);
71433 }
71434
71435+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
71436+{
71437+ __pax_list_add_rcu(new, head, head->next);
71438+}
71439+
71440 /**
71441 * list_add_tail_rcu - add a new entry to rcu-protected list
71442 * @new: new entry to be added
71443@@ -87,6 +95,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
71444 __list_add_rcu(new, head->prev, head);
71445 }
71446
71447+static inline void pax_list_add_tail_rcu(struct list_head *new,
71448+ struct list_head *head)
71449+{
71450+ __pax_list_add_rcu(new, head->prev, head);
71451+}
71452+
71453 /**
71454 * list_del_rcu - deletes entry from list without re-initialization
71455 * @entry: the element to delete from the list.
71456@@ -117,6 +131,8 @@ static inline void list_del_rcu(struct list_head *entry)
71457 entry->prev = LIST_POISON2;
71458 }
71459
71460+extern void pax_list_del_rcu(struct list_head *entry);
71461+
71462 /**
71463 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
71464 * @n: the element to delete from the hash list.
71465diff --git a/include/linux/reboot.h b/include/linux/reboot.h
71466index 23b3630..e1bc12b 100644
71467--- a/include/linux/reboot.h
71468+++ b/include/linux/reboot.h
71469@@ -18,9 +18,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
71470 * Architecture-specific implementations of sys_reboot commands.
71471 */
71472
71473-extern void machine_restart(char *cmd);
71474-extern void machine_halt(void);
71475-extern void machine_power_off(void);
71476+extern void machine_restart(char *cmd) __noreturn;
71477+extern void machine_halt(void) __noreturn;
71478+extern void machine_power_off(void) __noreturn;
71479
71480 extern void machine_shutdown(void);
71481 struct pt_regs;
71482@@ -31,9 +31,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
71483 */
71484
71485 extern void kernel_restart_prepare(char *cmd);
71486-extern void kernel_restart(char *cmd);
71487-extern void kernel_halt(void);
71488-extern void kernel_power_off(void);
71489+extern void kernel_restart(char *cmd) __noreturn;
71490+extern void kernel_halt(void) __noreturn;
71491+extern void kernel_power_off(void) __noreturn;
71492
71493 extern int C_A_D; /* for sysctl */
71494 void ctrl_alt_del(void);
71495@@ -47,7 +47,7 @@ extern int orderly_poweroff(bool force);
71496 * Emergency restart, callable from an interrupt handler.
71497 */
71498
71499-extern void emergency_restart(void);
71500+extern void emergency_restart(void) __noreturn;
71501 #include <asm/emergency-restart.h>
71502
71503 #endif /* _LINUX_REBOOT_H */
71504diff --git a/include/linux/regset.h b/include/linux/regset.h
71505index 8e0c9fe..ac4d221 100644
71506--- a/include/linux/regset.h
71507+++ b/include/linux/regset.h
71508@@ -161,7 +161,8 @@ struct user_regset {
71509 unsigned int align;
71510 unsigned int bias;
71511 unsigned int core_note_type;
71512-};
71513+} __do_const;
71514+typedef struct user_regset __no_const user_regset_no_const;
71515
71516 /**
71517 * struct user_regset_view - available regsets
71518diff --git a/include/linux/relay.h b/include/linux/relay.h
71519index d7c8359..818daf5 100644
71520--- a/include/linux/relay.h
71521+++ b/include/linux/relay.h
71522@@ -157,7 +157,7 @@ struct rchan_callbacks
71523 * The callback should return 0 if successful, negative if not.
71524 */
71525 int (*remove_buf_file)(struct dentry *dentry);
71526-};
71527+} __no_const;
71528
71529 /*
71530 * CONFIG_RELAY kernel API, kernel/relay.c
71531diff --git a/include/linux/rio.h b/include/linux/rio.h
71532index 18e0993..8ab5b21 100644
71533--- a/include/linux/rio.h
71534+++ b/include/linux/rio.h
71535@@ -345,7 +345,7 @@ struct rio_ops {
71536 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
71537 u64 rstart, u32 size, u32 flags);
71538 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
71539-};
71540+} __no_const;
71541
71542 #define RIO_RESOURCE_MEM 0x00000100
71543 #define RIO_RESOURCE_DOORBELL 0x00000200
71544diff --git a/include/linux/rmap.h b/include/linux/rmap.h
71545index 6dacb93..6174423 100644
71546--- a/include/linux/rmap.h
71547+++ b/include/linux/rmap.h
71548@@ -145,8 +145,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
71549 void anon_vma_init(void); /* create anon_vma_cachep */
71550 int anon_vma_prepare(struct vm_area_struct *);
71551 void unlink_anon_vmas(struct vm_area_struct *);
71552-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
71553-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
71554+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
71555+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
71556
71557 static inline void anon_vma_merge(struct vm_area_struct *vma,
71558 struct vm_area_struct *next)
71559diff --git a/include/linux/sched.h b/include/linux/sched.h
71560index 178a8d9..52e71a3 100644
71561--- a/include/linux/sched.h
71562+++ b/include/linux/sched.h
71563@@ -62,6 +62,7 @@ struct bio_list;
71564 struct fs_struct;
71565 struct perf_event_context;
71566 struct blk_plug;
71567+struct linux_binprm;
71568
71569 /*
71570 * List of flags we want to share for kernel threads,
71571@@ -303,7 +304,7 @@ extern char __sched_text_start[], __sched_text_end[];
71572 extern int in_sched_functions(unsigned long addr);
71573
71574 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
71575-extern signed long schedule_timeout(signed long timeout);
71576+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
71577 extern signed long schedule_timeout_interruptible(signed long timeout);
71578 extern signed long schedule_timeout_killable(signed long timeout);
71579 extern signed long schedule_timeout_uninterruptible(signed long timeout);
71580@@ -314,6 +315,19 @@ struct nsproxy;
71581 struct user_namespace;
71582
71583 #ifdef CONFIG_MMU
71584+
71585+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
71586+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
71587+#else
71588+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
71589+{
71590+ return 0;
71591+}
71592+#endif
71593+
71594+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
71595+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
71596+
71597 extern void arch_pick_mmap_layout(struct mm_struct *mm);
71598 extern unsigned long
71599 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
71600@@ -591,6 +605,17 @@ struct signal_struct {
71601 #ifdef CONFIG_TASKSTATS
71602 struct taskstats *stats;
71603 #endif
71604+
71605+#ifdef CONFIG_GRKERNSEC
71606+ u32 curr_ip;
71607+ u32 saved_ip;
71608+ u32 gr_saddr;
71609+ u32 gr_daddr;
71610+ u16 gr_sport;
71611+ u16 gr_dport;
71612+ u8 used_accept:1;
71613+#endif
71614+
71615 #ifdef CONFIG_AUDIT
71616 unsigned audit_tty;
71617 unsigned audit_tty_log_passwd;
71618@@ -671,6 +696,14 @@ struct user_struct {
71619 struct key *session_keyring; /* UID's default session keyring */
71620 #endif
71621
71622+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
71623+ unsigned char kernel_banned;
71624+#endif
71625+#ifdef CONFIG_GRKERNSEC_BRUTE
71626+ unsigned char suid_banned;
71627+ unsigned long suid_ban_expires;
71628+#endif
71629+
71630 /* Hash table maintenance information */
71631 struct hlist_node uidhash_node;
71632 kuid_t uid;
71633@@ -1158,8 +1191,8 @@ struct task_struct {
71634 struct list_head thread_group;
71635
71636 struct completion *vfork_done; /* for vfork() */
71637- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
71638- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
71639+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
71640+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
71641
71642 cputime_t utime, stime, utimescaled, stimescaled;
71643 cputime_t gtime;
71644@@ -1184,11 +1217,6 @@ struct task_struct {
71645 struct task_cputime cputime_expires;
71646 struct list_head cpu_timers[3];
71647
71648-/* process credentials */
71649- const struct cred __rcu *real_cred; /* objective and real subjective task
71650- * credentials (COW) */
71651- const struct cred __rcu *cred; /* effective (overridable) subjective task
71652- * credentials (COW) */
71653 char comm[TASK_COMM_LEN]; /* executable name excluding path
71654 - access with [gs]et_task_comm (which lock
71655 it with task_lock())
71656@@ -1205,6 +1233,10 @@ struct task_struct {
71657 #endif
71658 /* CPU-specific state of this task */
71659 struct thread_struct thread;
71660+/* thread_info moved to task_struct */
71661+#ifdef CONFIG_X86
71662+ struct thread_info tinfo;
71663+#endif
71664 /* filesystem information */
71665 struct fs_struct *fs;
71666 /* open file information */
71667@@ -1278,6 +1310,10 @@ struct task_struct {
71668 gfp_t lockdep_reclaim_gfp;
71669 #endif
71670
71671+/* process credentials */
71672+ const struct cred __rcu *real_cred; /* objective and real subjective task
71673+ * credentials (COW) */
71674+
71675 /* journalling filesystem info */
71676 void *journal_info;
71677
71678@@ -1316,6 +1352,10 @@ struct task_struct {
71679 /* cg_list protected by css_set_lock and tsk->alloc_lock */
71680 struct list_head cg_list;
71681 #endif
71682+
71683+ const struct cred __rcu *cred; /* effective (overridable) subjective task
71684+ * credentials (COW) */
71685+
71686 #ifdef CONFIG_FUTEX
71687 struct robust_list_head __user *robust_list;
71688 #ifdef CONFIG_COMPAT
71689@@ -1416,8 +1456,74 @@ struct task_struct {
71690 unsigned int sequential_io;
71691 unsigned int sequential_io_avg;
71692 #endif
71693+
71694+#ifdef CONFIG_GRKERNSEC
71695+ /* grsecurity */
71696+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71697+ u64 exec_id;
71698+#endif
71699+#ifdef CONFIG_GRKERNSEC_SETXID
71700+ const struct cred *delayed_cred;
71701+#endif
71702+ struct dentry *gr_chroot_dentry;
71703+ struct acl_subject_label *acl;
71704+ struct acl_role_label *role;
71705+ struct file *exec_file;
71706+ unsigned long brute_expires;
71707+ u16 acl_role_id;
71708+ /* is this the task that authenticated to the special role */
71709+ u8 acl_sp_role;
71710+ u8 is_writable;
71711+ u8 brute;
71712+ u8 gr_is_chrooted;
71713+#endif
71714+
71715 };
71716
71717+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
71718+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
71719+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
71720+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
71721+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
71722+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
71723+
71724+#ifdef CONFIG_PAX_SOFTMODE
71725+extern int pax_softmode;
71726+#endif
71727+
71728+extern int pax_check_flags(unsigned long *);
71729+
71730+/* if tsk != current then task_lock must be held on it */
71731+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
71732+static inline unsigned long pax_get_flags(struct task_struct *tsk)
71733+{
71734+ if (likely(tsk->mm))
71735+ return tsk->mm->pax_flags;
71736+ else
71737+ return 0UL;
71738+}
71739+
71740+/* if tsk != current then task_lock must be held on it */
71741+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
71742+{
71743+ if (likely(tsk->mm)) {
71744+ tsk->mm->pax_flags = flags;
71745+ return 0;
71746+ }
71747+ return -EINVAL;
71748+}
71749+#endif
71750+
71751+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
71752+extern void pax_set_initial_flags(struct linux_binprm *bprm);
71753+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
71754+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
71755+#endif
71756+
71757+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
71758+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
71759+extern void pax_report_refcount_overflow(struct pt_regs *regs);
71760+
71761 /* Future-safe accessor for struct task_struct's cpus_allowed. */
71762 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
71763
71764@@ -1476,7 +1582,7 @@ struct pid_namespace;
71765 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
71766 struct pid_namespace *ns);
71767
71768-static inline pid_t task_pid_nr(struct task_struct *tsk)
71769+static inline pid_t task_pid_nr(const struct task_struct *tsk)
71770 {
71771 return tsk->pid;
71772 }
71773@@ -1919,7 +2025,9 @@ void yield(void);
71774 extern struct exec_domain default_exec_domain;
71775
71776 union thread_union {
71777+#ifndef CONFIG_X86
71778 struct thread_info thread_info;
71779+#endif
71780 unsigned long stack[THREAD_SIZE/sizeof(long)];
71781 };
71782
71783@@ -1952,6 +2060,7 @@ extern struct pid_namespace init_pid_ns;
71784 */
71785
71786 extern struct task_struct *find_task_by_vpid(pid_t nr);
71787+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
71788 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
71789 struct pid_namespace *ns);
71790
71791@@ -2118,7 +2227,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
71792 extern void exit_itimers(struct signal_struct *);
71793 extern void flush_itimer_signals(void);
71794
71795-extern void do_group_exit(int);
71796+extern __noreturn void do_group_exit(int);
71797
71798 extern int allow_signal(int);
71799 extern int disallow_signal(int);
71800@@ -2309,9 +2418,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
71801
71802 #endif
71803
71804-static inline int object_is_on_stack(void *obj)
71805+static inline int object_starts_on_stack(void *obj)
71806 {
71807- void *stack = task_stack_page(current);
71808+ const void *stack = task_stack_page(current);
71809
71810 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
71811 }
71812diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
71813index bf8086b..962b035 100644
71814--- a/include/linux/sched/sysctl.h
71815+++ b/include/linux/sched/sysctl.h
71816@@ -30,6 +30,7 @@ enum { sysctl_hung_task_timeout_secs = 0 };
71817 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
71818
71819 extern int sysctl_max_map_count;
71820+extern unsigned long sysctl_heap_stack_gap;
71821
71822 extern unsigned int sysctl_sched_latency;
71823 extern unsigned int sysctl_sched_min_granularity;
71824diff --git a/include/linux/security.h b/include/linux/security.h
71825index 4686491..2bd210e 100644
71826--- a/include/linux/security.h
71827+++ b/include/linux/security.h
71828@@ -26,6 +26,7 @@
71829 #include <linux/capability.h>
71830 #include <linux/slab.h>
71831 #include <linux/err.h>
71832+#include <linux/grsecurity.h>
71833
71834 struct linux_binprm;
71835 struct cred;
71836diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
71837index 2da29ac..aac448ec 100644
71838--- a/include/linux/seq_file.h
71839+++ b/include/linux/seq_file.h
71840@@ -26,6 +26,9 @@ struct seq_file {
71841 struct mutex lock;
71842 const struct seq_operations *op;
71843 int poll_event;
71844+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71845+ u64 exec_id;
71846+#endif
71847 #ifdef CONFIG_USER_NS
71848 struct user_namespace *user_ns;
71849 #endif
71850@@ -38,6 +41,7 @@ struct seq_operations {
71851 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
71852 int (*show) (struct seq_file *m, void *v);
71853 };
71854+typedef struct seq_operations __no_const seq_operations_no_const;
71855
71856 #define SEQ_SKIP 1
71857
71858diff --git a/include/linux/shm.h b/include/linux/shm.h
71859index 429c199..4d42e38 100644
71860--- a/include/linux/shm.h
71861+++ b/include/linux/shm.h
71862@@ -21,6 +21,10 @@ struct shmid_kernel /* private to the kernel */
71863
71864 /* The task created the shm object. NULL if the task is dead. */
71865 struct task_struct *shm_creator;
71866+#ifdef CONFIG_GRKERNSEC
71867+ time_t shm_createtime;
71868+ pid_t shm_lapid;
71869+#endif
71870 };
71871
71872 /* shm_mode upper byte flags */
71873diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
71874index dec1748..112c1f9 100644
71875--- a/include/linux/skbuff.h
71876+++ b/include/linux/skbuff.h
71877@@ -640,7 +640,7 @@ extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
71878 extern struct sk_buff *__alloc_skb(unsigned int size,
71879 gfp_t priority, int flags, int node);
71880 extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
71881-static inline struct sk_buff *alloc_skb(unsigned int size,
71882+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
71883 gfp_t priority)
71884 {
71885 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
71886@@ -756,7 +756,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
71887 */
71888 static inline int skb_queue_empty(const struct sk_buff_head *list)
71889 {
71890- return list->next == (struct sk_buff *)list;
71891+ return list->next == (const struct sk_buff *)list;
71892 }
71893
71894 /**
71895@@ -769,7 +769,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
71896 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
71897 const struct sk_buff *skb)
71898 {
71899- return skb->next == (struct sk_buff *)list;
71900+ return skb->next == (const struct sk_buff *)list;
71901 }
71902
71903 /**
71904@@ -782,7 +782,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
71905 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
71906 const struct sk_buff *skb)
71907 {
71908- return skb->prev == (struct sk_buff *)list;
71909+ return skb->prev == (const struct sk_buff *)list;
71910 }
71911
71912 /**
71913@@ -1848,7 +1848,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
71914 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
71915 */
71916 #ifndef NET_SKB_PAD
71917-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
71918+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
71919 #endif
71920
71921 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
71922@@ -2443,7 +2443,7 @@ extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
71923 int noblock, int *err);
71924 extern unsigned int datagram_poll(struct file *file, struct socket *sock,
71925 struct poll_table_struct *wait);
71926-extern int skb_copy_datagram_iovec(const struct sk_buff *from,
71927+extern int __intentional_overflow(0) skb_copy_datagram_iovec(const struct sk_buff *from,
71928 int offset, struct iovec *to,
71929 int size);
71930 extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
71931@@ -2733,6 +2733,9 @@ static inline void nf_reset(struct sk_buff *skb)
71932 nf_bridge_put(skb->nf_bridge);
71933 skb->nf_bridge = NULL;
71934 #endif
71935+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
71936+ skb->nf_trace = 0;
71937+#endif
71938 }
71939
71940 static inline void nf_reset_trace(struct sk_buff *skb)
71941diff --git a/include/linux/slab.h b/include/linux/slab.h
71942index 0c62175..9ece3d8 100644
71943--- a/include/linux/slab.h
71944+++ b/include/linux/slab.h
71945@@ -12,13 +12,20 @@
71946 #include <linux/gfp.h>
71947 #include <linux/types.h>
71948 #include <linux/workqueue.h>
71949-
71950+#include <linux/err.h>
71951
71952 /*
71953 * Flags to pass to kmem_cache_create().
71954 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
71955 */
71956 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
71957+
71958+#ifdef CONFIG_PAX_USERCOPY_SLABS
71959+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
71960+#else
71961+#define SLAB_USERCOPY 0x00000000UL
71962+#endif
71963+
71964 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
71965 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
71966 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
71967@@ -89,10 +96,13 @@
71968 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
71969 * Both make kfree a no-op.
71970 */
71971-#define ZERO_SIZE_PTR ((void *)16)
71972+#define ZERO_SIZE_PTR \
71973+({ \
71974+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
71975+ (void *)(-MAX_ERRNO-1L); \
71976+})
71977
71978-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
71979- (unsigned long)ZERO_SIZE_PTR)
71980+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
71981
71982
71983 struct mem_cgroup;
71984@@ -132,6 +142,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
71985 void kfree(const void *);
71986 void kzfree(const void *);
71987 size_t ksize(const void *);
71988+const char *check_heap_object(const void *ptr, unsigned long n);
71989+bool is_usercopy_object(const void *ptr);
71990
71991 /*
71992 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
71993@@ -164,7 +176,7 @@ struct kmem_cache {
71994 unsigned int align; /* Alignment as calculated */
71995 unsigned long flags; /* Active flags on the slab */
71996 const char *name; /* Slab name for sysfs */
71997- int refcount; /* Use counter */
71998+ atomic_t refcount; /* Use counter */
71999 void (*ctor)(void *); /* Called on object slot creation */
72000 struct list_head list; /* List of all slab caches on the system */
72001 };
72002@@ -226,6 +238,10 @@ extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
72003 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
72004 #endif
72005
72006+#ifdef CONFIG_PAX_USERCOPY_SLABS
72007+extern struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
72008+#endif
72009+
72010 /*
72011 * Figure out which kmalloc slab an allocation of a certain size
72012 * belongs to.
72013@@ -234,7 +250,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
72014 * 2 = 120 .. 192 bytes
72015 * n = 2^(n-1) .. 2^n -1
72016 */
72017-static __always_inline int kmalloc_index(size_t size)
72018+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
72019 {
72020 if (!size)
72021 return 0;
72022@@ -406,6 +422,7 @@ void print_slabinfo_header(struct seq_file *m);
72023 * for general use, and so are not documented here. For a full list of
72024 * potential flags, always refer to linux/gfp.h.
72025 */
72026+
72027 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
72028 {
72029 if (size != 0 && n > SIZE_MAX / size)
72030@@ -465,7 +482,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
72031 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
72032 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
72033 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
72034-extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
72035+extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
72036 #define kmalloc_track_caller(size, flags) \
72037 __kmalloc_track_caller(size, flags, _RET_IP_)
72038 #else
72039@@ -485,7 +502,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
72040 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
72041 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
72042 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
72043-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
72044+extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
72045 #define kmalloc_node_track_caller(size, flags, node) \
72046 __kmalloc_node_track_caller(size, flags, node, \
72047 _RET_IP_)
72048diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
72049index cd40158..d9dc02c 100644
72050--- a/include/linux/slab_def.h
72051+++ b/include/linux/slab_def.h
72052@@ -50,7 +50,7 @@ struct kmem_cache {
72053 /* 4) cache creation/removal */
72054 const char *name;
72055 struct list_head list;
72056- int refcount;
72057+ atomic_t refcount;
72058 int object_size;
72059 int align;
72060
72061@@ -66,10 +66,10 @@ struct kmem_cache {
72062 unsigned long node_allocs;
72063 unsigned long node_frees;
72064 unsigned long node_overflow;
72065- atomic_t allochit;
72066- atomic_t allocmiss;
72067- atomic_t freehit;
72068- atomic_t freemiss;
72069+ atomic_unchecked_t allochit;
72070+ atomic_unchecked_t allocmiss;
72071+ atomic_unchecked_t freehit;
72072+ atomic_unchecked_t freemiss;
72073
72074 /*
72075 * If debugging is enabled, then the allocator can add additional
72076@@ -103,7 +103,7 @@ struct kmem_cache {
72077 };
72078
72079 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
72080-void *__kmalloc(size_t size, gfp_t flags);
72081+void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
72082
72083 #ifdef CONFIG_TRACING
72084 extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
72085@@ -136,6 +136,13 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
72086 cachep = kmalloc_dma_caches[i];
72087 else
72088 #endif
72089+
72090+#ifdef CONFIG_PAX_USERCOPY_SLABS
72091+ if (flags & GFP_USERCOPY)
72092+ cachep = kmalloc_usercopy_caches[i];
72093+ else
72094+#endif
72095+
72096 cachep = kmalloc_caches[i];
72097
72098 ret = kmem_cache_alloc_trace(cachep, flags, size);
72099@@ -146,7 +153,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
72100 }
72101
72102 #ifdef CONFIG_NUMA
72103-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
72104+extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
72105 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
72106
72107 #ifdef CONFIG_TRACING
72108@@ -185,6 +192,13 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
72109 cachep = kmalloc_dma_caches[i];
72110 else
72111 #endif
72112+
72113+#ifdef CONFIG_PAX_USERCOPY_SLABS
72114+ if (flags & GFP_USERCOPY)
72115+ cachep = kmalloc_usercopy_caches[i];
72116+ else
72117+#endif
72118+
72119 cachep = kmalloc_caches[i];
72120
72121 return kmem_cache_alloc_node_trace(cachep, flags, node, size);
72122diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
72123index f28e14a..7831211 100644
72124--- a/include/linux/slob_def.h
72125+++ b/include/linux/slob_def.h
72126@@ -11,7 +11,7 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
72127 return kmem_cache_alloc_node(cachep, flags, NUMA_NO_NODE);
72128 }
72129
72130-void *__kmalloc_node(size_t size, gfp_t flags, int node);
72131+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
72132
72133 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
72134 {
72135@@ -31,7 +31,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
72136 return __kmalloc_node(size, flags, NUMA_NO_NODE);
72137 }
72138
72139-static __always_inline void *__kmalloc(size_t size, gfp_t flags)
72140+static __always_inline __size_overflow(1) void *__kmalloc(size_t size, gfp_t flags)
72141 {
72142 return kmalloc(size, flags);
72143 }
72144diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
72145index 027276f..092bfe8 100644
72146--- a/include/linux/slub_def.h
72147+++ b/include/linux/slub_def.h
72148@@ -80,7 +80,7 @@ struct kmem_cache {
72149 struct kmem_cache_order_objects max;
72150 struct kmem_cache_order_objects min;
72151 gfp_t allocflags; /* gfp flags to use on each alloc */
72152- int refcount; /* Refcount for slab cache destroy */
72153+ atomic_t refcount; /* Refcount for slab cache destroy */
72154 void (*ctor)(void *);
72155 int inuse; /* Offset to metadata */
72156 int align; /* Alignment */
72157@@ -105,7 +105,7 @@ struct kmem_cache {
72158 };
72159
72160 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
72161-void *__kmalloc(size_t size, gfp_t flags);
72162+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
72163
72164 static __always_inline void *
72165 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
72166@@ -149,7 +149,7 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
72167 }
72168 #endif
72169
72170-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
72171+static __always_inline __size_overflow(1) void *kmalloc_large(size_t size, gfp_t flags)
72172 {
72173 unsigned int order = get_order(size);
72174 return kmalloc_order_trace(size, flags, order);
72175@@ -175,7 +175,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
72176 }
72177
72178 #ifdef CONFIG_NUMA
72179-void *__kmalloc_node(size_t size, gfp_t flags, int node);
72180+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
72181 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
72182
72183 #ifdef CONFIG_TRACING
72184diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
72185index 54f91d3..be2c379 100644
72186--- a/include/linux/sock_diag.h
72187+++ b/include/linux/sock_diag.h
72188@@ -11,7 +11,7 @@ struct sock;
72189 struct sock_diag_handler {
72190 __u8 family;
72191 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
72192-};
72193+} __do_const;
72194
72195 int sock_diag_register(const struct sock_diag_handler *h);
72196 void sock_diag_unregister(const struct sock_diag_handler *h);
72197diff --git a/include/linux/sonet.h b/include/linux/sonet.h
72198index 680f9a3..f13aeb0 100644
72199--- a/include/linux/sonet.h
72200+++ b/include/linux/sonet.h
72201@@ -7,7 +7,7 @@
72202 #include <uapi/linux/sonet.h>
72203
72204 struct k_sonet_stats {
72205-#define __HANDLE_ITEM(i) atomic_t i
72206+#define __HANDLE_ITEM(i) atomic_unchecked_t i
72207 __SONET_ITEMS
72208 #undef __HANDLE_ITEM
72209 };
72210diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
72211index 07d8e53..dc934c9 100644
72212--- a/include/linux/sunrpc/addr.h
72213+++ b/include/linux/sunrpc/addr.h
72214@@ -23,9 +23,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
72215 {
72216 switch (sap->sa_family) {
72217 case AF_INET:
72218- return ntohs(((struct sockaddr_in *)sap)->sin_port);
72219+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
72220 case AF_INET6:
72221- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
72222+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
72223 }
72224 return 0;
72225 }
72226@@ -58,7 +58,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
72227 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
72228 const struct sockaddr *src)
72229 {
72230- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
72231+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
72232 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
72233
72234 dsin->sin_family = ssin->sin_family;
72235@@ -164,7 +164,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
72236 if (sa->sa_family != AF_INET6)
72237 return 0;
72238
72239- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
72240+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
72241 }
72242
72243 #endif /* _LINUX_SUNRPC_ADDR_H */
72244diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
72245index bfe11be..12bc8c4 100644
72246--- a/include/linux/sunrpc/clnt.h
72247+++ b/include/linux/sunrpc/clnt.h
72248@@ -96,7 +96,7 @@ struct rpc_procinfo {
72249 unsigned int p_timer; /* Which RTT timer to use */
72250 u32 p_statidx; /* Which procedure to account */
72251 const char * p_name; /* name of procedure */
72252-};
72253+} __do_const;
72254
72255 #ifdef __KERNEL__
72256
72257diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
72258index 1f0216b..6a4fa50 100644
72259--- a/include/linux/sunrpc/svc.h
72260+++ b/include/linux/sunrpc/svc.h
72261@@ -411,7 +411,7 @@ struct svc_procedure {
72262 unsigned int pc_count; /* call count */
72263 unsigned int pc_cachetype; /* cache info (NFS) */
72264 unsigned int pc_xdrressize; /* maximum size of XDR reply */
72265-};
72266+} __do_const;
72267
72268 /*
72269 * Function prototypes.
72270diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
72271index 0b8e3e6..33e0a01 100644
72272--- a/include/linux/sunrpc/svc_rdma.h
72273+++ b/include/linux/sunrpc/svc_rdma.h
72274@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
72275 extern unsigned int svcrdma_max_requests;
72276 extern unsigned int svcrdma_max_req_size;
72277
72278-extern atomic_t rdma_stat_recv;
72279-extern atomic_t rdma_stat_read;
72280-extern atomic_t rdma_stat_write;
72281-extern atomic_t rdma_stat_sq_starve;
72282-extern atomic_t rdma_stat_rq_starve;
72283-extern atomic_t rdma_stat_rq_poll;
72284-extern atomic_t rdma_stat_rq_prod;
72285-extern atomic_t rdma_stat_sq_poll;
72286-extern atomic_t rdma_stat_sq_prod;
72287+extern atomic_unchecked_t rdma_stat_recv;
72288+extern atomic_unchecked_t rdma_stat_read;
72289+extern atomic_unchecked_t rdma_stat_write;
72290+extern atomic_unchecked_t rdma_stat_sq_starve;
72291+extern atomic_unchecked_t rdma_stat_rq_starve;
72292+extern atomic_unchecked_t rdma_stat_rq_poll;
72293+extern atomic_unchecked_t rdma_stat_rq_prod;
72294+extern atomic_unchecked_t rdma_stat_sq_poll;
72295+extern atomic_unchecked_t rdma_stat_sq_prod;
72296
72297 #define RPCRDMA_VERSION 1
72298
72299diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
72300index ff374ab..7fd2ecb 100644
72301--- a/include/linux/sunrpc/svcauth.h
72302+++ b/include/linux/sunrpc/svcauth.h
72303@@ -109,7 +109,7 @@ struct auth_ops {
72304 int (*release)(struct svc_rqst *rq);
72305 void (*domain_release)(struct auth_domain *);
72306 int (*set_client)(struct svc_rqst *rq);
72307-};
72308+} __do_const;
72309
72310 #define SVC_GARBAGE 1
72311 #define SVC_SYSERR 2
72312diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
72313index a5ffd32..0935dea 100644
72314--- a/include/linux/swiotlb.h
72315+++ b/include/linux/swiotlb.h
72316@@ -60,7 +60,8 @@ extern void
72317
72318 extern void
72319 swiotlb_free_coherent(struct device *hwdev, size_t size,
72320- void *vaddr, dma_addr_t dma_handle);
72321+ void *vaddr, dma_addr_t dma_handle,
72322+ struct dma_attrs *attrs);
72323
72324 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
72325 unsigned long offset, size_t size,
72326diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
72327index 4147d70..31a1791 100644
72328--- a/include/linux/syscalls.h
72329+++ b/include/linux/syscalls.h
72330@@ -362,11 +362,11 @@ asmlinkage long sys_sync(void);
72331 asmlinkage long sys_fsync(unsigned int fd);
72332 asmlinkage long sys_fdatasync(unsigned int fd);
72333 asmlinkage long sys_bdflush(int func, long data);
72334-asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
72335- char __user *type, unsigned long flags,
72336+asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name,
72337+ const char __user *type, unsigned long flags,
72338 void __user *data);
72339-asmlinkage long sys_umount(char __user *name, int flags);
72340-asmlinkage long sys_oldumount(char __user *name);
72341+asmlinkage long sys_umount(const char __user *name, int flags);
72342+asmlinkage long sys_oldumount(const char __user *name);
72343 asmlinkage long sys_truncate(const char __user *path, long length);
72344 asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
72345 asmlinkage long sys_stat(const char __user *filename,
72346@@ -578,7 +578,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
72347 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
72348 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
72349 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
72350- struct sockaddr __user *, int);
72351+ struct sockaddr __user *, int) __intentional_overflow(0);
72352 asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
72353 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
72354 unsigned int vlen, unsigned flags);
72355diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
72356index 27b3b0b..e093dd9 100644
72357--- a/include/linux/syscore_ops.h
72358+++ b/include/linux/syscore_ops.h
72359@@ -16,7 +16,7 @@ struct syscore_ops {
72360 int (*suspend)(void);
72361 void (*resume)(void);
72362 void (*shutdown)(void);
72363-};
72364+} __do_const;
72365
72366 extern void register_syscore_ops(struct syscore_ops *ops);
72367 extern void unregister_syscore_ops(struct syscore_ops *ops);
72368diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
72369index 14a8ff2..af52bad 100644
72370--- a/include/linux/sysctl.h
72371+++ b/include/linux/sysctl.h
72372@@ -34,13 +34,13 @@ struct ctl_table_root;
72373 struct ctl_table_header;
72374 struct ctl_dir;
72375
72376-typedef struct ctl_table ctl_table;
72377-
72378 typedef int proc_handler (struct ctl_table *ctl, int write,
72379 void __user *buffer, size_t *lenp, loff_t *ppos);
72380
72381 extern int proc_dostring(struct ctl_table *, int,
72382 void __user *, size_t *, loff_t *);
72383+extern int proc_dostring_modpriv(struct ctl_table *, int,
72384+ void __user *, size_t *, loff_t *);
72385 extern int proc_dointvec(struct ctl_table *, int,
72386 void __user *, size_t *, loff_t *);
72387 extern int proc_dointvec_minmax(struct ctl_table *, int,
72388@@ -115,7 +115,9 @@ struct ctl_table
72389 struct ctl_table_poll *poll;
72390 void *extra1;
72391 void *extra2;
72392-};
72393+} __do_const;
72394+typedef struct ctl_table __no_const ctl_table_no_const;
72395+typedef struct ctl_table ctl_table;
72396
72397 struct ctl_node {
72398 struct rb_node node;
72399diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
72400index e2cee22..3ddb921 100644
72401--- a/include/linux/sysfs.h
72402+++ b/include/linux/sysfs.h
72403@@ -31,7 +31,8 @@ struct attribute {
72404 struct lock_class_key *key;
72405 struct lock_class_key skey;
72406 #endif
72407-};
72408+} __do_const;
72409+typedef struct attribute __no_const attribute_no_const;
72410
72411 /**
72412 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
72413@@ -59,8 +60,8 @@ struct attribute_group {
72414 umode_t (*is_visible)(struct kobject *,
72415 struct attribute *, int);
72416 struct attribute **attrs;
72417-};
72418-
72419+} __do_const;
72420+typedef struct attribute_group __no_const attribute_group_no_const;
72421
72422
72423 /**
72424@@ -107,7 +108,8 @@ struct bin_attribute {
72425 char *, loff_t, size_t);
72426 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
72427 struct vm_area_struct *vma);
72428-};
72429+} __do_const;
72430+typedef struct bin_attribute __no_const bin_attribute_no_const;
72431
72432 /**
72433 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
72434diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
72435index 7faf933..9b85a0c 100644
72436--- a/include/linux/sysrq.h
72437+++ b/include/linux/sysrq.h
72438@@ -16,6 +16,7 @@
72439
72440 #include <linux/errno.h>
72441 #include <linux/types.h>
72442+#include <linux/compiler.h>
72443
72444 /* Enable/disable SYSRQ support by default (0==no, 1==yes). */
72445 #define SYSRQ_DEFAULT_ENABLE 1
72446@@ -36,7 +37,7 @@ struct sysrq_key_op {
72447 char *help_msg;
72448 char *action_msg;
72449 int enable_mask;
72450-};
72451+} __do_const;
72452
72453 #ifdef CONFIG_MAGIC_SYSRQ
72454
72455diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
72456index e7e0473..7989295 100644
72457--- a/include/linux/thread_info.h
72458+++ b/include/linux/thread_info.h
72459@@ -148,6 +148,15 @@ static inline bool test_and_clear_restore_sigmask(void)
72460 #error "no set_restore_sigmask() provided and default one won't work"
72461 #endif
72462
72463+extern void __check_object_size(const void *ptr, unsigned long n, bool to_user);
72464+static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
72465+{
72466+#ifndef CONFIG_PAX_USERCOPY_DEBUG
72467+ if (!__builtin_constant_p(n))
72468+#endif
72469+ __check_object_size(ptr, n, to_user);
72470+}
72471+
72472 #endif /* __KERNEL__ */
72473
72474 #endif /* _LINUX_THREAD_INFO_H */
72475diff --git a/include/linux/tty.h b/include/linux/tty.h
72476index 8780bd2..d1ae08b 100644
72477--- a/include/linux/tty.h
72478+++ b/include/linux/tty.h
72479@@ -194,7 +194,7 @@ struct tty_port {
72480 const struct tty_port_operations *ops; /* Port operations */
72481 spinlock_t lock; /* Lock protecting tty field */
72482 int blocked_open; /* Waiting to open */
72483- int count; /* Usage count */
72484+ atomic_t count; /* Usage count */
72485 wait_queue_head_t open_wait; /* Open waiters */
72486 wait_queue_head_t close_wait; /* Close waiters */
72487 wait_queue_head_t delta_msr_wait; /* Modem status change */
72488@@ -550,7 +550,7 @@ extern int tty_port_open(struct tty_port *port,
72489 struct tty_struct *tty, struct file *filp);
72490 static inline int tty_port_users(struct tty_port *port)
72491 {
72492- return port->count + port->blocked_open;
72493+ return atomic_read(&port->count) + port->blocked_open;
72494 }
72495
72496 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
72497diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
72498index 756a609..b302dd6 100644
72499--- a/include/linux/tty_driver.h
72500+++ b/include/linux/tty_driver.h
72501@@ -285,7 +285,7 @@ struct tty_operations {
72502 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
72503 #endif
72504 const struct file_operations *proc_fops;
72505-};
72506+} __do_const;
72507
72508 struct tty_driver {
72509 int magic; /* magic number for this structure */
72510diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
72511index 58390c7..95e214c 100644
72512--- a/include/linux/tty_ldisc.h
72513+++ b/include/linux/tty_ldisc.h
72514@@ -146,7 +146,7 @@ struct tty_ldisc_ops {
72515
72516 struct module *owner;
72517
72518- int refcount;
72519+ atomic_t refcount;
72520 };
72521
72522 struct tty_ldisc {
72523diff --git a/include/linux/types.h b/include/linux/types.h
72524index 4d118ba..c3ee9bf 100644
72525--- a/include/linux/types.h
72526+++ b/include/linux/types.h
72527@@ -176,10 +176,26 @@ typedef struct {
72528 int counter;
72529 } atomic_t;
72530
72531+#ifdef CONFIG_PAX_REFCOUNT
72532+typedef struct {
72533+ int counter;
72534+} atomic_unchecked_t;
72535+#else
72536+typedef atomic_t atomic_unchecked_t;
72537+#endif
72538+
72539 #ifdef CONFIG_64BIT
72540 typedef struct {
72541 long counter;
72542 } atomic64_t;
72543+
72544+#ifdef CONFIG_PAX_REFCOUNT
72545+typedef struct {
72546+ long counter;
72547+} atomic64_unchecked_t;
72548+#else
72549+typedef atomic64_t atomic64_unchecked_t;
72550+#endif
72551 #endif
72552
72553 struct list_head {
72554diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
72555index 5ca0951..ab496a5 100644
72556--- a/include/linux/uaccess.h
72557+++ b/include/linux/uaccess.h
72558@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
72559 long ret; \
72560 mm_segment_t old_fs = get_fs(); \
72561 \
72562- set_fs(KERNEL_DS); \
72563 pagefault_disable(); \
72564- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
72565- pagefault_enable(); \
72566+ set_fs(KERNEL_DS); \
72567+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
72568 set_fs(old_fs); \
72569+ pagefault_enable(); \
72570 ret; \
72571 })
72572
72573diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
72574index 8e522cbc..aa8572d 100644
72575--- a/include/linux/uidgid.h
72576+++ b/include/linux/uidgid.h
72577@@ -197,4 +197,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
72578
72579 #endif /* CONFIG_USER_NS */
72580
72581+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
72582+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
72583+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
72584+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
72585+
72586 #endif /* _LINUX_UIDGID_H */
72587diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
72588index 99c1b4d..562e6f3 100644
72589--- a/include/linux/unaligned/access_ok.h
72590+++ b/include/linux/unaligned/access_ok.h
72591@@ -4,34 +4,34 @@
72592 #include <linux/kernel.h>
72593 #include <asm/byteorder.h>
72594
72595-static inline u16 get_unaligned_le16(const void *p)
72596+static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
72597 {
72598- return le16_to_cpup((__le16 *)p);
72599+ return le16_to_cpup((const __le16 *)p);
72600 }
72601
72602-static inline u32 get_unaligned_le32(const void *p)
72603+static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
72604 {
72605- return le32_to_cpup((__le32 *)p);
72606+ return le32_to_cpup((const __le32 *)p);
72607 }
72608
72609-static inline u64 get_unaligned_le64(const void *p)
72610+static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
72611 {
72612- return le64_to_cpup((__le64 *)p);
72613+ return le64_to_cpup((const __le64 *)p);
72614 }
72615
72616-static inline u16 get_unaligned_be16(const void *p)
72617+static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
72618 {
72619- return be16_to_cpup((__be16 *)p);
72620+ return be16_to_cpup((const __be16 *)p);
72621 }
72622
72623-static inline u32 get_unaligned_be32(const void *p)
72624+static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
72625 {
72626- return be32_to_cpup((__be32 *)p);
72627+ return be32_to_cpup((const __be32 *)p);
72628 }
72629
72630-static inline u64 get_unaligned_be64(const void *p)
72631+static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
72632 {
72633- return be64_to_cpup((__be64 *)p);
72634+ return be64_to_cpup((const __be64 *)p);
72635 }
72636
72637 static inline void put_unaligned_le16(u16 val, void *p)
72638diff --git a/include/linux/usb.h b/include/linux/usb.h
72639index a0bee5a..5533a52 100644
72640--- a/include/linux/usb.h
72641+++ b/include/linux/usb.h
72642@@ -552,7 +552,7 @@ struct usb_device {
72643 int maxchild;
72644
72645 u32 quirks;
72646- atomic_t urbnum;
72647+ atomic_unchecked_t urbnum;
72648
72649 unsigned long active_duration;
72650
72651@@ -1607,7 +1607,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
72652
72653 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
72654 __u8 request, __u8 requesttype, __u16 value, __u16 index,
72655- void *data, __u16 size, int timeout);
72656+ void *data, __u16 size, int timeout) __intentional_overflow(-1);
72657 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
72658 void *data, int len, int *actual_length, int timeout);
72659 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
72660diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
72661index e452ba6..78f8e80 100644
72662--- a/include/linux/usb/renesas_usbhs.h
72663+++ b/include/linux/usb/renesas_usbhs.h
72664@@ -39,7 +39,7 @@ enum {
72665 */
72666 struct renesas_usbhs_driver_callback {
72667 int (*notify_hotplug)(struct platform_device *pdev);
72668-};
72669+} __no_const;
72670
72671 /*
72672 * callback functions for platform
72673diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
72674index 6f8fbcf..8259001 100644
72675--- a/include/linux/vermagic.h
72676+++ b/include/linux/vermagic.h
72677@@ -25,9 +25,35 @@
72678 #define MODULE_ARCH_VERMAGIC ""
72679 #endif
72680
72681+#ifdef CONFIG_PAX_REFCOUNT
72682+#define MODULE_PAX_REFCOUNT "REFCOUNT "
72683+#else
72684+#define MODULE_PAX_REFCOUNT ""
72685+#endif
72686+
72687+#ifdef CONSTIFY_PLUGIN
72688+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
72689+#else
72690+#define MODULE_CONSTIFY_PLUGIN ""
72691+#endif
72692+
72693+#ifdef STACKLEAK_PLUGIN
72694+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
72695+#else
72696+#define MODULE_STACKLEAK_PLUGIN ""
72697+#endif
72698+
72699+#ifdef CONFIG_GRKERNSEC
72700+#define MODULE_GRSEC "GRSEC "
72701+#else
72702+#define MODULE_GRSEC ""
72703+#endif
72704+
72705 #define VERMAGIC_STRING \
72706 UTS_RELEASE " " \
72707 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
72708 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
72709- MODULE_ARCH_VERMAGIC
72710+ MODULE_ARCH_VERMAGIC \
72711+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
72712+ MODULE_GRSEC
72713
72714diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
72715index 7d5773a..541c01c 100644
72716--- a/include/linux/vmalloc.h
72717+++ b/include/linux/vmalloc.h
72718@@ -16,6 +16,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
72719 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
72720 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
72721 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
72722+
72723+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
72724+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
72725+#endif
72726+
72727 /* bits [20..32] reserved for arch specific ioremap internals */
72728
72729 /*
72730@@ -75,7 +80,7 @@ extern void *vmalloc_32_user(unsigned long size);
72731 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
72732 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
72733 unsigned long start, unsigned long end, gfp_t gfp_mask,
72734- pgprot_t prot, int node, const void *caller);
72735+ pgprot_t prot, int node, const void *caller) __size_overflow(1);
72736 extern void vfree(const void *addr);
72737
72738 extern void *vmap(struct page **pages, unsigned int count,
72739@@ -137,8 +142,8 @@ extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
72740 extern void free_vm_area(struct vm_struct *area);
72741
72742 /* for /dev/kmem */
72743-extern long vread(char *buf, char *addr, unsigned long count);
72744-extern long vwrite(char *buf, char *addr, unsigned long count);
72745+extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
72746+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
72747
72748 /*
72749 * Internals. Dont't use..
72750diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
72751index c586679..f06b389 100644
72752--- a/include/linux/vmstat.h
72753+++ b/include/linux/vmstat.h
72754@@ -90,18 +90,18 @@ static inline void vm_events_fold_cpu(int cpu)
72755 /*
72756 * Zone based page accounting with per cpu differentials.
72757 */
72758-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
72759+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
72760
72761 static inline void zone_page_state_add(long x, struct zone *zone,
72762 enum zone_stat_item item)
72763 {
72764- atomic_long_add(x, &zone->vm_stat[item]);
72765- atomic_long_add(x, &vm_stat[item]);
72766+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
72767+ atomic_long_add_unchecked(x, &vm_stat[item]);
72768 }
72769
72770 static inline unsigned long global_page_state(enum zone_stat_item item)
72771 {
72772- long x = atomic_long_read(&vm_stat[item]);
72773+ long x = atomic_long_read_unchecked(&vm_stat[item]);
72774 #ifdef CONFIG_SMP
72775 if (x < 0)
72776 x = 0;
72777@@ -112,7 +112,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
72778 static inline unsigned long zone_page_state(struct zone *zone,
72779 enum zone_stat_item item)
72780 {
72781- long x = atomic_long_read(&zone->vm_stat[item]);
72782+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
72783 #ifdef CONFIG_SMP
72784 if (x < 0)
72785 x = 0;
72786@@ -129,7 +129,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
72787 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
72788 enum zone_stat_item item)
72789 {
72790- long x = atomic_long_read(&zone->vm_stat[item]);
72791+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
72792
72793 #ifdef CONFIG_SMP
72794 int cpu;
72795@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
72796
72797 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
72798 {
72799- atomic_long_inc(&zone->vm_stat[item]);
72800- atomic_long_inc(&vm_stat[item]);
72801+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
72802+ atomic_long_inc_unchecked(&vm_stat[item]);
72803 }
72804
72805 static inline void __inc_zone_page_state(struct page *page,
72806@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
72807
72808 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
72809 {
72810- atomic_long_dec(&zone->vm_stat[item]);
72811- atomic_long_dec(&vm_stat[item]);
72812+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
72813+ atomic_long_dec_unchecked(&vm_stat[item]);
72814 }
72815
72816 static inline void __dec_zone_page_state(struct page *page,
72817diff --git a/include/linux/xattr.h b/include/linux/xattr.h
72818index fdbafc6..49dfe4f 100644
72819--- a/include/linux/xattr.h
72820+++ b/include/linux/xattr.h
72821@@ -28,7 +28,7 @@ struct xattr_handler {
72822 size_t size, int handler_flags);
72823 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
72824 size_t size, int flags, int handler_flags);
72825-};
72826+} __do_const;
72827
72828 struct xattr {
72829 char *name;
72830@@ -37,6 +37,9 @@ struct xattr {
72831 };
72832
72833 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
72834+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
72835+ssize_t pax_getxattr(struct dentry *, void *, size_t);
72836+#endif
72837 ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
72838 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
72839 int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
72840diff --git a/include/linux/zlib.h b/include/linux/zlib.h
72841index 9c5a6b4..09c9438 100644
72842--- a/include/linux/zlib.h
72843+++ b/include/linux/zlib.h
72844@@ -31,6 +31,7 @@
72845 #define _ZLIB_H
72846
72847 #include <linux/zconf.h>
72848+#include <linux/compiler.h>
72849
72850 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
72851 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
72852@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
72853
72854 /* basic functions */
72855
72856-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
72857+extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
72858 /*
72859 Returns the number of bytes that needs to be allocated for a per-
72860 stream workspace with the specified parameters. A pointer to this
72861diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
72862index 95d1c91..6798cca 100644
72863--- a/include/media/v4l2-dev.h
72864+++ b/include/media/v4l2-dev.h
72865@@ -76,7 +76,7 @@ struct v4l2_file_operations {
72866 int (*mmap) (struct file *, struct vm_area_struct *);
72867 int (*open) (struct file *);
72868 int (*release) (struct file *);
72869-};
72870+} __do_const;
72871
72872 /*
72873 * Newer version of video_device, handled by videodev2.c
72874diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
72875index adcbb20..62c2559 100644
72876--- a/include/net/9p/transport.h
72877+++ b/include/net/9p/transport.h
72878@@ -57,7 +57,7 @@ struct p9_trans_module {
72879 int (*cancel) (struct p9_client *, struct p9_req_t *req);
72880 int (*zc_request)(struct p9_client *, struct p9_req_t *,
72881 char *, char *, int , int, int, int);
72882-};
72883+} __do_const;
72884
72885 void v9fs_register_trans(struct p9_trans_module *m);
72886 void v9fs_unregister_trans(struct p9_trans_module *m);
72887diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
72888index fb94cf1..7c0c987 100644
72889--- a/include/net/bluetooth/l2cap.h
72890+++ b/include/net/bluetooth/l2cap.h
72891@@ -551,7 +551,7 @@ struct l2cap_ops {
72892 void (*defer) (struct l2cap_chan *chan);
72893 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
72894 unsigned long len, int nb);
72895-};
72896+} __do_const;
72897
72898 struct l2cap_conn {
72899 struct hci_conn *hcon;
72900diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
72901index f2ae33d..c457cf0 100644
72902--- a/include/net/caif/cfctrl.h
72903+++ b/include/net/caif/cfctrl.h
72904@@ -52,7 +52,7 @@ struct cfctrl_rsp {
72905 void (*radioset_rsp)(void);
72906 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
72907 struct cflayer *client_layer);
72908-};
72909+} __no_const;
72910
72911 /* Link Setup Parameters for CAIF-Links. */
72912 struct cfctrl_link_param {
72913@@ -101,8 +101,8 @@ struct cfctrl_request_info {
72914 struct cfctrl {
72915 struct cfsrvl serv;
72916 struct cfctrl_rsp res;
72917- atomic_t req_seq_no;
72918- atomic_t rsp_seq_no;
72919+ atomic_unchecked_t req_seq_no;
72920+ atomic_unchecked_t rsp_seq_no;
72921 struct list_head list;
72922 /* Protects from simultaneous access to first_req list */
72923 spinlock_t info_list_lock;
72924diff --git a/include/net/flow.h b/include/net/flow.h
72925index 628e11b..4c475df 100644
72926--- a/include/net/flow.h
72927+++ b/include/net/flow.h
72928@@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
72929
72930 extern void flow_cache_flush(void);
72931 extern void flow_cache_flush_deferred(void);
72932-extern atomic_t flow_cache_genid;
72933+extern atomic_unchecked_t flow_cache_genid;
72934
72935 #endif
72936diff --git a/include/net/genetlink.h b/include/net/genetlink.h
72937index 93024a4..eeb6b6e 100644
72938--- a/include/net/genetlink.h
72939+++ b/include/net/genetlink.h
72940@@ -119,7 +119,7 @@ struct genl_ops {
72941 struct netlink_callback *cb);
72942 int (*done)(struct netlink_callback *cb);
72943 struct list_head ops_list;
72944-};
72945+} __do_const;
72946
72947 extern int genl_register_family(struct genl_family *family);
72948 extern int genl_register_family_with_ops(struct genl_family *family,
72949diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
72950index 734d9b5..48a9a4b 100644
72951--- a/include/net/gro_cells.h
72952+++ b/include/net/gro_cells.h
72953@@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
72954 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
72955
72956 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
72957- atomic_long_inc(&dev->rx_dropped);
72958+ atomic_long_inc_unchecked(&dev->rx_dropped);
72959 kfree_skb(skb);
72960 return;
72961 }
72962diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
72963index de2c785..0588a6b 100644
72964--- a/include/net/inet_connection_sock.h
72965+++ b/include/net/inet_connection_sock.h
72966@@ -62,7 +62,7 @@ struct inet_connection_sock_af_ops {
72967 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
72968 int (*bind_conflict)(const struct sock *sk,
72969 const struct inet_bind_bucket *tb, bool relax);
72970-};
72971+} __do_const;
72972
72973 /** inet_connection_sock - INET connection oriented sock
72974 *
72975diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
72976index 53f464d..ba76aaa 100644
72977--- a/include/net/inetpeer.h
72978+++ b/include/net/inetpeer.h
72979@@ -47,8 +47,8 @@ struct inet_peer {
72980 */
72981 union {
72982 struct {
72983- atomic_t rid; /* Frag reception counter */
72984- atomic_t ip_id_count; /* IP ID for the next packet */
72985+ atomic_unchecked_t rid; /* Frag reception counter */
72986+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
72987 };
72988 struct rcu_head rcu;
72989 struct inet_peer *gc_next;
72990@@ -182,11 +182,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
72991 more++;
72992 inet_peer_refcheck(p);
72993 do {
72994- old = atomic_read(&p->ip_id_count);
72995+ old = atomic_read_unchecked(&p->ip_id_count);
72996 new = old + more;
72997 if (!new)
72998 new = 1;
72999- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
73000+ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
73001 return new;
73002 }
73003
73004diff --git a/include/net/ip.h b/include/net/ip.h
73005index a68f838..74518ab 100644
73006--- a/include/net/ip.h
73007+++ b/include/net/ip.h
73008@@ -202,7 +202,7 @@ extern struct local_ports {
73009 } sysctl_local_ports;
73010 extern void inet_get_local_port_range(int *low, int *high);
73011
73012-extern unsigned long *sysctl_local_reserved_ports;
73013+extern unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
73014 static inline int inet_is_reserved_local_port(int port)
73015 {
73016 return test_bit(port, sysctl_local_reserved_ports);
73017diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
73018index e49db91..76a81de 100644
73019--- a/include/net/ip_fib.h
73020+++ b/include/net/ip_fib.h
73021@@ -167,7 +167,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
73022
73023 #define FIB_RES_SADDR(net, res) \
73024 ((FIB_RES_NH(res).nh_saddr_genid == \
73025- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
73026+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
73027 FIB_RES_NH(res).nh_saddr : \
73028 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
73029 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
73030diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
73031index 4c062cc..3562c31 100644
73032--- a/include/net/ip_vs.h
73033+++ b/include/net/ip_vs.h
73034@@ -612,7 +612,7 @@ struct ip_vs_conn {
73035 struct ip_vs_conn *control; /* Master control connection */
73036 atomic_t n_control; /* Number of controlled ones */
73037 struct ip_vs_dest *dest; /* real server */
73038- atomic_t in_pkts; /* incoming packet counter */
73039+ atomic_unchecked_t in_pkts; /* incoming packet counter */
73040
73041 /* packet transmitter for different forwarding methods. If it
73042 mangles the packet, it must return NF_DROP or better NF_STOLEN,
73043@@ -761,7 +761,7 @@ struct ip_vs_dest {
73044 __be16 port; /* port number of the server */
73045 union nf_inet_addr addr; /* IP address of the server */
73046 volatile unsigned int flags; /* dest status flags */
73047- atomic_t conn_flags; /* flags to copy to conn */
73048+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
73049 atomic_t weight; /* server weight */
73050
73051 atomic_t refcnt; /* reference counter */
73052@@ -1013,11 +1013,11 @@ struct netns_ipvs {
73053 /* ip_vs_lblc */
73054 int sysctl_lblc_expiration;
73055 struct ctl_table_header *lblc_ctl_header;
73056- struct ctl_table *lblc_ctl_table;
73057+ ctl_table_no_const *lblc_ctl_table;
73058 /* ip_vs_lblcr */
73059 int sysctl_lblcr_expiration;
73060 struct ctl_table_header *lblcr_ctl_header;
73061- struct ctl_table *lblcr_ctl_table;
73062+ ctl_table_no_const *lblcr_ctl_table;
73063 /* ip_vs_est */
73064 struct list_head est_list; /* estimator list */
73065 spinlock_t est_lock;
73066diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
73067index 80ffde3..968b0f4 100644
73068--- a/include/net/irda/ircomm_tty.h
73069+++ b/include/net/irda/ircomm_tty.h
73070@@ -35,6 +35,7 @@
73071 #include <linux/termios.h>
73072 #include <linux/timer.h>
73073 #include <linux/tty.h> /* struct tty_struct */
73074+#include <asm/local.h>
73075
73076 #include <net/irda/irias_object.h>
73077 #include <net/irda/ircomm_core.h>
73078diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
73079index 714cc9a..ea05f3e 100644
73080--- a/include/net/iucv/af_iucv.h
73081+++ b/include/net/iucv/af_iucv.h
73082@@ -149,7 +149,7 @@ struct iucv_skb_cb {
73083 struct iucv_sock_list {
73084 struct hlist_head head;
73085 rwlock_t lock;
73086- atomic_t autobind_name;
73087+ atomic_unchecked_t autobind_name;
73088 };
73089
73090 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
73091diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
73092index df83f69..9b640b8 100644
73093--- a/include/net/llc_c_ac.h
73094+++ b/include/net/llc_c_ac.h
73095@@ -87,7 +87,7 @@
73096 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
73097 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
73098
73099-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
73100+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
73101
73102 extern int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
73103 extern int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
73104diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
73105index 6ca3113..f8026dd 100644
73106--- a/include/net/llc_c_ev.h
73107+++ b/include/net/llc_c_ev.h
73108@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
73109 return (struct llc_conn_state_ev *)skb->cb;
73110 }
73111
73112-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
73113-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
73114+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
73115+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
73116
73117 extern int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
73118 extern int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
73119diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
73120index 0e79cfb..f46db31 100644
73121--- a/include/net/llc_c_st.h
73122+++ b/include/net/llc_c_st.h
73123@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
73124 u8 next_state;
73125 llc_conn_ev_qfyr_t *ev_qualifiers;
73126 llc_conn_action_t *ev_actions;
73127-};
73128+} __do_const;
73129
73130 struct llc_conn_state {
73131 u8 current_state;
73132diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
73133index 37a3bbd..55a4241 100644
73134--- a/include/net/llc_s_ac.h
73135+++ b/include/net/llc_s_ac.h
73136@@ -23,7 +23,7 @@
73137 #define SAP_ACT_TEST_IND 9
73138
73139 /* All action functions must look like this */
73140-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
73141+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
73142
73143 extern int llc_sap_action_unitdata_ind(struct llc_sap *sap,
73144 struct sk_buff *skb);
73145diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
73146index 567c681..cd73ac0 100644
73147--- a/include/net/llc_s_st.h
73148+++ b/include/net/llc_s_st.h
73149@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
73150 llc_sap_ev_t ev;
73151 u8 next_state;
73152 llc_sap_action_t *ev_actions;
73153-};
73154+} __do_const;
73155
73156 struct llc_sap_state {
73157 u8 curr_state;
73158diff --git a/include/net/mac80211.h b/include/net/mac80211.h
73159index 885898a..cdace34 100644
73160--- a/include/net/mac80211.h
73161+++ b/include/net/mac80211.h
73162@@ -4205,7 +4205,7 @@ struct rate_control_ops {
73163 void (*add_sta_debugfs)(void *priv, void *priv_sta,
73164 struct dentry *dir);
73165 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
73166-};
73167+} __do_const;
73168
73169 static inline int rate_supported(struct ieee80211_sta *sta,
73170 enum ieee80211_band band,
73171diff --git a/include/net/neighbour.h b/include/net/neighbour.h
73172index 7e748ad..5c6229b 100644
73173--- a/include/net/neighbour.h
73174+++ b/include/net/neighbour.h
73175@@ -123,7 +123,7 @@ struct neigh_ops {
73176 void (*error_report)(struct neighbour *, struct sk_buff *);
73177 int (*output)(struct neighbour *, struct sk_buff *);
73178 int (*connected_output)(struct neighbour *, struct sk_buff *);
73179-};
73180+} __do_const;
73181
73182 struct pneigh_entry {
73183 struct pneigh_entry *next;
73184diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
73185index b176978..ea169f4 100644
73186--- a/include/net/net_namespace.h
73187+++ b/include/net/net_namespace.h
73188@@ -117,7 +117,7 @@ struct net {
73189 #endif
73190 struct netns_ipvs *ipvs;
73191 struct sock *diag_nlsk;
73192- atomic_t rt_genid;
73193+ atomic_unchecked_t rt_genid;
73194 };
73195
73196 /*
73197@@ -274,7 +274,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
73198 #define __net_init __init
73199 #define __net_exit __exit_refok
73200 #define __net_initdata __initdata
73201+#ifdef CONSTIFY_PLUGIN
73202 #define __net_initconst __initconst
73203+#else
73204+#define __net_initconst __initdata
73205+#endif
73206 #endif
73207
73208 struct pernet_operations {
73209@@ -284,7 +288,7 @@ struct pernet_operations {
73210 void (*exit_batch)(struct list_head *net_exit_list);
73211 int *id;
73212 size_t size;
73213-};
73214+} __do_const;
73215
73216 /*
73217 * Use these carefully. If you implement a network device and it
73218@@ -332,12 +336,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
73219
73220 static inline int rt_genid(struct net *net)
73221 {
73222- return atomic_read(&net->rt_genid);
73223+ return atomic_read_unchecked(&net->rt_genid);
73224 }
73225
73226 static inline void rt_genid_bump(struct net *net)
73227 {
73228- atomic_inc(&net->rt_genid);
73229+ atomic_inc_unchecked(&net->rt_genid);
73230 }
73231
73232 #endif /* __NET_NET_NAMESPACE_H */
73233diff --git a/include/net/netdma.h b/include/net/netdma.h
73234index 8ba8ce2..99b7fff 100644
73235--- a/include/net/netdma.h
73236+++ b/include/net/netdma.h
73237@@ -24,7 +24,7 @@
73238 #include <linux/dmaengine.h>
73239 #include <linux/skbuff.h>
73240
73241-int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
73242+int __intentional_overflow(3,5) dma_skb_copy_datagram_iovec(struct dma_chan* chan,
73243 struct sk_buff *skb, int offset, struct iovec *to,
73244 size_t len, struct dma_pinned_list *pinned_list);
73245
73246diff --git a/include/net/netlink.h b/include/net/netlink.h
73247index 9690b0f..87aded7 100644
73248--- a/include/net/netlink.h
73249+++ b/include/net/netlink.h
73250@@ -534,7 +534,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
73251 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
73252 {
73253 if (mark)
73254- skb_trim(skb, (unsigned char *) mark - skb->data);
73255+ skb_trim(skb, (const unsigned char *) mark - skb->data);
73256 }
73257
73258 /**
73259diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
73260index c9c0c53..53f24c3 100644
73261--- a/include/net/netns/conntrack.h
73262+++ b/include/net/netns/conntrack.h
73263@@ -12,10 +12,10 @@ struct nf_conntrack_ecache;
73264 struct nf_proto_net {
73265 #ifdef CONFIG_SYSCTL
73266 struct ctl_table_header *ctl_table_header;
73267- struct ctl_table *ctl_table;
73268+ ctl_table_no_const *ctl_table;
73269 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
73270 struct ctl_table_header *ctl_compat_header;
73271- struct ctl_table *ctl_compat_table;
73272+ ctl_table_no_const *ctl_compat_table;
73273 #endif
73274 #endif
73275 unsigned int users;
73276@@ -58,7 +58,7 @@ struct nf_ip_net {
73277 struct nf_icmp_net icmpv6;
73278 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
73279 struct ctl_table_header *ctl_table_header;
73280- struct ctl_table *ctl_table;
73281+ ctl_table_no_const *ctl_table;
73282 #endif
73283 };
73284
73285diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
73286index 2ba9de8..47bd6c7 100644
73287--- a/include/net/netns/ipv4.h
73288+++ b/include/net/netns/ipv4.h
73289@@ -67,7 +67,7 @@ struct netns_ipv4 {
73290 kgid_t sysctl_ping_group_range[2];
73291 long sysctl_tcp_mem[3];
73292
73293- atomic_t dev_addr_genid;
73294+ atomic_unchecked_t dev_addr_genid;
73295
73296 #ifdef CONFIG_IP_MROUTE
73297 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
73298diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
73299index 005e2c2..023d340 100644
73300--- a/include/net/netns/ipv6.h
73301+++ b/include/net/netns/ipv6.h
73302@@ -71,7 +71,7 @@ struct netns_ipv6 {
73303 struct fib_rules_ops *mr6_rules_ops;
73304 #endif
73305 #endif
73306- atomic_t dev_addr_genid;
73307+ atomic_unchecked_t dev_addr_genid;
73308 };
73309
73310 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
73311diff --git a/include/net/protocol.h b/include/net/protocol.h
73312index 047c047..b9dad15 100644
73313--- a/include/net/protocol.h
73314+++ b/include/net/protocol.h
73315@@ -44,7 +44,7 @@ struct net_protocol {
73316 void (*err_handler)(struct sk_buff *skb, u32 info);
73317 unsigned int no_policy:1,
73318 netns_ok:1;
73319-};
73320+} __do_const;
73321
73322 #if IS_ENABLED(CONFIG_IPV6)
73323 struct inet6_protocol {
73324@@ -57,7 +57,7 @@ struct inet6_protocol {
73325 u8 type, u8 code, int offset,
73326 __be32 info);
73327 unsigned int flags; /* INET6_PROTO_xxx */
73328-};
73329+} __do_const;
73330
73331 #define INET6_PROTO_NOPOLICY 0x1
73332 #define INET6_PROTO_FINAL 0x2
73333diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
73334index 7026648..584cc8c 100644
73335--- a/include/net/rtnetlink.h
73336+++ b/include/net/rtnetlink.h
73337@@ -81,7 +81,7 @@ struct rtnl_link_ops {
73338 const struct net_device *dev);
73339 unsigned int (*get_num_tx_queues)(void);
73340 unsigned int (*get_num_rx_queues)(void);
73341-};
73342+} __do_const;
73343
73344 extern int __rtnl_link_register(struct rtnl_link_ops *ops);
73345 extern void __rtnl_link_unregister(struct rtnl_link_ops *ops);
73346diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
73347index cd89510..d67810f 100644
73348--- a/include/net/sctp/sctp.h
73349+++ b/include/net/sctp/sctp.h
73350@@ -330,9 +330,9 @@ do { \
73351
73352 #else /* SCTP_DEBUG */
73353
73354-#define SCTP_DEBUG_PRINTK(whatever...)
73355-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
73356-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
73357+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
73358+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
73359+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
73360 #define SCTP_ENABLE_DEBUG
73361 #define SCTP_DISABLE_DEBUG
73362 #define SCTP_ASSERT(expr, str, func)
73363diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
73364index 2a82d13..62a31c2 100644
73365--- a/include/net/sctp/sm.h
73366+++ b/include/net/sctp/sm.h
73367@@ -87,7 +87,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
73368 typedef struct {
73369 sctp_state_fn_t *fn;
73370 const char *name;
73371-} sctp_sm_table_entry_t;
73372+} __do_const sctp_sm_table_entry_t;
73373
73374 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
73375 * currently in use.
73376@@ -299,7 +299,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
73377 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
73378
73379 /* Extern declarations for major data structures. */
73380-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
73381+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
73382
73383
73384 /* Get the size of a DATA chunk payload. */
73385diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
73386index 1bd4c41..9250b5b 100644
73387--- a/include/net/sctp/structs.h
73388+++ b/include/net/sctp/structs.h
73389@@ -516,7 +516,7 @@ struct sctp_pf {
73390 struct sctp_association *asoc);
73391 void (*addr_v4map) (struct sctp_sock *, union sctp_addr *);
73392 struct sctp_af *af;
73393-};
73394+} __do_const;
73395
73396
73397 /* Structure to track chunk fragments that have been acked, but peer
73398diff --git a/include/net/sock.h b/include/net/sock.h
73399index 66772cf..25bc45b 100644
73400--- a/include/net/sock.h
73401+++ b/include/net/sock.h
73402@@ -325,7 +325,7 @@ struct sock {
73403 #ifdef CONFIG_RPS
73404 __u32 sk_rxhash;
73405 #endif
73406- atomic_t sk_drops;
73407+ atomic_unchecked_t sk_drops;
73408 int sk_rcvbuf;
73409
73410 struct sk_filter __rcu *sk_filter;
73411@@ -1797,7 +1797,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
73412 }
73413
73414 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
73415- char __user *from, char *to,
73416+ char __user *from, unsigned char *to,
73417 int copy, int offset)
73418 {
73419 if (skb->ip_summed == CHECKSUM_NONE) {
73420@@ -2056,7 +2056,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
73421 }
73422 }
73423
73424-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
73425+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
73426
73427 /**
73428 * sk_page_frag - return an appropriate page_frag
73429diff --git a/include/net/tcp.h b/include/net/tcp.h
73430index 5bba80f..8520a82 100644
73431--- a/include/net/tcp.h
73432+++ b/include/net/tcp.h
73433@@ -524,7 +524,7 @@ extern void tcp_retransmit_timer(struct sock *sk);
73434 extern void tcp_xmit_retransmit_queue(struct sock *);
73435 extern void tcp_simple_retransmit(struct sock *);
73436 extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
73437-extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
73438+extern int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
73439
73440 extern void tcp_send_probe0(struct sock *);
73441 extern void tcp_send_partial(struct sock *);
73442@@ -697,8 +697,8 @@ struct tcp_skb_cb {
73443 struct inet6_skb_parm h6;
73444 #endif
73445 } header; /* For incoming frames */
73446- __u32 seq; /* Starting sequence number */
73447- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
73448+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
73449+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
73450 __u32 when; /* used to compute rtt's */
73451 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
73452
73453@@ -712,7 +712,7 @@ struct tcp_skb_cb {
73454
73455 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
73456 /* 1 byte hole */
73457- __u32 ack_seq; /* Sequence number ACK'd */
73458+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
73459 };
73460
73461 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
73462diff --git a/include/net/udp.h b/include/net/udp.h
73463index 065f379..ad99eed 100644
73464--- a/include/net/udp.h
73465+++ b/include/net/udp.h
73466@@ -181,6 +181,7 @@ extern int udp_get_port(struct sock *sk, unsigned short snum,
73467 extern void udp_err(struct sk_buff *, u32);
73468 extern int udp_sendmsg(struct kiocb *iocb, struct sock *sk,
73469 struct msghdr *msg, size_t len);
73470+extern int udp_push_pending_frames(struct sock *sk);
73471 extern void udp_flush_pending_frames(struct sock *sk);
73472 extern int udp_rcv(struct sk_buff *skb);
73473 extern int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
73474diff --git a/include/net/xfrm.h b/include/net/xfrm.h
73475index 94ce082..62b278d 100644
73476--- a/include/net/xfrm.h
73477+++ b/include/net/xfrm.h
73478@@ -305,7 +305,7 @@ struct xfrm_policy_afinfo {
73479 struct net_device *dev,
73480 const struct flowi *fl);
73481 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
73482-};
73483+} __do_const;
73484
73485 extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
73486 extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
73487@@ -341,7 +341,7 @@ struct xfrm_state_afinfo {
73488 struct sk_buff *skb);
73489 int (*transport_finish)(struct sk_buff *skb,
73490 int async);
73491-};
73492+} __do_const;
73493
73494 extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
73495 extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
73496@@ -424,7 +424,7 @@ struct xfrm_mode {
73497 struct module *owner;
73498 unsigned int encap;
73499 int flags;
73500-};
73501+} __do_const;
73502
73503 /* Flags for xfrm_mode. */
73504 enum {
73505@@ -521,7 +521,7 @@ struct xfrm_policy {
73506 struct timer_list timer;
73507
73508 struct flow_cache_object flo;
73509- atomic_t genid;
73510+ atomic_unchecked_t genid;
73511 u32 priority;
73512 u32 index;
73513 struct xfrm_mark mark;
73514diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
73515index 1a046b1..ee0bef0 100644
73516--- a/include/rdma/iw_cm.h
73517+++ b/include/rdma/iw_cm.h
73518@@ -122,7 +122,7 @@ struct iw_cm_verbs {
73519 int backlog);
73520
73521 int (*destroy_listen)(struct iw_cm_id *cm_id);
73522-};
73523+} __no_const;
73524
73525 /**
73526 * iw_create_cm_id - Create an IW CM identifier.
73527diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
73528index e1379b4..67eafbe 100644
73529--- a/include/scsi/libfc.h
73530+++ b/include/scsi/libfc.h
73531@@ -762,6 +762,7 @@ struct libfc_function_template {
73532 */
73533 void (*disc_stop_final) (struct fc_lport *);
73534 };
73535+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
73536
73537 /**
73538 * struct fc_disc - Discovery context
73539@@ -866,7 +867,7 @@ struct fc_lport {
73540 struct fc_vport *vport;
73541
73542 /* Operational Information */
73543- struct libfc_function_template tt;
73544+ libfc_function_template_no_const tt;
73545 u8 link_up;
73546 u8 qfull;
73547 enum fc_lport_state state;
73548diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
73549index cc64587..608f523 100644
73550--- a/include/scsi/scsi_device.h
73551+++ b/include/scsi/scsi_device.h
73552@@ -171,9 +171,9 @@ struct scsi_device {
73553 unsigned int max_device_blocked; /* what device_blocked counts down from */
73554 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
73555
73556- atomic_t iorequest_cnt;
73557- atomic_t iodone_cnt;
73558- atomic_t ioerr_cnt;
73559+ atomic_unchecked_t iorequest_cnt;
73560+ atomic_unchecked_t iodone_cnt;
73561+ atomic_unchecked_t ioerr_cnt;
73562
73563 struct device sdev_gendev,
73564 sdev_dev;
73565diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
73566index b797e8f..8e2c3aa 100644
73567--- a/include/scsi/scsi_transport_fc.h
73568+++ b/include/scsi/scsi_transport_fc.h
73569@@ -751,7 +751,8 @@ struct fc_function_template {
73570 unsigned long show_host_system_hostname:1;
73571
73572 unsigned long disable_target_scan:1;
73573-};
73574+} __do_const;
73575+typedef struct fc_function_template __no_const fc_function_template_no_const;
73576
73577
73578 /**
73579diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
73580index 9031a26..750d592 100644
73581--- a/include/sound/compress_driver.h
73582+++ b/include/sound/compress_driver.h
73583@@ -128,7 +128,7 @@ struct snd_compr_ops {
73584 struct snd_compr_caps *caps);
73585 int (*get_codec_caps) (struct snd_compr_stream *stream,
73586 struct snd_compr_codec_caps *codec);
73587-};
73588+} __no_const;
73589
73590 /**
73591 * struct snd_compr: Compressed device
73592diff --git a/include/sound/soc.h b/include/sound/soc.h
73593index 85c1522..f44bad1 100644
73594--- a/include/sound/soc.h
73595+++ b/include/sound/soc.h
73596@@ -781,7 +781,7 @@ struct snd_soc_codec_driver {
73597 /* probe ordering - for components with runtime dependencies */
73598 int probe_order;
73599 int remove_order;
73600-};
73601+} __do_const;
73602
73603 /* SoC platform interface */
73604 struct snd_soc_platform_driver {
73605@@ -827,7 +827,7 @@ struct snd_soc_platform_driver {
73606 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
73607 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
73608 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
73609-};
73610+} __do_const;
73611
73612 struct snd_soc_platform {
73613 const char *name;
73614diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
73615index 4ea4f98..a63629b 100644
73616--- a/include/target/target_core_base.h
73617+++ b/include/target/target_core_base.h
73618@@ -653,7 +653,7 @@ struct se_device {
73619 spinlock_t stats_lock;
73620 /* Active commands on this virtual SE device */
73621 atomic_t simple_cmds;
73622- atomic_t dev_ordered_id;
73623+ atomic_unchecked_t dev_ordered_id;
73624 atomic_t dev_ordered_sync;
73625 atomic_t dev_qf_count;
73626 int export_count;
73627diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
73628new file mode 100644
73629index 0000000..fb634b7
73630--- /dev/null
73631+++ b/include/trace/events/fs.h
73632@@ -0,0 +1,53 @@
73633+#undef TRACE_SYSTEM
73634+#define TRACE_SYSTEM fs
73635+
73636+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
73637+#define _TRACE_FS_H
73638+
73639+#include <linux/fs.h>
73640+#include <linux/tracepoint.h>
73641+
73642+TRACE_EVENT(do_sys_open,
73643+
73644+ TP_PROTO(const char *filename, int flags, int mode),
73645+
73646+ TP_ARGS(filename, flags, mode),
73647+
73648+ TP_STRUCT__entry(
73649+ __string( filename, filename )
73650+ __field( int, flags )
73651+ __field( int, mode )
73652+ ),
73653+
73654+ TP_fast_assign(
73655+ __assign_str(filename, filename);
73656+ __entry->flags = flags;
73657+ __entry->mode = mode;
73658+ ),
73659+
73660+ TP_printk("\"%s\" %x %o",
73661+ __get_str(filename), __entry->flags, __entry->mode)
73662+);
73663+
73664+TRACE_EVENT(open_exec,
73665+
73666+ TP_PROTO(const char *filename),
73667+
73668+ TP_ARGS(filename),
73669+
73670+ TP_STRUCT__entry(
73671+ __string( filename, filename )
73672+ ),
73673+
73674+ TP_fast_assign(
73675+ __assign_str(filename, filename);
73676+ ),
73677+
73678+ TP_printk("\"%s\"",
73679+ __get_str(filename))
73680+);
73681+
73682+#endif /* _TRACE_FS_H */
73683+
73684+/* This part must be outside protection */
73685+#include <trace/define_trace.h>
73686diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
73687index 1c09820..7f5ec79 100644
73688--- a/include/trace/events/irq.h
73689+++ b/include/trace/events/irq.h
73690@@ -36,7 +36,7 @@ struct softirq_action;
73691 */
73692 TRACE_EVENT(irq_handler_entry,
73693
73694- TP_PROTO(int irq, struct irqaction *action),
73695+ TP_PROTO(int irq, const struct irqaction *action),
73696
73697 TP_ARGS(irq, action),
73698
73699@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
73700 */
73701 TRACE_EVENT(irq_handler_exit,
73702
73703- TP_PROTO(int irq, struct irqaction *action, int ret),
73704+ TP_PROTO(int irq, const struct irqaction *action, int ret),
73705
73706 TP_ARGS(irq, action, ret),
73707
73708diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
73709index 7caf44c..23c6f27 100644
73710--- a/include/uapi/linux/a.out.h
73711+++ b/include/uapi/linux/a.out.h
73712@@ -39,6 +39,14 @@ enum machine_type {
73713 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
73714 };
73715
73716+/* Constants for the N_FLAGS field */
73717+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
73718+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
73719+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
73720+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
73721+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
73722+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
73723+
73724 #if !defined (N_MAGIC)
73725 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
73726 #endif
73727diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
73728index d876736..ccce5c0 100644
73729--- a/include/uapi/linux/byteorder/little_endian.h
73730+++ b/include/uapi/linux/byteorder/little_endian.h
73731@@ -42,51 +42,51 @@
73732
73733 static inline __le64 __cpu_to_le64p(const __u64 *p)
73734 {
73735- return (__force __le64)*p;
73736+ return (__force const __le64)*p;
73737 }
73738-static inline __u64 __le64_to_cpup(const __le64 *p)
73739+static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
73740 {
73741- return (__force __u64)*p;
73742+ return (__force const __u64)*p;
73743 }
73744 static inline __le32 __cpu_to_le32p(const __u32 *p)
73745 {
73746- return (__force __le32)*p;
73747+ return (__force const __le32)*p;
73748 }
73749 static inline __u32 __le32_to_cpup(const __le32 *p)
73750 {
73751- return (__force __u32)*p;
73752+ return (__force const __u32)*p;
73753 }
73754 static inline __le16 __cpu_to_le16p(const __u16 *p)
73755 {
73756- return (__force __le16)*p;
73757+ return (__force const __le16)*p;
73758 }
73759 static inline __u16 __le16_to_cpup(const __le16 *p)
73760 {
73761- return (__force __u16)*p;
73762+ return (__force const __u16)*p;
73763 }
73764 static inline __be64 __cpu_to_be64p(const __u64 *p)
73765 {
73766- return (__force __be64)__swab64p(p);
73767+ return (__force const __be64)__swab64p(p);
73768 }
73769 static inline __u64 __be64_to_cpup(const __be64 *p)
73770 {
73771- return __swab64p((__u64 *)p);
73772+ return __swab64p((const __u64 *)p);
73773 }
73774 static inline __be32 __cpu_to_be32p(const __u32 *p)
73775 {
73776- return (__force __be32)__swab32p(p);
73777+ return (__force const __be32)__swab32p(p);
73778 }
73779-static inline __u32 __be32_to_cpup(const __be32 *p)
73780+static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
73781 {
73782- return __swab32p((__u32 *)p);
73783+ return __swab32p((const __u32 *)p);
73784 }
73785 static inline __be16 __cpu_to_be16p(const __u16 *p)
73786 {
73787- return (__force __be16)__swab16p(p);
73788+ return (__force const __be16)__swab16p(p);
73789 }
73790 static inline __u16 __be16_to_cpup(const __be16 *p)
73791 {
73792- return __swab16p((__u16 *)p);
73793+ return __swab16p((const __u16 *)p);
73794 }
73795 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
73796 #define __le64_to_cpus(x) do { (void)(x); } while (0)
73797diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
73798index ef6103b..d4e65dd 100644
73799--- a/include/uapi/linux/elf.h
73800+++ b/include/uapi/linux/elf.h
73801@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
73802 #define PT_GNU_EH_FRAME 0x6474e550
73803
73804 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
73805+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
73806+
73807+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
73808+
73809+/* Constants for the e_flags field */
73810+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
73811+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
73812+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
73813+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
73814+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
73815+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
73816
73817 /*
73818 * Extended Numbering
73819@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
73820 #define DT_DEBUG 21
73821 #define DT_TEXTREL 22
73822 #define DT_JMPREL 23
73823+#define DT_FLAGS 30
73824+ #define DF_TEXTREL 0x00000004
73825 #define DT_ENCODING 32
73826 #define OLD_DT_LOOS 0x60000000
73827 #define DT_LOOS 0x6000000d
73828@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
73829 #define PF_W 0x2
73830 #define PF_X 0x1
73831
73832+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
73833+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
73834+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
73835+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
73836+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
73837+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
73838+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
73839+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
73840+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
73841+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
73842+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
73843+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
73844+
73845 typedef struct elf32_phdr{
73846 Elf32_Word p_type;
73847 Elf32_Off p_offset;
73848@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
73849 #define EI_OSABI 7
73850 #define EI_PAD 8
73851
73852+#define EI_PAX 14
73853+
73854 #define ELFMAG0 0x7f /* EI_MAG */
73855 #define ELFMAG1 'E'
73856 #define ELFMAG2 'L'
73857diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
73858index aa169c4..6a2771d 100644
73859--- a/include/uapi/linux/personality.h
73860+++ b/include/uapi/linux/personality.h
73861@@ -30,6 +30,7 @@ enum {
73862 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
73863 ADDR_NO_RANDOMIZE | \
73864 ADDR_COMPAT_LAYOUT | \
73865+ ADDR_LIMIT_3GB | \
73866 MMAP_PAGE_ZERO)
73867
73868 /*
73869diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
73870index 7530e74..e714828 100644
73871--- a/include/uapi/linux/screen_info.h
73872+++ b/include/uapi/linux/screen_info.h
73873@@ -43,7 +43,8 @@ struct screen_info {
73874 __u16 pages; /* 0x32 */
73875 __u16 vesa_attributes; /* 0x34 */
73876 __u32 capabilities; /* 0x36 */
73877- __u8 _reserved[6]; /* 0x3a */
73878+ __u16 vesapm_size; /* 0x3a */
73879+ __u8 _reserved[4]; /* 0x3c */
73880 } __attribute__((packed));
73881
73882 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
73883diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
73884index 0e011eb..82681b1 100644
73885--- a/include/uapi/linux/swab.h
73886+++ b/include/uapi/linux/swab.h
73887@@ -43,7 +43,7 @@
73888 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
73889 */
73890
73891-static inline __attribute_const__ __u16 __fswab16(__u16 val)
73892+static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
73893 {
73894 #ifdef __HAVE_BUILTIN_BSWAP16__
73895 return __builtin_bswap16(val);
73896@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
73897 #endif
73898 }
73899
73900-static inline __attribute_const__ __u32 __fswab32(__u32 val)
73901+static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
73902 {
73903 #ifdef __HAVE_BUILTIN_BSWAP32__
73904 return __builtin_bswap32(val);
73905@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
73906 #endif
73907 }
73908
73909-static inline __attribute_const__ __u64 __fswab64(__u64 val)
73910+static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
73911 {
73912 #ifdef __HAVE_BUILTIN_BSWAP64__
73913 return __builtin_bswap64(val);
73914diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
73915index 6d67213..8dab561 100644
73916--- a/include/uapi/linux/sysctl.h
73917+++ b/include/uapi/linux/sysctl.h
73918@@ -155,7 +155,11 @@ enum
73919 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
73920 };
73921
73922-
73923+#ifdef CONFIG_PAX_SOFTMODE
73924+enum {
73925+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
73926+};
73927+#endif
73928
73929 /* CTL_VM names: */
73930 enum
73931diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
73932index e4629b9..6958086 100644
73933--- a/include/uapi/linux/xattr.h
73934+++ b/include/uapi/linux/xattr.h
73935@@ -63,5 +63,9 @@
73936 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
73937 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
73938
73939+/* User namespace */
73940+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
73941+#define XATTR_PAX_FLAGS_SUFFIX "flags"
73942+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
73943
73944 #endif /* _UAPI_LINUX_XATTR_H */
73945diff --git a/include/video/udlfb.h b/include/video/udlfb.h
73946index f9466fa..f4e2b81 100644
73947--- a/include/video/udlfb.h
73948+++ b/include/video/udlfb.h
73949@@ -53,10 +53,10 @@ struct dlfb_data {
73950 u32 pseudo_palette[256];
73951 int blank_mode; /*one of FB_BLANK_ */
73952 /* blit-only rendering path metrics, exposed through sysfs */
73953- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
73954- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
73955- atomic_t bytes_sent; /* to usb, after compression including overhead */
73956- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
73957+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
73958+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
73959+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
73960+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
73961 };
73962
73963 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
73964diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
73965index 1a91850..28573f8 100644
73966--- a/include/video/uvesafb.h
73967+++ b/include/video/uvesafb.h
73968@@ -122,6 +122,7 @@ struct uvesafb_par {
73969 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
73970 u8 pmi_setpal; /* PMI for palette changes */
73971 u16 *pmi_base; /* protected mode interface location */
73972+ u8 *pmi_code; /* protected mode code location */
73973 void *pmi_start;
73974 void *pmi_pal;
73975 u8 *vbe_state_orig; /*
73976diff --git a/init/Kconfig b/init/Kconfig
73977index 2d9b831..ae4c8ac 100644
73978--- a/init/Kconfig
73979+++ b/init/Kconfig
73980@@ -1029,6 +1029,7 @@ endif # CGROUPS
73981
73982 config CHECKPOINT_RESTORE
73983 bool "Checkpoint/restore support" if EXPERT
73984+ depends on !GRKERNSEC
73985 default n
73986 help
73987 Enables additional kernel features in a sake of checkpoint/restore.
73988@@ -1516,7 +1517,7 @@ config SLUB_DEBUG
73989
73990 config COMPAT_BRK
73991 bool "Disable heap randomization"
73992- default y
73993+ default n
73994 help
73995 Randomizing heap placement makes heap exploits harder, but it
73996 also breaks ancient binaries (including anything libc5 based).
73997@@ -1779,7 +1780,7 @@ config INIT_ALL_POSSIBLE
73998 config STOP_MACHINE
73999 bool
74000 default y
74001- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
74002+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
74003 help
74004 Need stop_machine() primitive.
74005
74006diff --git a/init/Makefile b/init/Makefile
74007index 7bc47ee..6da2dc7 100644
74008--- a/init/Makefile
74009+++ b/init/Makefile
74010@@ -2,6 +2,9 @@
74011 # Makefile for the linux kernel.
74012 #
74013
74014+ccflags-y := $(GCC_PLUGINS_CFLAGS)
74015+asflags-y := $(GCC_PLUGINS_AFLAGS)
74016+
74017 obj-y := main.o version.o mounts.o
74018 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
74019 obj-y += noinitramfs.o
74020diff --git a/init/do_mounts.c b/init/do_mounts.c
74021index a2b49f2..03a0e17c 100644
74022--- a/init/do_mounts.c
74023+++ b/init/do_mounts.c
74024@@ -355,11 +355,11 @@ static void __init get_fs_names(char *page)
74025 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
74026 {
74027 struct super_block *s;
74028- int err = sys_mount(name, "/root", fs, flags, data);
74029+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
74030 if (err)
74031 return err;
74032
74033- sys_chdir("/root");
74034+ sys_chdir((const char __force_user *)"/root");
74035 s = current->fs->pwd.dentry->d_sb;
74036 ROOT_DEV = s->s_dev;
74037 printk(KERN_INFO
74038@@ -480,18 +480,18 @@ void __init change_floppy(char *fmt, ...)
74039 va_start(args, fmt);
74040 vsprintf(buf, fmt, args);
74041 va_end(args);
74042- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
74043+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
74044 if (fd >= 0) {
74045 sys_ioctl(fd, FDEJECT, 0);
74046 sys_close(fd);
74047 }
74048 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
74049- fd = sys_open("/dev/console", O_RDWR, 0);
74050+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
74051 if (fd >= 0) {
74052 sys_ioctl(fd, TCGETS, (long)&termios);
74053 termios.c_lflag &= ~ICANON;
74054 sys_ioctl(fd, TCSETSF, (long)&termios);
74055- sys_read(fd, &c, 1);
74056+ sys_read(fd, (char __user *)&c, 1);
74057 termios.c_lflag |= ICANON;
74058 sys_ioctl(fd, TCSETSF, (long)&termios);
74059 sys_close(fd);
74060@@ -585,6 +585,6 @@ void __init prepare_namespace(void)
74061 mount_root();
74062 out:
74063 devtmpfs_mount("dev");
74064- sys_mount(".", "/", NULL, MS_MOVE, NULL);
74065- sys_chroot(".");
74066+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
74067+ sys_chroot((const char __force_user *)".");
74068 }
74069diff --git a/init/do_mounts.h b/init/do_mounts.h
74070index f5b978a..69dbfe8 100644
74071--- a/init/do_mounts.h
74072+++ b/init/do_mounts.h
74073@@ -15,15 +15,15 @@ extern int root_mountflags;
74074
74075 static inline int create_dev(char *name, dev_t dev)
74076 {
74077- sys_unlink(name);
74078- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
74079+ sys_unlink((char __force_user *)name);
74080+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
74081 }
74082
74083 #if BITS_PER_LONG == 32
74084 static inline u32 bstat(char *name)
74085 {
74086 struct stat64 stat;
74087- if (sys_stat64(name, &stat) != 0)
74088+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
74089 return 0;
74090 if (!S_ISBLK(stat.st_mode))
74091 return 0;
74092@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
74093 static inline u32 bstat(char *name)
74094 {
74095 struct stat stat;
74096- if (sys_newstat(name, &stat) != 0)
74097+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
74098 return 0;
74099 if (!S_ISBLK(stat.st_mode))
74100 return 0;
74101diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
74102index 3e0878e..8a9d7a0 100644
74103--- a/init/do_mounts_initrd.c
74104+++ b/init/do_mounts_initrd.c
74105@@ -37,13 +37,13 @@ static int init_linuxrc(struct subprocess_info *info, struct cred *new)
74106 {
74107 sys_unshare(CLONE_FS | CLONE_FILES);
74108 /* stdin/stdout/stderr for /linuxrc */
74109- sys_open("/dev/console", O_RDWR, 0);
74110+ sys_open((const char __force_user *)"/dev/console", O_RDWR, 0);
74111 sys_dup(0);
74112 sys_dup(0);
74113 /* move initrd over / and chdir/chroot in initrd root */
74114- sys_chdir("/root");
74115- sys_mount(".", "/", NULL, MS_MOVE, NULL);
74116- sys_chroot(".");
74117+ sys_chdir((const char __force_user *)"/root");
74118+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
74119+ sys_chroot((const char __force_user *)".");
74120 sys_setsid();
74121 return 0;
74122 }
74123@@ -59,8 +59,8 @@ static void __init handle_initrd(void)
74124 create_dev("/dev/root.old", Root_RAM0);
74125 /* mount initrd on rootfs' /root */
74126 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
74127- sys_mkdir("/old", 0700);
74128- sys_chdir("/old");
74129+ sys_mkdir((const char __force_user *)"/old", 0700);
74130+ sys_chdir((const char __force_user *)"/old");
74131
74132 /* try loading default modules from initrd */
74133 load_default_modules();
74134@@ -80,31 +80,31 @@ static void __init handle_initrd(void)
74135 current->flags &= ~PF_FREEZER_SKIP;
74136
74137 /* move initrd to rootfs' /old */
74138- sys_mount("..", ".", NULL, MS_MOVE, NULL);
74139+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
74140 /* switch root and cwd back to / of rootfs */
74141- sys_chroot("..");
74142+ sys_chroot((const char __force_user *)"..");
74143
74144 if (new_decode_dev(real_root_dev) == Root_RAM0) {
74145- sys_chdir("/old");
74146+ sys_chdir((const char __force_user *)"/old");
74147 return;
74148 }
74149
74150- sys_chdir("/");
74151+ sys_chdir((const char __force_user *)"/");
74152 ROOT_DEV = new_decode_dev(real_root_dev);
74153 mount_root();
74154
74155 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
74156- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
74157+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
74158 if (!error)
74159 printk("okay\n");
74160 else {
74161- int fd = sys_open("/dev/root.old", O_RDWR, 0);
74162+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
74163 if (error == -ENOENT)
74164 printk("/initrd does not exist. Ignored.\n");
74165 else
74166 printk("failed\n");
74167 printk(KERN_NOTICE "Unmounting old root\n");
74168- sys_umount("/old", MNT_DETACH);
74169+ sys_umount((char __force_user *)"/old", MNT_DETACH);
74170 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
74171 if (fd < 0) {
74172 error = fd;
74173@@ -127,11 +127,11 @@ int __init initrd_load(void)
74174 * mounted in the normal path.
74175 */
74176 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
74177- sys_unlink("/initrd.image");
74178+ sys_unlink((const char __force_user *)"/initrd.image");
74179 handle_initrd();
74180 return 1;
74181 }
74182 }
74183- sys_unlink("/initrd.image");
74184+ sys_unlink((const char __force_user *)"/initrd.image");
74185 return 0;
74186 }
74187diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
74188index 8cb6db5..d729f50 100644
74189--- a/init/do_mounts_md.c
74190+++ b/init/do_mounts_md.c
74191@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
74192 partitioned ? "_d" : "", minor,
74193 md_setup_args[ent].device_names);
74194
74195- fd = sys_open(name, 0, 0);
74196+ fd = sys_open((char __force_user *)name, 0, 0);
74197 if (fd < 0) {
74198 printk(KERN_ERR "md: open failed - cannot start "
74199 "array %s\n", name);
74200@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
74201 * array without it
74202 */
74203 sys_close(fd);
74204- fd = sys_open(name, 0, 0);
74205+ fd = sys_open((char __force_user *)name, 0, 0);
74206 sys_ioctl(fd, BLKRRPART, 0);
74207 }
74208 sys_close(fd);
74209@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
74210
74211 wait_for_device_probe();
74212
74213- fd = sys_open("/dev/md0", 0, 0);
74214+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
74215 if (fd >= 0) {
74216 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
74217 sys_close(fd);
74218diff --git a/init/init_task.c b/init/init_task.c
74219index ba0a7f36..2bcf1d5 100644
74220--- a/init/init_task.c
74221+++ b/init/init_task.c
74222@@ -22,5 +22,9 @@ EXPORT_SYMBOL(init_task);
74223 * Initial thread structure. Alignment of this is handled by a special
74224 * linker map entry.
74225 */
74226+#ifdef CONFIG_X86
74227+union thread_union init_thread_union __init_task_data;
74228+#else
74229 union thread_union init_thread_union __init_task_data =
74230 { INIT_THREAD_INFO(init_task) };
74231+#endif
74232diff --git a/init/initramfs.c b/init/initramfs.c
74233index a67ef9d..2d17ed9 100644
74234--- a/init/initramfs.c
74235+++ b/init/initramfs.c
74236@@ -84,7 +84,7 @@ static void __init free_hash(void)
74237 }
74238 }
74239
74240-static long __init do_utime(char *filename, time_t mtime)
74241+static long __init do_utime(char __force_user *filename, time_t mtime)
74242 {
74243 struct timespec t[2];
74244
74245@@ -119,7 +119,7 @@ static void __init dir_utime(void)
74246 struct dir_entry *de, *tmp;
74247 list_for_each_entry_safe(de, tmp, &dir_list, list) {
74248 list_del(&de->list);
74249- do_utime(de->name, de->mtime);
74250+ do_utime((char __force_user *)de->name, de->mtime);
74251 kfree(de->name);
74252 kfree(de);
74253 }
74254@@ -281,7 +281,7 @@ static int __init maybe_link(void)
74255 if (nlink >= 2) {
74256 char *old = find_link(major, minor, ino, mode, collected);
74257 if (old)
74258- return (sys_link(old, collected) < 0) ? -1 : 1;
74259+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
74260 }
74261 return 0;
74262 }
74263@@ -290,11 +290,11 @@ static void __init clean_path(char *path, umode_t mode)
74264 {
74265 struct stat st;
74266
74267- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
74268+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
74269 if (S_ISDIR(st.st_mode))
74270- sys_rmdir(path);
74271+ sys_rmdir((char __force_user *)path);
74272 else
74273- sys_unlink(path);
74274+ sys_unlink((char __force_user *)path);
74275 }
74276 }
74277
74278@@ -315,7 +315,7 @@ static int __init do_name(void)
74279 int openflags = O_WRONLY|O_CREAT;
74280 if (ml != 1)
74281 openflags |= O_TRUNC;
74282- wfd = sys_open(collected, openflags, mode);
74283+ wfd = sys_open((char __force_user *)collected, openflags, mode);
74284
74285 if (wfd >= 0) {
74286 sys_fchown(wfd, uid, gid);
74287@@ -327,17 +327,17 @@ static int __init do_name(void)
74288 }
74289 }
74290 } else if (S_ISDIR(mode)) {
74291- sys_mkdir(collected, mode);
74292- sys_chown(collected, uid, gid);
74293- sys_chmod(collected, mode);
74294+ sys_mkdir((char __force_user *)collected, mode);
74295+ sys_chown((char __force_user *)collected, uid, gid);
74296+ sys_chmod((char __force_user *)collected, mode);
74297 dir_add(collected, mtime);
74298 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
74299 S_ISFIFO(mode) || S_ISSOCK(mode)) {
74300 if (maybe_link() == 0) {
74301- sys_mknod(collected, mode, rdev);
74302- sys_chown(collected, uid, gid);
74303- sys_chmod(collected, mode);
74304- do_utime(collected, mtime);
74305+ sys_mknod((char __force_user *)collected, mode, rdev);
74306+ sys_chown((char __force_user *)collected, uid, gid);
74307+ sys_chmod((char __force_user *)collected, mode);
74308+ do_utime((char __force_user *)collected, mtime);
74309 }
74310 }
74311 return 0;
74312@@ -346,15 +346,15 @@ static int __init do_name(void)
74313 static int __init do_copy(void)
74314 {
74315 if (count >= body_len) {
74316- sys_write(wfd, victim, body_len);
74317+ sys_write(wfd, (char __force_user *)victim, body_len);
74318 sys_close(wfd);
74319- do_utime(vcollected, mtime);
74320+ do_utime((char __force_user *)vcollected, mtime);
74321 kfree(vcollected);
74322 eat(body_len);
74323 state = SkipIt;
74324 return 0;
74325 } else {
74326- sys_write(wfd, victim, count);
74327+ sys_write(wfd, (char __force_user *)victim, count);
74328 body_len -= count;
74329 eat(count);
74330 return 1;
74331@@ -365,9 +365,9 @@ static int __init do_symlink(void)
74332 {
74333 collected[N_ALIGN(name_len) + body_len] = '\0';
74334 clean_path(collected, 0);
74335- sys_symlink(collected + N_ALIGN(name_len), collected);
74336- sys_lchown(collected, uid, gid);
74337- do_utime(collected, mtime);
74338+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
74339+ sys_lchown((char __force_user *)collected, uid, gid);
74340+ do_utime((char __force_user *)collected, mtime);
74341 state = SkipIt;
74342 next_state = Reset;
74343 return 0;
74344@@ -583,7 +583,7 @@ static int __init populate_rootfs(void)
74345 {
74346 char *err = unpack_to_rootfs(__initramfs_start, __initramfs_size);
74347 if (err)
74348- panic(err); /* Failed to decompress INTERNAL initramfs */
74349+ panic("%s", err); /* Failed to decompress INTERNAL initramfs */
74350 if (initrd_start) {
74351 #ifdef CONFIG_BLK_DEV_RAM
74352 int fd;
74353diff --git a/init/main.c b/init/main.c
74354index 9484f4b..4c01430 100644
74355--- a/init/main.c
74356+++ b/init/main.c
74357@@ -100,6 +100,8 @@ static inline void mark_rodata_ro(void) { }
74358 extern void tc_init(void);
74359 #endif
74360
74361+extern void grsecurity_init(void);
74362+
74363 /*
74364 * Debug helper: via this flag we know that we are in 'early bootup code'
74365 * where only the boot processor is running with IRQ disabled. This means
74366@@ -153,6 +155,64 @@ static int __init set_reset_devices(char *str)
74367
74368 __setup("reset_devices", set_reset_devices);
74369
74370+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
74371+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
74372+static int __init setup_grsec_proc_gid(char *str)
74373+{
74374+ grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
74375+ return 1;
74376+}
74377+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
74378+#endif
74379+
74380+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
74381+unsigned long pax_user_shadow_base __read_only = 1UL << TASK_SIZE_MAX_SHIFT;
74382+EXPORT_SYMBOL(pax_user_shadow_base);
74383+extern char pax_enter_kernel_user[];
74384+extern char pax_exit_kernel_user[];
74385+extern pgdval_t clone_pgd_mask;
74386+#endif
74387+
74388+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
74389+static int __init setup_pax_nouderef(char *str)
74390+{
74391+#ifdef CONFIG_X86_32
74392+ unsigned int cpu;
74393+ struct desc_struct *gdt;
74394+
74395+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
74396+ gdt = get_cpu_gdt_table(cpu);
74397+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
74398+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
74399+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
74400+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
74401+ }
74402+ loadsegment(ds, __KERNEL_DS);
74403+ loadsegment(es, __KERNEL_DS);
74404+ loadsegment(ss, __KERNEL_DS);
74405+#else
74406+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
74407+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
74408+ clone_pgd_mask = ~(pgdval_t)0UL;
74409+ pax_user_shadow_base = 0UL;
74410+#endif
74411+
74412+ return 0;
74413+}
74414+early_param("pax_nouderef", setup_pax_nouderef);
74415+#endif
74416+
74417+#ifdef CONFIG_PAX_SOFTMODE
74418+int pax_softmode;
74419+
74420+static int __init setup_pax_softmode(char *str)
74421+{
74422+ get_option(&str, &pax_softmode);
74423+ return 1;
74424+}
74425+__setup("pax_softmode=", setup_pax_softmode);
74426+#endif
74427+
74428 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
74429 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
74430 static const char *panic_later, *panic_param;
74431@@ -655,8 +715,6 @@ static void __init do_ctors(void)
74432 bool initcall_debug;
74433 core_param(initcall_debug, initcall_debug, bool, 0644);
74434
74435-static char msgbuf[64];
74436-
74437 static int __init_or_module do_one_initcall_debug(initcall_t fn)
74438 {
74439 ktime_t calltime, delta, rettime;
74440@@ -679,23 +737,22 @@ int __init_or_module do_one_initcall(initcall_t fn)
74441 {
74442 int count = preempt_count();
74443 int ret;
74444+ const char *msg1 = "", *msg2 = "";
74445
74446 if (initcall_debug)
74447 ret = do_one_initcall_debug(fn);
74448 else
74449 ret = fn();
74450
74451- msgbuf[0] = 0;
74452-
74453 if (preempt_count() != count) {
74454- sprintf(msgbuf, "preemption imbalance ");
74455+ msg1 = " preemption imbalance";
74456 preempt_count() = count;
74457 }
74458 if (irqs_disabled()) {
74459- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
74460+ msg2 = " disabled interrupts";
74461 local_irq_enable();
74462 }
74463- WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
74464+ WARN(*msg1 || *msg2, "initcall %pF returned with%s%s\n", fn, msg1, msg2);
74465
74466 return ret;
74467 }
74468@@ -748,8 +805,14 @@ static void __init do_initcall_level(int level)
74469 level, level,
74470 &repair_env_string);
74471
74472- for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++)
74473+ for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++) {
74474 do_one_initcall(*fn);
74475+
74476+#ifdef LATENT_ENTROPY_PLUGIN
74477+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
74478+#endif
74479+
74480+ }
74481 }
74482
74483 static void __init do_initcalls(void)
74484@@ -783,8 +846,14 @@ static void __init do_pre_smp_initcalls(void)
74485 {
74486 initcall_t *fn;
74487
74488- for (fn = __initcall_start; fn < __initcall0_start; fn++)
74489+ for (fn = __initcall_start; fn < __initcall0_start; fn++) {
74490 do_one_initcall(*fn);
74491+
74492+#ifdef LATENT_ENTROPY_PLUGIN
74493+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
74494+#endif
74495+
74496+ }
74497 }
74498
74499 /*
74500@@ -802,8 +871,8 @@ static int run_init_process(const char *init_filename)
74501 {
74502 argv_init[0] = init_filename;
74503 return do_execve(init_filename,
74504- (const char __user *const __user *)argv_init,
74505- (const char __user *const __user *)envp_init);
74506+ (const char __user *const __force_user *)argv_init,
74507+ (const char __user *const __force_user *)envp_init);
74508 }
74509
74510 static noinline void __init kernel_init_freeable(void);
74511@@ -880,7 +949,7 @@ static noinline void __init kernel_init_freeable(void)
74512 do_basic_setup();
74513
74514 /* Open the /dev/console on the rootfs, this should never fail */
74515- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
74516+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
74517 pr_err("Warning: unable to open an initial console.\n");
74518
74519 (void) sys_dup(0);
74520@@ -893,11 +962,13 @@ static noinline void __init kernel_init_freeable(void)
74521 if (!ramdisk_execute_command)
74522 ramdisk_execute_command = "/init";
74523
74524- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
74525+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
74526 ramdisk_execute_command = NULL;
74527 prepare_namespace();
74528 }
74529
74530+ grsecurity_init();
74531+
74532 /*
74533 * Ok, we have completed the initial bootup, and
74534 * we're essentially up and running. Get rid of the
74535diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
74536index 130dfec..cc88451 100644
74537--- a/ipc/ipc_sysctl.c
74538+++ b/ipc/ipc_sysctl.c
74539@@ -30,7 +30,7 @@ static void *get_ipc(ctl_table *table)
74540 static int proc_ipc_dointvec(ctl_table *table, int write,
74541 void __user *buffer, size_t *lenp, loff_t *ppos)
74542 {
74543- struct ctl_table ipc_table;
74544+ ctl_table_no_const ipc_table;
74545
74546 memcpy(&ipc_table, table, sizeof(ipc_table));
74547 ipc_table.data = get_ipc(table);
74548@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(ctl_table *table, int write,
74549 static int proc_ipc_dointvec_minmax(ctl_table *table, int write,
74550 void __user *buffer, size_t *lenp, loff_t *ppos)
74551 {
74552- struct ctl_table ipc_table;
74553+ ctl_table_no_const ipc_table;
74554
74555 memcpy(&ipc_table, table, sizeof(ipc_table));
74556 ipc_table.data = get_ipc(table);
74557@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(ctl_table *table, int write,
74558 static int proc_ipc_callback_dointvec(ctl_table *table, int write,
74559 void __user *buffer, size_t *lenp, loff_t *ppos)
74560 {
74561- struct ctl_table ipc_table;
74562+ ctl_table_no_const ipc_table;
74563 size_t lenp_bef = *lenp;
74564 int rc;
74565
74566@@ -88,7 +88,7 @@ static int proc_ipc_callback_dointvec(ctl_table *table, int write,
74567 static int proc_ipc_doulongvec_minmax(ctl_table *table, int write,
74568 void __user *buffer, size_t *lenp, loff_t *ppos)
74569 {
74570- struct ctl_table ipc_table;
74571+ ctl_table_no_const ipc_table;
74572 memcpy(&ipc_table, table, sizeof(ipc_table));
74573 ipc_table.data = get_ipc(table);
74574
74575@@ -122,7 +122,7 @@ static void ipc_auto_callback(int val)
74576 static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write,
74577 void __user *buffer, size_t *lenp, loff_t *ppos)
74578 {
74579- struct ctl_table ipc_table;
74580+ ctl_table_no_const ipc_table;
74581 size_t lenp_bef = *lenp;
74582 int oldval;
74583 int rc;
74584diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
74585index 383d638..943fdbb 100644
74586--- a/ipc/mq_sysctl.c
74587+++ b/ipc/mq_sysctl.c
74588@@ -25,7 +25,7 @@ static void *get_mq(ctl_table *table)
74589 static int proc_mq_dointvec_minmax(ctl_table *table, int write,
74590 void __user *buffer, size_t *lenp, loff_t *ppos)
74591 {
74592- struct ctl_table mq_table;
74593+ ctl_table_no_const mq_table;
74594 memcpy(&mq_table, table, sizeof(mq_table));
74595 mq_table.data = get_mq(table);
74596
74597diff --git a/ipc/mqueue.c b/ipc/mqueue.c
74598index e4e47f6..a85e0ad 100644
74599--- a/ipc/mqueue.c
74600+++ b/ipc/mqueue.c
74601@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
74602 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
74603 info->attr.mq_msgsize);
74604
74605+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
74606 spin_lock(&mq_lock);
74607 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
74608 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
74609diff --git a/ipc/msg.c b/ipc/msg.c
74610index d0c6d96..69a893c 100644
74611--- a/ipc/msg.c
74612+++ b/ipc/msg.c
74613@@ -296,18 +296,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
74614 return security_msg_queue_associate(msq, msgflg);
74615 }
74616
74617+static struct ipc_ops msg_ops = {
74618+ .getnew = newque,
74619+ .associate = msg_security,
74620+ .more_checks = NULL
74621+};
74622+
74623 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
74624 {
74625 struct ipc_namespace *ns;
74626- struct ipc_ops msg_ops;
74627 struct ipc_params msg_params;
74628
74629 ns = current->nsproxy->ipc_ns;
74630
74631- msg_ops.getnew = newque;
74632- msg_ops.associate = msg_security;
74633- msg_ops.more_checks = NULL;
74634-
74635 msg_params.key = key;
74636 msg_params.flg = msgflg;
74637
74638diff --git a/ipc/sem.c b/ipc/sem.c
74639index 70480a3..f4e8262 100644
74640--- a/ipc/sem.c
74641+++ b/ipc/sem.c
74642@@ -460,10 +460,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
74643 return 0;
74644 }
74645
74646+static struct ipc_ops sem_ops = {
74647+ .getnew = newary,
74648+ .associate = sem_security,
74649+ .more_checks = sem_more_checks
74650+};
74651+
74652 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
74653 {
74654 struct ipc_namespace *ns;
74655- struct ipc_ops sem_ops;
74656 struct ipc_params sem_params;
74657
74658 ns = current->nsproxy->ipc_ns;
74659@@ -471,10 +476,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
74660 if (nsems < 0 || nsems > ns->sc_semmsl)
74661 return -EINVAL;
74662
74663- sem_ops.getnew = newary;
74664- sem_ops.associate = sem_security;
74665- sem_ops.more_checks = sem_more_checks;
74666-
74667 sem_params.key = key;
74668 sem_params.flg = semflg;
74669 sem_params.u.nsems = nsems;
74670diff --git a/ipc/shm.c b/ipc/shm.c
74671index 7e199fa..180a1ca 100644
74672--- a/ipc/shm.c
74673+++ b/ipc/shm.c
74674@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
74675 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
74676 #endif
74677
74678+#ifdef CONFIG_GRKERNSEC
74679+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
74680+ const time_t shm_createtime, const kuid_t cuid,
74681+ const int shmid);
74682+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
74683+ const time_t shm_createtime);
74684+#endif
74685+
74686 void shm_init_ns(struct ipc_namespace *ns)
74687 {
74688 ns->shm_ctlmax = SHMMAX;
74689@@ -531,6 +539,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
74690 shp->shm_lprid = 0;
74691 shp->shm_atim = shp->shm_dtim = 0;
74692 shp->shm_ctim = get_seconds();
74693+#ifdef CONFIG_GRKERNSEC
74694+ {
74695+ struct timespec timeval;
74696+ do_posix_clock_monotonic_gettime(&timeval);
74697+
74698+ shp->shm_createtime = timeval.tv_sec;
74699+ }
74700+#endif
74701 shp->shm_segsz = size;
74702 shp->shm_nattch = 0;
74703 shp->shm_file = file;
74704@@ -582,18 +598,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
74705 return 0;
74706 }
74707
74708+static struct ipc_ops shm_ops = {
74709+ .getnew = newseg,
74710+ .associate = shm_security,
74711+ .more_checks = shm_more_checks
74712+};
74713+
74714 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
74715 {
74716 struct ipc_namespace *ns;
74717- struct ipc_ops shm_ops;
74718 struct ipc_params shm_params;
74719
74720 ns = current->nsproxy->ipc_ns;
74721
74722- shm_ops.getnew = newseg;
74723- shm_ops.associate = shm_security;
74724- shm_ops.more_checks = shm_more_checks;
74725-
74726 shm_params.key = key;
74727 shm_params.flg = shmflg;
74728 shm_params.u.size = size;
74729@@ -1014,6 +1031,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
74730 f_mode = FMODE_READ | FMODE_WRITE;
74731 }
74732 if (shmflg & SHM_EXEC) {
74733+
74734+#ifdef CONFIG_PAX_MPROTECT
74735+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
74736+ goto out;
74737+#endif
74738+
74739 prot |= PROT_EXEC;
74740 acc_mode |= S_IXUGO;
74741 }
74742@@ -1037,9 +1060,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
74743 if (err)
74744 goto out_unlock;
74745
74746+#ifdef CONFIG_GRKERNSEC
74747+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
74748+ shp->shm_perm.cuid, shmid) ||
74749+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
74750+ err = -EACCES;
74751+ goto out_unlock;
74752+ }
74753+#endif
74754+
74755 path = shp->shm_file->f_path;
74756 path_get(&path);
74757 shp->shm_nattch++;
74758+#ifdef CONFIG_GRKERNSEC
74759+ shp->shm_lapid = current->pid;
74760+#endif
74761 size = i_size_read(path.dentry->d_inode);
74762 shm_unlock(shp);
74763
74764diff --git a/kernel/acct.c b/kernel/acct.c
74765index 8d6e145..33e0b1e 100644
74766--- a/kernel/acct.c
74767+++ b/kernel/acct.c
74768@@ -556,7 +556,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
74769 */
74770 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
74771 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
74772- file->f_op->write(file, (char *)&ac,
74773+ file->f_op->write(file, (char __force_user *)&ac,
74774 sizeof(acct_t), &file->f_pos);
74775 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
74776 set_fs(fs);
74777diff --git a/kernel/audit.c b/kernel/audit.c
74778index 91e53d0..d9e3ec4 100644
74779--- a/kernel/audit.c
74780+++ b/kernel/audit.c
74781@@ -118,7 +118,7 @@ u32 audit_sig_sid = 0;
74782 3) suppressed due to audit_rate_limit
74783 4) suppressed due to audit_backlog_limit
74784 */
74785-static atomic_t audit_lost = ATOMIC_INIT(0);
74786+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
74787
74788 /* The netlink socket. */
74789 static struct sock *audit_sock;
74790@@ -240,7 +240,7 @@ void audit_log_lost(const char *message)
74791 unsigned long now;
74792 int print;
74793
74794- atomic_inc(&audit_lost);
74795+ atomic_inc_unchecked(&audit_lost);
74796
74797 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
74798
74799@@ -259,7 +259,7 @@ void audit_log_lost(const char *message)
74800 printk(KERN_WARNING
74801 "audit: audit_lost=%d audit_rate_limit=%d "
74802 "audit_backlog_limit=%d\n",
74803- atomic_read(&audit_lost),
74804+ atomic_read_unchecked(&audit_lost),
74805 audit_rate_limit,
74806 audit_backlog_limit);
74807 audit_panic(message);
74808@@ -664,7 +664,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
74809 status_set.pid = audit_pid;
74810 status_set.rate_limit = audit_rate_limit;
74811 status_set.backlog_limit = audit_backlog_limit;
74812- status_set.lost = atomic_read(&audit_lost);
74813+ status_set.lost = atomic_read_unchecked(&audit_lost);
74814 status_set.backlog = skb_queue_len(&audit_skb_queue);
74815 audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
74816 &status_set, sizeof(status_set));
74817diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
74818index 6bd4a90..0ee9eff 100644
74819--- a/kernel/auditfilter.c
74820+++ b/kernel/auditfilter.c
74821@@ -423,7 +423,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
74822 f->lsm_rule = NULL;
74823
74824 /* Support legacy tests for a valid loginuid */
74825- if ((f->type == AUDIT_LOGINUID) && (f->val == 4294967295)) {
74826+ if ((f->type == AUDIT_LOGINUID) && (f->val == 4294967295U)) {
74827 f->type = AUDIT_LOGINUID_SET;
74828 f->val = 0;
74829 }
74830diff --git a/kernel/auditsc.c b/kernel/auditsc.c
74831index 3c8a601..3a416f6 100644
74832--- a/kernel/auditsc.c
74833+++ b/kernel/auditsc.c
74834@@ -1956,7 +1956,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
74835 }
74836
74837 /* global counter which is incremented every time something logs in */
74838-static atomic_t session_id = ATOMIC_INIT(0);
74839+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
74840
74841 /**
74842 * audit_set_loginuid - set current task's audit_context loginuid
74843@@ -1980,7 +1980,7 @@ int audit_set_loginuid(kuid_t loginuid)
74844 return -EPERM;
74845 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
74846
74847- sessionid = atomic_inc_return(&session_id);
74848+ sessionid = atomic_inc_return_unchecked(&session_id);
74849 if (context && context->in_syscall) {
74850 struct audit_buffer *ab;
74851
74852diff --git a/kernel/capability.c b/kernel/capability.c
74853index f6c2ce5..982c0f9 100644
74854--- a/kernel/capability.c
74855+++ b/kernel/capability.c
74856@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
74857 * before modification is attempted and the application
74858 * fails.
74859 */
74860+ if (tocopy > ARRAY_SIZE(kdata))
74861+ return -EFAULT;
74862+
74863 if (copy_to_user(dataptr, kdata, tocopy
74864 * sizeof(struct __user_cap_data_struct))) {
74865 return -EFAULT;
74866@@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
74867 int ret;
74868
74869 rcu_read_lock();
74870- ret = security_capable(__task_cred(t), ns, cap);
74871+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
74872+ gr_task_is_capable(t, __task_cred(t), cap);
74873 rcu_read_unlock();
74874
74875- return (ret == 0);
74876+ return ret;
74877 }
74878
74879 /**
74880@@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
74881 int ret;
74882
74883 rcu_read_lock();
74884- ret = security_capable_noaudit(__task_cred(t), ns, cap);
74885+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
74886 rcu_read_unlock();
74887
74888- return (ret == 0);
74889+ return ret;
74890 }
74891
74892 /**
74893@@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
74894 BUG();
74895 }
74896
74897- if (security_capable(current_cred(), ns, cap) == 0) {
74898+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
74899 current->flags |= PF_SUPERPRIV;
74900 return true;
74901 }
74902@@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
74903 }
74904 EXPORT_SYMBOL(ns_capable);
74905
74906+bool ns_capable_nolog(struct user_namespace *ns, int cap)
74907+{
74908+ if (unlikely(!cap_valid(cap))) {
74909+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
74910+ BUG();
74911+ }
74912+
74913+ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
74914+ current->flags |= PF_SUPERPRIV;
74915+ return true;
74916+ }
74917+ return false;
74918+}
74919+EXPORT_SYMBOL(ns_capable_nolog);
74920+
74921 /**
74922 * file_ns_capable - Determine if the file's opener had a capability in effect
74923 * @file: The file we want to check
74924@@ -432,6 +451,12 @@ bool capable(int cap)
74925 }
74926 EXPORT_SYMBOL(capable);
74927
74928+bool capable_nolog(int cap)
74929+{
74930+ return ns_capable_nolog(&init_user_ns, cap);
74931+}
74932+EXPORT_SYMBOL(capable_nolog);
74933+
74934 /**
74935 * nsown_capable - Check superior capability to one's own user_ns
74936 * @cap: The capability in question
74937@@ -464,3 +489,10 @@ bool inode_capable(const struct inode *inode, int cap)
74938
74939 return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
74940 }
74941+
74942+bool inode_capable_nolog(const struct inode *inode, int cap)
74943+{
74944+ struct user_namespace *ns = current_user_ns();
74945+
74946+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
74947+}
74948diff --git a/kernel/cgroup.c b/kernel/cgroup.c
74949index a7c9e6d..a16aa75 100644
74950--- a/kernel/cgroup.c
74951+++ b/kernel/cgroup.c
74952@@ -5378,7 +5378,7 @@ static int cgroup_css_links_read(struct cgroup *cont,
74953 struct css_set *cg = link->cg;
74954 struct task_struct *task;
74955 int count = 0;
74956- seq_printf(seq, "css_set %p\n", cg);
74957+ seq_printf(seq, "css_set %pK\n", cg);
74958 list_for_each_entry(task, &cg->tasks, cg_list) {
74959 if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
74960 seq_puts(seq, " ...\n");
74961diff --git a/kernel/compat.c b/kernel/compat.c
74962index 0a09e48..f44f3f0 100644
74963--- a/kernel/compat.c
74964+++ b/kernel/compat.c
74965@@ -13,6 +13,7 @@
74966
74967 #include <linux/linkage.h>
74968 #include <linux/compat.h>
74969+#include <linux/module.h>
74970 #include <linux/errno.h>
74971 #include <linux/time.h>
74972 #include <linux/signal.h>
74973@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
74974 mm_segment_t oldfs;
74975 long ret;
74976
74977- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
74978+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
74979 oldfs = get_fs();
74980 set_fs(KERNEL_DS);
74981 ret = hrtimer_nanosleep_restart(restart);
74982@@ -252,7 +253,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
74983 oldfs = get_fs();
74984 set_fs(KERNEL_DS);
74985 ret = hrtimer_nanosleep(&tu,
74986- rmtp ? (struct timespec __user *)&rmt : NULL,
74987+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
74988 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
74989 set_fs(oldfs);
74990
74991@@ -361,7 +362,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
74992 mm_segment_t old_fs = get_fs();
74993
74994 set_fs(KERNEL_DS);
74995- ret = sys_sigpending((old_sigset_t __user *) &s);
74996+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
74997 set_fs(old_fs);
74998 if (ret == 0)
74999 ret = put_user(s, set);
75000@@ -451,7 +452,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
75001 mm_segment_t old_fs = get_fs();
75002
75003 set_fs(KERNEL_DS);
75004- ret = sys_old_getrlimit(resource, &r);
75005+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
75006 set_fs(old_fs);
75007
75008 if (!ret) {
75009@@ -533,8 +534,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
75010 set_fs (KERNEL_DS);
75011 ret = sys_wait4(pid,
75012 (stat_addr ?
75013- (unsigned int __user *) &status : NULL),
75014- options, (struct rusage __user *) &r);
75015+ (unsigned int __force_user *) &status : NULL),
75016+ options, (struct rusage __force_user *) &r);
75017 set_fs (old_fs);
75018
75019 if (ret > 0) {
75020@@ -560,8 +561,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
75021 memset(&info, 0, sizeof(info));
75022
75023 set_fs(KERNEL_DS);
75024- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
75025- uru ? (struct rusage __user *)&ru : NULL);
75026+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
75027+ uru ? (struct rusage __force_user *)&ru : NULL);
75028 set_fs(old_fs);
75029
75030 if ((ret < 0) || (info.si_signo == 0))
75031@@ -695,8 +696,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
75032 oldfs = get_fs();
75033 set_fs(KERNEL_DS);
75034 err = sys_timer_settime(timer_id, flags,
75035- (struct itimerspec __user *) &newts,
75036- (struct itimerspec __user *) &oldts);
75037+ (struct itimerspec __force_user *) &newts,
75038+ (struct itimerspec __force_user *) &oldts);
75039 set_fs(oldfs);
75040 if (!err && old && put_compat_itimerspec(old, &oldts))
75041 return -EFAULT;
75042@@ -713,7 +714,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
75043 oldfs = get_fs();
75044 set_fs(KERNEL_DS);
75045 err = sys_timer_gettime(timer_id,
75046- (struct itimerspec __user *) &ts);
75047+ (struct itimerspec __force_user *) &ts);
75048 set_fs(oldfs);
75049 if (!err && put_compat_itimerspec(setting, &ts))
75050 return -EFAULT;
75051@@ -732,7 +733,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
75052 oldfs = get_fs();
75053 set_fs(KERNEL_DS);
75054 err = sys_clock_settime(which_clock,
75055- (struct timespec __user *) &ts);
75056+ (struct timespec __force_user *) &ts);
75057 set_fs(oldfs);
75058 return err;
75059 }
75060@@ -747,7 +748,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
75061 oldfs = get_fs();
75062 set_fs(KERNEL_DS);
75063 err = sys_clock_gettime(which_clock,
75064- (struct timespec __user *) &ts);
75065+ (struct timespec __force_user *) &ts);
75066 set_fs(oldfs);
75067 if (!err && put_compat_timespec(&ts, tp))
75068 return -EFAULT;
75069@@ -767,7 +768,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
75070
75071 oldfs = get_fs();
75072 set_fs(KERNEL_DS);
75073- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
75074+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
75075 set_fs(oldfs);
75076
75077 err = compat_put_timex(utp, &txc);
75078@@ -787,7 +788,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
75079 oldfs = get_fs();
75080 set_fs(KERNEL_DS);
75081 err = sys_clock_getres(which_clock,
75082- (struct timespec __user *) &ts);
75083+ (struct timespec __force_user *) &ts);
75084 set_fs(oldfs);
75085 if (!err && tp && put_compat_timespec(&ts, tp))
75086 return -EFAULT;
75087@@ -799,9 +800,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
75088 long err;
75089 mm_segment_t oldfs;
75090 struct timespec tu;
75091- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
75092+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
75093
75094- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
75095+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
75096 oldfs = get_fs();
75097 set_fs(KERNEL_DS);
75098 err = clock_nanosleep_restart(restart);
75099@@ -833,8 +834,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
75100 oldfs = get_fs();
75101 set_fs(KERNEL_DS);
75102 err = sys_clock_nanosleep(which_clock, flags,
75103- (struct timespec __user *) &in,
75104- (struct timespec __user *) &out);
75105+ (struct timespec __force_user *) &in,
75106+ (struct timespec __force_user *) &out);
75107 set_fs(oldfs);
75108
75109 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
75110diff --git a/kernel/configs.c b/kernel/configs.c
75111index c18b1f1..b9a0132 100644
75112--- a/kernel/configs.c
75113+++ b/kernel/configs.c
75114@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
75115 struct proc_dir_entry *entry;
75116
75117 /* create the current config file */
75118+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
75119+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
75120+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
75121+ &ikconfig_file_ops);
75122+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
75123+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
75124+ &ikconfig_file_ops);
75125+#endif
75126+#else
75127 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
75128 &ikconfig_file_ops);
75129+#endif
75130+
75131 if (!entry)
75132 return -ENOMEM;
75133
75134diff --git a/kernel/cred.c b/kernel/cred.c
75135index e0573a4..3874e41 100644
75136--- a/kernel/cred.c
75137+++ b/kernel/cred.c
75138@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
75139 validate_creds(cred);
75140 alter_cred_subscribers(cred, -1);
75141 put_cred(cred);
75142+
75143+#ifdef CONFIG_GRKERNSEC_SETXID
75144+ cred = (struct cred *) tsk->delayed_cred;
75145+ if (cred != NULL) {
75146+ tsk->delayed_cred = NULL;
75147+ validate_creds(cred);
75148+ alter_cred_subscribers(cred, -1);
75149+ put_cred(cred);
75150+ }
75151+#endif
75152 }
75153
75154 /**
75155@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
75156 * Always returns 0 thus allowing this function to be tail-called at the end
75157 * of, say, sys_setgid().
75158 */
75159-int commit_creds(struct cred *new)
75160+static int __commit_creds(struct cred *new)
75161 {
75162 struct task_struct *task = current;
75163 const struct cred *old = task->real_cred;
75164@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
75165
75166 get_cred(new); /* we will require a ref for the subj creds too */
75167
75168+ gr_set_role_label(task, new->uid, new->gid);
75169+
75170 /* dumpability changes */
75171 if (!uid_eq(old->euid, new->euid) ||
75172 !gid_eq(old->egid, new->egid) ||
75173@@ -479,6 +491,102 @@ int commit_creds(struct cred *new)
75174 put_cred(old);
75175 return 0;
75176 }
75177+#ifdef CONFIG_GRKERNSEC_SETXID
75178+extern int set_user(struct cred *new);
75179+
75180+void gr_delayed_cred_worker(void)
75181+{
75182+ const struct cred *new = current->delayed_cred;
75183+ struct cred *ncred;
75184+
75185+ current->delayed_cred = NULL;
75186+
75187+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
75188+ // from doing get_cred on it when queueing this
75189+ put_cred(new);
75190+ return;
75191+ } else if (new == NULL)
75192+ return;
75193+
75194+ ncred = prepare_creds();
75195+ if (!ncred)
75196+ goto die;
75197+ // uids
75198+ ncred->uid = new->uid;
75199+ ncred->euid = new->euid;
75200+ ncred->suid = new->suid;
75201+ ncred->fsuid = new->fsuid;
75202+ // gids
75203+ ncred->gid = new->gid;
75204+ ncred->egid = new->egid;
75205+ ncred->sgid = new->sgid;
75206+ ncred->fsgid = new->fsgid;
75207+ // groups
75208+ if (set_groups(ncred, new->group_info) < 0) {
75209+ abort_creds(ncred);
75210+ goto die;
75211+ }
75212+ // caps
75213+ ncred->securebits = new->securebits;
75214+ ncred->cap_inheritable = new->cap_inheritable;
75215+ ncred->cap_permitted = new->cap_permitted;
75216+ ncred->cap_effective = new->cap_effective;
75217+ ncred->cap_bset = new->cap_bset;
75218+
75219+ if (set_user(ncred)) {
75220+ abort_creds(ncred);
75221+ goto die;
75222+ }
75223+
75224+ // from doing get_cred on it when queueing this
75225+ put_cred(new);
75226+
75227+ __commit_creds(ncred);
75228+ return;
75229+die:
75230+ // from doing get_cred on it when queueing this
75231+ put_cred(new);
75232+ do_group_exit(SIGKILL);
75233+}
75234+#endif
75235+
75236+int commit_creds(struct cred *new)
75237+{
75238+#ifdef CONFIG_GRKERNSEC_SETXID
75239+ int ret;
75240+ int schedule_it = 0;
75241+ struct task_struct *t;
75242+
75243+ /* we won't get called with tasklist_lock held for writing
75244+ and interrupts disabled as the cred struct in that case is
75245+ init_cred
75246+ */
75247+ if (grsec_enable_setxid && !current_is_single_threaded() &&
75248+ uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
75249+ !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
75250+ schedule_it = 1;
75251+ }
75252+ ret = __commit_creds(new);
75253+ if (schedule_it) {
75254+ rcu_read_lock();
75255+ read_lock(&tasklist_lock);
75256+ for (t = next_thread(current); t != current;
75257+ t = next_thread(t)) {
75258+ if (t->delayed_cred == NULL) {
75259+ t->delayed_cred = get_cred(new);
75260+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
75261+ set_tsk_need_resched(t);
75262+ }
75263+ }
75264+ read_unlock(&tasklist_lock);
75265+ rcu_read_unlock();
75266+ }
75267+ return ret;
75268+#else
75269+ return __commit_creds(new);
75270+#endif
75271+}
75272+
75273 EXPORT_SYMBOL(commit_creds);
75274
75275 /**
75276diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
75277index 0506d44..2c20034 100644
75278--- a/kernel/debug/debug_core.c
75279+++ b/kernel/debug/debug_core.c
75280@@ -123,7 +123,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
75281 */
75282 static atomic_t masters_in_kgdb;
75283 static atomic_t slaves_in_kgdb;
75284-static atomic_t kgdb_break_tasklet_var;
75285+static atomic_unchecked_t kgdb_break_tasklet_var;
75286 atomic_t kgdb_setting_breakpoint;
75287
75288 struct task_struct *kgdb_usethread;
75289@@ -133,7 +133,7 @@ int kgdb_single_step;
75290 static pid_t kgdb_sstep_pid;
75291
75292 /* to keep track of the CPU which is doing the single stepping*/
75293-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
75294+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
75295
75296 /*
75297 * If you are debugging a problem where roundup (the collection of
75298@@ -541,7 +541,7 @@ return_normal:
75299 * kernel will only try for the value of sstep_tries before
75300 * giving up and continuing on.
75301 */
75302- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
75303+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
75304 (kgdb_info[cpu].task &&
75305 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
75306 atomic_set(&kgdb_active, -1);
75307@@ -635,8 +635,8 @@ cpu_master_loop:
75308 }
75309
75310 kgdb_restore:
75311- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
75312- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
75313+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
75314+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
75315 if (kgdb_info[sstep_cpu].task)
75316 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
75317 else
75318@@ -888,18 +888,18 @@ static void kgdb_unregister_callbacks(void)
75319 static void kgdb_tasklet_bpt(unsigned long ing)
75320 {
75321 kgdb_breakpoint();
75322- atomic_set(&kgdb_break_tasklet_var, 0);
75323+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
75324 }
75325
75326 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
75327
75328 void kgdb_schedule_breakpoint(void)
75329 {
75330- if (atomic_read(&kgdb_break_tasklet_var) ||
75331+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
75332 atomic_read(&kgdb_active) != -1 ||
75333 atomic_read(&kgdb_setting_breakpoint))
75334 return;
75335- atomic_inc(&kgdb_break_tasklet_var);
75336+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
75337 tasklet_schedule(&kgdb_tasklet_breakpoint);
75338 }
75339 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
75340diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
75341index 00eb8f7..d7e3244 100644
75342--- a/kernel/debug/kdb/kdb_main.c
75343+++ b/kernel/debug/kdb/kdb_main.c
75344@@ -1974,7 +1974,7 @@ static int kdb_lsmod(int argc, const char **argv)
75345 continue;
75346
75347 kdb_printf("%-20s%8u 0x%p ", mod->name,
75348- mod->core_size, (void *)mod);
75349+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
75350 #ifdef CONFIG_MODULE_UNLOAD
75351 kdb_printf("%4ld ", module_refcount(mod));
75352 #endif
75353@@ -1984,7 +1984,7 @@ static int kdb_lsmod(int argc, const char **argv)
75354 kdb_printf(" (Loading)");
75355 else
75356 kdb_printf(" (Live)");
75357- kdb_printf(" 0x%p", mod->module_core);
75358+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
75359
75360 #ifdef CONFIG_MODULE_UNLOAD
75361 {
75362diff --git a/kernel/events/core.c b/kernel/events/core.c
75363index b391907..a0e2372 100644
75364--- a/kernel/events/core.c
75365+++ b/kernel/events/core.c
75366@@ -156,8 +156,15 @@ static struct srcu_struct pmus_srcu;
75367 * 0 - disallow raw tracepoint access for unpriv
75368 * 1 - disallow cpu events for unpriv
75369 * 2 - disallow kernel profiling for unpriv
75370+ * 3 - disallow all unpriv perf event use
75371 */
75372-int sysctl_perf_event_paranoid __read_mostly = 1;
75373+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
75374+int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
75375+#elif defined(CONFIG_GRKERNSEC_HIDESYM)
75376+int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
75377+#else
75378+int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
75379+#endif
75380
75381 /* Minimum for 512 kiB + 1 user control page */
75382 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
75383@@ -184,7 +191,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
75384 return 0;
75385 }
75386
75387-static atomic64_t perf_event_id;
75388+static atomic64_unchecked_t perf_event_id;
75389
75390 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
75391 enum event_type_t event_type);
75392@@ -2725,7 +2732,7 @@ static void __perf_event_read(void *info)
75393
75394 static inline u64 perf_event_count(struct perf_event *event)
75395 {
75396- return local64_read(&event->count) + atomic64_read(&event->child_count);
75397+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
75398 }
75399
75400 static u64 perf_event_read(struct perf_event *event)
75401@@ -3071,9 +3078,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
75402 mutex_lock(&event->child_mutex);
75403 total += perf_event_read(event);
75404 *enabled += event->total_time_enabled +
75405- atomic64_read(&event->child_total_time_enabled);
75406+ atomic64_read_unchecked(&event->child_total_time_enabled);
75407 *running += event->total_time_running +
75408- atomic64_read(&event->child_total_time_running);
75409+ atomic64_read_unchecked(&event->child_total_time_running);
75410
75411 list_for_each_entry(child, &event->child_list, child_list) {
75412 total += perf_event_read(child);
75413@@ -3459,10 +3466,10 @@ void perf_event_update_userpage(struct perf_event *event)
75414 userpg->offset -= local64_read(&event->hw.prev_count);
75415
75416 userpg->time_enabled = enabled +
75417- atomic64_read(&event->child_total_time_enabled);
75418+ atomic64_read_unchecked(&event->child_total_time_enabled);
75419
75420 userpg->time_running = running +
75421- atomic64_read(&event->child_total_time_running);
75422+ atomic64_read_unchecked(&event->child_total_time_running);
75423
75424 arch_perf_update_userpage(userpg, now);
75425
75426@@ -4012,7 +4019,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
75427
75428 /* Data. */
75429 sp = perf_user_stack_pointer(regs);
75430- rem = __output_copy_user(handle, (void *) sp, dump_size);
75431+ rem = __output_copy_user(handle, (void __user *) sp, dump_size);
75432 dyn_size = dump_size - rem;
75433
75434 perf_output_skip(handle, rem);
75435@@ -4100,11 +4107,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
75436 values[n++] = perf_event_count(event);
75437 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
75438 values[n++] = enabled +
75439- atomic64_read(&event->child_total_time_enabled);
75440+ atomic64_read_unchecked(&event->child_total_time_enabled);
75441 }
75442 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
75443 values[n++] = running +
75444- atomic64_read(&event->child_total_time_running);
75445+ atomic64_read_unchecked(&event->child_total_time_running);
75446 }
75447 if (read_format & PERF_FORMAT_ID)
75448 values[n++] = primary_event_id(event);
75449@@ -4813,12 +4820,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
75450 * need to add enough zero bytes after the string to handle
75451 * the 64bit alignment we do later.
75452 */
75453- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
75454+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
75455 if (!buf) {
75456 name = strncpy(tmp, "//enomem", sizeof(tmp));
75457 goto got_name;
75458 }
75459- name = d_path(&file->f_path, buf, PATH_MAX);
75460+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
75461 if (IS_ERR(name)) {
75462 name = strncpy(tmp, "//toolong", sizeof(tmp));
75463 goto got_name;
75464@@ -6240,7 +6247,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
75465 event->parent = parent_event;
75466
75467 event->ns = get_pid_ns(task_active_pid_ns(current));
75468- event->id = atomic64_inc_return(&perf_event_id);
75469+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
75470
75471 event->state = PERF_EVENT_STATE_INACTIVE;
75472
75473@@ -6550,6 +6557,11 @@ SYSCALL_DEFINE5(perf_event_open,
75474 if (flags & ~PERF_FLAG_ALL)
75475 return -EINVAL;
75476
75477+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
75478+ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
75479+ return -EACCES;
75480+#endif
75481+
75482 err = perf_copy_attr(attr_uptr, &attr);
75483 if (err)
75484 return err;
75485@@ -6882,10 +6894,10 @@ static void sync_child_event(struct perf_event *child_event,
75486 /*
75487 * Add back the child's count to the parent's count:
75488 */
75489- atomic64_add(child_val, &parent_event->child_count);
75490- atomic64_add(child_event->total_time_enabled,
75491+ atomic64_add_unchecked(child_val, &parent_event->child_count);
75492+ atomic64_add_unchecked(child_event->total_time_enabled,
75493 &parent_event->child_total_time_enabled);
75494- atomic64_add(child_event->total_time_running,
75495+ atomic64_add_unchecked(child_event->total_time_running,
75496 &parent_event->child_total_time_running);
75497
75498 /*
75499diff --git a/kernel/events/internal.h b/kernel/events/internal.h
75500index ca65997..cc8cee4 100644
75501--- a/kernel/events/internal.h
75502+++ b/kernel/events/internal.h
75503@@ -81,10 +81,10 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
75504 return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
75505 }
75506
75507-#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \
75508+#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user) \
75509 static inline unsigned int \
75510 func_name(struct perf_output_handle *handle, \
75511- const void *buf, unsigned int len) \
75512+ const void user *buf, unsigned int len) \
75513 { \
75514 unsigned long size, written; \
75515 \
75516@@ -116,17 +116,17 @@ static inline int memcpy_common(void *dst, const void *src, size_t n)
75517 return n;
75518 }
75519
75520-DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
75521+DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, )
75522
75523 #define MEMCPY_SKIP(dst, src, n) (n)
75524
75525-DEFINE_OUTPUT_COPY(__output_skip, MEMCPY_SKIP)
75526+DEFINE_OUTPUT_COPY(__output_skip, MEMCPY_SKIP, )
75527
75528 #ifndef arch_perf_out_copy_user
75529 #define arch_perf_out_copy_user __copy_from_user_inatomic
75530 #endif
75531
75532-DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
75533+DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user, __user)
75534
75535 /* Callchain handling */
75536 extern struct perf_callchain_entry *
75537diff --git a/kernel/exit.c b/kernel/exit.c
75538index 7bb73f9..d7978ed 100644
75539--- a/kernel/exit.c
75540+++ b/kernel/exit.c
75541@@ -172,6 +172,10 @@ void release_task(struct task_struct * p)
75542 struct task_struct *leader;
75543 int zap_leader;
75544 repeat:
75545+#ifdef CONFIG_NET
75546+ gr_del_task_from_ip_table(p);
75547+#endif
75548+
75549 /* don't need to get the RCU readlock here - the process is dead and
75550 * can't be modifying its own credentials. But shut RCU-lockdep up */
75551 rcu_read_lock();
75552@@ -340,7 +344,7 @@ int allow_signal(int sig)
75553 * know it'll be handled, so that they don't get converted to
75554 * SIGKILL or just silently dropped.
75555 */
75556- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
75557+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
75558 recalc_sigpending();
75559 spin_unlock_irq(&current->sighand->siglock);
75560 return 0;
75561@@ -709,6 +713,8 @@ void do_exit(long code)
75562 struct task_struct *tsk = current;
75563 int group_dead;
75564
75565+ set_fs(USER_DS);
75566+
75567 profile_task_exit(tsk);
75568
75569 WARN_ON(blk_needs_flush_plug(tsk));
75570@@ -725,7 +731,6 @@ void do_exit(long code)
75571 * mm_release()->clear_child_tid() from writing to a user-controlled
75572 * kernel address.
75573 */
75574- set_fs(USER_DS);
75575
75576 ptrace_event(PTRACE_EVENT_EXIT, code);
75577
75578@@ -784,6 +789,9 @@ void do_exit(long code)
75579 tsk->exit_code = code;
75580 taskstats_exit(tsk, group_dead);
75581
75582+ gr_acl_handle_psacct(tsk, code);
75583+ gr_acl_handle_exit();
75584+
75585 exit_mm(tsk);
75586
75587 if (group_dead)
75588@@ -905,7 +913,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
75589 * Take down every thread in the group. This is called by fatal signals
75590 * as well as by sys_exit_group (below).
75591 */
75592-void
75593+__noreturn void
75594 do_group_exit(int exit_code)
75595 {
75596 struct signal_struct *sig = current->signal;
75597diff --git a/kernel/fork.c b/kernel/fork.c
75598index 987b28a..4e03c05 100644
75599--- a/kernel/fork.c
75600+++ b/kernel/fork.c
75601@@ -319,7 +319,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
75602 *stackend = STACK_END_MAGIC; /* for overflow detection */
75603
75604 #ifdef CONFIG_CC_STACKPROTECTOR
75605- tsk->stack_canary = get_random_int();
75606+ tsk->stack_canary = pax_get_random_long();
75607 #endif
75608
75609 /*
75610@@ -345,13 +345,81 @@ free_tsk:
75611 }
75612
75613 #ifdef CONFIG_MMU
75614+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
75615+{
75616+ struct vm_area_struct *tmp;
75617+ unsigned long charge;
75618+ struct mempolicy *pol;
75619+ struct file *file;
75620+
75621+ charge = 0;
75622+ if (mpnt->vm_flags & VM_ACCOUNT) {
75623+ unsigned long len = vma_pages(mpnt);
75624+
75625+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
75626+ goto fail_nomem;
75627+ charge = len;
75628+ }
75629+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
75630+ if (!tmp)
75631+ goto fail_nomem;
75632+ *tmp = *mpnt;
75633+ tmp->vm_mm = mm;
75634+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
75635+ pol = mpol_dup(vma_policy(mpnt));
75636+ if (IS_ERR(pol))
75637+ goto fail_nomem_policy;
75638+ vma_set_policy(tmp, pol);
75639+ if (anon_vma_fork(tmp, mpnt))
75640+ goto fail_nomem_anon_vma_fork;
75641+ tmp->vm_flags &= ~VM_LOCKED;
75642+ tmp->vm_next = tmp->vm_prev = NULL;
75643+ tmp->vm_mirror = NULL;
75644+ file = tmp->vm_file;
75645+ if (file) {
75646+ struct inode *inode = file_inode(file);
75647+ struct address_space *mapping = file->f_mapping;
75648+
75649+ get_file(file);
75650+ if (tmp->vm_flags & VM_DENYWRITE)
75651+ atomic_dec(&inode->i_writecount);
75652+ mutex_lock(&mapping->i_mmap_mutex);
75653+ if (tmp->vm_flags & VM_SHARED)
75654+ mapping->i_mmap_writable++;
75655+ flush_dcache_mmap_lock(mapping);
75656+ /* insert tmp into the share list, just after mpnt */
75657+ if (unlikely(tmp->vm_flags & VM_NONLINEAR))
75658+ vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
75659+ else
75660+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
75661+ flush_dcache_mmap_unlock(mapping);
75662+ mutex_unlock(&mapping->i_mmap_mutex);
75663+ }
75664+
75665+ /*
75666+ * Clear hugetlb-related page reserves for children. This only
75667+ * affects MAP_PRIVATE mappings. Faults generated by the child
75668+ * are not guaranteed to succeed, even if read-only
75669+ */
75670+ if (is_vm_hugetlb_page(tmp))
75671+ reset_vma_resv_huge_pages(tmp);
75672+
75673+ return tmp;
75674+
75675+fail_nomem_anon_vma_fork:
75676+ mpol_put(pol);
75677+fail_nomem_policy:
75678+ kmem_cache_free(vm_area_cachep, tmp);
75679+fail_nomem:
75680+ vm_unacct_memory(charge);
75681+ return NULL;
75682+}
75683+
75684 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
75685 {
75686 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
75687 struct rb_node **rb_link, *rb_parent;
75688 int retval;
75689- unsigned long charge;
75690- struct mempolicy *pol;
75691
75692 uprobe_start_dup_mmap();
75693 down_write(&oldmm->mmap_sem);
75694@@ -365,8 +433,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
75695 mm->locked_vm = 0;
75696 mm->mmap = NULL;
75697 mm->mmap_cache = NULL;
75698- mm->free_area_cache = oldmm->mmap_base;
75699- mm->cached_hole_size = ~0UL;
75700+ mm->free_area_cache = oldmm->free_area_cache;
75701+ mm->cached_hole_size = oldmm->cached_hole_size;
75702 mm->map_count = 0;
75703 cpumask_clear(mm_cpumask(mm));
75704 mm->mm_rb = RB_ROOT;
75705@@ -382,57 +450,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
75706
75707 prev = NULL;
75708 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
75709- struct file *file;
75710-
75711 if (mpnt->vm_flags & VM_DONTCOPY) {
75712 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
75713 -vma_pages(mpnt));
75714 continue;
75715 }
75716- charge = 0;
75717- if (mpnt->vm_flags & VM_ACCOUNT) {
75718- unsigned long len = vma_pages(mpnt);
75719-
75720- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
75721- goto fail_nomem;
75722- charge = len;
75723- }
75724- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
75725- if (!tmp)
75726- goto fail_nomem;
75727- *tmp = *mpnt;
75728- INIT_LIST_HEAD(&tmp->anon_vma_chain);
75729- pol = mpol_dup(vma_policy(mpnt));
75730- retval = PTR_ERR(pol);
75731- if (IS_ERR(pol))
75732- goto fail_nomem_policy;
75733- vma_set_policy(tmp, pol);
75734- tmp->vm_mm = mm;
75735- if (anon_vma_fork(tmp, mpnt))
75736- goto fail_nomem_anon_vma_fork;
75737- tmp->vm_flags &= ~VM_LOCKED;
75738- tmp->vm_next = tmp->vm_prev = NULL;
75739- file = tmp->vm_file;
75740- if (file) {
75741- struct inode *inode = file_inode(file);
75742- struct address_space *mapping = file->f_mapping;
75743-
75744- get_file(file);
75745- if (tmp->vm_flags & VM_DENYWRITE)
75746- atomic_dec(&inode->i_writecount);
75747- mutex_lock(&mapping->i_mmap_mutex);
75748- if (tmp->vm_flags & VM_SHARED)
75749- mapping->i_mmap_writable++;
75750- flush_dcache_mmap_lock(mapping);
75751- /* insert tmp into the share list, just after mpnt */
75752- if (unlikely(tmp->vm_flags & VM_NONLINEAR))
75753- vma_nonlinear_insert(tmp,
75754- &mapping->i_mmap_nonlinear);
75755- else
75756- vma_interval_tree_insert_after(tmp, mpnt,
75757- &mapping->i_mmap);
75758- flush_dcache_mmap_unlock(mapping);
75759- mutex_unlock(&mapping->i_mmap_mutex);
75760+ tmp = dup_vma(mm, oldmm, mpnt);
75761+ if (!tmp) {
75762+ retval = -ENOMEM;
75763+ goto out;
75764 }
75765
75766 /*
75767@@ -464,6 +490,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
75768 if (retval)
75769 goto out;
75770 }
75771+
75772+#ifdef CONFIG_PAX_SEGMEXEC
75773+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
75774+ struct vm_area_struct *mpnt_m;
75775+
75776+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
75777+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
75778+
75779+ if (!mpnt->vm_mirror)
75780+ continue;
75781+
75782+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
75783+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
75784+ mpnt->vm_mirror = mpnt_m;
75785+ } else {
75786+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
75787+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
75788+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
75789+ mpnt->vm_mirror->vm_mirror = mpnt;
75790+ }
75791+ }
75792+ BUG_ON(mpnt_m);
75793+ }
75794+#endif
75795+
75796 /* a new mm has just been created */
75797 arch_dup_mmap(oldmm, mm);
75798 retval = 0;
75799@@ -473,14 +524,6 @@ out:
75800 up_write(&oldmm->mmap_sem);
75801 uprobe_end_dup_mmap();
75802 return retval;
75803-fail_nomem_anon_vma_fork:
75804- mpol_put(pol);
75805-fail_nomem_policy:
75806- kmem_cache_free(vm_area_cachep, tmp);
75807-fail_nomem:
75808- retval = -ENOMEM;
75809- vm_unacct_memory(charge);
75810- goto out;
75811 }
75812
75813 static inline int mm_alloc_pgd(struct mm_struct *mm)
75814@@ -695,8 +738,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
75815 return ERR_PTR(err);
75816
75817 mm = get_task_mm(task);
75818- if (mm && mm != current->mm &&
75819- !ptrace_may_access(task, mode)) {
75820+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
75821+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
75822 mmput(mm);
75823 mm = ERR_PTR(-EACCES);
75824 }
75825@@ -918,13 +961,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
75826 spin_unlock(&fs->lock);
75827 return -EAGAIN;
75828 }
75829- fs->users++;
75830+ atomic_inc(&fs->users);
75831 spin_unlock(&fs->lock);
75832 return 0;
75833 }
75834 tsk->fs = copy_fs_struct(fs);
75835 if (!tsk->fs)
75836 return -ENOMEM;
75837+ /* Carry through gr_chroot_dentry and is_chrooted instead
75838+ of recomputing it here. Already copied when the task struct
75839+ is duplicated. This allows pivot_root to not be treated as
75840+ a chroot
75841+ */
75842+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
75843+
75844 return 0;
75845 }
75846
75847@@ -1197,10 +1247,13 @@ static struct task_struct *copy_process(unsigned long clone_flags,
75848 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
75849 #endif
75850 retval = -EAGAIN;
75851+
75852+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
75853+
75854 if (atomic_read(&p->real_cred->user->processes) >=
75855 task_rlimit(p, RLIMIT_NPROC)) {
75856- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
75857- p->real_cred->user != INIT_USER)
75858+ if (p->real_cred->user != INIT_USER &&
75859+ !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
75860 goto bad_fork_free;
75861 }
75862 current->flags &= ~PF_NPROC_EXCEEDED;
75863@@ -1446,6 +1499,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
75864 goto bad_fork_free_pid;
75865 }
75866
75867+ /* synchronizes with gr_set_acls()
75868+ we need to call this past the point of no return for fork()
75869+ */
75870+ gr_copy_label(p);
75871+
75872 if (clone_flags & CLONE_THREAD) {
75873 current->signal->nr_threads++;
75874 atomic_inc(&current->signal->live);
75875@@ -1529,6 +1587,8 @@ bad_fork_cleanup_count:
75876 bad_fork_free:
75877 free_task(p);
75878 fork_out:
75879+ gr_log_forkfail(retval);
75880+
75881 return ERR_PTR(retval);
75882 }
75883
75884@@ -1579,6 +1639,23 @@ long do_fork(unsigned long clone_flags,
75885 return -EINVAL;
75886 }
75887
75888+#ifdef CONFIG_GRKERNSEC
75889+ if (clone_flags & CLONE_NEWUSER) {
75890+ /*
75891+ * This doesn't really inspire confidence:
75892+ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
75893+ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
75894+ * Increases kernel attack surface in areas developers
75895+ * previously cared little about ("low importance due
75896+ * to requiring "root" capability")
75897+ * To be removed when this code receives *proper* review
75898+ */
75899+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
75900+ !capable(CAP_SETGID))
75901+ return -EPERM;
75902+ }
75903+#endif
75904+
75905 /*
75906 * Determine whether and which event to report to ptracer. When
75907 * called from kernel_thread or CLONE_UNTRACED is explicitly
75908@@ -1613,6 +1690,8 @@ long do_fork(unsigned long clone_flags,
75909 if (clone_flags & CLONE_PARENT_SETTID)
75910 put_user(nr, parent_tidptr);
75911
75912+ gr_handle_brute_check();
75913+
75914 if (clone_flags & CLONE_VFORK) {
75915 p->vfork_done = &vfork;
75916 init_completion(&vfork);
75917@@ -1763,7 +1842,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
75918 return 0;
75919
75920 /* don't need lock here; in the worst case we'll do useless copy */
75921- if (fs->users == 1)
75922+ if (atomic_read(&fs->users) == 1)
75923 return 0;
75924
75925 *new_fsp = copy_fs_struct(fs);
75926@@ -1875,7 +1954,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
75927 fs = current->fs;
75928 spin_lock(&fs->lock);
75929 current->fs = new_fs;
75930- if (--fs->users)
75931+ gr_set_chroot_entries(current, &current->fs->root);
75932+ if (atomic_dec_return(&fs->users))
75933 new_fs = NULL;
75934 else
75935 new_fs = fs;
75936diff --git a/kernel/futex.c b/kernel/futex.c
75937index 49dacfb..5c6b450 100644
75938--- a/kernel/futex.c
75939+++ b/kernel/futex.c
75940@@ -54,6 +54,7 @@
75941 #include <linux/mount.h>
75942 #include <linux/pagemap.h>
75943 #include <linux/syscalls.h>
75944+#include <linux/ptrace.h>
75945 #include <linux/signal.h>
75946 #include <linux/export.h>
75947 #include <linux/magic.h>
75948@@ -242,6 +243,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
75949 struct page *page, *page_head;
75950 int err, ro = 0;
75951
75952+#ifdef CONFIG_PAX_SEGMEXEC
75953+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
75954+ return -EFAULT;
75955+#endif
75956+
75957 /*
75958 * The futex address must be "naturally" aligned.
75959 */
75960@@ -2733,6 +2739,7 @@ static int __init futex_init(void)
75961 {
75962 u32 curval;
75963 int i;
75964+ mm_segment_t oldfs;
75965
75966 /*
75967 * This will fail and we want it. Some arch implementations do
75968@@ -2744,8 +2751,11 @@ static int __init futex_init(void)
75969 * implementation, the non-functional ones will return
75970 * -ENOSYS.
75971 */
75972+ oldfs = get_fs();
75973+ set_fs(USER_DS);
75974 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
75975 futex_cmpxchg_enabled = 1;
75976+ set_fs(oldfs);
75977
75978 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
75979 plist_head_init(&futex_queues[i].chain);
75980diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
75981index f9f44fd..29885e4 100644
75982--- a/kernel/futex_compat.c
75983+++ b/kernel/futex_compat.c
75984@@ -32,7 +32,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
75985 return 0;
75986 }
75987
75988-static void __user *futex_uaddr(struct robust_list __user *entry,
75989+static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
75990 compat_long_t futex_offset)
75991 {
75992 compat_uptr_t base = ptr_to_compat(entry);
75993diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
75994index 9b22d03..6295b62 100644
75995--- a/kernel/gcov/base.c
75996+++ b/kernel/gcov/base.c
75997@@ -102,11 +102,6 @@ void gcov_enable_events(void)
75998 }
75999
76000 #ifdef CONFIG_MODULES
76001-static inline int within(void *addr, void *start, unsigned long size)
76002-{
76003- return ((addr >= start) && (addr < start + size));
76004-}
76005-
76006 /* Update list and generate events when modules are unloaded. */
76007 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
76008 void *data)
76009@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
76010 prev = NULL;
76011 /* Remove entries located in module from linked list. */
76012 for (info = gcov_info_head; info; info = info->next) {
76013- if (within(info, mod->module_core, mod->core_size)) {
76014+ if (within_module_core_rw((unsigned long)info, mod)) {
76015 if (prev)
76016 prev->next = info->next;
76017 else
76018diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
76019index fd4b13b..09a5ccb 100644
76020--- a/kernel/hrtimer.c
76021+++ b/kernel/hrtimer.c
76022@@ -1430,7 +1430,7 @@ void hrtimer_peek_ahead_timers(void)
76023 local_irq_restore(flags);
76024 }
76025
76026-static void run_hrtimer_softirq(struct softirq_action *h)
76027+static void run_hrtimer_softirq(void)
76028 {
76029 struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
76030
76031@@ -1772,7 +1772,7 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
76032 return NOTIFY_OK;
76033 }
76034
76035-static struct notifier_block __cpuinitdata hrtimers_nb = {
76036+static struct notifier_block hrtimers_nb = {
76037 .notifier_call = hrtimer_cpu_notify,
76038 };
76039
76040diff --git a/kernel/irq_work.c b/kernel/irq_work.c
76041index 55fcce6..0e4cf34 100644
76042--- a/kernel/irq_work.c
76043+++ b/kernel/irq_work.c
76044@@ -189,12 +189,13 @@ static int irq_work_cpu_notify(struct notifier_block *self,
76045 return NOTIFY_OK;
76046 }
76047
76048-static struct notifier_block cpu_notify;
76049+static struct notifier_block cpu_notify = {
76050+ .notifier_call = irq_work_cpu_notify,
76051+ .priority = 0,
76052+};
76053
76054 static __init int irq_work_init_cpu_notifier(void)
76055 {
76056- cpu_notify.notifier_call = irq_work_cpu_notify;
76057- cpu_notify.priority = 0;
76058 register_cpu_notifier(&cpu_notify);
76059 return 0;
76060 }
76061diff --git a/kernel/jump_label.c b/kernel/jump_label.c
76062index 60f48fa..7f3a770 100644
76063--- a/kernel/jump_label.c
76064+++ b/kernel/jump_label.c
76065@@ -13,6 +13,7 @@
76066 #include <linux/sort.h>
76067 #include <linux/err.h>
76068 #include <linux/static_key.h>
76069+#include <linux/mm.h>
76070
76071 #ifdef HAVE_JUMP_LABEL
76072
76073@@ -50,7 +51,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
76074
76075 size = (((unsigned long)stop - (unsigned long)start)
76076 / sizeof(struct jump_entry));
76077+ pax_open_kernel();
76078 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
76079+ pax_close_kernel();
76080 }
76081
76082 static void jump_label_update(struct static_key *key, int enable);
76083@@ -357,10 +360,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
76084 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
76085 struct jump_entry *iter;
76086
76087+ pax_open_kernel();
76088 for (iter = iter_start; iter < iter_stop; iter++) {
76089 if (within_module_init(iter->code, mod))
76090 iter->code = 0;
76091 }
76092+ pax_close_kernel();
76093 }
76094
76095 static int
76096diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
76097index 3127ad5..159d880 100644
76098--- a/kernel/kallsyms.c
76099+++ b/kernel/kallsyms.c
76100@@ -11,6 +11,9 @@
76101 * Changed the compression method from stem compression to "table lookup"
76102 * compression (see scripts/kallsyms.c for a more complete description)
76103 */
76104+#ifdef CONFIG_GRKERNSEC_HIDESYM
76105+#define __INCLUDED_BY_HIDESYM 1
76106+#endif
76107 #include <linux/kallsyms.h>
76108 #include <linux/module.h>
76109 #include <linux/init.h>
76110@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
76111
76112 static inline int is_kernel_inittext(unsigned long addr)
76113 {
76114+ if (system_state != SYSTEM_BOOTING)
76115+ return 0;
76116+
76117 if (addr >= (unsigned long)_sinittext
76118 && addr <= (unsigned long)_einittext)
76119 return 1;
76120 return 0;
76121 }
76122
76123+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
76124+#ifdef CONFIG_MODULES
76125+static inline int is_module_text(unsigned long addr)
76126+{
76127+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
76128+ return 1;
76129+
76130+ addr = ktla_ktva(addr);
76131+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
76132+}
76133+#else
76134+static inline int is_module_text(unsigned long addr)
76135+{
76136+ return 0;
76137+}
76138+#endif
76139+#endif
76140+
76141 static inline int is_kernel_text(unsigned long addr)
76142 {
76143 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
76144@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
76145
76146 static inline int is_kernel(unsigned long addr)
76147 {
76148+
76149+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
76150+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
76151+ return 1;
76152+
76153+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
76154+#else
76155 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
76156+#endif
76157+
76158 return 1;
76159 return in_gate_area_no_mm(addr);
76160 }
76161
76162 static int is_ksym_addr(unsigned long addr)
76163 {
76164+
76165+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
76166+ if (is_module_text(addr))
76167+ return 0;
76168+#endif
76169+
76170 if (all_var)
76171 return is_kernel(addr);
76172
76173@@ -480,7 +519,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
76174
76175 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
76176 {
76177- iter->name[0] = '\0';
76178 iter->nameoff = get_symbol_offset(new_pos);
76179 iter->pos = new_pos;
76180 }
76181@@ -528,6 +566,11 @@ static int s_show(struct seq_file *m, void *p)
76182 {
76183 struct kallsym_iter *iter = m->private;
76184
76185+#ifdef CONFIG_GRKERNSEC_HIDESYM
76186+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
76187+ return 0;
76188+#endif
76189+
76190 /* Some debugging symbols have no name. Ignore them. */
76191 if (!iter->name[0])
76192 return 0;
76193@@ -541,6 +584,7 @@ static int s_show(struct seq_file *m, void *p)
76194 */
76195 type = iter->exported ? toupper(iter->type) :
76196 tolower(iter->type);
76197+
76198 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
76199 type, iter->name, iter->module_name);
76200 } else
76201@@ -566,7 +610,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
76202 struct kallsym_iter *iter;
76203 int ret;
76204
76205- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
76206+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
76207 if (!iter)
76208 return -ENOMEM;
76209 reset_iter(iter, 0);
76210diff --git a/kernel/kcmp.c b/kernel/kcmp.c
76211index e30ac0f..3528cac 100644
76212--- a/kernel/kcmp.c
76213+++ b/kernel/kcmp.c
76214@@ -99,6 +99,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
76215 struct task_struct *task1, *task2;
76216 int ret;
76217
76218+#ifdef CONFIG_GRKERNSEC
76219+ return -ENOSYS;
76220+#endif
76221+
76222 rcu_read_lock();
76223
76224 /*
76225diff --git a/kernel/kexec.c b/kernel/kexec.c
76226index 59f7b55..4022f65 100644
76227--- a/kernel/kexec.c
76228+++ b/kernel/kexec.c
76229@@ -1041,7 +1041,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
76230 unsigned long flags)
76231 {
76232 struct compat_kexec_segment in;
76233- struct kexec_segment out, __user *ksegments;
76234+ struct kexec_segment out;
76235+ struct kexec_segment __user *ksegments;
76236 unsigned long i, result;
76237
76238 /* Don't allow clients that don't understand the native
76239diff --git a/kernel/kmod.c b/kernel/kmod.c
76240index 8241906..d625f2c 100644
76241--- a/kernel/kmod.c
76242+++ b/kernel/kmod.c
76243@@ -75,7 +75,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
76244 kfree(info->argv);
76245 }
76246
76247-static int call_modprobe(char *module_name, int wait)
76248+static int call_modprobe(char *module_name, char *module_param, int wait)
76249 {
76250 struct subprocess_info *info;
76251 static char *envp[] = {
76252@@ -85,7 +85,7 @@ static int call_modprobe(char *module_name, int wait)
76253 NULL
76254 };
76255
76256- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
76257+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
76258 if (!argv)
76259 goto out;
76260
76261@@ -97,7 +97,8 @@ static int call_modprobe(char *module_name, int wait)
76262 argv[1] = "-q";
76263 argv[2] = "--";
76264 argv[3] = module_name; /* check free_modprobe_argv() */
76265- argv[4] = NULL;
76266+ argv[4] = module_param;
76267+ argv[5] = NULL;
76268
76269 info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
76270 NULL, free_modprobe_argv, NULL);
76271@@ -129,9 +130,8 @@ out:
76272 * If module auto-loading support is disabled then this function
76273 * becomes a no-operation.
76274 */
76275-int __request_module(bool wait, const char *fmt, ...)
76276+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
76277 {
76278- va_list args;
76279 char module_name[MODULE_NAME_LEN];
76280 unsigned int max_modprobes;
76281 int ret;
76282@@ -147,9 +147,7 @@ int __request_module(bool wait, const char *fmt, ...)
76283 */
76284 WARN_ON_ONCE(wait && current_is_async());
76285
76286- va_start(args, fmt);
76287- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
76288- va_end(args);
76289+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
76290 if (ret >= MODULE_NAME_LEN)
76291 return -ENAMETOOLONG;
76292
76293@@ -157,6 +155,20 @@ int __request_module(bool wait, const char *fmt, ...)
76294 if (ret)
76295 return ret;
76296
76297+#ifdef CONFIG_GRKERNSEC_MODHARDEN
76298+ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
76299+ /* hack to workaround consolekit/udisks stupidity */
76300+ read_lock(&tasklist_lock);
76301+ if (!strcmp(current->comm, "mount") &&
76302+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
76303+ read_unlock(&tasklist_lock);
76304+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
76305+ return -EPERM;
76306+ }
76307+ read_unlock(&tasklist_lock);
76308+ }
76309+#endif
76310+
76311 /* If modprobe needs a service that is in a module, we get a recursive
76312 * loop. Limit the number of running kmod threads to max_threads/2 or
76313 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
76314@@ -185,11 +197,52 @@ int __request_module(bool wait, const char *fmt, ...)
76315
76316 trace_module_request(module_name, wait, _RET_IP_);
76317
76318- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
76319+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
76320
76321 atomic_dec(&kmod_concurrent);
76322 return ret;
76323 }
76324+
76325+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
76326+{
76327+ va_list args;
76328+ int ret;
76329+
76330+ va_start(args, fmt);
76331+ ret = ____request_module(wait, module_param, fmt, args);
76332+ va_end(args);
76333+
76334+ return ret;
76335+}
76336+
76337+int __request_module(bool wait, const char *fmt, ...)
76338+{
76339+ va_list args;
76340+ int ret;
76341+
76342+#ifdef CONFIG_GRKERNSEC_MODHARDEN
76343+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
76344+ char module_param[MODULE_NAME_LEN];
76345+
76346+ memset(module_param, 0, sizeof(module_param));
76347+
76348+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
76349+
76350+ va_start(args, fmt);
76351+ ret = ____request_module(wait, module_param, fmt, args);
76352+ va_end(args);
76353+
76354+ return ret;
76355+ }
76356+#endif
76357+
76358+ va_start(args, fmt);
76359+ ret = ____request_module(wait, NULL, fmt, args);
76360+ va_end(args);
76361+
76362+ return ret;
76363+}
76364+
76365 EXPORT_SYMBOL(__request_module);
76366 #endif /* CONFIG_MODULES */
76367
76368@@ -300,7 +353,7 @@ static int wait_for_helper(void *data)
76369 *
76370 * Thus the __user pointer cast is valid here.
76371 */
76372- sys_wait4(pid, (int __user *)&ret, 0, NULL);
76373+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
76374
76375 /*
76376 * If ret is 0, either ____call_usermodehelper failed and the
76377@@ -651,7 +704,7 @@ EXPORT_SYMBOL(call_usermodehelper);
76378 static int proc_cap_handler(struct ctl_table *table, int write,
76379 void __user *buffer, size_t *lenp, loff_t *ppos)
76380 {
76381- struct ctl_table t;
76382+ ctl_table_no_const t;
76383 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
76384 kernel_cap_t new_cap;
76385 int err, i;
76386diff --git a/kernel/kprobes.c b/kernel/kprobes.c
76387index bddf3b2..07b90dd 100644
76388--- a/kernel/kprobes.c
76389+++ b/kernel/kprobes.c
76390@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
76391 * kernel image and loaded module images reside. This is required
76392 * so x86_64 can correctly handle the %rip-relative fixups.
76393 */
76394- kip->insns = module_alloc(PAGE_SIZE);
76395+ kip->insns = module_alloc_exec(PAGE_SIZE);
76396 if (!kip->insns) {
76397 kfree(kip);
76398 return NULL;
76399@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
76400 */
76401 if (!list_is_singular(&kip->list)) {
76402 list_del(&kip->list);
76403- module_free(NULL, kip->insns);
76404+ module_free_exec(NULL, kip->insns);
76405 kfree(kip);
76406 }
76407 return 1;
76408@@ -2083,7 +2083,7 @@ static int __init init_kprobes(void)
76409 {
76410 int i, err = 0;
76411 unsigned long offset = 0, size = 0;
76412- char *modname, namebuf[128];
76413+ char *modname, namebuf[KSYM_NAME_LEN];
76414 const char *symbol_name;
76415 void *addr;
76416 struct kprobe_blackpoint *kb;
76417@@ -2168,11 +2168,11 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
76418 kprobe_type = "k";
76419
76420 if (sym)
76421- seq_printf(pi, "%p %s %s+0x%x %s ",
76422+ seq_printf(pi, "%pK %s %s+0x%x %s ",
76423 p->addr, kprobe_type, sym, offset,
76424 (modname ? modname : " "));
76425 else
76426- seq_printf(pi, "%p %s %p ",
76427+ seq_printf(pi, "%pK %s %pK ",
76428 p->addr, kprobe_type, p->addr);
76429
76430 if (!pp)
76431@@ -2209,7 +2209,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
76432 const char *sym = NULL;
76433 unsigned int i = *(loff_t *) v;
76434 unsigned long offset = 0;
76435- char *modname, namebuf[128];
76436+ char *modname, namebuf[KSYM_NAME_LEN];
76437
76438 head = &kprobe_table[i];
76439 preempt_disable();
76440diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
76441index 6ada93c..dce7d5d 100644
76442--- a/kernel/ksysfs.c
76443+++ b/kernel/ksysfs.c
76444@@ -46,6 +46,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
76445 {
76446 if (count+1 > UEVENT_HELPER_PATH_LEN)
76447 return -ENOENT;
76448+ if (!capable(CAP_SYS_ADMIN))
76449+ return -EPERM;
76450 memcpy(uevent_helper, buf, count);
76451 uevent_helper[count] = '\0';
76452 if (count && uevent_helper[count-1] == '\n')
76453@@ -172,7 +174,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
76454 return count;
76455 }
76456
76457-static struct bin_attribute notes_attr = {
76458+static bin_attribute_no_const notes_attr __read_only = {
76459 .attr = {
76460 .name = "notes",
76461 .mode = S_IRUGO,
76462diff --git a/kernel/lockdep.c b/kernel/lockdep.c
76463index 1f3186b..bb7dbc6 100644
76464--- a/kernel/lockdep.c
76465+++ b/kernel/lockdep.c
76466@@ -596,6 +596,10 @@ static int static_obj(void *obj)
76467 end = (unsigned long) &_end,
76468 addr = (unsigned long) obj;
76469
76470+#ifdef CONFIG_PAX_KERNEXEC
76471+ start = ktla_ktva(start);
76472+#endif
76473+
76474 /*
76475 * static variable?
76476 */
76477@@ -736,6 +740,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
76478 if (!static_obj(lock->key)) {
76479 debug_locks_off();
76480 printk("INFO: trying to register non-static key.\n");
76481+ printk("lock:%pS key:%pS.\n", lock, lock->key);
76482 printk("the code is fine but needs lockdep annotation.\n");
76483 printk("turning off the locking correctness validator.\n");
76484 dump_stack();
76485@@ -3080,7 +3085,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
76486 if (!class)
76487 return 0;
76488 }
76489- atomic_inc((atomic_t *)&class->ops);
76490+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
76491 if (very_verbose(class)) {
76492 printk("\nacquire class [%p] %s", class->key, class->name);
76493 if (class->name_version > 1)
76494diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
76495index b2c71c5..7b88d63 100644
76496--- a/kernel/lockdep_proc.c
76497+++ b/kernel/lockdep_proc.c
76498@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
76499 return 0;
76500 }
76501
76502- seq_printf(m, "%p", class->key);
76503+ seq_printf(m, "%pK", class->key);
76504 #ifdef CONFIG_DEBUG_LOCKDEP
76505 seq_printf(m, " OPS:%8ld", class->ops);
76506 #endif
76507@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
76508
76509 list_for_each_entry(entry, &class->locks_after, entry) {
76510 if (entry->distance == 1) {
76511- seq_printf(m, " -> [%p] ", entry->class->key);
76512+ seq_printf(m, " -> [%pK] ", entry->class->key);
76513 print_name(m, entry->class);
76514 seq_puts(m, "\n");
76515 }
76516@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
76517 if (!class->key)
76518 continue;
76519
76520- seq_printf(m, "[%p] ", class->key);
76521+ seq_printf(m, "[%pK] ", class->key);
76522 print_name(m, class);
76523 seq_puts(m, "\n");
76524 }
76525@@ -495,7 +495,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
76526 if (!i)
76527 seq_line(m, '-', 40-namelen, namelen);
76528
76529- snprintf(ip, sizeof(ip), "[<%p>]",
76530+ snprintf(ip, sizeof(ip), "[<%pK>]",
76531 (void *)class->contention_point[i]);
76532 seq_printf(m, "%40s %14lu %29s %pS\n",
76533 name, stats->contention_point[i],
76534@@ -510,7 +510,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
76535 if (!i)
76536 seq_line(m, '-', 40-namelen, namelen);
76537
76538- snprintf(ip, sizeof(ip), "[<%p>]",
76539+ snprintf(ip, sizeof(ip), "[<%pK>]",
76540 (void *)class->contending_point[i]);
76541 seq_printf(m, "%40s %14lu %29s %pS\n",
76542 name, stats->contending_point[i],
76543diff --git a/kernel/module.c b/kernel/module.c
76544index fa53db8..6f17200 100644
76545--- a/kernel/module.c
76546+++ b/kernel/module.c
76547@@ -61,6 +61,7 @@
76548 #include <linux/pfn.h>
76549 #include <linux/bsearch.h>
76550 #include <linux/fips.h>
76551+#include <linux/grsecurity.h>
76552 #include <uapi/linux/module.h>
76553 #include "module-internal.h"
76554
76555@@ -156,7 +157,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
76556
76557 /* Bounds of module allocation, for speeding __module_address.
76558 * Protected by module_mutex. */
76559-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
76560+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
76561+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
76562
76563 int register_module_notifier(struct notifier_block * nb)
76564 {
76565@@ -323,7 +325,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
76566 return true;
76567
76568 list_for_each_entry_rcu(mod, &modules, list) {
76569- struct symsearch arr[] = {
76570+ struct symsearch modarr[] = {
76571 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
76572 NOT_GPL_ONLY, false },
76573 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
76574@@ -348,7 +350,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
76575 if (mod->state == MODULE_STATE_UNFORMED)
76576 continue;
76577
76578- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
76579+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
76580 return true;
76581 }
76582 return false;
76583@@ -485,7 +487,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
76584 static int percpu_modalloc(struct module *mod,
76585 unsigned long size, unsigned long align)
76586 {
76587- if (align > PAGE_SIZE) {
76588+ if (align-1 >= PAGE_SIZE) {
76589 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
76590 mod->name, align, PAGE_SIZE);
76591 align = PAGE_SIZE;
76592@@ -1089,7 +1091,7 @@ struct module_attribute module_uevent =
76593 static ssize_t show_coresize(struct module_attribute *mattr,
76594 struct module_kobject *mk, char *buffer)
76595 {
76596- return sprintf(buffer, "%u\n", mk->mod->core_size);
76597+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
76598 }
76599
76600 static struct module_attribute modinfo_coresize =
76601@@ -1098,7 +1100,7 @@ static struct module_attribute modinfo_coresize =
76602 static ssize_t show_initsize(struct module_attribute *mattr,
76603 struct module_kobject *mk, char *buffer)
76604 {
76605- return sprintf(buffer, "%u\n", mk->mod->init_size);
76606+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
76607 }
76608
76609 static struct module_attribute modinfo_initsize =
76610@@ -1313,7 +1315,7 @@ resolve_symbol_wait(struct module *mod,
76611 */
76612 #ifdef CONFIG_SYSFS
76613
76614-#ifdef CONFIG_KALLSYMS
76615+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
76616 static inline bool sect_empty(const Elf_Shdr *sect)
76617 {
76618 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
76619@@ -1453,7 +1455,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
76620 {
76621 unsigned int notes, loaded, i;
76622 struct module_notes_attrs *notes_attrs;
76623- struct bin_attribute *nattr;
76624+ bin_attribute_no_const *nattr;
76625
76626 /* failed to create section attributes, so can't create notes */
76627 if (!mod->sect_attrs)
76628@@ -1565,7 +1567,7 @@ static void del_usage_links(struct module *mod)
76629 static int module_add_modinfo_attrs(struct module *mod)
76630 {
76631 struct module_attribute *attr;
76632- struct module_attribute *temp_attr;
76633+ module_attribute_no_const *temp_attr;
76634 int error = 0;
76635 int i;
76636
76637@@ -1779,21 +1781,21 @@ static void set_section_ro_nx(void *base,
76638
76639 static void unset_module_core_ro_nx(struct module *mod)
76640 {
76641- set_page_attributes(mod->module_core + mod->core_text_size,
76642- mod->module_core + mod->core_size,
76643+ set_page_attributes(mod->module_core_rw,
76644+ mod->module_core_rw + mod->core_size_rw,
76645 set_memory_x);
76646- set_page_attributes(mod->module_core,
76647- mod->module_core + mod->core_ro_size,
76648+ set_page_attributes(mod->module_core_rx,
76649+ mod->module_core_rx + mod->core_size_rx,
76650 set_memory_rw);
76651 }
76652
76653 static void unset_module_init_ro_nx(struct module *mod)
76654 {
76655- set_page_attributes(mod->module_init + mod->init_text_size,
76656- mod->module_init + mod->init_size,
76657+ set_page_attributes(mod->module_init_rw,
76658+ mod->module_init_rw + mod->init_size_rw,
76659 set_memory_x);
76660- set_page_attributes(mod->module_init,
76661- mod->module_init + mod->init_ro_size,
76662+ set_page_attributes(mod->module_init_rx,
76663+ mod->module_init_rx + mod->init_size_rx,
76664 set_memory_rw);
76665 }
76666
76667@@ -1806,14 +1808,14 @@ void set_all_modules_text_rw(void)
76668 list_for_each_entry_rcu(mod, &modules, list) {
76669 if (mod->state == MODULE_STATE_UNFORMED)
76670 continue;
76671- if ((mod->module_core) && (mod->core_text_size)) {
76672- set_page_attributes(mod->module_core,
76673- mod->module_core + mod->core_text_size,
76674+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
76675+ set_page_attributes(mod->module_core_rx,
76676+ mod->module_core_rx + mod->core_size_rx,
76677 set_memory_rw);
76678 }
76679- if ((mod->module_init) && (mod->init_text_size)) {
76680- set_page_attributes(mod->module_init,
76681- mod->module_init + mod->init_text_size,
76682+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
76683+ set_page_attributes(mod->module_init_rx,
76684+ mod->module_init_rx + mod->init_size_rx,
76685 set_memory_rw);
76686 }
76687 }
76688@@ -1829,14 +1831,14 @@ void set_all_modules_text_ro(void)
76689 list_for_each_entry_rcu(mod, &modules, list) {
76690 if (mod->state == MODULE_STATE_UNFORMED)
76691 continue;
76692- if ((mod->module_core) && (mod->core_text_size)) {
76693- set_page_attributes(mod->module_core,
76694- mod->module_core + mod->core_text_size,
76695+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
76696+ set_page_attributes(mod->module_core_rx,
76697+ mod->module_core_rx + mod->core_size_rx,
76698 set_memory_ro);
76699 }
76700- if ((mod->module_init) && (mod->init_text_size)) {
76701- set_page_attributes(mod->module_init,
76702- mod->module_init + mod->init_text_size,
76703+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
76704+ set_page_attributes(mod->module_init_rx,
76705+ mod->module_init_rx + mod->init_size_rx,
76706 set_memory_ro);
76707 }
76708 }
76709@@ -1887,16 +1889,19 @@ static void free_module(struct module *mod)
76710
76711 /* This may be NULL, but that's OK */
76712 unset_module_init_ro_nx(mod);
76713- module_free(mod, mod->module_init);
76714+ module_free(mod, mod->module_init_rw);
76715+ module_free_exec(mod, mod->module_init_rx);
76716 kfree(mod->args);
76717 percpu_modfree(mod);
76718
76719 /* Free lock-classes: */
76720- lockdep_free_key_range(mod->module_core, mod->core_size);
76721+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
76722+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
76723
76724 /* Finally, free the core (containing the module structure) */
76725 unset_module_core_ro_nx(mod);
76726- module_free(mod, mod->module_core);
76727+ module_free_exec(mod, mod->module_core_rx);
76728+ module_free(mod, mod->module_core_rw);
76729
76730 #ifdef CONFIG_MPU
76731 update_protections(current->mm);
76732@@ -1966,9 +1971,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
76733 int ret = 0;
76734 const struct kernel_symbol *ksym;
76735
76736+#ifdef CONFIG_GRKERNSEC_MODHARDEN
76737+ int is_fs_load = 0;
76738+ int register_filesystem_found = 0;
76739+ char *p;
76740+
76741+ p = strstr(mod->args, "grsec_modharden_fs");
76742+ if (p) {
76743+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
76744+ /* copy \0 as well */
76745+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
76746+ is_fs_load = 1;
76747+ }
76748+#endif
76749+
76750 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
76751 const char *name = info->strtab + sym[i].st_name;
76752
76753+#ifdef CONFIG_GRKERNSEC_MODHARDEN
76754+ /* it's a real shame this will never get ripped and copied
76755+ upstream! ;(
76756+ */
76757+ if (is_fs_load && !strcmp(name, "register_filesystem"))
76758+ register_filesystem_found = 1;
76759+#endif
76760+
76761 switch (sym[i].st_shndx) {
76762 case SHN_COMMON:
76763 /* We compiled with -fno-common. These are not
76764@@ -1989,7 +2016,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
76765 ksym = resolve_symbol_wait(mod, info, name);
76766 /* Ok if resolved. */
76767 if (ksym && !IS_ERR(ksym)) {
76768+ pax_open_kernel();
76769 sym[i].st_value = ksym->value;
76770+ pax_close_kernel();
76771 break;
76772 }
76773
76774@@ -2008,11 +2037,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
76775 secbase = (unsigned long)mod_percpu(mod);
76776 else
76777 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
76778+ pax_open_kernel();
76779 sym[i].st_value += secbase;
76780+ pax_close_kernel();
76781 break;
76782 }
76783 }
76784
76785+#ifdef CONFIG_GRKERNSEC_MODHARDEN
76786+ if (is_fs_load && !register_filesystem_found) {
76787+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
76788+ ret = -EPERM;
76789+ }
76790+#endif
76791+
76792 return ret;
76793 }
76794
76795@@ -2096,22 +2134,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
76796 || s->sh_entsize != ~0UL
76797 || strstarts(sname, ".init"))
76798 continue;
76799- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
76800+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
76801+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
76802+ else
76803+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
76804 pr_debug("\t%s\n", sname);
76805 }
76806- switch (m) {
76807- case 0: /* executable */
76808- mod->core_size = debug_align(mod->core_size);
76809- mod->core_text_size = mod->core_size;
76810- break;
76811- case 1: /* RO: text and ro-data */
76812- mod->core_size = debug_align(mod->core_size);
76813- mod->core_ro_size = mod->core_size;
76814- break;
76815- case 3: /* whole core */
76816- mod->core_size = debug_align(mod->core_size);
76817- break;
76818- }
76819 }
76820
76821 pr_debug("Init section allocation order:\n");
76822@@ -2125,23 +2153,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
76823 || s->sh_entsize != ~0UL
76824 || !strstarts(sname, ".init"))
76825 continue;
76826- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
76827- | INIT_OFFSET_MASK);
76828+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
76829+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
76830+ else
76831+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
76832+ s->sh_entsize |= INIT_OFFSET_MASK;
76833 pr_debug("\t%s\n", sname);
76834 }
76835- switch (m) {
76836- case 0: /* executable */
76837- mod->init_size = debug_align(mod->init_size);
76838- mod->init_text_size = mod->init_size;
76839- break;
76840- case 1: /* RO: text and ro-data */
76841- mod->init_size = debug_align(mod->init_size);
76842- mod->init_ro_size = mod->init_size;
76843- break;
76844- case 3: /* whole init */
76845- mod->init_size = debug_align(mod->init_size);
76846- break;
76847- }
76848 }
76849 }
76850
76851@@ -2314,7 +2332,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
76852
76853 /* Put symbol section at end of init part of module. */
76854 symsect->sh_flags |= SHF_ALLOC;
76855- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
76856+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
76857 info->index.sym) | INIT_OFFSET_MASK;
76858 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
76859
76860@@ -2331,13 +2349,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
76861 }
76862
76863 /* Append room for core symbols at end of core part. */
76864- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
76865- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
76866- mod->core_size += strtab_size;
76867+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
76868+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
76869+ mod->core_size_rx += strtab_size;
76870
76871 /* Put string table section at end of init part of module. */
76872 strsect->sh_flags |= SHF_ALLOC;
76873- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
76874+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
76875 info->index.str) | INIT_OFFSET_MASK;
76876 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
76877 }
76878@@ -2355,12 +2373,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
76879 /* Make sure we get permanent strtab: don't use info->strtab. */
76880 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
76881
76882+ pax_open_kernel();
76883+
76884 /* Set types up while we still have access to sections. */
76885 for (i = 0; i < mod->num_symtab; i++)
76886 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
76887
76888- mod->core_symtab = dst = mod->module_core + info->symoffs;
76889- mod->core_strtab = s = mod->module_core + info->stroffs;
76890+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
76891+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
76892 src = mod->symtab;
76893 for (ndst = i = 0; i < mod->num_symtab; i++) {
76894 if (i == 0 ||
76895@@ -2372,6 +2392,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
76896 }
76897 }
76898 mod->core_num_syms = ndst;
76899+
76900+ pax_close_kernel();
76901 }
76902 #else
76903 static inline void layout_symtab(struct module *mod, struct load_info *info)
76904@@ -2405,17 +2427,33 @@ void * __weak module_alloc(unsigned long size)
76905 return vmalloc_exec(size);
76906 }
76907
76908-static void *module_alloc_update_bounds(unsigned long size)
76909+static void *module_alloc_update_bounds_rw(unsigned long size)
76910 {
76911 void *ret = module_alloc(size);
76912
76913 if (ret) {
76914 mutex_lock(&module_mutex);
76915 /* Update module bounds. */
76916- if ((unsigned long)ret < module_addr_min)
76917- module_addr_min = (unsigned long)ret;
76918- if ((unsigned long)ret + size > module_addr_max)
76919- module_addr_max = (unsigned long)ret + size;
76920+ if ((unsigned long)ret < module_addr_min_rw)
76921+ module_addr_min_rw = (unsigned long)ret;
76922+ if ((unsigned long)ret + size > module_addr_max_rw)
76923+ module_addr_max_rw = (unsigned long)ret + size;
76924+ mutex_unlock(&module_mutex);
76925+ }
76926+ return ret;
76927+}
76928+
76929+static void *module_alloc_update_bounds_rx(unsigned long size)
76930+{
76931+ void *ret = module_alloc_exec(size);
76932+
76933+ if (ret) {
76934+ mutex_lock(&module_mutex);
76935+ /* Update module bounds. */
76936+ if ((unsigned long)ret < module_addr_min_rx)
76937+ module_addr_min_rx = (unsigned long)ret;
76938+ if ((unsigned long)ret + size > module_addr_max_rx)
76939+ module_addr_max_rx = (unsigned long)ret + size;
76940 mutex_unlock(&module_mutex);
76941 }
76942 return ret;
76943@@ -2691,8 +2729,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
76944 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
76945 {
76946 const char *modmagic = get_modinfo(info, "vermagic");
76947+ const char *license = get_modinfo(info, "license");
76948 int err;
76949
76950+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
76951+ if (!license || !license_is_gpl_compatible(license))
76952+ return -ENOEXEC;
76953+#endif
76954+
76955 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
76956 modmagic = NULL;
76957
76958@@ -2718,7 +2762,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
76959 }
76960
76961 /* Set up license info based on the info section */
76962- set_license(mod, get_modinfo(info, "license"));
76963+ set_license(mod, license);
76964
76965 return 0;
76966 }
76967@@ -2799,7 +2843,7 @@ static int move_module(struct module *mod, struct load_info *info)
76968 void *ptr;
76969
76970 /* Do the allocs. */
76971- ptr = module_alloc_update_bounds(mod->core_size);
76972+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
76973 /*
76974 * The pointer to this block is stored in the module structure
76975 * which is inside the block. Just mark it as not being a
76976@@ -2809,11 +2853,11 @@ static int move_module(struct module *mod, struct load_info *info)
76977 if (!ptr)
76978 return -ENOMEM;
76979
76980- memset(ptr, 0, mod->core_size);
76981- mod->module_core = ptr;
76982+ memset(ptr, 0, mod->core_size_rw);
76983+ mod->module_core_rw = ptr;
76984
76985- if (mod->init_size) {
76986- ptr = module_alloc_update_bounds(mod->init_size);
76987+ if (mod->init_size_rw) {
76988+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
76989 /*
76990 * The pointer to this block is stored in the module structure
76991 * which is inside the block. This block doesn't need to be
76992@@ -2822,13 +2866,45 @@ static int move_module(struct module *mod, struct load_info *info)
76993 */
76994 kmemleak_ignore(ptr);
76995 if (!ptr) {
76996- module_free(mod, mod->module_core);
76997+ module_free(mod, mod->module_core_rw);
76998 return -ENOMEM;
76999 }
77000- memset(ptr, 0, mod->init_size);
77001- mod->module_init = ptr;
77002+ memset(ptr, 0, mod->init_size_rw);
77003+ mod->module_init_rw = ptr;
77004 } else
77005- mod->module_init = NULL;
77006+ mod->module_init_rw = NULL;
77007+
77008+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
77009+ kmemleak_not_leak(ptr);
77010+ if (!ptr) {
77011+ if (mod->module_init_rw)
77012+ module_free(mod, mod->module_init_rw);
77013+ module_free(mod, mod->module_core_rw);
77014+ return -ENOMEM;
77015+ }
77016+
77017+ pax_open_kernel();
77018+ memset(ptr, 0, mod->core_size_rx);
77019+ pax_close_kernel();
77020+ mod->module_core_rx = ptr;
77021+
77022+ if (mod->init_size_rx) {
77023+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
77024+ kmemleak_ignore(ptr);
77025+ if (!ptr && mod->init_size_rx) {
77026+ module_free_exec(mod, mod->module_core_rx);
77027+ if (mod->module_init_rw)
77028+ module_free(mod, mod->module_init_rw);
77029+ module_free(mod, mod->module_core_rw);
77030+ return -ENOMEM;
77031+ }
77032+
77033+ pax_open_kernel();
77034+ memset(ptr, 0, mod->init_size_rx);
77035+ pax_close_kernel();
77036+ mod->module_init_rx = ptr;
77037+ } else
77038+ mod->module_init_rx = NULL;
77039
77040 /* Transfer each section which specifies SHF_ALLOC */
77041 pr_debug("final section addresses:\n");
77042@@ -2839,16 +2915,45 @@ static int move_module(struct module *mod, struct load_info *info)
77043 if (!(shdr->sh_flags & SHF_ALLOC))
77044 continue;
77045
77046- if (shdr->sh_entsize & INIT_OFFSET_MASK)
77047- dest = mod->module_init
77048- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
77049- else
77050- dest = mod->module_core + shdr->sh_entsize;
77051+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
77052+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
77053+ dest = mod->module_init_rw
77054+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
77055+ else
77056+ dest = mod->module_init_rx
77057+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
77058+ } else {
77059+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
77060+ dest = mod->module_core_rw + shdr->sh_entsize;
77061+ else
77062+ dest = mod->module_core_rx + shdr->sh_entsize;
77063+ }
77064+
77065+ if (shdr->sh_type != SHT_NOBITS) {
77066+
77067+#ifdef CONFIG_PAX_KERNEXEC
77068+#ifdef CONFIG_X86_64
77069+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
77070+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
77071+#endif
77072+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
77073+ pax_open_kernel();
77074+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
77075+ pax_close_kernel();
77076+ } else
77077+#endif
77078
77079- if (shdr->sh_type != SHT_NOBITS)
77080 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
77081+ }
77082 /* Update sh_addr to point to copy in image. */
77083- shdr->sh_addr = (unsigned long)dest;
77084+
77085+#ifdef CONFIG_PAX_KERNEXEC
77086+ if (shdr->sh_flags & SHF_EXECINSTR)
77087+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
77088+ else
77089+#endif
77090+
77091+ shdr->sh_addr = (unsigned long)dest;
77092 pr_debug("\t0x%lx %s\n",
77093 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
77094 }
77095@@ -2905,12 +3010,12 @@ static void flush_module_icache(const struct module *mod)
77096 * Do it before processing of module parameters, so the module
77097 * can provide parameter accessor functions of its own.
77098 */
77099- if (mod->module_init)
77100- flush_icache_range((unsigned long)mod->module_init,
77101- (unsigned long)mod->module_init
77102- + mod->init_size);
77103- flush_icache_range((unsigned long)mod->module_core,
77104- (unsigned long)mod->module_core + mod->core_size);
77105+ if (mod->module_init_rx)
77106+ flush_icache_range((unsigned long)mod->module_init_rx,
77107+ (unsigned long)mod->module_init_rx
77108+ + mod->init_size_rx);
77109+ flush_icache_range((unsigned long)mod->module_core_rx,
77110+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
77111
77112 set_fs(old_fs);
77113 }
77114@@ -2977,8 +3082,10 @@ static int alloc_module_percpu(struct module *mod, struct load_info *info)
77115 static void module_deallocate(struct module *mod, struct load_info *info)
77116 {
77117 percpu_modfree(mod);
77118- module_free(mod, mod->module_init);
77119- module_free(mod, mod->module_core);
77120+ module_free_exec(mod, mod->module_init_rx);
77121+ module_free_exec(mod, mod->module_core_rx);
77122+ module_free(mod, mod->module_init_rw);
77123+ module_free(mod, mod->module_core_rw);
77124 }
77125
77126 int __weak module_finalize(const Elf_Ehdr *hdr,
77127@@ -2991,7 +3098,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
77128 static int post_relocation(struct module *mod, const struct load_info *info)
77129 {
77130 /* Sort exception table now relocations are done. */
77131+ pax_open_kernel();
77132 sort_extable(mod->extable, mod->extable + mod->num_exentries);
77133+ pax_close_kernel();
77134
77135 /* Copy relocated percpu area over. */
77136 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
77137@@ -3045,16 +3154,16 @@ static int do_init_module(struct module *mod)
77138 MODULE_STATE_COMING, mod);
77139
77140 /* Set RO and NX regions for core */
77141- set_section_ro_nx(mod->module_core,
77142- mod->core_text_size,
77143- mod->core_ro_size,
77144- mod->core_size);
77145+ set_section_ro_nx(mod->module_core_rx,
77146+ mod->core_size_rx,
77147+ mod->core_size_rx,
77148+ mod->core_size_rx);
77149
77150 /* Set RO and NX regions for init */
77151- set_section_ro_nx(mod->module_init,
77152- mod->init_text_size,
77153- mod->init_ro_size,
77154- mod->init_size);
77155+ set_section_ro_nx(mod->module_init_rx,
77156+ mod->init_size_rx,
77157+ mod->init_size_rx,
77158+ mod->init_size_rx);
77159
77160 do_mod_ctors(mod);
77161 /* Start the module */
77162@@ -3116,11 +3225,12 @@ static int do_init_module(struct module *mod)
77163 mod->strtab = mod->core_strtab;
77164 #endif
77165 unset_module_init_ro_nx(mod);
77166- module_free(mod, mod->module_init);
77167- mod->module_init = NULL;
77168- mod->init_size = 0;
77169- mod->init_ro_size = 0;
77170- mod->init_text_size = 0;
77171+ module_free(mod, mod->module_init_rw);
77172+ module_free_exec(mod, mod->module_init_rx);
77173+ mod->module_init_rw = NULL;
77174+ mod->module_init_rx = NULL;
77175+ mod->init_size_rw = 0;
77176+ mod->init_size_rx = 0;
77177 mutex_unlock(&module_mutex);
77178 wake_up_all(&module_wq);
77179
77180@@ -3252,9 +3362,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
77181 if (err)
77182 goto free_unload;
77183
77184+ /* Now copy in args */
77185+ mod->args = strndup_user(uargs, ~0UL >> 1);
77186+ if (IS_ERR(mod->args)) {
77187+ err = PTR_ERR(mod->args);
77188+ goto free_unload;
77189+ }
77190+
77191 /* Set up MODINFO_ATTR fields */
77192 setup_modinfo(mod, info);
77193
77194+#ifdef CONFIG_GRKERNSEC_MODHARDEN
77195+ {
77196+ char *p, *p2;
77197+
77198+ if (strstr(mod->args, "grsec_modharden_netdev")) {
77199+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
77200+ err = -EPERM;
77201+ goto free_modinfo;
77202+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
77203+ p += sizeof("grsec_modharden_normal") - 1;
77204+ p2 = strstr(p, "_");
77205+ if (p2) {
77206+ *p2 = '\0';
77207+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
77208+ *p2 = '_';
77209+ }
77210+ err = -EPERM;
77211+ goto free_modinfo;
77212+ }
77213+ }
77214+#endif
77215+
77216 /* Fix up syms, so that st_value is a pointer to location. */
77217 err = simplify_symbols(mod, info);
77218 if (err < 0)
77219@@ -3270,13 +3409,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
77220
77221 flush_module_icache(mod);
77222
77223- /* Now copy in args */
77224- mod->args = strndup_user(uargs, ~0UL >> 1);
77225- if (IS_ERR(mod->args)) {
77226- err = PTR_ERR(mod->args);
77227- goto free_arch_cleanup;
77228- }
77229-
77230 dynamic_debug_setup(info->debug, info->num_debug);
77231
77232 /* Finally it's fully formed, ready to start executing. */
77233@@ -3311,11 +3443,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
77234 ddebug_cleanup:
77235 dynamic_debug_remove(info->debug);
77236 synchronize_sched();
77237- kfree(mod->args);
77238- free_arch_cleanup:
77239 module_arch_cleanup(mod);
77240 free_modinfo:
77241 free_modinfo(mod);
77242+ kfree(mod->args);
77243 free_unload:
77244 module_unload_free(mod);
77245 unlink_mod:
77246@@ -3398,10 +3529,16 @@ static const char *get_ksymbol(struct module *mod,
77247 unsigned long nextval;
77248
77249 /* At worse, next value is at end of module */
77250- if (within_module_init(addr, mod))
77251- nextval = (unsigned long)mod->module_init+mod->init_text_size;
77252+ if (within_module_init_rx(addr, mod))
77253+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
77254+ else if (within_module_init_rw(addr, mod))
77255+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
77256+ else if (within_module_core_rx(addr, mod))
77257+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
77258+ else if (within_module_core_rw(addr, mod))
77259+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
77260 else
77261- nextval = (unsigned long)mod->module_core+mod->core_text_size;
77262+ return NULL;
77263
77264 /* Scan for closest preceding symbol, and next symbol. (ELF
77265 starts real symbols at 1). */
77266@@ -3654,7 +3791,7 @@ static int m_show(struct seq_file *m, void *p)
77267 return 0;
77268
77269 seq_printf(m, "%s %u",
77270- mod->name, mod->init_size + mod->core_size);
77271+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
77272 print_unload_info(m, mod);
77273
77274 /* Informative for users. */
77275@@ -3663,7 +3800,7 @@ static int m_show(struct seq_file *m, void *p)
77276 mod->state == MODULE_STATE_COMING ? "Loading":
77277 "Live");
77278 /* Used by oprofile and other similar tools. */
77279- seq_printf(m, " 0x%pK", mod->module_core);
77280+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
77281
77282 /* Taints info */
77283 if (mod->taints)
77284@@ -3699,7 +3836,17 @@ static const struct file_operations proc_modules_operations = {
77285
77286 static int __init proc_modules_init(void)
77287 {
77288+#ifndef CONFIG_GRKERNSEC_HIDESYM
77289+#ifdef CONFIG_GRKERNSEC_PROC_USER
77290+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
77291+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
77292+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
77293+#else
77294 proc_create("modules", 0, NULL, &proc_modules_operations);
77295+#endif
77296+#else
77297+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
77298+#endif
77299 return 0;
77300 }
77301 module_init(proc_modules_init);
77302@@ -3760,14 +3907,14 @@ struct module *__module_address(unsigned long addr)
77303 {
77304 struct module *mod;
77305
77306- if (addr < module_addr_min || addr > module_addr_max)
77307+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
77308+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
77309 return NULL;
77310
77311 list_for_each_entry_rcu(mod, &modules, list) {
77312 if (mod->state == MODULE_STATE_UNFORMED)
77313 continue;
77314- if (within_module_core(addr, mod)
77315- || within_module_init(addr, mod))
77316+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
77317 return mod;
77318 }
77319 return NULL;
77320@@ -3802,11 +3949,20 @@ bool is_module_text_address(unsigned long addr)
77321 */
77322 struct module *__module_text_address(unsigned long addr)
77323 {
77324- struct module *mod = __module_address(addr);
77325+ struct module *mod;
77326+
77327+#ifdef CONFIG_X86_32
77328+ addr = ktla_ktva(addr);
77329+#endif
77330+
77331+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
77332+ return NULL;
77333+
77334+ mod = __module_address(addr);
77335+
77336 if (mod) {
77337 /* Make sure it's within the text section. */
77338- if (!within(addr, mod->module_init, mod->init_text_size)
77339- && !within(addr, mod->module_core, mod->core_text_size))
77340+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
77341 mod = NULL;
77342 }
77343 return mod;
77344diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
77345index 7e3443f..b2a1e6b 100644
77346--- a/kernel/mutex-debug.c
77347+++ b/kernel/mutex-debug.c
77348@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
77349 }
77350
77351 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
77352- struct thread_info *ti)
77353+ struct task_struct *task)
77354 {
77355 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
77356
77357 /* Mark the current thread as blocked on the lock: */
77358- ti->task->blocked_on = waiter;
77359+ task->blocked_on = waiter;
77360 }
77361
77362 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
77363- struct thread_info *ti)
77364+ struct task_struct *task)
77365 {
77366 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
77367- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
77368- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
77369- ti->task->blocked_on = NULL;
77370+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
77371+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
77372+ task->blocked_on = NULL;
77373
77374 list_del_init(&waiter->list);
77375 waiter->task = NULL;
77376diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
77377index 0799fd3..d06ae3b 100644
77378--- a/kernel/mutex-debug.h
77379+++ b/kernel/mutex-debug.h
77380@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
77381 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
77382 extern void debug_mutex_add_waiter(struct mutex *lock,
77383 struct mutex_waiter *waiter,
77384- struct thread_info *ti);
77385+ struct task_struct *task);
77386 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
77387- struct thread_info *ti);
77388+ struct task_struct *task);
77389 extern void debug_mutex_unlock(struct mutex *lock);
77390 extern void debug_mutex_init(struct mutex *lock, const char *name,
77391 struct lock_class_key *key);
77392diff --git a/kernel/mutex.c b/kernel/mutex.c
77393index ad53a66..f1bf8bc 100644
77394--- a/kernel/mutex.c
77395+++ b/kernel/mutex.c
77396@@ -134,7 +134,7 @@ void mspin_lock(struct mspin_node **lock, struct mspin_node *node)
77397 node->locked = 1;
77398 return;
77399 }
77400- ACCESS_ONCE(prev->next) = node;
77401+ ACCESS_ONCE_RW(prev->next) = node;
77402 smp_wmb();
77403 /* Wait until the lock holder passes the lock down */
77404 while (!ACCESS_ONCE(node->locked))
77405@@ -155,7 +155,7 @@ static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
77406 while (!(next = ACCESS_ONCE(node->next)))
77407 arch_mutex_cpu_relax();
77408 }
77409- ACCESS_ONCE(next->locked) = 1;
77410+ ACCESS_ONCE_RW(next->locked) = 1;
77411 smp_wmb();
77412 }
77413
77414@@ -341,7 +341,7 @@ slowpath:
77415 spin_lock_mutex(&lock->wait_lock, flags);
77416
77417 debug_mutex_lock_common(lock, &waiter);
77418- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
77419+ debug_mutex_add_waiter(lock, &waiter, task);
77420
77421 /* add waiting tasks to the end of the waitqueue (FIFO): */
77422 list_add_tail(&waiter.list, &lock->wait_list);
77423@@ -371,8 +371,7 @@ slowpath:
77424 * TASK_UNINTERRUPTIBLE case.)
77425 */
77426 if (unlikely(signal_pending_state(state, task))) {
77427- mutex_remove_waiter(lock, &waiter,
77428- task_thread_info(task));
77429+ mutex_remove_waiter(lock, &waiter, task);
77430 mutex_release(&lock->dep_map, 1, ip);
77431 spin_unlock_mutex(&lock->wait_lock, flags);
77432
77433@@ -391,7 +390,7 @@ slowpath:
77434 done:
77435 lock_acquired(&lock->dep_map, ip);
77436 /* got the lock - rejoice! */
77437- mutex_remove_waiter(lock, &waiter, current_thread_info());
77438+ mutex_remove_waiter(lock, &waiter, task);
77439 mutex_set_owner(lock);
77440
77441 /* set it to 0 if there are no waiters left: */
77442diff --git a/kernel/notifier.c b/kernel/notifier.c
77443index 2d5cc4c..d9ea600 100644
77444--- a/kernel/notifier.c
77445+++ b/kernel/notifier.c
77446@@ -5,6 +5,7 @@
77447 #include <linux/rcupdate.h>
77448 #include <linux/vmalloc.h>
77449 #include <linux/reboot.h>
77450+#include <linux/mm.h>
77451
77452 /*
77453 * Notifier list for kernel code which wants to be called
77454@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
77455 while ((*nl) != NULL) {
77456 if (n->priority > (*nl)->priority)
77457 break;
77458- nl = &((*nl)->next);
77459+ nl = (struct notifier_block **)&((*nl)->next);
77460 }
77461- n->next = *nl;
77462+ pax_open_kernel();
77463+ *(const void **)&n->next = *nl;
77464 rcu_assign_pointer(*nl, n);
77465+ pax_close_kernel();
77466 return 0;
77467 }
77468
77469@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
77470 return 0;
77471 if (n->priority > (*nl)->priority)
77472 break;
77473- nl = &((*nl)->next);
77474+ nl = (struct notifier_block **)&((*nl)->next);
77475 }
77476- n->next = *nl;
77477+ pax_open_kernel();
77478+ *(const void **)&n->next = *nl;
77479 rcu_assign_pointer(*nl, n);
77480+ pax_close_kernel();
77481 return 0;
77482 }
77483
77484@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
77485 {
77486 while ((*nl) != NULL) {
77487 if ((*nl) == n) {
77488+ pax_open_kernel();
77489 rcu_assign_pointer(*nl, n->next);
77490+ pax_close_kernel();
77491 return 0;
77492 }
77493- nl = &((*nl)->next);
77494+ nl = (struct notifier_block **)&((*nl)->next);
77495 }
77496 return -ENOENT;
77497 }
77498diff --git a/kernel/panic.c b/kernel/panic.c
77499index 167ec09..0dda5f9 100644
77500--- a/kernel/panic.c
77501+++ b/kernel/panic.c
77502@@ -400,7 +400,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
77503 unsigned taint, struct slowpath_args *args)
77504 {
77505 printk(KERN_WARNING "------------[ cut here ]------------\n");
77506- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
77507+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
77508
77509 if (args)
77510 vprintk(args->fmt, args->args);
77511@@ -453,7 +453,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
77512 */
77513 void __stack_chk_fail(void)
77514 {
77515- panic("stack-protector: Kernel stack is corrupted in: %p\n",
77516+ dump_stack();
77517+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
77518 __builtin_return_address(0));
77519 }
77520 EXPORT_SYMBOL(__stack_chk_fail);
77521diff --git a/kernel/pid.c b/kernel/pid.c
77522index 0db3e79..95b9dc2 100644
77523--- a/kernel/pid.c
77524+++ b/kernel/pid.c
77525@@ -33,6 +33,7 @@
77526 #include <linux/rculist.h>
77527 #include <linux/bootmem.h>
77528 #include <linux/hash.h>
77529+#include <linux/security.h>
77530 #include <linux/pid_namespace.h>
77531 #include <linux/init_task.h>
77532 #include <linux/syscalls.h>
77533@@ -47,7 +48,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
77534
77535 int pid_max = PID_MAX_DEFAULT;
77536
77537-#define RESERVED_PIDS 300
77538+#define RESERVED_PIDS 500
77539
77540 int pid_max_min = RESERVED_PIDS + 1;
77541 int pid_max_max = PID_MAX_LIMIT;
77542@@ -442,10 +443,18 @@ EXPORT_SYMBOL(pid_task);
77543 */
77544 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
77545 {
77546+ struct task_struct *task;
77547+
77548 rcu_lockdep_assert(rcu_read_lock_held(),
77549 "find_task_by_pid_ns() needs rcu_read_lock()"
77550 " protection");
77551- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
77552+
77553+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
77554+
77555+ if (gr_pid_is_chrooted(task))
77556+ return NULL;
77557+
77558+ return task;
77559 }
77560
77561 struct task_struct *find_task_by_vpid(pid_t vnr)
77562@@ -453,6 +462,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
77563 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
77564 }
77565
77566+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
77567+{
77568+ rcu_lockdep_assert(rcu_read_lock_held(),
77569+ "find_task_by_pid_ns() needs rcu_read_lock()"
77570+ " protection");
77571+ return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
77572+}
77573+
77574 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
77575 {
77576 struct pid *pid;
77577diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
77578index 6917e8e..9909aeb 100644
77579--- a/kernel/pid_namespace.c
77580+++ b/kernel/pid_namespace.c
77581@@ -247,7 +247,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
77582 void __user *buffer, size_t *lenp, loff_t *ppos)
77583 {
77584 struct pid_namespace *pid_ns = task_active_pid_ns(current);
77585- struct ctl_table tmp = *table;
77586+ ctl_table_no_const tmp = *table;
77587
77588 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
77589 return -EPERM;
77590diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
77591index 42670e9..8719c2f 100644
77592--- a/kernel/posix-cpu-timers.c
77593+++ b/kernel/posix-cpu-timers.c
77594@@ -1636,14 +1636,14 @@ struct k_clock clock_posix_cpu = {
77595
77596 static __init int init_posix_cpu_timers(void)
77597 {
77598- struct k_clock process = {
77599+ static struct k_clock process = {
77600 .clock_getres = process_cpu_clock_getres,
77601 .clock_get = process_cpu_clock_get,
77602 .timer_create = process_cpu_timer_create,
77603 .nsleep = process_cpu_nsleep,
77604 .nsleep_restart = process_cpu_nsleep_restart,
77605 };
77606- struct k_clock thread = {
77607+ static struct k_clock thread = {
77608 .clock_getres = thread_cpu_clock_getres,
77609 .clock_get = thread_cpu_clock_get,
77610 .timer_create = thread_cpu_timer_create,
77611diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
77612index 424c2d4..a9194f7 100644
77613--- a/kernel/posix-timers.c
77614+++ b/kernel/posix-timers.c
77615@@ -43,6 +43,7 @@
77616 #include <linux/hash.h>
77617 #include <linux/posix-clock.h>
77618 #include <linux/posix-timers.h>
77619+#include <linux/grsecurity.h>
77620 #include <linux/syscalls.h>
77621 #include <linux/wait.h>
77622 #include <linux/workqueue.h>
77623@@ -122,7 +123,7 @@ static DEFINE_SPINLOCK(hash_lock);
77624 * which we beg off on and pass to do_sys_settimeofday().
77625 */
77626
77627-static struct k_clock posix_clocks[MAX_CLOCKS];
77628+static struct k_clock *posix_clocks[MAX_CLOCKS];
77629
77630 /*
77631 * These ones are defined below.
77632@@ -275,7 +276,7 @@ static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
77633 */
77634 static __init int init_posix_timers(void)
77635 {
77636- struct k_clock clock_realtime = {
77637+ static struct k_clock clock_realtime = {
77638 .clock_getres = hrtimer_get_res,
77639 .clock_get = posix_clock_realtime_get,
77640 .clock_set = posix_clock_realtime_set,
77641@@ -287,7 +288,7 @@ static __init int init_posix_timers(void)
77642 .timer_get = common_timer_get,
77643 .timer_del = common_timer_del,
77644 };
77645- struct k_clock clock_monotonic = {
77646+ static struct k_clock clock_monotonic = {
77647 .clock_getres = hrtimer_get_res,
77648 .clock_get = posix_ktime_get_ts,
77649 .nsleep = common_nsleep,
77650@@ -297,19 +298,19 @@ static __init int init_posix_timers(void)
77651 .timer_get = common_timer_get,
77652 .timer_del = common_timer_del,
77653 };
77654- struct k_clock clock_monotonic_raw = {
77655+ static struct k_clock clock_monotonic_raw = {
77656 .clock_getres = hrtimer_get_res,
77657 .clock_get = posix_get_monotonic_raw,
77658 };
77659- struct k_clock clock_realtime_coarse = {
77660+ static struct k_clock clock_realtime_coarse = {
77661 .clock_getres = posix_get_coarse_res,
77662 .clock_get = posix_get_realtime_coarse,
77663 };
77664- struct k_clock clock_monotonic_coarse = {
77665+ static struct k_clock clock_monotonic_coarse = {
77666 .clock_getres = posix_get_coarse_res,
77667 .clock_get = posix_get_monotonic_coarse,
77668 };
77669- struct k_clock clock_tai = {
77670+ static struct k_clock clock_tai = {
77671 .clock_getres = hrtimer_get_res,
77672 .clock_get = posix_get_tai,
77673 .nsleep = common_nsleep,
77674@@ -319,7 +320,7 @@ static __init int init_posix_timers(void)
77675 .timer_get = common_timer_get,
77676 .timer_del = common_timer_del,
77677 };
77678- struct k_clock clock_boottime = {
77679+ static struct k_clock clock_boottime = {
77680 .clock_getres = hrtimer_get_res,
77681 .clock_get = posix_get_boottime,
77682 .nsleep = common_nsleep,
77683@@ -531,7 +532,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
77684 return;
77685 }
77686
77687- posix_clocks[clock_id] = *new_clock;
77688+ posix_clocks[clock_id] = new_clock;
77689 }
77690 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
77691
77692@@ -577,9 +578,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
77693 return (id & CLOCKFD_MASK) == CLOCKFD ?
77694 &clock_posix_dynamic : &clock_posix_cpu;
77695
77696- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
77697+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
77698 return NULL;
77699- return &posix_clocks[id];
77700+ return posix_clocks[id];
77701 }
77702
77703 static int common_timer_create(struct k_itimer *new_timer)
77704@@ -1011,6 +1012,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
77705 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
77706 return -EFAULT;
77707
77708+ /* only the CLOCK_REALTIME clock can be set, all other clocks
77709+ have their clock_set fptr set to a nosettime dummy function
77710+ CLOCK_REALTIME has a NULL clock_set fptr which causes it to
77711+ call common_clock_set, which calls do_sys_settimeofday, which
77712+ we hook
77713+ */
77714+
77715 return kc->clock_set(which_clock, &new_tp);
77716 }
77717
77718diff --git a/kernel/power/process.c b/kernel/power/process.c
77719index 98088e0..aaf95c0 100644
77720--- a/kernel/power/process.c
77721+++ b/kernel/power/process.c
77722@@ -33,6 +33,7 @@ static int try_to_freeze_tasks(bool user_only)
77723 u64 elapsed_csecs64;
77724 unsigned int elapsed_csecs;
77725 bool wakeup = false;
77726+ bool timedout = false;
77727
77728 do_gettimeofday(&start);
77729
77730@@ -43,13 +44,20 @@ static int try_to_freeze_tasks(bool user_only)
77731
77732 while (true) {
77733 todo = 0;
77734+ if (time_after(jiffies, end_time))
77735+ timedout = true;
77736 read_lock(&tasklist_lock);
77737 do_each_thread(g, p) {
77738 if (p == current || !freeze_task(p))
77739 continue;
77740
77741- if (!freezer_should_skip(p))
77742+ if (!freezer_should_skip(p)) {
77743 todo++;
77744+ if (timedout) {
77745+ printk(KERN_ERR "Task refusing to freeze:\n");
77746+ sched_show_task(p);
77747+ }
77748+ }
77749 } while_each_thread(g, p);
77750 read_unlock(&tasklist_lock);
77751
77752@@ -58,7 +66,7 @@ static int try_to_freeze_tasks(bool user_only)
77753 todo += wq_busy;
77754 }
77755
77756- if (!todo || time_after(jiffies, end_time))
77757+ if (!todo || timedout)
77758 break;
77759
77760 if (pm_wakeup_pending()) {
77761diff --git a/kernel/printk.c b/kernel/printk.c
77762index 8212c1a..eb61021 100644
77763--- a/kernel/printk.c
77764+++ b/kernel/printk.c
77765@@ -390,6 +390,11 @@ static int check_syslog_permissions(int type, bool from_file)
77766 if (from_file && type != SYSLOG_ACTION_OPEN)
77767 return 0;
77768
77769+#ifdef CONFIG_GRKERNSEC_DMESG
77770+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
77771+ return -EPERM;
77772+#endif
77773+
77774 if (syslog_action_restricted(type)) {
77775 if (capable(CAP_SYSLOG))
77776 return 0;
77777diff --git a/kernel/profile.c b/kernel/profile.c
77778index 0bf4007..6234708 100644
77779--- a/kernel/profile.c
77780+++ b/kernel/profile.c
77781@@ -37,7 +37,7 @@ struct profile_hit {
77782 #define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
77783 #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
77784
77785-static atomic_t *prof_buffer;
77786+static atomic_unchecked_t *prof_buffer;
77787 static unsigned long prof_len, prof_shift;
77788
77789 int prof_on __read_mostly;
77790@@ -260,7 +260,7 @@ static void profile_flip_buffers(void)
77791 hits[i].pc = 0;
77792 continue;
77793 }
77794- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
77795+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
77796 hits[i].hits = hits[i].pc = 0;
77797 }
77798 }
77799@@ -321,9 +321,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
77800 * Add the current hit(s) and flush the write-queue out
77801 * to the global buffer:
77802 */
77803- atomic_add(nr_hits, &prof_buffer[pc]);
77804+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
77805 for (i = 0; i < NR_PROFILE_HIT; ++i) {
77806- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
77807+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
77808 hits[i].pc = hits[i].hits = 0;
77809 }
77810 out:
77811@@ -398,7 +398,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
77812 {
77813 unsigned long pc;
77814 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
77815- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
77816+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
77817 }
77818 #endif /* !CONFIG_SMP */
77819
77820@@ -494,7 +494,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
77821 return -EFAULT;
77822 buf++; p++; count--; read++;
77823 }
77824- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
77825+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
77826 if (copy_to_user(buf, (void *)pnt, count))
77827 return -EFAULT;
77828 read += count;
77829@@ -525,7 +525,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
77830 }
77831 #endif
77832 profile_discard_flip_buffers();
77833- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
77834+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
77835 return count;
77836 }
77837
77838diff --git a/kernel/ptrace.c b/kernel/ptrace.c
77839index 335a7ae..3bbbceb 100644
77840--- a/kernel/ptrace.c
77841+++ b/kernel/ptrace.c
77842@@ -326,7 +326,7 @@ static int ptrace_attach(struct task_struct *task, long request,
77843 if (seize)
77844 flags |= PT_SEIZED;
77845 rcu_read_lock();
77846- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
77847+ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
77848 flags |= PT_PTRACE_CAP;
77849 rcu_read_unlock();
77850 task->ptrace = flags;
77851@@ -537,7 +537,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
77852 break;
77853 return -EIO;
77854 }
77855- if (copy_to_user(dst, buf, retval))
77856+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
77857 return -EFAULT;
77858 copied += retval;
77859 src += retval;
77860@@ -805,7 +805,7 @@ int ptrace_request(struct task_struct *child, long request,
77861 bool seized = child->ptrace & PT_SEIZED;
77862 int ret = -EIO;
77863 siginfo_t siginfo, *si;
77864- void __user *datavp = (void __user *) data;
77865+ void __user *datavp = (__force void __user *) data;
77866 unsigned long __user *datalp = datavp;
77867 unsigned long flags;
77868
77869@@ -1011,14 +1011,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
77870 goto out;
77871 }
77872
77873+ if (gr_handle_ptrace(child, request)) {
77874+ ret = -EPERM;
77875+ goto out_put_task_struct;
77876+ }
77877+
77878 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
77879 ret = ptrace_attach(child, request, addr, data);
77880 /*
77881 * Some architectures need to do book-keeping after
77882 * a ptrace attach.
77883 */
77884- if (!ret)
77885+ if (!ret) {
77886 arch_ptrace_attach(child);
77887+ gr_audit_ptrace(child);
77888+ }
77889 goto out_put_task_struct;
77890 }
77891
77892@@ -1046,7 +1053,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
77893 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
77894 if (copied != sizeof(tmp))
77895 return -EIO;
77896- return put_user(tmp, (unsigned long __user *)data);
77897+ return put_user(tmp, (__force unsigned long __user *)data);
77898 }
77899
77900 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
77901@@ -1140,7 +1147,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
77902 }
77903
77904 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
77905- compat_long_t addr, compat_long_t data)
77906+ compat_ulong_t addr, compat_ulong_t data)
77907 {
77908 struct task_struct *child;
77909 long ret;
77910@@ -1156,14 +1163,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
77911 goto out;
77912 }
77913
77914+ if (gr_handle_ptrace(child, request)) {
77915+ ret = -EPERM;
77916+ goto out_put_task_struct;
77917+ }
77918+
77919 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
77920 ret = ptrace_attach(child, request, addr, data);
77921 /*
77922 * Some architectures need to do book-keeping after
77923 * a ptrace attach.
77924 */
77925- if (!ret)
77926+ if (!ret) {
77927 arch_ptrace_attach(child);
77928+ gr_audit_ptrace(child);
77929+ }
77930 goto out_put_task_struct;
77931 }
77932
77933diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
77934index 48ab703..07561d4 100644
77935--- a/kernel/rcupdate.c
77936+++ b/kernel/rcupdate.c
77937@@ -439,10 +439,10 @@ int rcu_jiffies_till_stall_check(void)
77938 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
77939 */
77940 if (till_stall_check < 3) {
77941- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
77942+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
77943 till_stall_check = 3;
77944 } else if (till_stall_check > 300) {
77945- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
77946+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
77947 till_stall_check = 300;
77948 }
77949 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
77950diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
77951index a0714a5..2ab5e34 100644
77952--- a/kernel/rcutiny.c
77953+++ b/kernel/rcutiny.c
77954@@ -46,7 +46,7 @@
77955 struct rcu_ctrlblk;
77956 static void invoke_rcu_callbacks(void);
77957 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
77958-static void rcu_process_callbacks(struct softirq_action *unused);
77959+static void rcu_process_callbacks(void);
77960 static void __call_rcu(struct rcu_head *head,
77961 void (*func)(struct rcu_head *rcu),
77962 struct rcu_ctrlblk *rcp);
77963@@ -312,7 +312,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
77964 rcu_is_callbacks_kthread()));
77965 }
77966
77967-static void rcu_process_callbacks(struct softirq_action *unused)
77968+static void rcu_process_callbacks(void)
77969 {
77970 __rcu_process_callbacks(&rcu_sched_ctrlblk);
77971 __rcu_process_callbacks(&rcu_bh_ctrlblk);
77972diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
77973index 8a23300..4255818 100644
77974--- a/kernel/rcutiny_plugin.h
77975+++ b/kernel/rcutiny_plugin.h
77976@@ -945,7 +945,7 @@ static int rcu_kthread(void *arg)
77977 have_rcu_kthread_work = morework;
77978 local_irq_restore(flags);
77979 if (work)
77980- rcu_process_callbacks(NULL);
77981+ rcu_process_callbacks();
77982 schedule_timeout_interruptible(1); /* Leave CPU for others. */
77983 }
77984
77985diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
77986index e1f3a8c..42c94a2 100644
77987--- a/kernel/rcutorture.c
77988+++ b/kernel/rcutorture.c
77989@@ -164,12 +164,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
77990 { 0 };
77991 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
77992 { 0 };
77993-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
77994-static atomic_t n_rcu_torture_alloc;
77995-static atomic_t n_rcu_torture_alloc_fail;
77996-static atomic_t n_rcu_torture_free;
77997-static atomic_t n_rcu_torture_mberror;
77998-static atomic_t n_rcu_torture_error;
77999+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
78000+static atomic_unchecked_t n_rcu_torture_alloc;
78001+static atomic_unchecked_t n_rcu_torture_alloc_fail;
78002+static atomic_unchecked_t n_rcu_torture_free;
78003+static atomic_unchecked_t n_rcu_torture_mberror;
78004+static atomic_unchecked_t n_rcu_torture_error;
78005 static long n_rcu_torture_barrier_error;
78006 static long n_rcu_torture_boost_ktrerror;
78007 static long n_rcu_torture_boost_rterror;
78008@@ -287,11 +287,11 @@ rcu_torture_alloc(void)
78009
78010 spin_lock_bh(&rcu_torture_lock);
78011 if (list_empty(&rcu_torture_freelist)) {
78012- atomic_inc(&n_rcu_torture_alloc_fail);
78013+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
78014 spin_unlock_bh(&rcu_torture_lock);
78015 return NULL;
78016 }
78017- atomic_inc(&n_rcu_torture_alloc);
78018+ atomic_inc_unchecked(&n_rcu_torture_alloc);
78019 p = rcu_torture_freelist.next;
78020 list_del_init(p);
78021 spin_unlock_bh(&rcu_torture_lock);
78022@@ -304,7 +304,7 @@ rcu_torture_alloc(void)
78023 static void
78024 rcu_torture_free(struct rcu_torture *p)
78025 {
78026- atomic_inc(&n_rcu_torture_free);
78027+ atomic_inc_unchecked(&n_rcu_torture_free);
78028 spin_lock_bh(&rcu_torture_lock);
78029 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
78030 spin_unlock_bh(&rcu_torture_lock);
78031@@ -424,7 +424,7 @@ rcu_torture_cb(struct rcu_head *p)
78032 i = rp->rtort_pipe_count;
78033 if (i > RCU_TORTURE_PIPE_LEN)
78034 i = RCU_TORTURE_PIPE_LEN;
78035- atomic_inc(&rcu_torture_wcount[i]);
78036+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
78037 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
78038 rp->rtort_mbtest = 0;
78039 rcu_torture_free(rp);
78040@@ -472,7 +472,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
78041 i = rp->rtort_pipe_count;
78042 if (i > RCU_TORTURE_PIPE_LEN)
78043 i = RCU_TORTURE_PIPE_LEN;
78044- atomic_inc(&rcu_torture_wcount[i]);
78045+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
78046 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
78047 rp->rtort_mbtest = 0;
78048 list_del(&rp->rtort_free);
78049@@ -990,7 +990,7 @@ rcu_torture_writer(void *arg)
78050 i = old_rp->rtort_pipe_count;
78051 if (i > RCU_TORTURE_PIPE_LEN)
78052 i = RCU_TORTURE_PIPE_LEN;
78053- atomic_inc(&rcu_torture_wcount[i]);
78054+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
78055 old_rp->rtort_pipe_count++;
78056 cur_ops->deferred_free(old_rp);
78057 }
78058@@ -1076,7 +1076,7 @@ static void rcu_torture_timer(unsigned long unused)
78059 return;
78060 }
78061 if (p->rtort_mbtest == 0)
78062- atomic_inc(&n_rcu_torture_mberror);
78063+ atomic_inc_unchecked(&n_rcu_torture_mberror);
78064 spin_lock(&rand_lock);
78065 cur_ops->read_delay(&rand);
78066 n_rcu_torture_timers++;
78067@@ -1146,7 +1146,7 @@ rcu_torture_reader(void *arg)
78068 continue;
78069 }
78070 if (p->rtort_mbtest == 0)
78071- atomic_inc(&n_rcu_torture_mberror);
78072+ atomic_inc_unchecked(&n_rcu_torture_mberror);
78073 cur_ops->read_delay(&rand);
78074 preempt_disable();
78075 pipe_count = p->rtort_pipe_count;
78076@@ -1209,11 +1209,11 @@ rcu_torture_printk(char *page)
78077 rcu_torture_current,
78078 rcu_torture_current_version,
78079 list_empty(&rcu_torture_freelist),
78080- atomic_read(&n_rcu_torture_alloc),
78081- atomic_read(&n_rcu_torture_alloc_fail),
78082- atomic_read(&n_rcu_torture_free));
78083+ atomic_read_unchecked(&n_rcu_torture_alloc),
78084+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
78085+ atomic_read_unchecked(&n_rcu_torture_free));
78086 cnt += sprintf(&page[cnt], "rtmbe: %d rtbke: %ld rtbre: %ld ",
78087- atomic_read(&n_rcu_torture_mberror),
78088+ atomic_read_unchecked(&n_rcu_torture_mberror),
78089 n_rcu_torture_boost_ktrerror,
78090 n_rcu_torture_boost_rterror);
78091 cnt += sprintf(&page[cnt], "rtbf: %ld rtb: %ld nt: %ld ",
78092@@ -1232,14 +1232,14 @@ rcu_torture_printk(char *page)
78093 n_barrier_attempts,
78094 n_rcu_torture_barrier_error);
78095 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
78096- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
78097+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
78098 n_rcu_torture_barrier_error != 0 ||
78099 n_rcu_torture_boost_ktrerror != 0 ||
78100 n_rcu_torture_boost_rterror != 0 ||
78101 n_rcu_torture_boost_failure != 0 ||
78102 i > 1) {
78103 cnt += sprintf(&page[cnt], "!!! ");
78104- atomic_inc(&n_rcu_torture_error);
78105+ atomic_inc_unchecked(&n_rcu_torture_error);
78106 WARN_ON_ONCE(1);
78107 }
78108 cnt += sprintf(&page[cnt], "Reader Pipe: ");
78109@@ -1253,7 +1253,7 @@ rcu_torture_printk(char *page)
78110 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
78111 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
78112 cnt += sprintf(&page[cnt], " %d",
78113- atomic_read(&rcu_torture_wcount[i]));
78114+ atomic_read_unchecked(&rcu_torture_wcount[i]));
78115 }
78116 cnt += sprintf(&page[cnt], "\n");
78117 if (cur_ops->stats)
78118@@ -1962,7 +1962,7 @@ rcu_torture_cleanup(void)
78119
78120 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
78121
78122- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
78123+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
78124 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
78125 else if (n_online_successes != n_online_attempts ||
78126 n_offline_successes != n_offline_attempts)
78127@@ -2031,18 +2031,18 @@ rcu_torture_init(void)
78128
78129 rcu_torture_current = NULL;
78130 rcu_torture_current_version = 0;
78131- atomic_set(&n_rcu_torture_alloc, 0);
78132- atomic_set(&n_rcu_torture_alloc_fail, 0);
78133- atomic_set(&n_rcu_torture_free, 0);
78134- atomic_set(&n_rcu_torture_mberror, 0);
78135- atomic_set(&n_rcu_torture_error, 0);
78136+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
78137+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
78138+ atomic_set_unchecked(&n_rcu_torture_free, 0);
78139+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
78140+ atomic_set_unchecked(&n_rcu_torture_error, 0);
78141 n_rcu_torture_barrier_error = 0;
78142 n_rcu_torture_boost_ktrerror = 0;
78143 n_rcu_torture_boost_rterror = 0;
78144 n_rcu_torture_boost_failure = 0;
78145 n_rcu_torture_boosts = 0;
78146 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
78147- atomic_set(&rcu_torture_wcount[i], 0);
78148+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
78149 for_each_possible_cpu(cpu) {
78150 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
78151 per_cpu(rcu_torture_count, cpu)[i] = 0;
78152diff --git a/kernel/rcutree.c b/kernel/rcutree.c
78153index 3538001..e379e0b 100644
78154--- a/kernel/rcutree.c
78155+++ b/kernel/rcutree.c
78156@@ -358,9 +358,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
78157 rcu_prepare_for_idle(smp_processor_id());
78158 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
78159 smp_mb__before_atomic_inc(); /* See above. */
78160- atomic_inc(&rdtp->dynticks);
78161+ atomic_inc_unchecked(&rdtp->dynticks);
78162 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
78163- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
78164+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
78165
78166 /*
78167 * It is illegal to enter an extended quiescent state while
78168@@ -496,10 +496,10 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
78169 int user)
78170 {
78171 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
78172- atomic_inc(&rdtp->dynticks);
78173+ atomic_inc_unchecked(&rdtp->dynticks);
78174 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
78175 smp_mb__after_atomic_inc(); /* See above. */
78176- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
78177+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
78178 rcu_cleanup_after_idle(smp_processor_id());
78179 trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
78180 if (!user && !is_idle_task(current)) {
78181@@ -638,14 +638,14 @@ void rcu_nmi_enter(void)
78182 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
78183
78184 if (rdtp->dynticks_nmi_nesting == 0 &&
78185- (atomic_read(&rdtp->dynticks) & 0x1))
78186+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
78187 return;
78188 rdtp->dynticks_nmi_nesting++;
78189 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
78190- atomic_inc(&rdtp->dynticks);
78191+ atomic_inc_unchecked(&rdtp->dynticks);
78192 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
78193 smp_mb__after_atomic_inc(); /* See above. */
78194- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
78195+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
78196 }
78197
78198 /**
78199@@ -664,9 +664,9 @@ void rcu_nmi_exit(void)
78200 return;
78201 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
78202 smp_mb__before_atomic_inc(); /* See above. */
78203- atomic_inc(&rdtp->dynticks);
78204+ atomic_inc_unchecked(&rdtp->dynticks);
78205 smp_mb__after_atomic_inc(); /* Force delay to next write. */
78206- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
78207+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
78208 }
78209
78210 /**
78211@@ -680,7 +680,7 @@ int rcu_is_cpu_idle(void)
78212 int ret;
78213
78214 preempt_disable();
78215- ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
78216+ ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
78217 preempt_enable();
78218 return ret;
78219 }
78220@@ -748,7 +748,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
78221 */
78222 static int dyntick_save_progress_counter(struct rcu_data *rdp)
78223 {
78224- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
78225+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
78226 return (rdp->dynticks_snap & 0x1) == 0;
78227 }
78228
78229@@ -763,7 +763,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
78230 unsigned int curr;
78231 unsigned int snap;
78232
78233- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
78234+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
78235 snap = (unsigned int)rdp->dynticks_snap;
78236
78237 /*
78238@@ -1440,9 +1440,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
78239 rdp = this_cpu_ptr(rsp->rda);
78240 rcu_preempt_check_blocked_tasks(rnp);
78241 rnp->qsmask = rnp->qsmaskinit;
78242- ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
78243+ ACCESS_ONCE_RW(rnp->gpnum) = rsp->gpnum;
78244 WARN_ON_ONCE(rnp->completed != rsp->completed);
78245- ACCESS_ONCE(rnp->completed) = rsp->completed;
78246+ ACCESS_ONCE_RW(rnp->completed) = rsp->completed;
78247 if (rnp == rdp->mynode)
78248 rcu_start_gp_per_cpu(rsp, rnp, rdp);
78249 rcu_preempt_boost_start_gp(rnp);
78250@@ -1524,7 +1524,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
78251 */
78252 rcu_for_each_node_breadth_first(rsp, rnp) {
78253 raw_spin_lock_irq(&rnp->lock);
78254- ACCESS_ONCE(rnp->completed) = rsp->gpnum;
78255+ ACCESS_ONCE_RW(rnp->completed) = rsp->gpnum;
78256 rdp = this_cpu_ptr(rsp->rda);
78257 if (rnp == rdp->mynode)
78258 __rcu_process_gp_end(rsp, rnp, rdp);
78259@@ -1855,7 +1855,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
78260 rsp->qlen += rdp->qlen;
78261 rdp->n_cbs_orphaned += rdp->qlen;
78262 rdp->qlen_lazy = 0;
78263- ACCESS_ONCE(rdp->qlen) = 0;
78264+ ACCESS_ONCE_RW(rdp->qlen) = 0;
78265 }
78266
78267 /*
78268@@ -2101,7 +2101,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
78269 }
78270 smp_mb(); /* List handling before counting for rcu_barrier(). */
78271 rdp->qlen_lazy -= count_lazy;
78272- ACCESS_ONCE(rdp->qlen) -= count;
78273+ ACCESS_ONCE_RW(rdp->qlen) -= count;
78274 rdp->n_cbs_invoked += count;
78275
78276 /* Reinstate batch limit if we have worked down the excess. */
78277@@ -2295,7 +2295,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
78278 /*
78279 * Do RCU core processing for the current CPU.
78280 */
78281-static void rcu_process_callbacks(struct softirq_action *unused)
78282+static void rcu_process_callbacks(void)
78283 {
78284 struct rcu_state *rsp;
78285
78286@@ -2419,7 +2419,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
78287 local_irq_restore(flags);
78288 return;
78289 }
78290- ACCESS_ONCE(rdp->qlen)++;
78291+ ACCESS_ONCE_RW(rdp->qlen)++;
78292 if (lazy)
78293 rdp->qlen_lazy++;
78294 else
78295@@ -2628,11 +2628,11 @@ void synchronize_sched_expedited(void)
78296 * counter wrap on a 32-bit system. Quite a few more CPUs would of
78297 * course be required on a 64-bit system.
78298 */
78299- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
78300+ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
78301 (ulong)atomic_long_read(&rsp->expedited_done) +
78302 ULONG_MAX / 8)) {
78303 synchronize_sched();
78304- atomic_long_inc(&rsp->expedited_wrap);
78305+ atomic_long_inc_unchecked(&rsp->expedited_wrap);
78306 return;
78307 }
78308
78309@@ -2640,7 +2640,7 @@ void synchronize_sched_expedited(void)
78310 * Take a ticket. Note that atomic_inc_return() implies a
78311 * full memory barrier.
78312 */
78313- snap = atomic_long_inc_return(&rsp->expedited_start);
78314+ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
78315 firstsnap = snap;
78316 get_online_cpus();
78317 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
78318@@ -2653,14 +2653,14 @@ void synchronize_sched_expedited(void)
78319 synchronize_sched_expedited_cpu_stop,
78320 NULL) == -EAGAIN) {
78321 put_online_cpus();
78322- atomic_long_inc(&rsp->expedited_tryfail);
78323+ atomic_long_inc_unchecked(&rsp->expedited_tryfail);
78324
78325 /* Check to see if someone else did our work for us. */
78326 s = atomic_long_read(&rsp->expedited_done);
78327 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
78328 /* ensure test happens before caller kfree */
78329 smp_mb__before_atomic_inc(); /* ^^^ */
78330- atomic_long_inc(&rsp->expedited_workdone1);
78331+ atomic_long_inc_unchecked(&rsp->expedited_workdone1);
78332 return;
78333 }
78334
78335@@ -2669,7 +2669,7 @@ void synchronize_sched_expedited(void)
78336 udelay(trycount * num_online_cpus());
78337 } else {
78338 wait_rcu_gp(call_rcu_sched);
78339- atomic_long_inc(&rsp->expedited_normal);
78340+ atomic_long_inc_unchecked(&rsp->expedited_normal);
78341 return;
78342 }
78343
78344@@ -2678,7 +2678,7 @@ void synchronize_sched_expedited(void)
78345 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
78346 /* ensure test happens before caller kfree */
78347 smp_mb__before_atomic_inc(); /* ^^^ */
78348- atomic_long_inc(&rsp->expedited_workdone2);
78349+ atomic_long_inc_unchecked(&rsp->expedited_workdone2);
78350 return;
78351 }
78352
78353@@ -2690,10 +2690,10 @@ void synchronize_sched_expedited(void)
78354 * period works for us.
78355 */
78356 get_online_cpus();
78357- snap = atomic_long_read(&rsp->expedited_start);
78358+ snap = atomic_long_read_unchecked(&rsp->expedited_start);
78359 smp_mb(); /* ensure read is before try_stop_cpus(). */
78360 }
78361- atomic_long_inc(&rsp->expedited_stoppedcpus);
78362+ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
78363
78364 /*
78365 * Everyone up to our most recent fetch is covered by our grace
78366@@ -2702,16 +2702,16 @@ void synchronize_sched_expedited(void)
78367 * than we did already did their update.
78368 */
78369 do {
78370- atomic_long_inc(&rsp->expedited_done_tries);
78371+ atomic_long_inc_unchecked(&rsp->expedited_done_tries);
78372 s = atomic_long_read(&rsp->expedited_done);
78373 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
78374 /* ensure test happens before caller kfree */
78375 smp_mb__before_atomic_inc(); /* ^^^ */
78376- atomic_long_inc(&rsp->expedited_done_lost);
78377+ atomic_long_inc_unchecked(&rsp->expedited_done_lost);
78378 break;
78379 }
78380 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
78381- atomic_long_inc(&rsp->expedited_done_exit);
78382+ atomic_long_inc_unchecked(&rsp->expedited_done_exit);
78383
78384 put_online_cpus();
78385 }
78386@@ -2893,7 +2893,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
78387 * ACCESS_ONCE() to prevent the compiler from speculating
78388 * the increment to precede the early-exit check.
78389 */
78390- ACCESS_ONCE(rsp->n_barrier_done)++;
78391+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
78392 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
78393 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
78394 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
78395@@ -2943,7 +2943,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
78396
78397 /* Increment ->n_barrier_done to prevent duplicate work. */
78398 smp_mb(); /* Keep increment after above mechanism. */
78399- ACCESS_ONCE(rsp->n_barrier_done)++;
78400+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
78401 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
78402 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
78403 smp_mb(); /* Keep increment before caller's subsequent code. */
78404@@ -2988,10 +2988,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
78405 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
78406 init_callback_list(rdp);
78407 rdp->qlen_lazy = 0;
78408- ACCESS_ONCE(rdp->qlen) = 0;
78409+ ACCESS_ONCE_RW(rdp->qlen) = 0;
78410 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
78411 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
78412- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
78413+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
78414 rdp->cpu = cpu;
78415 rdp->rsp = rsp;
78416 rcu_boot_init_nocb_percpu_data(rdp);
78417@@ -3024,8 +3024,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
78418 rdp->blimit = blimit;
78419 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
78420 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
78421- atomic_set(&rdp->dynticks->dynticks,
78422- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
78423+ atomic_set_unchecked(&rdp->dynticks->dynticks,
78424+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
78425 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
78426
78427 /* Add CPU to rcu_node bitmasks. */
78428@@ -3120,7 +3120,7 @@ static int __init rcu_spawn_gp_kthread(void)
78429 struct task_struct *t;
78430
78431 for_each_rcu_flavor(rsp) {
78432- t = kthread_run(rcu_gp_kthread, rsp, rsp->name);
78433+ t = kthread_run(rcu_gp_kthread, rsp, "%s", rsp->name);
78434 BUG_ON(IS_ERR(t));
78435 rnp = rcu_get_root(rsp);
78436 raw_spin_lock_irqsave(&rnp->lock, flags);
78437diff --git a/kernel/rcutree.h b/kernel/rcutree.h
78438index 4df5034..5ee93f2 100644
78439--- a/kernel/rcutree.h
78440+++ b/kernel/rcutree.h
78441@@ -87,7 +87,7 @@ struct rcu_dynticks {
78442 long long dynticks_nesting; /* Track irq/process nesting level. */
78443 /* Process level is worth LLONG_MAX/2. */
78444 int dynticks_nmi_nesting; /* Track NMI nesting level. */
78445- atomic_t dynticks; /* Even value for idle, else odd. */
78446+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
78447 #ifdef CONFIG_RCU_FAST_NO_HZ
78448 bool all_lazy; /* Are all CPU's CBs lazy? */
78449 unsigned long nonlazy_posted;
78450@@ -414,17 +414,17 @@ struct rcu_state {
78451 /* _rcu_barrier(). */
78452 /* End of fields guarded by barrier_mutex. */
78453
78454- atomic_long_t expedited_start; /* Starting ticket. */
78455- atomic_long_t expedited_done; /* Done ticket. */
78456- atomic_long_t expedited_wrap; /* # near-wrap incidents. */
78457- atomic_long_t expedited_tryfail; /* # acquisition failures. */
78458- atomic_long_t expedited_workdone1; /* # done by others #1. */
78459- atomic_long_t expedited_workdone2; /* # done by others #2. */
78460- atomic_long_t expedited_normal; /* # fallbacks to normal. */
78461- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
78462- atomic_long_t expedited_done_tries; /* # tries to update _done. */
78463- atomic_long_t expedited_done_lost; /* # times beaten to _done. */
78464- atomic_long_t expedited_done_exit; /* # times exited _done loop. */
78465+ atomic_long_unchecked_t expedited_start; /* Starting ticket. */
78466+ atomic_long_t expedited_done; /* Done ticket. */
78467+ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
78468+ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
78469+ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
78470+ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
78471+ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
78472+ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
78473+ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
78474+ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
78475+ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
78476
78477 unsigned long jiffies_force_qs; /* Time at which to invoke */
78478 /* force_quiescent_state(). */
78479diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
78480index 3db5a37..b395fb35 100644
78481--- a/kernel/rcutree_plugin.h
78482+++ b/kernel/rcutree_plugin.h
78483@@ -903,7 +903,7 @@ void synchronize_rcu_expedited(void)
78484
78485 /* Clean up and exit. */
78486 smp_mb(); /* ensure expedited GP seen before counter increment. */
78487- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
78488+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
78489 unlock_mb_ret:
78490 mutex_unlock(&sync_rcu_preempt_exp_mutex);
78491 mb_ret:
78492@@ -1451,7 +1451,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
78493 free_cpumask_var(cm);
78494 }
78495
78496-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
78497+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
78498 .store = &rcu_cpu_kthread_task,
78499 .thread_should_run = rcu_cpu_kthread_should_run,
78500 .thread_fn = rcu_cpu_kthread,
78501@@ -1916,7 +1916,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
78502 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
78503 printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
78504 cpu, ticks_value, ticks_title,
78505- atomic_read(&rdtp->dynticks) & 0xfff,
78506+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
78507 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
78508 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
78509 fast_no_hz);
78510@@ -2079,7 +2079,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
78511
78512 /* Enqueue the callback on the nocb list and update counts. */
78513 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
78514- ACCESS_ONCE(*old_rhpp) = rhp;
78515+ ACCESS_ONCE_RW(*old_rhpp) = rhp;
78516 atomic_long_add(rhcount, &rdp->nocb_q_count);
78517 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
78518
78519@@ -2219,12 +2219,12 @@ static int rcu_nocb_kthread(void *arg)
78520 * Extract queued callbacks, update counts, and wait
78521 * for a grace period to elapse.
78522 */
78523- ACCESS_ONCE(rdp->nocb_head) = NULL;
78524+ ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
78525 tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
78526 c = atomic_long_xchg(&rdp->nocb_q_count, 0);
78527 cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
78528- ACCESS_ONCE(rdp->nocb_p_count) += c;
78529- ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl;
78530+ ACCESS_ONCE_RW(rdp->nocb_p_count) += c;
78531+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) += cl;
78532 rcu_nocb_wait_gp(rdp);
78533
78534 /* Each pass through the following loop invokes a callback. */
78535@@ -2246,8 +2246,8 @@ static int rcu_nocb_kthread(void *arg)
78536 list = next;
78537 }
78538 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
78539- ACCESS_ONCE(rdp->nocb_p_count) -= c;
78540- ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
78541+ ACCESS_ONCE_RW(rdp->nocb_p_count) -= c;
78542+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) -= cl;
78543 rdp->n_nocbs_invoked += c;
78544 }
78545 return 0;
78546@@ -2274,7 +2274,7 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
78547 t = kthread_run(rcu_nocb_kthread, rdp,
78548 "rcuo%c/%d", rsp->abbr, cpu);
78549 BUG_ON(IS_ERR(t));
78550- ACCESS_ONCE(rdp->nocb_kthread) = t;
78551+ ACCESS_ONCE_RW(rdp->nocb_kthread) = t;
78552 }
78553 }
78554
78555diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
78556index cf6c174..a8f4b50 100644
78557--- a/kernel/rcutree_trace.c
78558+++ b/kernel/rcutree_trace.c
78559@@ -121,7 +121,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
78560 ulong2long(rdp->completed), ulong2long(rdp->gpnum),
78561 rdp->passed_quiesce, rdp->qs_pending);
78562 seq_printf(m, " dt=%d/%llx/%d df=%lu",
78563- atomic_read(&rdp->dynticks->dynticks),
78564+ atomic_read_unchecked(&rdp->dynticks->dynticks),
78565 rdp->dynticks->dynticks_nesting,
78566 rdp->dynticks->dynticks_nmi_nesting,
78567 rdp->dynticks_fqs);
78568@@ -182,17 +182,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
78569 struct rcu_state *rsp = (struct rcu_state *)m->private;
78570
78571 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
78572- atomic_long_read(&rsp->expedited_start),
78573+ atomic_long_read_unchecked(&rsp->expedited_start),
78574 atomic_long_read(&rsp->expedited_done),
78575- atomic_long_read(&rsp->expedited_wrap),
78576- atomic_long_read(&rsp->expedited_tryfail),
78577- atomic_long_read(&rsp->expedited_workdone1),
78578- atomic_long_read(&rsp->expedited_workdone2),
78579- atomic_long_read(&rsp->expedited_normal),
78580- atomic_long_read(&rsp->expedited_stoppedcpus),
78581- atomic_long_read(&rsp->expedited_done_tries),
78582- atomic_long_read(&rsp->expedited_done_lost),
78583- atomic_long_read(&rsp->expedited_done_exit));
78584+ atomic_long_read_unchecked(&rsp->expedited_wrap),
78585+ atomic_long_read_unchecked(&rsp->expedited_tryfail),
78586+ atomic_long_read_unchecked(&rsp->expedited_workdone1),
78587+ atomic_long_read_unchecked(&rsp->expedited_workdone2),
78588+ atomic_long_read_unchecked(&rsp->expedited_normal),
78589+ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
78590+ atomic_long_read_unchecked(&rsp->expedited_done_tries),
78591+ atomic_long_read_unchecked(&rsp->expedited_done_lost),
78592+ atomic_long_read_unchecked(&rsp->expedited_done_exit));
78593 return 0;
78594 }
78595
78596diff --git a/kernel/resource.c b/kernel/resource.c
78597index d738698..5f8e60a 100644
78598--- a/kernel/resource.c
78599+++ b/kernel/resource.c
78600@@ -152,8 +152,18 @@ static const struct file_operations proc_iomem_operations = {
78601
78602 static int __init ioresources_init(void)
78603 {
78604+#ifdef CONFIG_GRKERNSEC_PROC_ADD
78605+#ifdef CONFIG_GRKERNSEC_PROC_USER
78606+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
78607+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
78608+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
78609+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
78610+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
78611+#endif
78612+#else
78613 proc_create("ioports", 0, NULL, &proc_ioports_operations);
78614 proc_create("iomem", 0, NULL, &proc_iomem_operations);
78615+#endif
78616 return 0;
78617 }
78618 __initcall(ioresources_init);
78619diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
78620index 1d96dd0..994ff19 100644
78621--- a/kernel/rtmutex-tester.c
78622+++ b/kernel/rtmutex-tester.c
78623@@ -22,7 +22,7 @@
78624 #define MAX_RT_TEST_MUTEXES 8
78625
78626 static spinlock_t rttest_lock;
78627-static atomic_t rttest_event;
78628+static atomic_unchecked_t rttest_event;
78629
78630 struct test_thread_data {
78631 int opcode;
78632@@ -63,7 +63,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
78633
78634 case RTTEST_LOCKCONT:
78635 td->mutexes[td->opdata] = 1;
78636- td->event = atomic_add_return(1, &rttest_event);
78637+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78638 return 0;
78639
78640 case RTTEST_RESET:
78641@@ -76,7 +76,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
78642 return 0;
78643
78644 case RTTEST_RESETEVENT:
78645- atomic_set(&rttest_event, 0);
78646+ atomic_set_unchecked(&rttest_event, 0);
78647 return 0;
78648
78649 default:
78650@@ -93,9 +93,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
78651 return ret;
78652
78653 td->mutexes[id] = 1;
78654- td->event = atomic_add_return(1, &rttest_event);
78655+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78656 rt_mutex_lock(&mutexes[id]);
78657- td->event = atomic_add_return(1, &rttest_event);
78658+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78659 td->mutexes[id] = 4;
78660 return 0;
78661
78662@@ -106,9 +106,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
78663 return ret;
78664
78665 td->mutexes[id] = 1;
78666- td->event = atomic_add_return(1, &rttest_event);
78667+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78668 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
78669- td->event = atomic_add_return(1, &rttest_event);
78670+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78671 td->mutexes[id] = ret ? 0 : 4;
78672 return ret ? -EINTR : 0;
78673
78674@@ -117,9 +117,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
78675 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
78676 return ret;
78677
78678- td->event = atomic_add_return(1, &rttest_event);
78679+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78680 rt_mutex_unlock(&mutexes[id]);
78681- td->event = atomic_add_return(1, &rttest_event);
78682+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78683 td->mutexes[id] = 0;
78684 return 0;
78685
78686@@ -166,7 +166,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
78687 break;
78688
78689 td->mutexes[dat] = 2;
78690- td->event = atomic_add_return(1, &rttest_event);
78691+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78692 break;
78693
78694 default:
78695@@ -186,7 +186,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
78696 return;
78697
78698 td->mutexes[dat] = 3;
78699- td->event = atomic_add_return(1, &rttest_event);
78700+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78701 break;
78702
78703 case RTTEST_LOCKNOWAIT:
78704@@ -198,7 +198,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
78705 return;
78706
78707 td->mutexes[dat] = 1;
78708- td->event = atomic_add_return(1, &rttest_event);
78709+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78710 return;
78711
78712 default:
78713diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
78714index 64de5f8..7735e12 100644
78715--- a/kernel/sched/auto_group.c
78716+++ b/kernel/sched/auto_group.c
78717@@ -11,7 +11,7 @@
78718
78719 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
78720 static struct autogroup autogroup_default;
78721-static atomic_t autogroup_seq_nr;
78722+static atomic_unchecked_t autogroup_seq_nr;
78723
78724 void __init autogroup_init(struct task_struct *init_task)
78725 {
78726@@ -81,7 +81,7 @@ static inline struct autogroup *autogroup_create(void)
78727
78728 kref_init(&ag->kref);
78729 init_rwsem(&ag->lock);
78730- ag->id = atomic_inc_return(&autogroup_seq_nr);
78731+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
78732 ag->tg = tg;
78733 #ifdef CONFIG_RT_GROUP_SCHED
78734 /*
78735diff --git a/kernel/sched/core.c b/kernel/sched/core.c
78736index e8b3350..d83d44e 100644
78737--- a/kernel/sched/core.c
78738+++ b/kernel/sched/core.c
78739@@ -3440,7 +3440,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
78740 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
78741 * positive (at least 1, or number of jiffies left till timeout) if completed.
78742 */
78743-long __sched
78744+long __sched __intentional_overflow(-1)
78745 wait_for_completion_interruptible_timeout(struct completion *x,
78746 unsigned long timeout)
78747 {
78748@@ -3457,7 +3457,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
78749 *
78750 * The return value is -ERESTARTSYS if interrupted, 0 if completed.
78751 */
78752-int __sched wait_for_completion_killable(struct completion *x)
78753+int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
78754 {
78755 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
78756 if (t == -ERESTARTSYS)
78757@@ -3478,7 +3478,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
78758 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
78759 * positive (at least 1, or number of jiffies left till timeout) if completed.
78760 */
78761-long __sched
78762+long __sched __intentional_overflow(-1)
78763 wait_for_completion_killable_timeout(struct completion *x,
78764 unsigned long timeout)
78765 {
78766@@ -3704,6 +3704,8 @@ int can_nice(const struct task_struct *p, const int nice)
78767 /* convert nice value [19,-20] to rlimit style value [1,40] */
78768 int nice_rlim = 20 - nice;
78769
78770+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
78771+
78772 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
78773 capable(CAP_SYS_NICE));
78774 }
78775@@ -3737,7 +3739,8 @@ SYSCALL_DEFINE1(nice, int, increment)
78776 if (nice > 19)
78777 nice = 19;
78778
78779- if (increment < 0 && !can_nice(current, nice))
78780+ if (increment < 0 && (!can_nice(current, nice) ||
78781+ gr_handle_chroot_nice()))
78782 return -EPERM;
78783
78784 retval = security_task_setnice(current, nice);
78785@@ -3891,6 +3894,7 @@ recheck:
78786 unsigned long rlim_rtprio =
78787 task_rlimit(p, RLIMIT_RTPRIO);
78788
78789+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
78790 /* can't set/change the rt policy */
78791 if (policy != p->policy && !rlim_rtprio)
78792 return -EPERM;
78793@@ -4988,7 +4992,7 @@ static void migrate_tasks(unsigned int dead_cpu)
78794
78795 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
78796
78797-static struct ctl_table sd_ctl_dir[] = {
78798+static ctl_table_no_const sd_ctl_dir[] __read_only = {
78799 {
78800 .procname = "sched_domain",
78801 .mode = 0555,
78802@@ -5005,17 +5009,17 @@ static struct ctl_table sd_ctl_root[] = {
78803 {}
78804 };
78805
78806-static struct ctl_table *sd_alloc_ctl_entry(int n)
78807+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
78808 {
78809- struct ctl_table *entry =
78810+ ctl_table_no_const *entry =
78811 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
78812
78813 return entry;
78814 }
78815
78816-static void sd_free_ctl_entry(struct ctl_table **tablep)
78817+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
78818 {
78819- struct ctl_table *entry;
78820+ ctl_table_no_const *entry;
78821
78822 /*
78823 * In the intermediate directories, both the child directory and
78824@@ -5023,22 +5027,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
78825 * will always be set. In the lowest directory the names are
78826 * static strings and all have proc handlers.
78827 */
78828- for (entry = *tablep; entry->mode; entry++) {
78829- if (entry->child)
78830- sd_free_ctl_entry(&entry->child);
78831+ for (entry = tablep; entry->mode; entry++) {
78832+ if (entry->child) {
78833+ sd_free_ctl_entry(entry->child);
78834+ pax_open_kernel();
78835+ entry->child = NULL;
78836+ pax_close_kernel();
78837+ }
78838 if (entry->proc_handler == NULL)
78839 kfree(entry->procname);
78840 }
78841
78842- kfree(*tablep);
78843- *tablep = NULL;
78844+ kfree(tablep);
78845 }
78846
78847 static int min_load_idx = 0;
78848 static int max_load_idx = CPU_LOAD_IDX_MAX-1;
78849
78850 static void
78851-set_table_entry(struct ctl_table *entry,
78852+set_table_entry(ctl_table_no_const *entry,
78853 const char *procname, void *data, int maxlen,
78854 umode_t mode, proc_handler *proc_handler,
78855 bool load_idx)
78856@@ -5058,7 +5065,7 @@ set_table_entry(struct ctl_table *entry,
78857 static struct ctl_table *
78858 sd_alloc_ctl_domain_table(struct sched_domain *sd)
78859 {
78860- struct ctl_table *table = sd_alloc_ctl_entry(13);
78861+ ctl_table_no_const *table = sd_alloc_ctl_entry(13);
78862
78863 if (table == NULL)
78864 return NULL;
78865@@ -5093,9 +5100,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
78866 return table;
78867 }
78868
78869-static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
78870+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
78871 {
78872- struct ctl_table *entry, *table;
78873+ ctl_table_no_const *entry, *table;
78874 struct sched_domain *sd;
78875 int domain_num = 0, i;
78876 char buf[32];
78877@@ -5122,11 +5129,13 @@ static struct ctl_table_header *sd_sysctl_header;
78878 static void register_sched_domain_sysctl(void)
78879 {
78880 int i, cpu_num = num_possible_cpus();
78881- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
78882+ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
78883 char buf[32];
78884
78885 WARN_ON(sd_ctl_dir[0].child);
78886+ pax_open_kernel();
78887 sd_ctl_dir[0].child = entry;
78888+ pax_close_kernel();
78889
78890 if (entry == NULL)
78891 return;
78892@@ -5149,8 +5158,12 @@ static void unregister_sched_domain_sysctl(void)
78893 if (sd_sysctl_header)
78894 unregister_sysctl_table(sd_sysctl_header);
78895 sd_sysctl_header = NULL;
78896- if (sd_ctl_dir[0].child)
78897- sd_free_ctl_entry(&sd_ctl_dir[0].child);
78898+ if (sd_ctl_dir[0].child) {
78899+ sd_free_ctl_entry(sd_ctl_dir[0].child);
78900+ pax_open_kernel();
78901+ sd_ctl_dir[0].child = NULL;
78902+ pax_close_kernel();
78903+ }
78904 }
78905 #else
78906 static void register_sched_domain_sysctl(void)
78907@@ -5249,7 +5262,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
78908 * happens before everything else. This has to be lower priority than
78909 * the notifier in the perf_event subsystem, though.
78910 */
78911-static struct notifier_block __cpuinitdata migration_notifier = {
78912+static struct notifier_block migration_notifier = {
78913 .notifier_call = migration_call,
78914 .priority = CPU_PRI_MIGRATION,
78915 };
78916diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
78917index c61a614..d7f3d7e 100644
78918--- a/kernel/sched/fair.c
78919+++ b/kernel/sched/fair.c
78920@@ -831,7 +831,7 @@ void task_numa_fault(int node, int pages, bool migrated)
78921
78922 static void reset_ptenuma_scan(struct task_struct *p)
78923 {
78924- ACCESS_ONCE(p->mm->numa_scan_seq)++;
78925+ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
78926 p->mm->numa_scan_offset = 0;
78927 }
78928
78929@@ -5686,7 +5686,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
78930 * run_rebalance_domains is triggered when needed from the scheduler tick.
78931 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
78932 */
78933-static void run_rebalance_domains(struct softirq_action *h)
78934+static void run_rebalance_domains(void)
78935 {
78936 int this_cpu = smp_processor_id();
78937 struct rq *this_rq = cpu_rq(this_cpu);
78938diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
78939index ce39224..0e09343 100644
78940--- a/kernel/sched/sched.h
78941+++ b/kernel/sched/sched.h
78942@@ -1009,7 +1009,7 @@ struct sched_class {
78943 #ifdef CONFIG_FAIR_GROUP_SCHED
78944 void (*task_move_group) (struct task_struct *p, int on_rq);
78945 #endif
78946-};
78947+} __do_const;
78948
78949 #define sched_class_highest (&stop_sched_class)
78950 #define for_each_class(class) \
78951diff --git a/kernel/signal.c b/kernel/signal.c
78952index 113411b..17190e2 100644
78953--- a/kernel/signal.c
78954+++ b/kernel/signal.c
78955@@ -51,12 +51,12 @@ static struct kmem_cache *sigqueue_cachep;
78956
78957 int print_fatal_signals __read_mostly;
78958
78959-static void __user *sig_handler(struct task_struct *t, int sig)
78960+static __sighandler_t sig_handler(struct task_struct *t, int sig)
78961 {
78962 return t->sighand->action[sig - 1].sa.sa_handler;
78963 }
78964
78965-static int sig_handler_ignored(void __user *handler, int sig)
78966+static int sig_handler_ignored(__sighandler_t handler, int sig)
78967 {
78968 /* Is it explicitly or implicitly ignored? */
78969 return handler == SIG_IGN ||
78970@@ -65,7 +65,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
78971
78972 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
78973 {
78974- void __user *handler;
78975+ __sighandler_t handler;
78976
78977 handler = sig_handler(t, sig);
78978
78979@@ -369,6 +369,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
78980 atomic_inc(&user->sigpending);
78981 rcu_read_unlock();
78982
78983+ if (!override_rlimit)
78984+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
78985+
78986 if (override_rlimit ||
78987 atomic_read(&user->sigpending) <=
78988 task_rlimit(t, RLIMIT_SIGPENDING)) {
78989@@ -496,7 +499,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
78990
78991 int unhandled_signal(struct task_struct *tsk, int sig)
78992 {
78993- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
78994+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
78995 if (is_global_init(tsk))
78996 return 1;
78997 if (handler != SIG_IGN && handler != SIG_DFL)
78998@@ -816,6 +819,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
78999 }
79000 }
79001
79002+ /* allow glibc communication via tgkill to other threads in our
79003+ thread group */
79004+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
79005+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
79006+ && gr_handle_signal(t, sig))
79007+ return -EPERM;
79008+
79009 return security_task_kill(t, info, sig, 0);
79010 }
79011
79012@@ -1199,7 +1209,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
79013 return send_signal(sig, info, p, 1);
79014 }
79015
79016-static int
79017+int
79018 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
79019 {
79020 return send_signal(sig, info, t, 0);
79021@@ -1236,6 +1246,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
79022 unsigned long int flags;
79023 int ret, blocked, ignored;
79024 struct k_sigaction *action;
79025+ int is_unhandled = 0;
79026
79027 spin_lock_irqsave(&t->sighand->siglock, flags);
79028 action = &t->sighand->action[sig-1];
79029@@ -1250,9 +1261,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
79030 }
79031 if (action->sa.sa_handler == SIG_DFL)
79032 t->signal->flags &= ~SIGNAL_UNKILLABLE;
79033+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
79034+ is_unhandled = 1;
79035 ret = specific_send_sig_info(sig, info, t);
79036 spin_unlock_irqrestore(&t->sighand->siglock, flags);
79037
79038+ /* only deal with unhandled signals, java etc trigger SIGSEGV during
79039+ normal operation */
79040+ if (is_unhandled) {
79041+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
79042+ gr_handle_crash(t, sig);
79043+ }
79044+
79045 return ret;
79046 }
79047
79048@@ -1319,8 +1339,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
79049 ret = check_kill_permission(sig, info, p);
79050 rcu_read_unlock();
79051
79052- if (!ret && sig)
79053+ if (!ret && sig) {
79054 ret = do_send_sig_info(sig, info, p, true);
79055+ if (!ret)
79056+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
79057+ }
79058
79059 return ret;
79060 }
79061@@ -2926,7 +2949,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
79062 int error = -ESRCH;
79063
79064 rcu_read_lock();
79065- p = find_task_by_vpid(pid);
79066+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
79067+ /* allow glibc communication via tgkill to other threads in our
79068+ thread group */
79069+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
79070+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
79071+ p = find_task_by_vpid_unrestricted(pid);
79072+ else
79073+#endif
79074+ p = find_task_by_vpid(pid);
79075 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
79076 error = check_kill_permission(sig, info, p);
79077 /*
79078@@ -3240,8 +3271,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
79079 }
79080 seg = get_fs();
79081 set_fs(KERNEL_DS);
79082- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
79083- (stack_t __force __user *) &uoss,
79084+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
79085+ (stack_t __force_user *) &uoss,
79086 compat_user_stack_pointer());
79087 set_fs(seg);
79088 if (ret >= 0 && uoss_ptr) {
79089diff --git a/kernel/smp.c b/kernel/smp.c
79090index 4dba0f7..fe9f773 100644
79091--- a/kernel/smp.c
79092+++ b/kernel/smp.c
79093@@ -73,7 +73,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
79094 return NOTIFY_OK;
79095 }
79096
79097-static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
79098+static struct notifier_block hotplug_cfd_notifier = {
79099 .notifier_call = hotplug_cfd,
79100 };
79101
79102diff --git a/kernel/smpboot.c b/kernel/smpboot.c
79103index 02fc5c9..e54c335 100644
79104--- a/kernel/smpboot.c
79105+++ b/kernel/smpboot.c
79106@@ -288,7 +288,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
79107 }
79108 smpboot_unpark_thread(plug_thread, cpu);
79109 }
79110- list_add(&plug_thread->list, &hotplug_threads);
79111+ pax_list_add(&plug_thread->list, &hotplug_threads);
79112 out:
79113 mutex_unlock(&smpboot_threads_lock);
79114 return ret;
79115@@ -305,7 +305,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
79116 {
79117 get_online_cpus();
79118 mutex_lock(&smpboot_threads_lock);
79119- list_del(&plug_thread->list);
79120+ pax_list_del(&plug_thread->list);
79121 smpboot_destroy_threads(plug_thread);
79122 mutex_unlock(&smpboot_threads_lock);
79123 put_online_cpus();
79124diff --git a/kernel/softirq.c b/kernel/softirq.c
79125index 3d6833f..da6d93d 100644
79126--- a/kernel/softirq.c
79127+++ b/kernel/softirq.c
79128@@ -53,11 +53,11 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
79129 EXPORT_SYMBOL(irq_stat);
79130 #endif
79131
79132-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
79133+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
79134
79135 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
79136
79137-char *softirq_to_name[NR_SOFTIRQS] = {
79138+const char * const softirq_to_name[NR_SOFTIRQS] = {
79139 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
79140 "TASKLET", "SCHED", "HRTIMER", "RCU"
79141 };
79142@@ -250,7 +250,7 @@ restart:
79143 kstat_incr_softirqs_this_cpu(vec_nr);
79144
79145 trace_softirq_entry(vec_nr);
79146- h->action(h);
79147+ h->action();
79148 trace_softirq_exit(vec_nr);
79149 if (unlikely(prev_count != preempt_count())) {
79150 printk(KERN_ERR "huh, entered softirq %u %s %p"
79151@@ -405,7 +405,7 @@ void __raise_softirq_irqoff(unsigned int nr)
79152 or_softirq_pending(1UL << nr);
79153 }
79154
79155-void open_softirq(int nr, void (*action)(struct softirq_action *))
79156+void __init open_softirq(int nr, void (*action)(void))
79157 {
79158 softirq_vec[nr].action = action;
79159 }
79160@@ -461,7 +461,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
79161
79162 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
79163
79164-static void tasklet_action(struct softirq_action *a)
79165+static void tasklet_action(void)
79166 {
79167 struct tasklet_struct *list;
79168
79169@@ -496,7 +496,7 @@ static void tasklet_action(struct softirq_action *a)
79170 }
79171 }
79172
79173-static void tasklet_hi_action(struct softirq_action *a)
79174+static void tasklet_hi_action(void)
79175 {
79176 struct tasklet_struct *list;
79177
79178@@ -730,7 +730,7 @@ static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
79179 return NOTIFY_OK;
79180 }
79181
79182-static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
79183+static struct notifier_block remote_softirq_cpu_notifier = {
79184 .notifier_call = remote_softirq_cpu_notify,
79185 };
79186
79187@@ -847,11 +847,11 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
79188 return NOTIFY_OK;
79189 }
79190
79191-static struct notifier_block __cpuinitdata cpu_nfb = {
79192+static struct notifier_block cpu_nfb = {
79193 .notifier_call = cpu_callback
79194 };
79195
79196-static struct smp_hotplug_thread softirq_threads = {
79197+static struct smp_hotplug_thread softirq_threads __read_only = {
79198 .store = &ksoftirqd,
79199 .thread_should_run = ksoftirqd_should_run,
79200 .thread_fn = run_ksoftirqd,
79201diff --git a/kernel/srcu.c b/kernel/srcu.c
79202index 01d5ccb..cdcbee6 100644
79203--- a/kernel/srcu.c
79204+++ b/kernel/srcu.c
79205@@ -300,9 +300,9 @@ int __srcu_read_lock(struct srcu_struct *sp)
79206
79207 idx = ACCESS_ONCE(sp->completed) & 0x1;
79208 preempt_disable();
79209- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
79210+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
79211 smp_mb(); /* B */ /* Avoid leaking the critical section. */
79212- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
79213+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
79214 preempt_enable();
79215 return idx;
79216 }
79217diff --git a/kernel/sys.c b/kernel/sys.c
79218index 2bbd9a7..0875671 100644
79219--- a/kernel/sys.c
79220+++ b/kernel/sys.c
79221@@ -163,6 +163,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
79222 error = -EACCES;
79223 goto out;
79224 }
79225+
79226+ if (gr_handle_chroot_setpriority(p, niceval)) {
79227+ error = -EACCES;
79228+ goto out;
79229+ }
79230+
79231 no_nice = security_task_setnice(p, niceval);
79232 if (no_nice) {
79233 error = no_nice;
79234@@ -626,6 +632,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
79235 goto error;
79236 }
79237
79238+ if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
79239+ goto error;
79240+
79241 if (rgid != (gid_t) -1 ||
79242 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
79243 new->sgid = new->egid;
79244@@ -661,6 +670,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
79245 old = current_cred();
79246
79247 retval = -EPERM;
79248+
79249+ if (gr_check_group_change(kgid, kgid, kgid))
79250+ goto error;
79251+
79252 if (nsown_capable(CAP_SETGID))
79253 new->gid = new->egid = new->sgid = new->fsgid = kgid;
79254 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
79255@@ -678,7 +691,7 @@ error:
79256 /*
79257 * change the user struct in a credentials set to match the new UID
79258 */
79259-static int set_user(struct cred *new)
79260+int set_user(struct cred *new)
79261 {
79262 struct user_struct *new_user;
79263
79264@@ -758,6 +771,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
79265 goto error;
79266 }
79267
79268+ if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
79269+ goto error;
79270+
79271 if (!uid_eq(new->uid, old->uid)) {
79272 retval = set_user(new);
79273 if (retval < 0)
79274@@ -808,6 +824,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
79275 old = current_cred();
79276
79277 retval = -EPERM;
79278+
79279+ if (gr_check_crash_uid(kuid))
79280+ goto error;
79281+ if (gr_check_user_change(kuid, kuid, kuid))
79282+ goto error;
79283+
79284 if (nsown_capable(CAP_SETUID)) {
79285 new->suid = new->uid = kuid;
79286 if (!uid_eq(kuid, old->uid)) {
79287@@ -877,6 +899,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
79288 goto error;
79289 }
79290
79291+ if (gr_check_user_change(kruid, keuid, INVALID_UID))
79292+ goto error;
79293+
79294 if (ruid != (uid_t) -1) {
79295 new->uid = kruid;
79296 if (!uid_eq(kruid, old->uid)) {
79297@@ -959,6 +984,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
79298 goto error;
79299 }
79300
79301+ if (gr_check_group_change(krgid, kegid, INVALID_GID))
79302+ goto error;
79303+
79304 if (rgid != (gid_t) -1)
79305 new->gid = krgid;
79306 if (egid != (gid_t) -1)
79307@@ -1020,12 +1048,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
79308 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
79309 nsown_capable(CAP_SETUID)) {
79310 if (!uid_eq(kuid, old->fsuid)) {
79311+ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
79312+ goto error;
79313+
79314 new->fsuid = kuid;
79315 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
79316 goto change_okay;
79317 }
79318 }
79319
79320+error:
79321 abort_creds(new);
79322 return old_fsuid;
79323
79324@@ -1058,12 +1090,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
79325 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
79326 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
79327 nsown_capable(CAP_SETGID)) {
79328+ if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
79329+ goto error;
79330+
79331 if (!gid_eq(kgid, old->fsgid)) {
79332 new->fsgid = kgid;
79333 goto change_okay;
79334 }
79335 }
79336
79337+error:
79338 abort_creds(new);
79339 return old_fsgid;
79340
79341@@ -1432,19 +1468,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
79342 return -EFAULT;
79343
79344 down_read(&uts_sem);
79345- error = __copy_to_user(&name->sysname, &utsname()->sysname,
79346+ error = __copy_to_user(name->sysname, &utsname()->sysname,
79347 __OLD_UTS_LEN);
79348 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
79349- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
79350+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
79351 __OLD_UTS_LEN);
79352 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
79353- error |= __copy_to_user(&name->release, &utsname()->release,
79354+ error |= __copy_to_user(name->release, &utsname()->release,
79355 __OLD_UTS_LEN);
79356 error |= __put_user(0, name->release + __OLD_UTS_LEN);
79357- error |= __copy_to_user(&name->version, &utsname()->version,
79358+ error |= __copy_to_user(name->version, &utsname()->version,
79359 __OLD_UTS_LEN);
79360 error |= __put_user(0, name->version + __OLD_UTS_LEN);
79361- error |= __copy_to_user(&name->machine, &utsname()->machine,
79362+ error |= __copy_to_user(name->machine, &utsname()->machine,
79363 __OLD_UTS_LEN);
79364 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
79365 up_read(&uts_sem);
79366@@ -1646,6 +1682,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
79367 */
79368 new_rlim->rlim_cur = 1;
79369 }
79370+ /* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
79371+ is changed to a lower value. Since tasks can be created by the same
79372+ user in between this limit change and an execve by this task, force
79373+ a recheck only for this task by setting PF_NPROC_EXCEEDED
79374+ */
79375+ if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER)
79376+ tsk->flags |= PF_NPROC_EXCEEDED;
79377 }
79378 if (!retval) {
79379 if (old_rlim)
79380diff --git a/kernel/sysctl.c b/kernel/sysctl.c
79381index 9edcf45..713c960 100644
79382--- a/kernel/sysctl.c
79383+++ b/kernel/sysctl.c
79384@@ -93,7 +93,6 @@
79385
79386
79387 #if defined(CONFIG_SYSCTL)
79388-
79389 /* External variables not in a header file. */
79390 extern int sysctl_overcommit_memory;
79391 extern int sysctl_overcommit_ratio;
79392@@ -119,18 +118,18 @@ extern int blk_iopoll_enabled;
79393
79394 /* Constants used for minimum and maximum */
79395 #ifdef CONFIG_LOCKUP_DETECTOR
79396-static int sixty = 60;
79397-static int neg_one = -1;
79398+static int sixty __read_only = 60;
79399 #endif
79400
79401-static int zero;
79402-static int __maybe_unused one = 1;
79403-static int __maybe_unused two = 2;
79404-static int __maybe_unused three = 3;
79405-static unsigned long one_ul = 1;
79406-static int one_hundred = 100;
79407+static int neg_one __read_only = -1;
79408+static int zero __read_only = 0;
79409+static int __maybe_unused one __read_only = 1;
79410+static int __maybe_unused two __read_only = 2;
79411+static int __maybe_unused three __read_only = 3;
79412+static unsigned long one_ul __read_only = 1;
79413+static int one_hundred __read_only = 100;
79414 #ifdef CONFIG_PRINTK
79415-static int ten_thousand = 10000;
79416+static int ten_thousand __read_only = 10000;
79417 #endif
79418
79419 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
79420@@ -177,10 +176,8 @@ static int proc_taint(struct ctl_table *table, int write,
79421 void __user *buffer, size_t *lenp, loff_t *ppos);
79422 #endif
79423
79424-#ifdef CONFIG_PRINTK
79425 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
79426 void __user *buffer, size_t *lenp, loff_t *ppos);
79427-#endif
79428
79429 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
79430 void __user *buffer, size_t *lenp, loff_t *ppos);
79431@@ -211,6 +208,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
79432
79433 #endif
79434
79435+extern struct ctl_table grsecurity_table[];
79436+
79437 static struct ctl_table kern_table[];
79438 static struct ctl_table vm_table[];
79439 static struct ctl_table fs_table[];
79440@@ -225,6 +224,20 @@ extern struct ctl_table epoll_table[];
79441 int sysctl_legacy_va_layout;
79442 #endif
79443
79444+#ifdef CONFIG_PAX_SOFTMODE
79445+static ctl_table pax_table[] = {
79446+ {
79447+ .procname = "softmode",
79448+ .data = &pax_softmode,
79449+ .maxlen = sizeof(unsigned int),
79450+ .mode = 0600,
79451+ .proc_handler = &proc_dointvec,
79452+ },
79453+
79454+ { }
79455+};
79456+#endif
79457+
79458 /* The default sysctl tables: */
79459
79460 static struct ctl_table sysctl_base_table[] = {
79461@@ -273,6 +286,22 @@ static int max_extfrag_threshold = 1000;
79462 #endif
79463
79464 static struct ctl_table kern_table[] = {
79465+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
79466+ {
79467+ .procname = "grsecurity",
79468+ .mode = 0500,
79469+ .child = grsecurity_table,
79470+ },
79471+#endif
79472+
79473+#ifdef CONFIG_PAX_SOFTMODE
79474+ {
79475+ .procname = "pax",
79476+ .mode = 0500,
79477+ .child = pax_table,
79478+ },
79479+#endif
79480+
79481 {
79482 .procname = "sched_child_runs_first",
79483 .data = &sysctl_sched_child_runs_first,
79484@@ -607,7 +636,7 @@ static struct ctl_table kern_table[] = {
79485 .data = &modprobe_path,
79486 .maxlen = KMOD_PATH_LEN,
79487 .mode = 0644,
79488- .proc_handler = proc_dostring,
79489+ .proc_handler = proc_dostring_modpriv,
79490 },
79491 {
79492 .procname = "modules_disabled",
79493@@ -774,16 +803,20 @@ static struct ctl_table kern_table[] = {
79494 .extra1 = &zero,
79495 .extra2 = &one,
79496 },
79497+#endif
79498 {
79499 .procname = "kptr_restrict",
79500 .data = &kptr_restrict,
79501 .maxlen = sizeof(int),
79502 .mode = 0644,
79503 .proc_handler = proc_dointvec_minmax_sysadmin,
79504+#ifdef CONFIG_GRKERNSEC_HIDESYM
79505+ .extra1 = &two,
79506+#else
79507 .extra1 = &zero,
79508+#endif
79509 .extra2 = &two,
79510 },
79511-#endif
79512 {
79513 .procname = "ngroups_max",
79514 .data = &ngroups_max,
79515@@ -1025,10 +1058,17 @@ static struct ctl_table kern_table[] = {
79516 */
79517 {
79518 .procname = "perf_event_paranoid",
79519- .data = &sysctl_perf_event_paranoid,
79520- .maxlen = sizeof(sysctl_perf_event_paranoid),
79521+ .data = &sysctl_perf_event_legitimately_concerned,
79522+ .maxlen = sizeof(sysctl_perf_event_legitimately_concerned),
79523 .mode = 0644,
79524- .proc_handler = proc_dointvec,
79525+ /* go ahead, be a hero */
79526+ .proc_handler = proc_dointvec_minmax_sysadmin,
79527+ .extra1 = &neg_one,
79528+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
79529+ .extra2 = &three,
79530+#else
79531+ .extra2 = &two,
79532+#endif
79533 },
79534 {
79535 .procname = "perf_event_mlock_kb",
79536@@ -1282,6 +1322,13 @@ static struct ctl_table vm_table[] = {
79537 .proc_handler = proc_dointvec_minmax,
79538 .extra1 = &zero,
79539 },
79540+ {
79541+ .procname = "heap_stack_gap",
79542+ .data = &sysctl_heap_stack_gap,
79543+ .maxlen = sizeof(sysctl_heap_stack_gap),
79544+ .mode = 0644,
79545+ .proc_handler = proc_doulongvec_minmax,
79546+ },
79547 #else
79548 {
79549 .procname = "nr_trim_pages",
79550@@ -1746,6 +1793,16 @@ int proc_dostring(struct ctl_table *table, int write,
79551 buffer, lenp, ppos);
79552 }
79553
79554+int proc_dostring_modpriv(struct ctl_table *table, int write,
79555+ void __user *buffer, size_t *lenp, loff_t *ppos)
79556+{
79557+ if (write && !capable(CAP_SYS_MODULE))
79558+ return -EPERM;
79559+
79560+ return _proc_do_string(table->data, table->maxlen, write,
79561+ buffer, lenp, ppos);
79562+}
79563+
79564 static size_t proc_skip_spaces(char **buf)
79565 {
79566 size_t ret;
79567@@ -1851,6 +1908,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
79568 len = strlen(tmp);
79569 if (len > *size)
79570 len = *size;
79571+ if (len > sizeof(tmp))
79572+ len = sizeof(tmp);
79573 if (copy_to_user(*buf, tmp, len))
79574 return -EFAULT;
79575 *size -= len;
79576@@ -2015,7 +2074,7 @@ int proc_dointvec(struct ctl_table *table, int write,
79577 static int proc_taint(struct ctl_table *table, int write,
79578 void __user *buffer, size_t *lenp, loff_t *ppos)
79579 {
79580- struct ctl_table t;
79581+ ctl_table_no_const t;
79582 unsigned long tmptaint = get_taint();
79583 int err;
79584
79585@@ -2043,7 +2102,6 @@ static int proc_taint(struct ctl_table *table, int write,
79586 return err;
79587 }
79588
79589-#ifdef CONFIG_PRINTK
79590 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
79591 void __user *buffer, size_t *lenp, loff_t *ppos)
79592 {
79593@@ -2052,7 +2110,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
79594
79595 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
79596 }
79597-#endif
79598
79599 struct do_proc_dointvec_minmax_conv_param {
79600 int *min;
79601@@ -2199,8 +2256,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
79602 *i = val;
79603 } else {
79604 val = convdiv * (*i) / convmul;
79605- if (!first)
79606+ if (!first) {
79607 err = proc_put_char(&buffer, &left, '\t');
79608+ if (err)
79609+ break;
79610+ }
79611 err = proc_put_long(&buffer, &left, val, false);
79612 if (err)
79613 break;
79614@@ -2592,6 +2652,12 @@ int proc_dostring(struct ctl_table *table, int write,
79615 return -ENOSYS;
79616 }
79617
79618+int proc_dostring_modpriv(struct ctl_table *table, int write,
79619+ void __user *buffer, size_t *lenp, loff_t *ppos)
79620+{
79621+ return -ENOSYS;
79622+}
79623+
79624 int proc_dointvec(struct ctl_table *table, int write,
79625 void __user *buffer, size_t *lenp, loff_t *ppos)
79626 {
79627@@ -2648,5 +2714,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
79628 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
79629 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
79630 EXPORT_SYMBOL(proc_dostring);
79631+EXPORT_SYMBOL(proc_dostring_modpriv);
79632 EXPORT_SYMBOL(proc_doulongvec_minmax);
79633 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
79634diff --git a/kernel/taskstats.c b/kernel/taskstats.c
79635index 145bb4d..b2aa969 100644
79636--- a/kernel/taskstats.c
79637+++ b/kernel/taskstats.c
79638@@ -28,9 +28,12 @@
79639 #include <linux/fs.h>
79640 #include <linux/file.h>
79641 #include <linux/pid_namespace.h>
79642+#include <linux/grsecurity.h>
79643 #include <net/genetlink.h>
79644 #include <linux/atomic.h>
79645
79646+extern int gr_is_taskstats_denied(int pid);
79647+
79648 /*
79649 * Maximum length of a cpumask that can be specified in
79650 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
79651@@ -570,6 +573,9 @@ err:
79652
79653 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
79654 {
79655+ if (gr_is_taskstats_denied(current->pid))
79656+ return -EACCES;
79657+
79658 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
79659 return cmd_attr_register_cpumask(info);
79660 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
79661diff --git a/kernel/time.c b/kernel/time.c
79662index d3617db..c98bbe9 100644
79663--- a/kernel/time.c
79664+++ b/kernel/time.c
79665@@ -172,6 +172,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
79666 return error;
79667
79668 if (tz) {
79669+ /* we log in do_settimeofday called below, so don't log twice
79670+ */
79671+ if (!tv)
79672+ gr_log_timechange();
79673+
79674 sys_tz = *tz;
79675 update_vsyscall_tz();
79676 if (firsttime) {
79677@@ -502,7 +507,7 @@ EXPORT_SYMBOL(usecs_to_jiffies);
79678 * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
79679 * value to a scaled second value.
79680 */
79681-unsigned long
79682+unsigned long __intentional_overflow(-1)
79683 timespec_to_jiffies(const struct timespec *value)
79684 {
79685 unsigned long sec = value->tv_sec;
79686diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
79687index f11d83b..d016d91 100644
79688--- a/kernel/time/alarmtimer.c
79689+++ b/kernel/time/alarmtimer.c
79690@@ -750,7 +750,7 @@ static int __init alarmtimer_init(void)
79691 struct platform_device *pdev;
79692 int error = 0;
79693 int i;
79694- struct k_clock alarm_clock = {
79695+ static struct k_clock alarm_clock = {
79696 .clock_getres = alarm_clock_getres,
79697 .clock_get = alarm_clock_get,
79698 .timer_create = alarm_timer_create,
79699diff --git a/kernel/time/tick-broadcast.c b/kernel/time/tick-broadcast.c
79700index 20d6fba..09e103a 100644
79701--- a/kernel/time/tick-broadcast.c
79702+++ b/kernel/time/tick-broadcast.c
79703@@ -147,7 +147,7 @@ int tick_device_uses_broadcast(struct clock_event_device *dev, int cpu)
79704 * then clear the broadcast bit.
79705 */
79706 if (!(dev->features & CLOCK_EVT_FEAT_C3STOP)) {
79707- int cpu = smp_processor_id();
79708+ cpu = smp_processor_id();
79709 cpumask_clear_cpu(cpu, tick_broadcast_mask);
79710 tick_broadcast_clear_oneshot(cpu);
79711 } else {
79712diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
79713index baeeb5c..c22704a 100644
79714--- a/kernel/time/timekeeping.c
79715+++ b/kernel/time/timekeeping.c
79716@@ -15,6 +15,7 @@
79717 #include <linux/init.h>
79718 #include <linux/mm.h>
79719 #include <linux/sched.h>
79720+#include <linux/grsecurity.h>
79721 #include <linux/syscore_ops.h>
79722 #include <linux/clocksource.h>
79723 #include <linux/jiffies.h>
79724@@ -495,6 +496,8 @@ int do_settimeofday(const struct timespec *tv)
79725 if (!timespec_valid_strict(tv))
79726 return -EINVAL;
79727
79728+ gr_log_timechange();
79729+
79730 raw_spin_lock_irqsave(&timekeeper_lock, flags);
79731 write_seqcount_begin(&timekeeper_seq);
79732
79733diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
79734index 3bdf283..cc68d83 100644
79735--- a/kernel/time/timer_list.c
79736+++ b/kernel/time/timer_list.c
79737@@ -45,12 +45,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
79738
79739 static void print_name_offset(struct seq_file *m, void *sym)
79740 {
79741+#ifdef CONFIG_GRKERNSEC_HIDESYM
79742+ SEQ_printf(m, "<%p>", NULL);
79743+#else
79744 char symname[KSYM_NAME_LEN];
79745
79746 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
79747 SEQ_printf(m, "<%pK>", sym);
79748 else
79749 SEQ_printf(m, "%s", symname);
79750+#endif
79751 }
79752
79753 static void
79754@@ -119,7 +123,11 @@ next_one:
79755 static void
79756 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
79757 {
79758+#ifdef CONFIG_GRKERNSEC_HIDESYM
79759+ SEQ_printf(m, " .base: %p\n", NULL);
79760+#else
79761 SEQ_printf(m, " .base: %pK\n", base);
79762+#endif
79763 SEQ_printf(m, " .index: %d\n",
79764 base->index);
79765 SEQ_printf(m, " .resolution: %Lu nsecs\n",
79766@@ -355,7 +363,11 @@ static int __init init_timer_list_procfs(void)
79767 {
79768 struct proc_dir_entry *pe;
79769
79770+#ifdef CONFIG_GRKERNSEC_PROC_ADD
79771+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
79772+#else
79773 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
79774+#endif
79775 if (!pe)
79776 return -ENOMEM;
79777 return 0;
79778diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
79779index 0b537f2..40d6c20 100644
79780--- a/kernel/time/timer_stats.c
79781+++ b/kernel/time/timer_stats.c
79782@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
79783 static unsigned long nr_entries;
79784 static struct entry entries[MAX_ENTRIES];
79785
79786-static atomic_t overflow_count;
79787+static atomic_unchecked_t overflow_count;
79788
79789 /*
79790 * The entries are in a hash-table, for fast lookup:
79791@@ -140,7 +140,7 @@ static void reset_entries(void)
79792 nr_entries = 0;
79793 memset(entries, 0, sizeof(entries));
79794 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
79795- atomic_set(&overflow_count, 0);
79796+ atomic_set_unchecked(&overflow_count, 0);
79797 }
79798
79799 static struct entry *alloc_entry(void)
79800@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
79801 if (likely(entry))
79802 entry->count++;
79803 else
79804- atomic_inc(&overflow_count);
79805+ atomic_inc_unchecked(&overflow_count);
79806
79807 out_unlock:
79808 raw_spin_unlock_irqrestore(lock, flags);
79809@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
79810
79811 static void print_name_offset(struct seq_file *m, unsigned long addr)
79812 {
79813+#ifdef CONFIG_GRKERNSEC_HIDESYM
79814+ seq_printf(m, "<%p>", NULL);
79815+#else
79816 char symname[KSYM_NAME_LEN];
79817
79818 if (lookup_symbol_name(addr, symname) < 0)
79819- seq_printf(m, "<%p>", (void *)addr);
79820+ seq_printf(m, "<%pK>", (void *)addr);
79821 else
79822 seq_printf(m, "%s", symname);
79823+#endif
79824 }
79825
79826 static int tstats_show(struct seq_file *m, void *v)
79827@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
79828
79829 seq_puts(m, "Timer Stats Version: v0.2\n");
79830 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
79831- if (atomic_read(&overflow_count))
79832+ if (atomic_read_unchecked(&overflow_count))
79833 seq_printf(m, "Overflow: %d entries\n",
79834- atomic_read(&overflow_count));
79835+ atomic_read_unchecked(&overflow_count));
79836
79837 for (i = 0; i < nr_entries; i++) {
79838 entry = entries + i;
79839@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
79840 {
79841 struct proc_dir_entry *pe;
79842
79843+#ifdef CONFIG_GRKERNSEC_PROC_ADD
79844+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
79845+#else
79846 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
79847+#endif
79848 if (!pe)
79849 return -ENOMEM;
79850 return 0;
79851diff --git a/kernel/timer.c b/kernel/timer.c
79852index 15ffdb3..62d885c 100644
79853--- a/kernel/timer.c
79854+++ b/kernel/timer.c
79855@@ -1364,7 +1364,7 @@ void update_process_times(int user_tick)
79856 /*
79857 * This function runs timers and the timer-tq in bottom half context.
79858 */
79859-static void run_timer_softirq(struct softirq_action *h)
79860+static void run_timer_softirq(void)
79861 {
79862 struct tvec_base *base = __this_cpu_read(tvec_bases);
79863
79864@@ -1427,7 +1427,7 @@ static void process_timeout(unsigned long __data)
79865 *
79866 * In all cases the return value is guaranteed to be non-negative.
79867 */
79868-signed long __sched schedule_timeout(signed long timeout)
79869+signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
79870 {
79871 struct timer_list timer;
79872 unsigned long expire;
79873@@ -1633,7 +1633,7 @@ static int __cpuinit timer_cpu_notify(struct notifier_block *self,
79874 return NOTIFY_OK;
79875 }
79876
79877-static struct notifier_block __cpuinitdata timers_nb = {
79878+static struct notifier_block timers_nb = {
79879 .notifier_call = timer_cpu_notify,
79880 };
79881
79882diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
79883index b8b8560..75b1a09 100644
79884--- a/kernel/trace/blktrace.c
79885+++ b/kernel/trace/blktrace.c
79886@@ -317,7 +317,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
79887 struct blk_trace *bt = filp->private_data;
79888 char buf[16];
79889
79890- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
79891+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
79892
79893 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
79894 }
79895@@ -375,7 +375,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
79896 return 1;
79897
79898 bt = buf->chan->private_data;
79899- atomic_inc(&bt->dropped);
79900+ atomic_inc_unchecked(&bt->dropped);
79901 return 0;
79902 }
79903
79904@@ -476,7 +476,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
79905
79906 bt->dir = dir;
79907 bt->dev = dev;
79908- atomic_set(&bt->dropped, 0);
79909+ atomic_set_unchecked(&bt->dropped, 0);
79910
79911 ret = -EIO;
79912 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
79913diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
79914index 6c508ff..ee55a13 100644
79915--- a/kernel/trace/ftrace.c
79916+++ b/kernel/trace/ftrace.c
79917@@ -1915,12 +1915,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
79918 if (unlikely(ftrace_disabled))
79919 return 0;
79920
79921+ ret = ftrace_arch_code_modify_prepare();
79922+ FTRACE_WARN_ON(ret);
79923+ if (ret)
79924+ return 0;
79925+
79926 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
79927+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
79928 if (ret) {
79929 ftrace_bug(ret, ip);
79930- return 0;
79931 }
79932- return 1;
79933+ return ret ? 0 : 1;
79934 }
79935
79936 /*
79937@@ -3931,8 +3936,10 @@ static int ftrace_process_locs(struct module *mod,
79938 if (!count)
79939 return 0;
79940
79941+ pax_open_kernel();
79942 sort(start, count, sizeof(*start),
79943 ftrace_cmp_ips, ftrace_swap_ips);
79944+ pax_close_kernel();
79945
79946 start_pg = ftrace_allocate_pages(count);
79947 if (!start_pg)
79948@@ -4655,8 +4662,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
79949 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
79950
79951 static int ftrace_graph_active;
79952-static struct notifier_block ftrace_suspend_notifier;
79953-
79954 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
79955 {
79956 return 0;
79957@@ -4800,6 +4805,10 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
79958 return NOTIFY_DONE;
79959 }
79960
79961+static struct notifier_block ftrace_suspend_notifier = {
79962+ .notifier_call = ftrace_suspend_notifier_call
79963+};
79964+
79965 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
79966 trace_func_graph_ent_t entryfunc)
79967 {
79968@@ -4813,7 +4822,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
79969 goto out;
79970 }
79971
79972- ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
79973 register_pm_notifier(&ftrace_suspend_notifier);
79974
79975 ftrace_graph_active++;
79976diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
79977index e444ff8..438b8f4 100644
79978--- a/kernel/trace/ring_buffer.c
79979+++ b/kernel/trace/ring_buffer.c
79980@@ -352,9 +352,9 @@ struct buffer_data_page {
79981 */
79982 struct buffer_page {
79983 struct list_head list; /* list of buffer pages */
79984- local_t write; /* index for next write */
79985+ local_unchecked_t write; /* index for next write */
79986 unsigned read; /* index for next read */
79987- local_t entries; /* entries on this page */
79988+ local_unchecked_t entries; /* entries on this page */
79989 unsigned long real_end; /* real end of data */
79990 struct buffer_data_page *page; /* Actual data page */
79991 };
79992@@ -473,8 +473,8 @@ struct ring_buffer_per_cpu {
79993 unsigned long last_overrun;
79994 local_t entries_bytes;
79995 local_t entries;
79996- local_t overrun;
79997- local_t commit_overrun;
79998+ local_unchecked_t overrun;
79999+ local_unchecked_t commit_overrun;
80000 local_t dropped_events;
80001 local_t committing;
80002 local_t commits;
80003@@ -992,8 +992,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
80004 *
80005 * We add a counter to the write field to denote this.
80006 */
80007- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
80008- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
80009+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
80010+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
80011
80012 /*
80013 * Just make sure we have seen our old_write and synchronize
80014@@ -1021,8 +1021,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
80015 * cmpxchg to only update if an interrupt did not already
80016 * do it for us. If the cmpxchg fails, we don't care.
80017 */
80018- (void)local_cmpxchg(&next_page->write, old_write, val);
80019- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
80020+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
80021+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
80022
80023 /*
80024 * No need to worry about races with clearing out the commit.
80025@@ -1386,12 +1386,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
80026
80027 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
80028 {
80029- return local_read(&bpage->entries) & RB_WRITE_MASK;
80030+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
80031 }
80032
80033 static inline unsigned long rb_page_write(struct buffer_page *bpage)
80034 {
80035- return local_read(&bpage->write) & RB_WRITE_MASK;
80036+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
80037 }
80038
80039 static int
80040@@ -1486,7 +1486,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
80041 * bytes consumed in ring buffer from here.
80042 * Increment overrun to account for the lost events.
80043 */
80044- local_add(page_entries, &cpu_buffer->overrun);
80045+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
80046 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
80047 }
80048
80049@@ -2063,7 +2063,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
80050 * it is our responsibility to update
80051 * the counters.
80052 */
80053- local_add(entries, &cpu_buffer->overrun);
80054+ local_add_unchecked(entries, &cpu_buffer->overrun);
80055 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
80056
80057 /*
80058@@ -2213,7 +2213,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
80059 if (tail == BUF_PAGE_SIZE)
80060 tail_page->real_end = 0;
80061
80062- local_sub(length, &tail_page->write);
80063+ local_sub_unchecked(length, &tail_page->write);
80064 return;
80065 }
80066
80067@@ -2248,7 +2248,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
80068 rb_event_set_padding(event);
80069
80070 /* Set the write back to the previous setting */
80071- local_sub(length, &tail_page->write);
80072+ local_sub_unchecked(length, &tail_page->write);
80073 return;
80074 }
80075
80076@@ -2260,7 +2260,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
80077
80078 /* Set write to end of buffer */
80079 length = (tail + length) - BUF_PAGE_SIZE;
80080- local_sub(length, &tail_page->write);
80081+ local_sub_unchecked(length, &tail_page->write);
80082 }
80083
80084 /*
80085@@ -2286,7 +2286,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
80086 * about it.
80087 */
80088 if (unlikely(next_page == commit_page)) {
80089- local_inc(&cpu_buffer->commit_overrun);
80090+ local_inc_unchecked(&cpu_buffer->commit_overrun);
80091 goto out_reset;
80092 }
80093
80094@@ -2342,7 +2342,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
80095 cpu_buffer->tail_page) &&
80096 (cpu_buffer->commit_page ==
80097 cpu_buffer->reader_page))) {
80098- local_inc(&cpu_buffer->commit_overrun);
80099+ local_inc_unchecked(&cpu_buffer->commit_overrun);
80100 goto out_reset;
80101 }
80102 }
80103@@ -2390,7 +2390,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
80104 length += RB_LEN_TIME_EXTEND;
80105
80106 tail_page = cpu_buffer->tail_page;
80107- write = local_add_return(length, &tail_page->write);
80108+ write = local_add_return_unchecked(length, &tail_page->write);
80109
80110 /* set write to only the index of the write */
80111 write &= RB_WRITE_MASK;
80112@@ -2407,7 +2407,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
80113 kmemcheck_annotate_bitfield(event, bitfield);
80114 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
80115
80116- local_inc(&tail_page->entries);
80117+ local_inc_unchecked(&tail_page->entries);
80118
80119 /*
80120 * If this is the first commit on the page, then update
80121@@ -2440,7 +2440,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
80122
80123 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
80124 unsigned long write_mask =
80125- local_read(&bpage->write) & ~RB_WRITE_MASK;
80126+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
80127 unsigned long event_length = rb_event_length(event);
80128 /*
80129 * This is on the tail page. It is possible that
80130@@ -2450,7 +2450,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
80131 */
80132 old_index += write_mask;
80133 new_index += write_mask;
80134- index = local_cmpxchg(&bpage->write, old_index, new_index);
80135+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
80136 if (index == old_index) {
80137 /* update counters */
80138 local_sub(event_length, &cpu_buffer->entries_bytes);
80139@@ -2842,7 +2842,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
80140
80141 /* Do the likely case first */
80142 if (likely(bpage->page == (void *)addr)) {
80143- local_dec(&bpage->entries);
80144+ local_dec_unchecked(&bpage->entries);
80145 return;
80146 }
80147
80148@@ -2854,7 +2854,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
80149 start = bpage;
80150 do {
80151 if (bpage->page == (void *)addr) {
80152- local_dec(&bpage->entries);
80153+ local_dec_unchecked(&bpage->entries);
80154 return;
80155 }
80156 rb_inc_page(cpu_buffer, &bpage);
80157@@ -3138,7 +3138,7 @@ static inline unsigned long
80158 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
80159 {
80160 return local_read(&cpu_buffer->entries) -
80161- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
80162+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
80163 }
80164
80165 /**
80166@@ -3227,7 +3227,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
80167 return 0;
80168
80169 cpu_buffer = buffer->buffers[cpu];
80170- ret = local_read(&cpu_buffer->overrun);
80171+ ret = local_read_unchecked(&cpu_buffer->overrun);
80172
80173 return ret;
80174 }
80175@@ -3250,7 +3250,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
80176 return 0;
80177
80178 cpu_buffer = buffer->buffers[cpu];
80179- ret = local_read(&cpu_buffer->commit_overrun);
80180+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
80181
80182 return ret;
80183 }
80184@@ -3335,7 +3335,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
80185 /* if you care about this being correct, lock the buffer */
80186 for_each_buffer_cpu(buffer, cpu) {
80187 cpu_buffer = buffer->buffers[cpu];
80188- overruns += local_read(&cpu_buffer->overrun);
80189+ overruns += local_read_unchecked(&cpu_buffer->overrun);
80190 }
80191
80192 return overruns;
80193@@ -3511,8 +3511,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
80194 /*
80195 * Reset the reader page to size zero.
80196 */
80197- local_set(&cpu_buffer->reader_page->write, 0);
80198- local_set(&cpu_buffer->reader_page->entries, 0);
80199+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
80200+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
80201 local_set(&cpu_buffer->reader_page->page->commit, 0);
80202 cpu_buffer->reader_page->real_end = 0;
80203
80204@@ -3546,7 +3546,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
80205 * want to compare with the last_overrun.
80206 */
80207 smp_mb();
80208- overwrite = local_read(&(cpu_buffer->overrun));
80209+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
80210
80211 /*
80212 * Here's the tricky part.
80213@@ -4116,8 +4116,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
80214
80215 cpu_buffer->head_page
80216 = list_entry(cpu_buffer->pages, struct buffer_page, list);
80217- local_set(&cpu_buffer->head_page->write, 0);
80218- local_set(&cpu_buffer->head_page->entries, 0);
80219+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
80220+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
80221 local_set(&cpu_buffer->head_page->page->commit, 0);
80222
80223 cpu_buffer->head_page->read = 0;
80224@@ -4127,14 +4127,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
80225
80226 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
80227 INIT_LIST_HEAD(&cpu_buffer->new_pages);
80228- local_set(&cpu_buffer->reader_page->write, 0);
80229- local_set(&cpu_buffer->reader_page->entries, 0);
80230+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
80231+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
80232 local_set(&cpu_buffer->reader_page->page->commit, 0);
80233 cpu_buffer->reader_page->read = 0;
80234
80235 local_set(&cpu_buffer->entries_bytes, 0);
80236- local_set(&cpu_buffer->overrun, 0);
80237- local_set(&cpu_buffer->commit_overrun, 0);
80238+ local_set_unchecked(&cpu_buffer->overrun, 0);
80239+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
80240 local_set(&cpu_buffer->dropped_events, 0);
80241 local_set(&cpu_buffer->entries, 0);
80242 local_set(&cpu_buffer->committing, 0);
80243@@ -4538,8 +4538,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
80244 rb_init_page(bpage);
80245 bpage = reader->page;
80246 reader->page = *data_page;
80247- local_set(&reader->write, 0);
80248- local_set(&reader->entries, 0);
80249+ local_set_unchecked(&reader->write, 0);
80250+ local_set_unchecked(&reader->entries, 0);
80251 reader->read = 0;
80252 *data_page = bpage;
80253
80254diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
80255index e71a8be..948710a 100644
80256--- a/kernel/trace/trace.c
80257+++ b/kernel/trace/trace.c
80258@@ -3201,7 +3201,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
80259 return 0;
80260 }
80261
80262-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
80263+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled)
80264 {
80265 /* do nothing if flag is already set */
80266 if (!!(trace_flags & mask) == !!enabled)
80267diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
80268index 20572ed..fe55cf3 100644
80269--- a/kernel/trace/trace.h
80270+++ b/kernel/trace/trace.h
80271@@ -1030,7 +1030,7 @@ extern const char *__stop___trace_bprintk_fmt[];
80272 void trace_printk_init_buffers(void);
80273 void trace_printk_start_comm(void);
80274 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
80275-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
80276+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled);
80277
80278 /*
80279 * Normal trace_printk() and friends allocates special buffers
80280diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
80281index 27963e2..5a6936f 100644
80282--- a/kernel/trace/trace_events.c
80283+++ b/kernel/trace/trace_events.c
80284@@ -1637,10 +1637,6 @@ static LIST_HEAD(ftrace_module_file_list);
80285 struct ftrace_module_file_ops {
80286 struct list_head list;
80287 struct module *mod;
80288- struct file_operations id;
80289- struct file_operations enable;
80290- struct file_operations format;
80291- struct file_operations filter;
80292 };
80293
80294 static struct ftrace_module_file_ops *
80295@@ -1681,17 +1677,12 @@ trace_create_file_ops(struct module *mod)
80296
80297 file_ops->mod = mod;
80298
80299- file_ops->id = ftrace_event_id_fops;
80300- file_ops->id.owner = mod;
80301-
80302- file_ops->enable = ftrace_enable_fops;
80303- file_ops->enable.owner = mod;
80304-
80305- file_ops->filter = ftrace_event_filter_fops;
80306- file_ops->filter.owner = mod;
80307-
80308- file_ops->format = ftrace_event_format_fops;
80309- file_ops->format.owner = mod;
80310+ pax_open_kernel();
80311+ mod->trace_id.owner = mod;
80312+ mod->trace_enable.owner = mod;
80313+ mod->trace_filter.owner = mod;
80314+ mod->trace_format.owner = mod;
80315+ pax_close_kernel();
80316
80317 list_add(&file_ops->list, &ftrace_module_file_list);
80318
80319@@ -1782,8 +1773,8 @@ __trace_add_new_mod_event(struct ftrace_event_call *call,
80320 struct ftrace_module_file_ops *file_ops)
80321 {
80322 return __trace_add_new_event(call, tr,
80323- &file_ops->id, &file_ops->enable,
80324- &file_ops->filter, &file_ops->format);
80325+ &file_ops->mod->trace_id, &file_ops->mod->trace_enable,
80326+ &file_ops->mod->trace_filter, &file_ops->mod->trace_format);
80327 }
80328
80329 #else
80330diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
80331index a5e8f48..a9690d2 100644
80332--- a/kernel/trace/trace_mmiotrace.c
80333+++ b/kernel/trace/trace_mmiotrace.c
80334@@ -24,7 +24,7 @@ struct header_iter {
80335 static struct trace_array *mmio_trace_array;
80336 static bool overrun_detected;
80337 static unsigned long prev_overruns;
80338-static atomic_t dropped_count;
80339+static atomic_unchecked_t dropped_count;
80340
80341 static void mmio_reset_data(struct trace_array *tr)
80342 {
80343@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
80344
80345 static unsigned long count_overruns(struct trace_iterator *iter)
80346 {
80347- unsigned long cnt = atomic_xchg(&dropped_count, 0);
80348+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
80349 unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
80350
80351 if (over > prev_overruns)
80352@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
80353 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
80354 sizeof(*entry), 0, pc);
80355 if (!event) {
80356- atomic_inc(&dropped_count);
80357+ atomic_inc_unchecked(&dropped_count);
80358 return;
80359 }
80360 entry = ring_buffer_event_data(event);
80361@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
80362 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
80363 sizeof(*entry), 0, pc);
80364 if (!event) {
80365- atomic_inc(&dropped_count);
80366+ atomic_inc_unchecked(&dropped_count);
80367 return;
80368 }
80369 entry = ring_buffer_event_data(event);
80370diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
80371index bb922d9..2a54a257 100644
80372--- a/kernel/trace/trace_output.c
80373+++ b/kernel/trace/trace_output.c
80374@@ -294,7 +294,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
80375
80376 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
80377 if (!IS_ERR(p)) {
80378- p = mangle_path(s->buffer + s->len, p, "\n");
80379+ p = mangle_path(s->buffer + s->len, p, "\n\\");
80380 if (p) {
80381 s->len = p - s->buffer;
80382 return 1;
80383@@ -893,14 +893,16 @@ int register_ftrace_event(struct trace_event *event)
80384 goto out;
80385 }
80386
80387+ pax_open_kernel();
80388 if (event->funcs->trace == NULL)
80389- event->funcs->trace = trace_nop_print;
80390+ *(void **)&event->funcs->trace = trace_nop_print;
80391 if (event->funcs->raw == NULL)
80392- event->funcs->raw = trace_nop_print;
80393+ *(void **)&event->funcs->raw = trace_nop_print;
80394 if (event->funcs->hex == NULL)
80395- event->funcs->hex = trace_nop_print;
80396+ *(void **)&event->funcs->hex = trace_nop_print;
80397 if (event->funcs->binary == NULL)
80398- event->funcs->binary = trace_nop_print;
80399+ *(void **)&event->funcs->binary = trace_nop_print;
80400+ pax_close_kernel();
80401
80402 key = event->type & (EVENT_HASHSIZE - 1);
80403
80404diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
80405index b20428c..4845a10 100644
80406--- a/kernel/trace/trace_stack.c
80407+++ b/kernel/trace/trace_stack.c
80408@@ -68,7 +68,7 @@ check_stack(unsigned long ip, unsigned long *stack)
80409 return;
80410
80411 /* we do not handle interrupt stacks yet */
80412- if (!object_is_on_stack(stack))
80413+ if (!object_starts_on_stack(stack))
80414 return;
80415
80416 local_irq_save(flags);
80417diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
80418index d8c30db..e065e89 100644
80419--- a/kernel/user_namespace.c
80420+++ b/kernel/user_namespace.c
80421@@ -853,7 +853,7 @@ static int userns_install(struct nsproxy *nsproxy, void *ns)
80422 if (atomic_read(&current->mm->mm_users) > 1)
80423 return -EINVAL;
80424
80425- if (current->fs->users != 1)
80426+ if (atomic_read(&current->fs->users) != 1)
80427 return -EINVAL;
80428
80429 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
80430diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
80431index 4f69f9a..7c6f8f8 100644
80432--- a/kernel/utsname_sysctl.c
80433+++ b/kernel/utsname_sysctl.c
80434@@ -47,7 +47,7 @@ static void put_uts(ctl_table *table, int write, void *which)
80435 static int proc_do_uts_string(ctl_table *table, int write,
80436 void __user *buffer, size_t *lenp, loff_t *ppos)
80437 {
80438- struct ctl_table uts_table;
80439+ ctl_table_no_const uts_table;
80440 int r;
80441 memcpy(&uts_table, table, sizeof(uts_table));
80442 uts_table.data = get_uts(table, write);
80443diff --git a/kernel/watchdog.c b/kernel/watchdog.c
80444index 05039e3..17490c7 100644
80445--- a/kernel/watchdog.c
80446+++ b/kernel/watchdog.c
80447@@ -531,7 +531,7 @@ int proc_dowatchdog(struct ctl_table *table, int write,
80448 }
80449 #endif /* CONFIG_SYSCTL */
80450
80451-static struct smp_hotplug_thread watchdog_threads = {
80452+static struct smp_hotplug_thread watchdog_threads __read_only = {
80453 .store = &softlockup_watchdog,
80454 .thread_should_run = watchdog_should_run,
80455 .thread_fn = watchdog,
80456diff --git a/kernel/workqueue.c b/kernel/workqueue.c
80457index ee8e29a..410568e 100644
80458--- a/kernel/workqueue.c
80459+++ b/kernel/workqueue.c
80460@@ -4584,7 +4584,7 @@ static void rebind_workers(struct worker_pool *pool)
80461 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
80462 worker_flags |= WORKER_REBOUND;
80463 worker_flags &= ~WORKER_UNBOUND;
80464- ACCESS_ONCE(worker->flags) = worker_flags;
80465+ ACCESS_ONCE_RW(worker->flags) = worker_flags;
80466 }
80467
80468 spin_unlock_irq(&pool->lock);
80469diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
80470index 566cf2b..fdaa52c 100644
80471--- a/lib/Kconfig.debug
80472+++ b/lib/Kconfig.debug
80473@@ -549,7 +549,7 @@ config DEBUG_MUTEXES
80474
80475 config DEBUG_LOCK_ALLOC
80476 bool "Lock debugging: detect incorrect freeing of live locks"
80477- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
80478+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
80479 select DEBUG_SPINLOCK
80480 select DEBUG_MUTEXES
80481 select LOCKDEP
80482@@ -563,7 +563,7 @@ config DEBUG_LOCK_ALLOC
80483
80484 config PROVE_LOCKING
80485 bool "Lock debugging: prove locking correctness"
80486- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
80487+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
80488 select LOCKDEP
80489 select DEBUG_SPINLOCK
80490 select DEBUG_MUTEXES
80491@@ -614,7 +614,7 @@ config LOCKDEP
80492
80493 config LOCK_STAT
80494 bool "Lock usage statistics"
80495- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
80496+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
80497 select LOCKDEP
80498 select DEBUG_SPINLOCK
80499 select DEBUG_MUTEXES
80500@@ -1282,6 +1282,7 @@ config LATENCYTOP
80501 depends on DEBUG_KERNEL
80502 depends on STACKTRACE_SUPPORT
80503 depends on PROC_FS
80504+ depends on !GRKERNSEC_HIDESYM
80505 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
80506 select KALLSYMS
80507 select KALLSYMS_ALL
80508@@ -1298,7 +1299,7 @@ config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
80509 config DEBUG_STRICT_USER_COPY_CHECKS
80510 bool "Strict user copy size checks"
80511 depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
80512- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
80513+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
80514 help
80515 Enabling this option turns a certain set of sanity checks for user
80516 copy operations into compile time failures.
80517@@ -1328,7 +1329,7 @@ config INTERVAL_TREE_TEST
80518
80519 config PROVIDE_OHCI1394_DMA_INIT
80520 bool "Remote debugging over FireWire early on boot"
80521- depends on PCI && X86
80522+ depends on PCI && X86 && !GRKERNSEC
80523 help
80524 If you want to debug problems which hang or crash the kernel early
80525 on boot and the crashing machine has a FireWire port, you can use
80526@@ -1357,7 +1358,7 @@ config PROVIDE_OHCI1394_DMA_INIT
80527
80528 config FIREWIRE_OHCI_REMOTE_DMA
80529 bool "Remote debugging over FireWire with firewire-ohci"
80530- depends on FIREWIRE_OHCI
80531+ depends on FIREWIRE_OHCI && !GRKERNSEC
80532 help
80533 This option lets you use the FireWire bus for remote debugging
80534 with help of the firewire-ohci driver. It enables unfiltered
80535diff --git a/lib/Makefile b/lib/Makefile
80536index c55a037..fb46e3b 100644
80537--- a/lib/Makefile
80538+++ b/lib/Makefile
80539@@ -50,7 +50,7 @@ obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
80540
80541 obj-$(CONFIG_BTREE) += btree.o
80542 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
80543-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
80544+obj-y += list_debug.o
80545 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
80546
80547 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
80548diff --git a/lib/bitmap.c b/lib/bitmap.c
80549index 06f7e4f..f3cf2b0 100644
80550--- a/lib/bitmap.c
80551+++ b/lib/bitmap.c
80552@@ -422,7 +422,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
80553 {
80554 int c, old_c, totaldigits, ndigits, nchunks, nbits;
80555 u32 chunk;
80556- const char __user __force *ubuf = (const char __user __force *)buf;
80557+ const char __user *ubuf = (const char __force_user *)buf;
80558
80559 bitmap_zero(maskp, nmaskbits);
80560
80561@@ -507,7 +507,7 @@ int bitmap_parse_user(const char __user *ubuf,
80562 {
80563 if (!access_ok(VERIFY_READ, ubuf, ulen))
80564 return -EFAULT;
80565- return __bitmap_parse((const char __force *)ubuf,
80566+ return __bitmap_parse((const char __force_kernel *)ubuf,
80567 ulen, 1, maskp, nmaskbits);
80568
80569 }
80570@@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
80571 {
80572 unsigned a, b;
80573 int c, old_c, totaldigits;
80574- const char __user __force *ubuf = (const char __user __force *)buf;
80575+ const char __user *ubuf = (const char __force_user *)buf;
80576 int exp_digit, in_range;
80577
80578 totaldigits = c = 0;
80579@@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
80580 {
80581 if (!access_ok(VERIFY_READ, ubuf, ulen))
80582 return -EFAULT;
80583- return __bitmap_parselist((const char __force *)ubuf,
80584+ return __bitmap_parselist((const char __force_kernel *)ubuf,
80585 ulen, 1, maskp, nmaskbits);
80586 }
80587 EXPORT_SYMBOL(bitmap_parselist_user);
80588diff --git a/lib/bug.c b/lib/bug.c
80589index 1686034..a9c00c8 100644
80590--- a/lib/bug.c
80591+++ b/lib/bug.c
80592@@ -134,6 +134,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
80593 return BUG_TRAP_TYPE_NONE;
80594
80595 bug = find_bug(bugaddr);
80596+ if (!bug)
80597+ return BUG_TRAP_TYPE_NONE;
80598
80599 file = NULL;
80600 line = 0;
80601diff --git a/lib/debugobjects.c b/lib/debugobjects.c
80602index 37061ed..da83f48 100644
80603--- a/lib/debugobjects.c
80604+++ b/lib/debugobjects.c
80605@@ -286,7 +286,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
80606 if (limit > 4)
80607 return;
80608
80609- is_on_stack = object_is_on_stack(addr);
80610+ is_on_stack = object_starts_on_stack(addr);
80611 if (is_on_stack == onstack)
80612 return;
80613
80614diff --git a/lib/devres.c b/lib/devres.c
80615index 8235331..5881053 100644
80616--- a/lib/devres.c
80617+++ b/lib/devres.c
80618@@ -81,7 +81,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
80619 void devm_iounmap(struct device *dev, void __iomem *addr)
80620 {
80621 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
80622- (void *)addr));
80623+ (void __force *)addr));
80624 iounmap(addr);
80625 }
80626 EXPORT_SYMBOL(devm_iounmap);
80627@@ -224,7 +224,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
80628 {
80629 ioport_unmap(addr);
80630 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
80631- devm_ioport_map_match, (void *)addr));
80632+ devm_ioport_map_match, (void __force *)addr));
80633 }
80634 EXPORT_SYMBOL(devm_ioport_unmap);
80635 #endif /* CONFIG_HAS_IOPORT */
80636diff --git a/lib/div64.c b/lib/div64.c
80637index a163b6c..9618fa5 100644
80638--- a/lib/div64.c
80639+++ b/lib/div64.c
80640@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
80641 EXPORT_SYMBOL(__div64_32);
80642
80643 #ifndef div_s64_rem
80644-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
80645+s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
80646 {
80647 u64 quotient;
80648
80649@@ -90,7 +90,7 @@ EXPORT_SYMBOL(div_s64_rem);
80650 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
80651 */
80652 #ifndef div64_u64
80653-u64 div64_u64(u64 dividend, u64 divisor)
80654+u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
80655 {
80656 u32 high = divisor >> 32;
80657 u64 quot;
80658diff --git a/lib/dma-debug.c b/lib/dma-debug.c
80659index d87a17a..ac0d79a 100644
80660--- a/lib/dma-debug.c
80661+++ b/lib/dma-debug.c
80662@@ -768,7 +768,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
80663
80664 void dma_debug_add_bus(struct bus_type *bus)
80665 {
80666- struct notifier_block *nb;
80667+ notifier_block_no_const *nb;
80668
80669 if (global_disable)
80670 return;
80671@@ -945,7 +945,7 @@ static void check_unmap(struct dma_debug_entry *ref)
80672
80673 static void check_for_stack(struct device *dev, void *addr)
80674 {
80675- if (object_is_on_stack(addr))
80676+ if (object_starts_on_stack(addr))
80677 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
80678 "stack [addr=%p]\n", addr);
80679 }
80680diff --git a/lib/inflate.c b/lib/inflate.c
80681index 013a761..c28f3fc 100644
80682--- a/lib/inflate.c
80683+++ b/lib/inflate.c
80684@@ -269,7 +269,7 @@ static void free(void *where)
80685 malloc_ptr = free_mem_ptr;
80686 }
80687 #else
80688-#define malloc(a) kmalloc(a, GFP_KERNEL)
80689+#define malloc(a) kmalloc((a), GFP_KERNEL)
80690 #define free(a) kfree(a)
80691 #endif
80692
80693diff --git a/lib/ioremap.c b/lib/ioremap.c
80694index 0c9216c..863bd89 100644
80695--- a/lib/ioremap.c
80696+++ b/lib/ioremap.c
80697@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
80698 unsigned long next;
80699
80700 phys_addr -= addr;
80701- pmd = pmd_alloc(&init_mm, pud, addr);
80702+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
80703 if (!pmd)
80704 return -ENOMEM;
80705 do {
80706@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
80707 unsigned long next;
80708
80709 phys_addr -= addr;
80710- pud = pud_alloc(&init_mm, pgd, addr);
80711+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
80712 if (!pud)
80713 return -ENOMEM;
80714 do {
80715diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
80716index bd2bea9..6b3c95e 100644
80717--- a/lib/is_single_threaded.c
80718+++ b/lib/is_single_threaded.c
80719@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
80720 struct task_struct *p, *t;
80721 bool ret;
80722
80723+ if (!mm)
80724+ return true;
80725+
80726 if (atomic_read(&task->signal->live) != 1)
80727 return false;
80728
80729diff --git a/lib/kobject.c b/lib/kobject.c
80730index b7e29a6..2f3ca75 100644
80731--- a/lib/kobject.c
80732+++ b/lib/kobject.c
80733@@ -805,7 +805,7 @@ static struct kset *kset_create(const char *name,
80734 kset = kzalloc(sizeof(*kset), GFP_KERNEL);
80735 if (!kset)
80736 return NULL;
80737- retval = kobject_set_name(&kset->kobj, name);
80738+ retval = kobject_set_name(&kset->kobj, "%s", name);
80739 if (retval) {
80740 kfree(kset);
80741 return NULL;
80742@@ -859,9 +859,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
80743
80744
80745 static DEFINE_SPINLOCK(kobj_ns_type_lock);
80746-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
80747+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
80748
80749-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
80750+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
80751 {
80752 enum kobj_ns_type type = ops->type;
80753 int error;
80754diff --git a/lib/list_debug.c b/lib/list_debug.c
80755index c24c2f7..06e070b 100644
80756--- a/lib/list_debug.c
80757+++ b/lib/list_debug.c
80758@@ -11,7 +11,9 @@
80759 #include <linux/bug.h>
80760 #include <linux/kernel.h>
80761 #include <linux/rculist.h>
80762+#include <linux/mm.h>
80763
80764+#ifdef CONFIG_DEBUG_LIST
80765 /*
80766 * Insert a new entry between two known consecutive entries.
80767 *
80768@@ -19,21 +21,32 @@
80769 * the prev/next entries already!
80770 */
80771
80772-void __list_add(struct list_head *new,
80773- struct list_head *prev,
80774- struct list_head *next)
80775+static bool __list_add_debug(struct list_head *new,
80776+ struct list_head *prev,
80777+ struct list_head *next)
80778 {
80779- WARN(next->prev != prev,
80780+ if (WARN(next->prev != prev,
80781 "list_add corruption. next->prev should be "
80782 "prev (%p), but was %p. (next=%p).\n",
80783- prev, next->prev, next);
80784- WARN(prev->next != next,
80785+ prev, next->prev, next) ||
80786+ WARN(prev->next != next,
80787 "list_add corruption. prev->next should be "
80788 "next (%p), but was %p. (prev=%p).\n",
80789- next, prev->next, prev);
80790- WARN(new == prev || new == next,
80791- "list_add double add: new=%p, prev=%p, next=%p.\n",
80792- new, prev, next);
80793+ next, prev->next, prev) ||
80794+ WARN(new == prev || new == next,
80795+ "list_add double add: new=%p, prev=%p, next=%p.\n",
80796+ new, prev, next))
80797+ return false;
80798+ return true;
80799+}
80800+
80801+void __list_add(struct list_head *new,
80802+ struct list_head *prev,
80803+ struct list_head *next)
80804+{
80805+ if (!__list_add_debug(new, prev, next))
80806+ return;
80807+
80808 next->prev = new;
80809 new->next = next;
80810 new->prev = prev;
80811@@ -41,7 +54,7 @@ void __list_add(struct list_head *new,
80812 }
80813 EXPORT_SYMBOL(__list_add);
80814
80815-void __list_del_entry(struct list_head *entry)
80816+static bool __list_del_entry_debug(struct list_head *entry)
80817 {
80818 struct list_head *prev, *next;
80819
80820@@ -60,9 +73,16 @@ void __list_del_entry(struct list_head *entry)
80821 WARN(next->prev != entry,
80822 "list_del corruption. next->prev should be %p, "
80823 "but was %p\n", entry, next->prev))
80824+ return false;
80825+ return true;
80826+}
80827+
80828+void __list_del_entry(struct list_head *entry)
80829+{
80830+ if (!__list_del_entry_debug(entry))
80831 return;
80832
80833- __list_del(prev, next);
80834+ __list_del(entry->prev, entry->next);
80835 }
80836 EXPORT_SYMBOL(__list_del_entry);
80837
80838@@ -86,15 +106,85 @@ EXPORT_SYMBOL(list_del);
80839 void __list_add_rcu(struct list_head *new,
80840 struct list_head *prev, struct list_head *next)
80841 {
80842- WARN(next->prev != prev,
80843- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
80844- prev, next->prev, next);
80845- WARN(prev->next != next,
80846- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
80847- next, prev->next, prev);
80848+ if (!__list_add_debug(new, prev, next))
80849+ return;
80850+
80851 new->next = next;
80852 new->prev = prev;
80853 rcu_assign_pointer(list_next_rcu(prev), new);
80854 next->prev = new;
80855 }
80856 EXPORT_SYMBOL(__list_add_rcu);
80857+#endif
80858+
80859+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
80860+{
80861+#ifdef CONFIG_DEBUG_LIST
80862+ if (!__list_add_debug(new, prev, next))
80863+ return;
80864+#endif
80865+
80866+ pax_open_kernel();
80867+ next->prev = new;
80868+ new->next = next;
80869+ new->prev = prev;
80870+ prev->next = new;
80871+ pax_close_kernel();
80872+}
80873+EXPORT_SYMBOL(__pax_list_add);
80874+
80875+void pax_list_del(struct list_head *entry)
80876+{
80877+#ifdef CONFIG_DEBUG_LIST
80878+ if (!__list_del_entry_debug(entry))
80879+ return;
80880+#endif
80881+
80882+ pax_open_kernel();
80883+ __list_del(entry->prev, entry->next);
80884+ entry->next = LIST_POISON1;
80885+ entry->prev = LIST_POISON2;
80886+ pax_close_kernel();
80887+}
80888+EXPORT_SYMBOL(pax_list_del);
80889+
80890+void pax_list_del_init(struct list_head *entry)
80891+{
80892+ pax_open_kernel();
80893+ __list_del(entry->prev, entry->next);
80894+ INIT_LIST_HEAD(entry);
80895+ pax_close_kernel();
80896+}
80897+EXPORT_SYMBOL(pax_list_del_init);
80898+
80899+void __pax_list_add_rcu(struct list_head *new,
80900+ struct list_head *prev, struct list_head *next)
80901+{
80902+#ifdef CONFIG_DEBUG_LIST
80903+ if (!__list_add_debug(new, prev, next))
80904+ return;
80905+#endif
80906+
80907+ pax_open_kernel();
80908+ new->next = next;
80909+ new->prev = prev;
80910+ rcu_assign_pointer(list_next_rcu(prev), new);
80911+ next->prev = new;
80912+ pax_close_kernel();
80913+}
80914+EXPORT_SYMBOL(__pax_list_add_rcu);
80915+
80916+void pax_list_del_rcu(struct list_head *entry)
80917+{
80918+#ifdef CONFIG_DEBUG_LIST
80919+ if (!__list_del_entry_debug(entry))
80920+ return;
80921+#endif
80922+
80923+ pax_open_kernel();
80924+ __list_del(entry->prev, entry->next);
80925+ entry->next = LIST_POISON1;
80926+ entry->prev = LIST_POISON2;
80927+ pax_close_kernel();
80928+}
80929+EXPORT_SYMBOL(pax_list_del_rcu);
80930diff --git a/lib/radix-tree.c b/lib/radix-tree.c
80931index e796429..6e38f9f 100644
80932--- a/lib/radix-tree.c
80933+++ b/lib/radix-tree.c
80934@@ -92,7 +92,7 @@ struct radix_tree_preload {
80935 int nr;
80936 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
80937 };
80938-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
80939+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
80940
80941 static inline void *ptr_to_indirect(void *ptr)
80942 {
80943diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
80944index bb2b201..46abaf9 100644
80945--- a/lib/strncpy_from_user.c
80946+++ b/lib/strncpy_from_user.c
80947@@ -21,7 +21,7 @@
80948 */
80949 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
80950 {
80951- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
80952+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
80953 long res = 0;
80954
80955 /*
80956diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
80957index a28df52..3d55877 100644
80958--- a/lib/strnlen_user.c
80959+++ b/lib/strnlen_user.c
80960@@ -26,7 +26,7 @@
80961 */
80962 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
80963 {
80964- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
80965+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
80966 long align, res = 0;
80967 unsigned long c;
80968
80969diff --git a/lib/swiotlb.c b/lib/swiotlb.c
80970index d23762e..e21eab2 100644
80971--- a/lib/swiotlb.c
80972+++ b/lib/swiotlb.c
80973@@ -664,7 +664,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
80974
80975 void
80976 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
80977- dma_addr_t dev_addr)
80978+ dma_addr_t dev_addr, struct dma_attrs *attrs)
80979 {
80980 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
80981
80982diff --git a/lib/usercopy.c b/lib/usercopy.c
80983index 4f5b1dd..7cab418 100644
80984--- a/lib/usercopy.c
80985+++ b/lib/usercopy.c
80986@@ -7,3 +7,9 @@ void copy_from_user_overflow(void)
80987 WARN(1, "Buffer overflow detected!\n");
80988 }
80989 EXPORT_SYMBOL(copy_from_user_overflow);
80990+
80991+void copy_to_user_overflow(void)
80992+{
80993+ WARN(1, "Buffer overflow detected!\n");
80994+}
80995+EXPORT_SYMBOL(copy_to_user_overflow);
80996diff --git a/lib/vsprintf.c b/lib/vsprintf.c
80997index e149c64..24aa71a 100644
80998--- a/lib/vsprintf.c
80999+++ b/lib/vsprintf.c
81000@@ -16,6 +16,9 @@
81001 * - scnprintf and vscnprintf
81002 */
81003
81004+#ifdef CONFIG_GRKERNSEC_HIDESYM
81005+#define __INCLUDED_BY_HIDESYM 1
81006+#endif
81007 #include <stdarg.h>
81008 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
81009 #include <linux/types.h>
81010@@ -981,7 +984,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
81011 return number(buf, end, *(const netdev_features_t *)addr, spec);
81012 }
81013
81014+#ifdef CONFIG_GRKERNSEC_HIDESYM
81015+int kptr_restrict __read_mostly = 2;
81016+#else
81017 int kptr_restrict __read_mostly;
81018+#endif
81019
81020 /*
81021 * Show a '%p' thing. A kernel extension is that the '%p' is followed
81022@@ -994,6 +1001,7 @@ int kptr_restrict __read_mostly;
81023 * - 'f' For simple symbolic function names without offset
81024 * - 'S' For symbolic direct pointers with offset
81025 * - 's' For symbolic direct pointers without offset
81026+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
81027 * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
81028 * - 'B' For backtraced symbolic direct pointers with offset
81029 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
81030@@ -1052,12 +1060,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
81031
81032 if (!ptr && *fmt != 'K') {
81033 /*
81034- * Print (null) with the same width as a pointer so it makes
81035+ * Print (nil) with the same width as a pointer so it makes
81036 * tabular output look nice.
81037 */
81038 if (spec.field_width == -1)
81039 spec.field_width = default_width;
81040- return string(buf, end, "(null)", spec);
81041+ return string(buf, end, "(nil)", spec);
81042 }
81043
81044 switch (*fmt) {
81045@@ -1067,6 +1075,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
81046 /* Fallthrough */
81047 case 'S':
81048 case 's':
81049+#ifdef CONFIG_GRKERNSEC_HIDESYM
81050+ break;
81051+#else
81052+ return symbol_string(buf, end, ptr, spec, fmt);
81053+#endif
81054+ case 'A':
81055 case 'B':
81056 return symbol_string(buf, end, ptr, spec, fmt);
81057 case 'R':
81058@@ -1107,6 +1121,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
81059 va_end(va);
81060 return buf;
81061 }
81062+ case 'P':
81063+ break;
81064 case 'K':
81065 /*
81066 * %pK cannot be used in IRQ context because its test
81067@@ -1136,6 +1152,21 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
81068 return number(buf, end,
81069 (unsigned long long) *((phys_addr_t *)ptr), spec);
81070 }
81071+
81072+#ifdef CONFIG_GRKERNSEC_HIDESYM
81073+ /* 'P' = approved pointers to copy to userland,
81074+ as in the /proc/kallsyms case, as we make it display nothing
81075+ for non-root users, and the real contents for root users
81076+ Also ignore 'K' pointers, since we force their NULLing for non-root users
81077+ above
81078+ */
81079+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'K' && is_usercopy_object(buf)) {
81080+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
81081+ dump_stack();
81082+ ptr = NULL;
81083+ }
81084+#endif
81085+
81086 spec.flags |= SMALL;
81087 if (spec.field_width == -1) {
81088 spec.field_width = default_width;
81089@@ -1857,11 +1888,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
81090 typeof(type) value; \
81091 if (sizeof(type) == 8) { \
81092 args = PTR_ALIGN(args, sizeof(u32)); \
81093- *(u32 *)&value = *(u32 *)args; \
81094- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
81095+ *(u32 *)&value = *(const u32 *)args; \
81096+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
81097 } else { \
81098 args = PTR_ALIGN(args, sizeof(type)); \
81099- value = *(typeof(type) *)args; \
81100+ value = *(const typeof(type) *)args; \
81101 } \
81102 args += sizeof(type); \
81103 value; \
81104@@ -1924,7 +1955,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
81105 case FORMAT_TYPE_STR: {
81106 const char *str_arg = args;
81107 args += strlen(str_arg) + 1;
81108- str = string(str, end, (char *)str_arg, spec);
81109+ str = string(str, end, str_arg, spec);
81110 break;
81111 }
81112
81113diff --git a/localversion-grsec b/localversion-grsec
81114new file mode 100644
81115index 0000000..7cd6065
81116--- /dev/null
81117+++ b/localversion-grsec
81118@@ -0,0 +1 @@
81119+-grsec
81120diff --git a/mm/Kconfig b/mm/Kconfig
81121index e742d06..c56fdd8 100644
81122--- a/mm/Kconfig
81123+++ b/mm/Kconfig
81124@@ -317,10 +317,10 @@ config KSM
81125 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
81126
81127 config DEFAULT_MMAP_MIN_ADDR
81128- int "Low address space to protect from user allocation"
81129+ int "Low address space to protect from user allocation"
81130 depends on MMU
81131- default 4096
81132- help
81133+ default 65536
81134+ help
81135 This is the portion of low virtual memory which should be protected
81136 from userspace allocation. Keeping a user from writing to low pages
81137 can help reduce the impact of kernel NULL pointer bugs.
81138@@ -351,7 +351,7 @@ config MEMORY_FAILURE
81139
81140 config HWPOISON_INJECT
81141 tristate "HWPoison pages injector"
81142- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
81143+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
81144 select PROC_PAGE_MONITOR
81145
81146 config NOMMU_INITIAL_TRIM_EXCESS
81147diff --git a/mm/backing-dev.c b/mm/backing-dev.c
81148index 5025174..9fc1c5c 100644
81149--- a/mm/backing-dev.c
81150+++ b/mm/backing-dev.c
81151@@ -515,7 +515,6 @@ EXPORT_SYMBOL(bdi_destroy);
81152 int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
81153 unsigned int cap)
81154 {
81155- char tmp[32];
81156 int err;
81157
81158 bdi->name = name;
81159@@ -524,8 +523,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
81160 if (err)
81161 return err;
81162
81163- sprintf(tmp, "%.28s%s", name, "-%d");
81164- err = bdi_register(bdi, NULL, tmp, atomic_long_inc_return(&bdi_seq));
81165+ err = bdi_register(bdi, NULL, "%.28s-%ld", name, atomic_long_inc_return(&bdi_seq));
81166 if (err) {
81167 bdi_destroy(bdi);
81168 return err;
81169diff --git a/mm/filemap.c b/mm/filemap.c
81170index 7905fe7..e60faa8 100644
81171--- a/mm/filemap.c
81172+++ b/mm/filemap.c
81173@@ -1766,7 +1766,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
81174 struct address_space *mapping = file->f_mapping;
81175
81176 if (!mapping->a_ops->readpage)
81177- return -ENOEXEC;
81178+ return -ENODEV;
81179 file_accessed(file);
81180 vma->vm_ops = &generic_file_vm_ops;
81181 return 0;
81182@@ -2106,6 +2106,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
81183 *pos = i_size_read(inode);
81184
81185 if (limit != RLIM_INFINITY) {
81186+ gr_learn_resource(current, RLIMIT_FSIZE,*pos, 0);
81187 if (*pos >= limit) {
81188 send_sig(SIGXFSZ, current, 0);
81189 return -EFBIG;
81190diff --git a/mm/fremap.c b/mm/fremap.c
81191index 87da359..3f41cb1 100644
81192--- a/mm/fremap.c
81193+++ b/mm/fremap.c
81194@@ -158,6 +158,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
81195 retry:
81196 vma = find_vma(mm, start);
81197
81198+#ifdef CONFIG_PAX_SEGMEXEC
81199+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
81200+ goto out;
81201+#endif
81202+
81203 /*
81204 * Make sure the vma is shared, that it supports prefaulting,
81205 * and that the remapped range is valid and fully within
81206diff --git a/mm/highmem.c b/mm/highmem.c
81207index b32b70c..e512eb0 100644
81208--- a/mm/highmem.c
81209+++ b/mm/highmem.c
81210@@ -138,8 +138,9 @@ static void flush_all_zero_pkmaps(void)
81211 * So no dangers, even with speculative execution.
81212 */
81213 page = pte_page(pkmap_page_table[i]);
81214+ pax_open_kernel();
81215 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
81216-
81217+ pax_close_kernel();
81218 set_page_address(page, NULL);
81219 need_flush = 1;
81220 }
81221@@ -198,9 +199,11 @@ start:
81222 }
81223 }
81224 vaddr = PKMAP_ADDR(last_pkmap_nr);
81225+
81226+ pax_open_kernel();
81227 set_pte_at(&init_mm, vaddr,
81228 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
81229-
81230+ pax_close_kernel();
81231 pkmap_count[last_pkmap_nr] = 1;
81232 set_page_address(page, (void *)vaddr);
81233
81234diff --git a/mm/hugetlb.c b/mm/hugetlb.c
81235index 5cf99bf..28634c8 100644
81236--- a/mm/hugetlb.c
81237+++ b/mm/hugetlb.c
81238@@ -2022,15 +2022,17 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
81239 struct hstate *h = &default_hstate;
81240 unsigned long tmp;
81241 int ret;
81242+ ctl_table_no_const hugetlb_table;
81243
81244 tmp = h->max_huge_pages;
81245
81246 if (write && h->order >= MAX_ORDER)
81247 return -EINVAL;
81248
81249- table->data = &tmp;
81250- table->maxlen = sizeof(unsigned long);
81251- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
81252+ hugetlb_table = *table;
81253+ hugetlb_table.data = &tmp;
81254+ hugetlb_table.maxlen = sizeof(unsigned long);
81255+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
81256 if (ret)
81257 goto out;
81258
81259@@ -2087,15 +2089,17 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
81260 struct hstate *h = &default_hstate;
81261 unsigned long tmp;
81262 int ret;
81263+ ctl_table_no_const hugetlb_table;
81264
81265 tmp = h->nr_overcommit_huge_pages;
81266
81267 if (write && h->order >= MAX_ORDER)
81268 return -EINVAL;
81269
81270- table->data = &tmp;
81271- table->maxlen = sizeof(unsigned long);
81272- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
81273+ hugetlb_table = *table;
81274+ hugetlb_table.data = &tmp;
81275+ hugetlb_table.maxlen = sizeof(unsigned long);
81276+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
81277 if (ret)
81278 goto out;
81279
81280@@ -2545,6 +2549,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
81281 return 1;
81282 }
81283
81284+#ifdef CONFIG_PAX_SEGMEXEC
81285+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
81286+{
81287+ struct mm_struct *mm = vma->vm_mm;
81288+ struct vm_area_struct *vma_m;
81289+ unsigned long address_m;
81290+ pte_t *ptep_m;
81291+
81292+ vma_m = pax_find_mirror_vma(vma);
81293+ if (!vma_m)
81294+ return;
81295+
81296+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
81297+ address_m = address + SEGMEXEC_TASK_SIZE;
81298+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
81299+ get_page(page_m);
81300+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
81301+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
81302+}
81303+#endif
81304+
81305 /*
81306 * Hugetlb_cow() should be called with page lock of the original hugepage held.
81307 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
81308@@ -2663,6 +2688,11 @@ retry_avoidcopy:
81309 make_huge_pte(vma, new_page, 1));
81310 page_remove_rmap(old_page);
81311 hugepage_add_new_anon_rmap(new_page, vma, address);
81312+
81313+#ifdef CONFIG_PAX_SEGMEXEC
81314+ pax_mirror_huge_pte(vma, address, new_page);
81315+#endif
81316+
81317 /* Make the old page be freed below */
81318 new_page = old_page;
81319 }
81320@@ -2821,6 +2851,10 @@ retry:
81321 && (vma->vm_flags & VM_SHARED)));
81322 set_huge_pte_at(mm, address, ptep, new_pte);
81323
81324+#ifdef CONFIG_PAX_SEGMEXEC
81325+ pax_mirror_huge_pte(vma, address, page);
81326+#endif
81327+
81328 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
81329 /* Optimization, do the COW without a second fault */
81330 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
81331@@ -2850,6 +2884,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
81332 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
81333 struct hstate *h = hstate_vma(vma);
81334
81335+#ifdef CONFIG_PAX_SEGMEXEC
81336+ struct vm_area_struct *vma_m;
81337+#endif
81338+
81339 address &= huge_page_mask(h);
81340
81341 ptep = huge_pte_offset(mm, address);
81342@@ -2863,6 +2901,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
81343 VM_FAULT_SET_HINDEX(hstate_index(h));
81344 }
81345
81346+#ifdef CONFIG_PAX_SEGMEXEC
81347+ vma_m = pax_find_mirror_vma(vma);
81348+ if (vma_m) {
81349+ unsigned long address_m;
81350+
81351+ if (vma->vm_start > vma_m->vm_start) {
81352+ address_m = address;
81353+ address -= SEGMEXEC_TASK_SIZE;
81354+ vma = vma_m;
81355+ h = hstate_vma(vma);
81356+ } else
81357+ address_m = address + SEGMEXEC_TASK_SIZE;
81358+
81359+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
81360+ return VM_FAULT_OOM;
81361+ address_m &= HPAGE_MASK;
81362+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
81363+ }
81364+#endif
81365+
81366 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
81367 if (!ptep)
81368 return VM_FAULT_OOM;
81369diff --git a/mm/internal.h b/mm/internal.h
81370index 8562de0..7fdfe92 100644
81371--- a/mm/internal.h
81372+++ b/mm/internal.h
81373@@ -100,6 +100,7 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
81374 * in mm/page_alloc.c
81375 */
81376 extern void __free_pages_bootmem(struct page *page, unsigned int order);
81377+extern void free_compound_page(struct page *page);
81378 extern void prep_compound_page(struct page *page, unsigned long order);
81379 #ifdef CONFIG_MEMORY_FAILURE
81380 extern bool is_free_buddy_page(struct page *page);
81381diff --git a/mm/kmemleak.c b/mm/kmemleak.c
81382index c8d7f31..2dbeffd 100644
81383--- a/mm/kmemleak.c
81384+++ b/mm/kmemleak.c
81385@@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
81386
81387 for (i = 0; i < object->trace_len; i++) {
81388 void *ptr = (void *)object->trace[i];
81389- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
81390+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
81391 }
81392 }
81393
81394@@ -1851,7 +1851,7 @@ static int __init kmemleak_late_init(void)
81395 return -ENOMEM;
81396 }
81397
81398- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
81399+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
81400 &kmemleak_fops);
81401 if (!dentry)
81402 pr_warning("Failed to create the debugfs kmemleak file\n");
81403diff --git a/mm/maccess.c b/mm/maccess.c
81404index d53adf9..03a24bf 100644
81405--- a/mm/maccess.c
81406+++ b/mm/maccess.c
81407@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
81408 set_fs(KERNEL_DS);
81409 pagefault_disable();
81410 ret = __copy_from_user_inatomic(dst,
81411- (__force const void __user *)src, size);
81412+ (const void __force_user *)src, size);
81413 pagefault_enable();
81414 set_fs(old_fs);
81415
81416@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
81417
81418 set_fs(KERNEL_DS);
81419 pagefault_disable();
81420- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
81421+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
81422 pagefault_enable();
81423 set_fs(old_fs);
81424
81425diff --git a/mm/madvise.c b/mm/madvise.c
81426index 7055883..aafb1ed 100644
81427--- a/mm/madvise.c
81428+++ b/mm/madvise.c
81429@@ -51,6 +51,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
81430 pgoff_t pgoff;
81431 unsigned long new_flags = vma->vm_flags;
81432
81433+#ifdef CONFIG_PAX_SEGMEXEC
81434+ struct vm_area_struct *vma_m;
81435+#endif
81436+
81437 switch (behavior) {
81438 case MADV_NORMAL:
81439 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
81440@@ -126,6 +130,13 @@ success:
81441 /*
81442 * vm_flags is protected by the mmap_sem held in write mode.
81443 */
81444+
81445+#ifdef CONFIG_PAX_SEGMEXEC
81446+ vma_m = pax_find_mirror_vma(vma);
81447+ if (vma_m)
81448+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
81449+#endif
81450+
81451 vma->vm_flags = new_flags;
81452
81453 out:
81454@@ -274,6 +285,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
81455 struct vm_area_struct ** prev,
81456 unsigned long start, unsigned long end)
81457 {
81458+
81459+#ifdef CONFIG_PAX_SEGMEXEC
81460+ struct vm_area_struct *vma_m;
81461+#endif
81462+
81463 *prev = vma;
81464 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
81465 return -EINVAL;
81466@@ -286,6 +302,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
81467 zap_page_range(vma, start, end - start, &details);
81468 } else
81469 zap_page_range(vma, start, end - start, NULL);
81470+
81471+#ifdef CONFIG_PAX_SEGMEXEC
81472+ vma_m = pax_find_mirror_vma(vma);
81473+ if (vma_m) {
81474+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
81475+ struct zap_details details = {
81476+ .nonlinear_vma = vma_m,
81477+ .last_index = ULONG_MAX,
81478+ };
81479+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
81480+ } else
81481+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
81482+ }
81483+#endif
81484+
81485 return 0;
81486 }
81487
81488@@ -485,6 +516,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
81489 if (end < start)
81490 return error;
81491
81492+#ifdef CONFIG_PAX_SEGMEXEC
81493+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
81494+ if (end > SEGMEXEC_TASK_SIZE)
81495+ return error;
81496+ } else
81497+#endif
81498+
81499+ if (end > TASK_SIZE)
81500+ return error;
81501+
81502 error = 0;
81503 if (end == start)
81504 return error;
81505diff --git a/mm/memcontrol.c b/mm/memcontrol.c
81506index fd79df5..15b0409 100644
81507--- a/mm/memcontrol.c
81508+++ b/mm/memcontrol.c
81509@@ -6296,14 +6296,6 @@ mem_cgroup_css_online(struct cgroup *cont)
81510
81511 error = memcg_init_kmem(memcg, &mem_cgroup_subsys);
81512 mutex_unlock(&memcg_create_mutex);
81513- if (error) {
81514- /*
81515- * We call put now because our (and parent's) refcnts
81516- * are already in place. mem_cgroup_put() will internally
81517- * call __mem_cgroup_free, so return directly
81518- */
81519- mem_cgroup_put(memcg);
81520- }
81521 return error;
81522 }
81523
81524diff --git a/mm/memory-failure.c b/mm/memory-failure.c
81525index ceb0c7f..b2b8e94 100644
81526--- a/mm/memory-failure.c
81527+++ b/mm/memory-failure.c
81528@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
81529
81530 int sysctl_memory_failure_recovery __read_mostly = 1;
81531
81532-atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
81533+atomic_long_unchecked_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
81534
81535 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
81536
81537@@ -202,7 +202,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
81538 pfn, t->comm, t->pid);
81539 si.si_signo = SIGBUS;
81540 si.si_errno = 0;
81541- si.si_addr = (void *)addr;
81542+ si.si_addr = (void __user *)addr;
81543 #ifdef __ARCH_SI_TRAPNO
81544 si.si_trapno = trapno;
81545 #endif
81546@@ -760,7 +760,7 @@ static struct page_state {
81547 unsigned long res;
81548 char *msg;
81549 int (*action)(struct page *p, unsigned long pfn);
81550-} error_states[] = {
81551+} __do_const error_states[] = {
81552 { reserved, reserved, "reserved kernel", me_kernel },
81553 /*
81554 * free pages are specially detected outside this table:
81555@@ -1051,7 +1051,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
81556 nr_pages = 1 << compound_order(hpage);
81557 else /* normal page or thp */
81558 nr_pages = 1;
81559- atomic_long_add(nr_pages, &num_poisoned_pages);
81560+ atomic_long_add_unchecked(nr_pages, &num_poisoned_pages);
81561
81562 /*
81563 * We need/can do nothing about count=0 pages.
81564@@ -1081,7 +1081,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
81565 if (!PageHWPoison(hpage)
81566 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
81567 || (p != hpage && TestSetPageHWPoison(hpage))) {
81568- atomic_long_sub(nr_pages, &num_poisoned_pages);
81569+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
81570 return 0;
81571 }
81572 set_page_hwpoison_huge_page(hpage);
81573@@ -1148,7 +1148,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
81574 }
81575 if (hwpoison_filter(p)) {
81576 if (TestClearPageHWPoison(p))
81577- atomic_long_sub(nr_pages, &num_poisoned_pages);
81578+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
81579 unlock_page(hpage);
81580 put_page(hpage);
81581 return 0;
81582@@ -1350,7 +1350,7 @@ int unpoison_memory(unsigned long pfn)
81583 return 0;
81584 }
81585 if (TestClearPageHWPoison(p))
81586- atomic_long_sub(nr_pages, &num_poisoned_pages);
81587+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
81588 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
81589 return 0;
81590 }
81591@@ -1364,7 +1364,7 @@ int unpoison_memory(unsigned long pfn)
81592 */
81593 if (TestClearPageHWPoison(page)) {
81594 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
81595- atomic_long_sub(nr_pages, &num_poisoned_pages);
81596+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
81597 freeit = 1;
81598 if (PageHuge(page))
81599 clear_page_hwpoison_huge_page(page);
81600@@ -1491,7 +1491,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
81601 } else {
81602 set_page_hwpoison_huge_page(hpage);
81603 dequeue_hwpoisoned_huge_page(hpage);
81604- atomic_long_add(1 << compound_trans_order(hpage),
81605+ atomic_long_add_unchecked(1 << compound_trans_order(hpage),
81606 &num_poisoned_pages);
81607 }
81608 /* keep elevated page count for bad page */
81609@@ -1552,11 +1552,11 @@ int soft_offline_page(struct page *page, int flags)
81610 if (PageHuge(page)) {
81611 set_page_hwpoison_huge_page(hpage);
81612 dequeue_hwpoisoned_huge_page(hpage);
81613- atomic_long_add(1 << compound_trans_order(hpage),
81614+ atomic_long_add_unchecked(1 << compound_trans_order(hpage),
81615 &num_poisoned_pages);
81616 } else {
81617 SetPageHWPoison(page);
81618- atomic_long_inc(&num_poisoned_pages);
81619+ atomic_long_inc_unchecked(&num_poisoned_pages);
81620 }
81621 }
81622 /* keep elevated page count for bad page */
81623@@ -1596,7 +1596,7 @@ static int __soft_offline_page(struct page *page, int flags)
81624 put_page(page);
81625 pr_info("soft_offline: %#lx: invalidated\n", pfn);
81626 SetPageHWPoison(page);
81627- atomic_long_inc(&num_poisoned_pages);
81628+ atomic_long_inc_unchecked(&num_poisoned_pages);
81629 return 0;
81630 }
81631
81632@@ -1626,7 +1626,7 @@ static int __soft_offline_page(struct page *page, int flags)
81633 ret = -EIO;
81634 } else {
81635 SetPageHWPoison(page);
81636- atomic_long_inc(&num_poisoned_pages);
81637+ atomic_long_inc_unchecked(&num_poisoned_pages);
81638 }
81639 } else {
81640 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
81641diff --git a/mm/memory.c b/mm/memory.c
81642index 61a262b..77a94d1 100644
81643--- a/mm/memory.c
81644+++ b/mm/memory.c
81645@@ -429,6 +429,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
81646 free_pte_range(tlb, pmd, addr);
81647 } while (pmd++, addr = next, addr != end);
81648
81649+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
81650 start &= PUD_MASK;
81651 if (start < floor)
81652 return;
81653@@ -443,6 +444,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
81654 pmd = pmd_offset(pud, start);
81655 pud_clear(pud);
81656 pmd_free_tlb(tlb, pmd, start);
81657+#endif
81658+
81659 }
81660
81661 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
81662@@ -462,6 +465,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
81663 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
81664 } while (pud++, addr = next, addr != end);
81665
81666+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
81667 start &= PGDIR_MASK;
81668 if (start < floor)
81669 return;
81670@@ -476,6 +480,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
81671 pud = pud_offset(pgd, start);
81672 pgd_clear(pgd);
81673 pud_free_tlb(tlb, pud, start);
81674+#endif
81675+
81676 }
81677
81678 /*
81679@@ -1635,12 +1641,6 @@ no_page_table:
81680 return page;
81681 }
81682
81683-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
81684-{
81685- return stack_guard_page_start(vma, addr) ||
81686- stack_guard_page_end(vma, addr+PAGE_SIZE);
81687-}
81688-
81689 /**
81690 * __get_user_pages() - pin user pages in memory
81691 * @tsk: task_struct of target task
81692@@ -1727,10 +1727,10 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
81693
81694 i = 0;
81695
81696- do {
81697+ while (nr_pages) {
81698 struct vm_area_struct *vma;
81699
81700- vma = find_extend_vma(mm, start);
81701+ vma = find_vma(mm, start);
81702 if (!vma && in_gate_area(mm, start)) {
81703 unsigned long pg = start & PAGE_MASK;
81704 pgd_t *pgd;
81705@@ -1779,7 +1779,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
81706 goto next_page;
81707 }
81708
81709- if (!vma ||
81710+ if (!vma || start < vma->vm_start ||
81711 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
81712 !(vm_flags & vma->vm_flags))
81713 return i ? : -EFAULT;
81714@@ -1808,11 +1808,6 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
81715 int ret;
81716 unsigned int fault_flags = 0;
81717
81718- /* For mlock, just skip the stack guard page. */
81719- if (foll_flags & FOLL_MLOCK) {
81720- if (stack_guard_page(vma, start))
81721- goto next_page;
81722- }
81723 if (foll_flags & FOLL_WRITE)
81724 fault_flags |= FAULT_FLAG_WRITE;
81725 if (nonblocking)
81726@@ -1892,7 +1887,7 @@ next_page:
81727 start += page_increm * PAGE_SIZE;
81728 nr_pages -= page_increm;
81729 } while (nr_pages && start < vma->vm_end);
81730- } while (nr_pages);
81731+ }
81732 return i;
81733 }
81734 EXPORT_SYMBOL(__get_user_pages);
81735@@ -2099,6 +2094,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
81736 page_add_file_rmap(page);
81737 set_pte_at(mm, addr, pte, mk_pte(page, prot));
81738
81739+#ifdef CONFIG_PAX_SEGMEXEC
81740+ pax_mirror_file_pte(vma, addr, page, ptl);
81741+#endif
81742+
81743 retval = 0;
81744 pte_unmap_unlock(pte, ptl);
81745 return retval;
81746@@ -2143,9 +2142,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
81747 if (!page_count(page))
81748 return -EINVAL;
81749 if (!(vma->vm_flags & VM_MIXEDMAP)) {
81750+
81751+#ifdef CONFIG_PAX_SEGMEXEC
81752+ struct vm_area_struct *vma_m;
81753+#endif
81754+
81755 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
81756 BUG_ON(vma->vm_flags & VM_PFNMAP);
81757 vma->vm_flags |= VM_MIXEDMAP;
81758+
81759+#ifdef CONFIG_PAX_SEGMEXEC
81760+ vma_m = pax_find_mirror_vma(vma);
81761+ if (vma_m)
81762+ vma_m->vm_flags |= VM_MIXEDMAP;
81763+#endif
81764+
81765 }
81766 return insert_page(vma, addr, page, vma->vm_page_prot);
81767 }
81768@@ -2228,6 +2239,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
81769 unsigned long pfn)
81770 {
81771 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
81772+ BUG_ON(vma->vm_mirror);
81773
81774 if (addr < vma->vm_start || addr >= vma->vm_end)
81775 return -EFAULT;
81776@@ -2475,7 +2487,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
81777
81778 BUG_ON(pud_huge(*pud));
81779
81780- pmd = pmd_alloc(mm, pud, addr);
81781+ pmd = (mm == &init_mm) ?
81782+ pmd_alloc_kernel(mm, pud, addr) :
81783+ pmd_alloc(mm, pud, addr);
81784 if (!pmd)
81785 return -ENOMEM;
81786 do {
81787@@ -2495,7 +2509,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
81788 unsigned long next;
81789 int err;
81790
81791- pud = pud_alloc(mm, pgd, addr);
81792+ pud = (mm == &init_mm) ?
81793+ pud_alloc_kernel(mm, pgd, addr) :
81794+ pud_alloc(mm, pgd, addr);
81795 if (!pud)
81796 return -ENOMEM;
81797 do {
81798@@ -2583,6 +2599,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
81799 copy_user_highpage(dst, src, va, vma);
81800 }
81801
81802+#ifdef CONFIG_PAX_SEGMEXEC
81803+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
81804+{
81805+ struct mm_struct *mm = vma->vm_mm;
81806+ spinlock_t *ptl;
81807+ pte_t *pte, entry;
81808+
81809+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
81810+ entry = *pte;
81811+ if (!pte_present(entry)) {
81812+ if (!pte_none(entry)) {
81813+ BUG_ON(pte_file(entry));
81814+ free_swap_and_cache(pte_to_swp_entry(entry));
81815+ pte_clear_not_present_full(mm, address, pte, 0);
81816+ }
81817+ } else {
81818+ struct page *page;
81819+
81820+ flush_cache_page(vma, address, pte_pfn(entry));
81821+ entry = ptep_clear_flush(vma, address, pte);
81822+ BUG_ON(pte_dirty(entry));
81823+ page = vm_normal_page(vma, address, entry);
81824+ if (page) {
81825+ update_hiwater_rss(mm);
81826+ if (PageAnon(page))
81827+ dec_mm_counter_fast(mm, MM_ANONPAGES);
81828+ else
81829+ dec_mm_counter_fast(mm, MM_FILEPAGES);
81830+ page_remove_rmap(page);
81831+ page_cache_release(page);
81832+ }
81833+ }
81834+ pte_unmap_unlock(pte, ptl);
81835+}
81836+
81837+/* PaX: if vma is mirrored, synchronize the mirror's PTE
81838+ *
81839+ * the ptl of the lower mapped page is held on entry and is not released on exit
81840+ * or inside to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc)
81841+ */
81842+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
81843+{
81844+ struct mm_struct *mm = vma->vm_mm;
81845+ unsigned long address_m;
81846+ spinlock_t *ptl_m;
81847+ struct vm_area_struct *vma_m;
81848+ pmd_t *pmd_m;
81849+ pte_t *pte_m, entry_m;
81850+
81851+ BUG_ON(!page_m || !PageAnon(page_m));
81852+
81853+ vma_m = pax_find_mirror_vma(vma);
81854+ if (!vma_m)
81855+ return;
81856+
81857+ BUG_ON(!PageLocked(page_m));
81858+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
81859+ address_m = address + SEGMEXEC_TASK_SIZE;
81860+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
81861+ pte_m = pte_offset_map(pmd_m, address_m);
81862+ ptl_m = pte_lockptr(mm, pmd_m);
81863+ if (ptl != ptl_m) {
81864+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
81865+ if (!pte_none(*pte_m))
81866+ goto out;
81867+ }
81868+
81869+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
81870+ page_cache_get(page_m);
81871+ page_add_anon_rmap(page_m, vma_m, address_m);
81872+ inc_mm_counter_fast(mm, MM_ANONPAGES);
81873+ set_pte_at(mm, address_m, pte_m, entry_m);
81874+ update_mmu_cache(vma_m, address_m, pte_m);
81875+out:
81876+ if (ptl != ptl_m)
81877+ spin_unlock(ptl_m);
81878+ pte_unmap(pte_m);
81879+ unlock_page(page_m);
81880+}
81881+
81882+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
81883+{
81884+ struct mm_struct *mm = vma->vm_mm;
81885+ unsigned long address_m;
81886+ spinlock_t *ptl_m;
81887+ struct vm_area_struct *vma_m;
81888+ pmd_t *pmd_m;
81889+ pte_t *pte_m, entry_m;
81890+
81891+ BUG_ON(!page_m || PageAnon(page_m));
81892+
81893+ vma_m = pax_find_mirror_vma(vma);
81894+ if (!vma_m)
81895+ return;
81896+
81897+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
81898+ address_m = address + SEGMEXEC_TASK_SIZE;
81899+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
81900+ pte_m = pte_offset_map(pmd_m, address_m);
81901+ ptl_m = pte_lockptr(mm, pmd_m);
81902+ if (ptl != ptl_m) {
81903+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
81904+ if (!pte_none(*pte_m))
81905+ goto out;
81906+ }
81907+
81908+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
81909+ page_cache_get(page_m);
81910+ page_add_file_rmap(page_m);
81911+ inc_mm_counter_fast(mm, MM_FILEPAGES);
81912+ set_pte_at(mm, address_m, pte_m, entry_m);
81913+ update_mmu_cache(vma_m, address_m, pte_m);
81914+out:
81915+ if (ptl != ptl_m)
81916+ spin_unlock(ptl_m);
81917+ pte_unmap(pte_m);
81918+}
81919+
81920+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
81921+{
81922+ struct mm_struct *mm = vma->vm_mm;
81923+ unsigned long address_m;
81924+ spinlock_t *ptl_m;
81925+ struct vm_area_struct *vma_m;
81926+ pmd_t *pmd_m;
81927+ pte_t *pte_m, entry_m;
81928+
81929+ vma_m = pax_find_mirror_vma(vma);
81930+ if (!vma_m)
81931+ return;
81932+
81933+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
81934+ address_m = address + SEGMEXEC_TASK_SIZE;
81935+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
81936+ pte_m = pte_offset_map(pmd_m, address_m);
81937+ ptl_m = pte_lockptr(mm, pmd_m);
81938+ if (ptl != ptl_m) {
81939+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
81940+ if (!pte_none(*pte_m))
81941+ goto out;
81942+ }
81943+
81944+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
81945+ set_pte_at(mm, address_m, pte_m, entry_m);
81946+out:
81947+ if (ptl != ptl_m)
81948+ spin_unlock(ptl_m);
81949+ pte_unmap(pte_m);
81950+}
81951+
81952+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
81953+{
81954+ struct page *page_m;
81955+ pte_t entry;
81956+
81957+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
81958+ goto out;
81959+
81960+ entry = *pte;
81961+ page_m = vm_normal_page(vma, address, entry);
81962+ if (!page_m)
81963+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
81964+ else if (PageAnon(page_m)) {
81965+ if (pax_find_mirror_vma(vma)) {
81966+ pte_unmap_unlock(pte, ptl);
81967+ lock_page(page_m);
81968+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
81969+ if (pte_same(entry, *pte))
81970+ pax_mirror_anon_pte(vma, address, page_m, ptl);
81971+ else
81972+ unlock_page(page_m);
81973+ }
81974+ } else
81975+ pax_mirror_file_pte(vma, address, page_m, ptl);
81976+
81977+out:
81978+ pte_unmap_unlock(pte, ptl);
81979+}
81980+#endif
81981+
81982 /*
81983 * This routine handles present pages, when users try to write
81984 * to a shared page. It is done by copying the page to a new address
81985@@ -2799,6 +2995,12 @@ gotten:
81986 */
81987 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
81988 if (likely(pte_same(*page_table, orig_pte))) {
81989+
81990+#ifdef CONFIG_PAX_SEGMEXEC
81991+ if (pax_find_mirror_vma(vma))
81992+ BUG_ON(!trylock_page(new_page));
81993+#endif
81994+
81995 if (old_page) {
81996 if (!PageAnon(old_page)) {
81997 dec_mm_counter_fast(mm, MM_FILEPAGES);
81998@@ -2850,6 +3052,10 @@ gotten:
81999 page_remove_rmap(old_page);
82000 }
82001
82002+#ifdef CONFIG_PAX_SEGMEXEC
82003+ pax_mirror_anon_pte(vma, address, new_page, ptl);
82004+#endif
82005+
82006 /* Free the old page.. */
82007 new_page = old_page;
82008 ret |= VM_FAULT_WRITE;
82009@@ -3125,6 +3331,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
82010 swap_free(entry);
82011 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
82012 try_to_free_swap(page);
82013+
82014+#ifdef CONFIG_PAX_SEGMEXEC
82015+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
82016+#endif
82017+
82018 unlock_page(page);
82019 if (page != swapcache) {
82020 /*
82021@@ -3148,6 +3359,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
82022
82023 /* No need to invalidate - it was non-present before */
82024 update_mmu_cache(vma, address, page_table);
82025+
82026+#ifdef CONFIG_PAX_SEGMEXEC
82027+ pax_mirror_anon_pte(vma, address, page, ptl);
82028+#endif
82029+
82030 unlock:
82031 pte_unmap_unlock(page_table, ptl);
82032 out:
82033@@ -3167,40 +3383,6 @@ out_release:
82034 }
82035
82036 /*
82037- * This is like a special single-page "expand_{down|up}wards()",
82038- * except we must first make sure that 'address{-|+}PAGE_SIZE'
82039- * doesn't hit another vma.
82040- */
82041-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
82042-{
82043- address &= PAGE_MASK;
82044- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
82045- struct vm_area_struct *prev = vma->vm_prev;
82046-
82047- /*
82048- * Is there a mapping abutting this one below?
82049- *
82050- * That's only ok if it's the same stack mapping
82051- * that has gotten split..
82052- */
82053- if (prev && prev->vm_end == address)
82054- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
82055-
82056- expand_downwards(vma, address - PAGE_SIZE);
82057- }
82058- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
82059- struct vm_area_struct *next = vma->vm_next;
82060-
82061- /* As VM_GROWSDOWN but s/below/above/ */
82062- if (next && next->vm_start == address + PAGE_SIZE)
82063- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
82064-
82065- expand_upwards(vma, address + PAGE_SIZE);
82066- }
82067- return 0;
82068-}
82069-
82070-/*
82071 * We enter with non-exclusive mmap_sem (to exclude vma changes,
82072 * but allow concurrent faults), and pte mapped but not yet locked.
82073 * We return with mmap_sem still held, but pte unmapped and unlocked.
82074@@ -3209,27 +3391,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
82075 unsigned long address, pte_t *page_table, pmd_t *pmd,
82076 unsigned int flags)
82077 {
82078- struct page *page;
82079+ struct page *page = NULL;
82080 spinlock_t *ptl;
82081 pte_t entry;
82082
82083- pte_unmap(page_table);
82084-
82085- /* Check if we need to add a guard page to the stack */
82086- if (check_stack_guard_page(vma, address) < 0)
82087- return VM_FAULT_SIGBUS;
82088-
82089- /* Use the zero-page for reads */
82090 if (!(flags & FAULT_FLAG_WRITE)) {
82091 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
82092 vma->vm_page_prot));
82093- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
82094+ ptl = pte_lockptr(mm, pmd);
82095+ spin_lock(ptl);
82096 if (!pte_none(*page_table))
82097 goto unlock;
82098 goto setpte;
82099 }
82100
82101 /* Allocate our own private page. */
82102+ pte_unmap(page_table);
82103+
82104 if (unlikely(anon_vma_prepare(vma)))
82105 goto oom;
82106 page = alloc_zeroed_user_highpage_movable(vma, address);
82107@@ -3253,6 +3431,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
82108 if (!pte_none(*page_table))
82109 goto release;
82110
82111+#ifdef CONFIG_PAX_SEGMEXEC
82112+ if (pax_find_mirror_vma(vma))
82113+ BUG_ON(!trylock_page(page));
82114+#endif
82115+
82116 inc_mm_counter_fast(mm, MM_ANONPAGES);
82117 page_add_new_anon_rmap(page, vma, address);
82118 setpte:
82119@@ -3260,6 +3443,12 @@ setpte:
82120
82121 /* No need to invalidate - it was non-present before */
82122 update_mmu_cache(vma, address, page_table);
82123+
82124+#ifdef CONFIG_PAX_SEGMEXEC
82125+ if (page)
82126+ pax_mirror_anon_pte(vma, address, page, ptl);
82127+#endif
82128+
82129 unlock:
82130 pte_unmap_unlock(page_table, ptl);
82131 return 0;
82132@@ -3403,6 +3592,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
82133 */
82134 /* Only go through if we didn't race with anybody else... */
82135 if (likely(pte_same(*page_table, orig_pte))) {
82136+
82137+#ifdef CONFIG_PAX_SEGMEXEC
82138+ if (anon && pax_find_mirror_vma(vma))
82139+ BUG_ON(!trylock_page(page));
82140+#endif
82141+
82142 flush_icache_page(vma, page);
82143 entry = mk_pte(page, vma->vm_page_prot);
82144 if (flags & FAULT_FLAG_WRITE)
82145@@ -3422,6 +3617,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
82146
82147 /* no need to invalidate: a not-present page won't be cached */
82148 update_mmu_cache(vma, address, page_table);
82149+
82150+#ifdef CONFIG_PAX_SEGMEXEC
82151+ if (anon)
82152+ pax_mirror_anon_pte(vma, address, page, ptl);
82153+ else
82154+ pax_mirror_file_pte(vma, address, page, ptl);
82155+#endif
82156+
82157 } else {
82158 if (cow_page)
82159 mem_cgroup_uncharge_page(cow_page);
82160@@ -3743,6 +3946,12 @@ int handle_pte_fault(struct mm_struct *mm,
82161 if (flags & FAULT_FLAG_WRITE)
82162 flush_tlb_fix_spurious_fault(vma, address);
82163 }
82164+
82165+#ifdef CONFIG_PAX_SEGMEXEC
82166+ pax_mirror_pte(vma, address, pte, pmd, ptl);
82167+ return 0;
82168+#endif
82169+
82170 unlock:
82171 pte_unmap_unlock(pte, ptl);
82172 return 0;
82173@@ -3759,6 +3968,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
82174 pmd_t *pmd;
82175 pte_t *pte;
82176
82177+#ifdef CONFIG_PAX_SEGMEXEC
82178+ struct vm_area_struct *vma_m;
82179+#endif
82180+
82181 __set_current_state(TASK_RUNNING);
82182
82183 count_vm_event(PGFAULT);
82184@@ -3770,6 +3983,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
82185 if (unlikely(is_vm_hugetlb_page(vma)))
82186 return hugetlb_fault(mm, vma, address, flags);
82187
82188+#ifdef CONFIG_PAX_SEGMEXEC
82189+ vma_m = pax_find_mirror_vma(vma);
82190+ if (vma_m) {
82191+ unsigned long address_m;
82192+ pgd_t *pgd_m;
82193+ pud_t *pud_m;
82194+ pmd_t *pmd_m;
82195+
82196+ if (vma->vm_start > vma_m->vm_start) {
82197+ address_m = address;
82198+ address -= SEGMEXEC_TASK_SIZE;
82199+ vma = vma_m;
82200+ } else
82201+ address_m = address + SEGMEXEC_TASK_SIZE;
82202+
82203+ pgd_m = pgd_offset(mm, address_m);
82204+ pud_m = pud_alloc(mm, pgd_m, address_m);
82205+ if (!pud_m)
82206+ return VM_FAULT_OOM;
82207+ pmd_m = pmd_alloc(mm, pud_m, address_m);
82208+ if (!pmd_m)
82209+ return VM_FAULT_OOM;
82210+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
82211+ return VM_FAULT_OOM;
82212+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
82213+ }
82214+#endif
82215+
82216 retry:
82217 pgd = pgd_offset(mm, address);
82218 pud = pud_alloc(mm, pgd, address);
82219@@ -3868,6 +4109,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
82220 spin_unlock(&mm->page_table_lock);
82221 return 0;
82222 }
82223+
82224+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
82225+{
82226+ pud_t *new = pud_alloc_one(mm, address);
82227+ if (!new)
82228+ return -ENOMEM;
82229+
82230+ smp_wmb(); /* See comment in __pte_alloc */
82231+
82232+ spin_lock(&mm->page_table_lock);
82233+ if (pgd_present(*pgd)) /* Another has populated it */
82234+ pud_free(mm, new);
82235+ else
82236+ pgd_populate_kernel(mm, pgd, new);
82237+ spin_unlock(&mm->page_table_lock);
82238+ return 0;
82239+}
82240 #endif /* __PAGETABLE_PUD_FOLDED */
82241
82242 #ifndef __PAGETABLE_PMD_FOLDED
82243@@ -3898,6 +4156,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
82244 spin_unlock(&mm->page_table_lock);
82245 return 0;
82246 }
82247+
82248+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
82249+{
82250+ pmd_t *new = pmd_alloc_one(mm, address);
82251+ if (!new)
82252+ return -ENOMEM;
82253+
82254+ smp_wmb(); /* See comment in __pte_alloc */
82255+
82256+ spin_lock(&mm->page_table_lock);
82257+#ifndef __ARCH_HAS_4LEVEL_HACK
82258+ if (pud_present(*pud)) /* Another has populated it */
82259+ pmd_free(mm, new);
82260+ else
82261+ pud_populate_kernel(mm, pud, new);
82262+#else
82263+ if (pgd_present(*pud)) /* Another has populated it */
82264+ pmd_free(mm, new);
82265+ else
82266+ pgd_populate_kernel(mm, pud, new);
82267+#endif /* __ARCH_HAS_4LEVEL_HACK */
82268+ spin_unlock(&mm->page_table_lock);
82269+ return 0;
82270+}
82271 #endif /* __PAGETABLE_PMD_FOLDED */
82272
82273 #if !defined(__HAVE_ARCH_GATE_AREA)
82274@@ -3911,7 +4193,7 @@ static int __init gate_vma_init(void)
82275 gate_vma.vm_start = FIXADDR_USER_START;
82276 gate_vma.vm_end = FIXADDR_USER_END;
82277 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
82278- gate_vma.vm_page_prot = __P101;
82279+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
82280
82281 return 0;
82282 }
82283@@ -4045,8 +4327,8 @@ out:
82284 return ret;
82285 }
82286
82287-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
82288- void *buf, int len, int write)
82289+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
82290+ void *buf, size_t len, int write)
82291 {
82292 resource_size_t phys_addr;
82293 unsigned long prot = 0;
82294@@ -4071,8 +4353,8 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
82295 * Access another process' address space as given in mm. If non-NULL, use the
82296 * given task for page fault accounting.
82297 */
82298-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
82299- unsigned long addr, void *buf, int len, int write)
82300+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
82301+ unsigned long addr, void *buf, size_t len, int write)
82302 {
82303 struct vm_area_struct *vma;
82304 void *old_buf = buf;
82305@@ -4080,7 +4362,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
82306 down_read(&mm->mmap_sem);
82307 /* ignore errors, just check how much was successfully transferred */
82308 while (len) {
82309- int bytes, ret, offset;
82310+ ssize_t bytes, ret, offset;
82311 void *maddr;
82312 struct page *page = NULL;
82313
82314@@ -4139,8 +4421,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
82315 *
82316 * The caller must hold a reference on @mm.
82317 */
82318-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
82319- void *buf, int len, int write)
82320+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
82321+ void *buf, size_t len, int write)
82322 {
82323 return __access_remote_vm(NULL, mm, addr, buf, len, write);
82324 }
82325@@ -4150,11 +4432,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
82326 * Source/target buffer must be kernel space,
82327 * Do not walk the page table directly, use get_user_pages
82328 */
82329-int access_process_vm(struct task_struct *tsk, unsigned long addr,
82330- void *buf, int len, int write)
82331+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
82332+ void *buf, size_t len, int write)
82333 {
82334 struct mm_struct *mm;
82335- int ret;
82336+ ssize_t ret;
82337
82338 mm = get_task_mm(tsk);
82339 if (!mm)
82340diff --git a/mm/mempolicy.c b/mm/mempolicy.c
82341index 7431001..0f8344e 100644
82342--- a/mm/mempolicy.c
82343+++ b/mm/mempolicy.c
82344@@ -708,6 +708,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
82345 unsigned long vmstart;
82346 unsigned long vmend;
82347
82348+#ifdef CONFIG_PAX_SEGMEXEC
82349+ struct vm_area_struct *vma_m;
82350+#endif
82351+
82352 vma = find_vma(mm, start);
82353 if (!vma || vma->vm_start > start)
82354 return -EFAULT;
82355@@ -744,9 +748,20 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
82356 if (err)
82357 goto out;
82358 }
82359+
82360 err = vma_replace_policy(vma, new_pol);
82361 if (err)
82362 goto out;
82363+
82364+#ifdef CONFIG_PAX_SEGMEXEC
82365+ vma_m = pax_find_mirror_vma(vma);
82366+ if (vma_m) {
82367+ err = vma_replace_policy(vma_m, new_pol);
82368+ if (err)
82369+ goto out;
82370+ }
82371+#endif
82372+
82373 }
82374
82375 out:
82376@@ -1202,6 +1217,17 @@ static long do_mbind(unsigned long start, unsigned long len,
82377
82378 if (end < start)
82379 return -EINVAL;
82380+
82381+#ifdef CONFIG_PAX_SEGMEXEC
82382+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
82383+ if (end > SEGMEXEC_TASK_SIZE)
82384+ return -EINVAL;
82385+ } else
82386+#endif
82387+
82388+ if (end > TASK_SIZE)
82389+ return -EINVAL;
82390+
82391 if (end == start)
82392 return 0;
82393
82394@@ -1430,8 +1456,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
82395 */
82396 tcred = __task_cred(task);
82397 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
82398- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
82399- !capable(CAP_SYS_NICE)) {
82400+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
82401 rcu_read_unlock();
82402 err = -EPERM;
82403 goto out_put;
82404@@ -1462,6 +1487,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
82405 goto out;
82406 }
82407
82408+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
82409+ if (mm != current->mm &&
82410+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
82411+ mmput(mm);
82412+ err = -EPERM;
82413+ goto out;
82414+ }
82415+#endif
82416+
82417 err = do_migrate_pages(mm, old, new,
82418 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
82419
82420diff --git a/mm/migrate.c b/mm/migrate.c
82421index 6f0c244..6d1ae32 100644
82422--- a/mm/migrate.c
82423+++ b/mm/migrate.c
82424@@ -1399,8 +1399,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
82425 */
82426 tcred = __task_cred(task);
82427 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
82428- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
82429- !capable(CAP_SYS_NICE)) {
82430+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
82431 rcu_read_unlock();
82432 err = -EPERM;
82433 goto out;
82434diff --git a/mm/mlock.c b/mm/mlock.c
82435index 79b7cf7..9944291 100644
82436--- a/mm/mlock.c
82437+++ b/mm/mlock.c
82438@@ -13,6 +13,7 @@
82439 #include <linux/pagemap.h>
82440 #include <linux/mempolicy.h>
82441 #include <linux/syscalls.h>
82442+#include <linux/security.h>
82443 #include <linux/sched.h>
82444 #include <linux/export.h>
82445 #include <linux/rmap.h>
82446@@ -334,7 +335,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
82447 {
82448 unsigned long nstart, end, tmp;
82449 struct vm_area_struct * vma, * prev;
82450- int error;
82451+ int error = 0;
82452
82453 VM_BUG_ON(start & ~PAGE_MASK);
82454 VM_BUG_ON(len != PAGE_ALIGN(len));
82455@@ -343,6 +344,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
82456 return -EINVAL;
82457 if (end == start)
82458 return 0;
82459+ if (end > TASK_SIZE)
82460+ return -EINVAL;
82461+
82462 vma = find_vma(current->mm, start);
82463 if (!vma || vma->vm_start > start)
82464 return -ENOMEM;
82465@@ -354,6 +358,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
82466 for (nstart = start ; ; ) {
82467 vm_flags_t newflags;
82468
82469+#ifdef CONFIG_PAX_SEGMEXEC
82470+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
82471+ break;
82472+#endif
82473+
82474 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
82475
82476 newflags = vma->vm_flags & ~VM_LOCKED;
82477@@ -466,6 +475,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
82478 lock_limit >>= PAGE_SHIFT;
82479
82480 /* check against resource limits */
82481+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
82482 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
82483 error = do_mlock(start, len, 1);
82484 up_write(&current->mm->mmap_sem);
82485@@ -500,6 +510,11 @@ static int do_mlockall(int flags)
82486 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
82487 vm_flags_t newflags;
82488
82489+#ifdef CONFIG_PAX_SEGMEXEC
82490+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
82491+ break;
82492+#endif
82493+
82494 newflags = vma->vm_flags & ~VM_LOCKED;
82495 if (flags & MCL_CURRENT)
82496 newflags |= VM_LOCKED;
82497@@ -532,6 +547,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
82498 lock_limit >>= PAGE_SHIFT;
82499
82500 ret = -ENOMEM;
82501+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
82502 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
82503 capable(CAP_IPC_LOCK))
82504 ret = do_mlockall(flags);
82505diff --git a/mm/mmap.c b/mm/mmap.c
82506index f681e18..4c2577f 100644
82507--- a/mm/mmap.c
82508+++ b/mm/mmap.c
82509@@ -36,6 +36,7 @@
82510 #include <linux/sched/sysctl.h>
82511 #include <linux/notifier.h>
82512 #include <linux/memory.h>
82513+#include <linux/random.h>
82514
82515 #include <asm/uaccess.h>
82516 #include <asm/cacheflush.h>
82517@@ -52,6 +53,16 @@
82518 #define arch_rebalance_pgtables(addr, len) (addr)
82519 #endif
82520
82521+static inline void verify_mm_writelocked(struct mm_struct *mm)
82522+{
82523+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
82524+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
82525+ up_read(&mm->mmap_sem);
82526+ BUG();
82527+ }
82528+#endif
82529+}
82530+
82531 static void unmap_region(struct mm_struct *mm,
82532 struct vm_area_struct *vma, struct vm_area_struct *prev,
82533 unsigned long start, unsigned long end);
82534@@ -71,16 +82,25 @@ static void unmap_region(struct mm_struct *mm,
82535 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
82536 *
82537 */
82538-pgprot_t protection_map[16] = {
82539+pgprot_t protection_map[16] __read_only = {
82540 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
82541 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
82542 };
82543
82544-pgprot_t vm_get_page_prot(unsigned long vm_flags)
82545+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
82546 {
82547- return __pgprot(pgprot_val(protection_map[vm_flags &
82548+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
82549 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
82550 pgprot_val(arch_vm_get_page_prot(vm_flags)));
82551+
82552+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
82553+ if (!(__supported_pte_mask & _PAGE_NX) &&
82554+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
82555+ (vm_flags & (VM_READ | VM_WRITE)))
82556+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
82557+#endif
82558+
82559+ return prot;
82560 }
82561 EXPORT_SYMBOL(vm_get_page_prot);
82562
82563@@ -89,6 +109,7 @@ int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
82564 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
82565 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
82566 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
82567+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
82568 /*
82569 * Make sure vm_committed_as in one cacheline and not cacheline shared with
82570 * other variables. It can be updated by several CPUs frequently.
82571@@ -247,6 +268,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
82572 struct vm_area_struct *next = vma->vm_next;
82573
82574 might_sleep();
82575+ BUG_ON(vma->vm_mirror);
82576 if (vma->vm_ops && vma->vm_ops->close)
82577 vma->vm_ops->close(vma);
82578 if (vma->vm_file)
82579@@ -291,6 +313,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
82580 * not page aligned -Ram Gupta
82581 */
82582 rlim = rlimit(RLIMIT_DATA);
82583+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
82584 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
82585 (mm->end_data - mm->start_data) > rlim)
82586 goto out;
82587@@ -933,6 +956,12 @@ static int
82588 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
82589 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
82590 {
82591+
82592+#ifdef CONFIG_PAX_SEGMEXEC
82593+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
82594+ return 0;
82595+#endif
82596+
82597 if (is_mergeable_vma(vma, file, vm_flags) &&
82598 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
82599 if (vma->vm_pgoff == vm_pgoff)
82600@@ -952,6 +981,12 @@ static int
82601 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
82602 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
82603 {
82604+
82605+#ifdef CONFIG_PAX_SEGMEXEC
82606+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
82607+ return 0;
82608+#endif
82609+
82610 if (is_mergeable_vma(vma, file, vm_flags) &&
82611 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
82612 pgoff_t vm_pglen;
82613@@ -994,13 +1029,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
82614 struct vm_area_struct *vma_merge(struct mm_struct *mm,
82615 struct vm_area_struct *prev, unsigned long addr,
82616 unsigned long end, unsigned long vm_flags,
82617- struct anon_vma *anon_vma, struct file *file,
82618+ struct anon_vma *anon_vma, struct file *file,
82619 pgoff_t pgoff, struct mempolicy *policy)
82620 {
82621 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
82622 struct vm_area_struct *area, *next;
82623 int err;
82624
82625+#ifdef CONFIG_PAX_SEGMEXEC
82626+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
82627+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
82628+
82629+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
82630+#endif
82631+
82632 /*
82633 * We later require that vma->vm_flags == vm_flags,
82634 * so this tests vma->vm_flags & VM_SPECIAL, too.
82635@@ -1016,6 +1058,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
82636 if (next && next->vm_end == end) /* cases 6, 7, 8 */
82637 next = next->vm_next;
82638
82639+#ifdef CONFIG_PAX_SEGMEXEC
82640+ if (prev)
82641+ prev_m = pax_find_mirror_vma(prev);
82642+ if (area)
82643+ area_m = pax_find_mirror_vma(area);
82644+ if (next)
82645+ next_m = pax_find_mirror_vma(next);
82646+#endif
82647+
82648 /*
82649 * Can it merge with the predecessor?
82650 */
82651@@ -1035,9 +1086,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
82652 /* cases 1, 6 */
82653 err = vma_adjust(prev, prev->vm_start,
82654 next->vm_end, prev->vm_pgoff, NULL);
82655- } else /* cases 2, 5, 7 */
82656+
82657+#ifdef CONFIG_PAX_SEGMEXEC
82658+ if (!err && prev_m)
82659+ err = vma_adjust(prev_m, prev_m->vm_start,
82660+ next_m->vm_end, prev_m->vm_pgoff, NULL);
82661+#endif
82662+
82663+ } else { /* cases 2, 5, 7 */
82664 err = vma_adjust(prev, prev->vm_start,
82665 end, prev->vm_pgoff, NULL);
82666+
82667+#ifdef CONFIG_PAX_SEGMEXEC
82668+ if (!err && prev_m)
82669+ err = vma_adjust(prev_m, prev_m->vm_start,
82670+ end_m, prev_m->vm_pgoff, NULL);
82671+#endif
82672+
82673+ }
82674 if (err)
82675 return NULL;
82676 khugepaged_enter_vma_merge(prev);
82677@@ -1051,12 +1117,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
82678 mpol_equal(policy, vma_policy(next)) &&
82679 can_vma_merge_before(next, vm_flags,
82680 anon_vma, file, pgoff+pglen)) {
82681- if (prev && addr < prev->vm_end) /* case 4 */
82682+ if (prev && addr < prev->vm_end) { /* case 4 */
82683 err = vma_adjust(prev, prev->vm_start,
82684 addr, prev->vm_pgoff, NULL);
82685- else /* cases 3, 8 */
82686+
82687+#ifdef CONFIG_PAX_SEGMEXEC
82688+ if (!err && prev_m)
82689+ err = vma_adjust(prev_m, prev_m->vm_start,
82690+ addr_m, prev_m->vm_pgoff, NULL);
82691+#endif
82692+
82693+ } else { /* cases 3, 8 */
82694 err = vma_adjust(area, addr, next->vm_end,
82695 next->vm_pgoff - pglen, NULL);
82696+
82697+#ifdef CONFIG_PAX_SEGMEXEC
82698+ if (!err && area_m)
82699+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
82700+ next_m->vm_pgoff - pglen, NULL);
82701+#endif
82702+
82703+ }
82704 if (err)
82705 return NULL;
82706 khugepaged_enter_vma_merge(area);
82707@@ -1165,8 +1246,10 @@ none:
82708 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
82709 struct file *file, long pages)
82710 {
82711- const unsigned long stack_flags
82712- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
82713+
82714+#ifdef CONFIG_PAX_RANDMMAP
82715+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
82716+#endif
82717
82718 mm->total_vm += pages;
82719
82720@@ -1174,7 +1257,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
82721 mm->shared_vm += pages;
82722 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
82723 mm->exec_vm += pages;
82724- } else if (flags & stack_flags)
82725+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
82726 mm->stack_vm += pages;
82727 }
82728 #endif /* CONFIG_PROC_FS */
82729@@ -1213,7 +1296,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
82730 * (the exception is when the underlying filesystem is noexec
82731 * mounted, in which case we dont add PROT_EXEC.)
82732 */
82733- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
82734+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
82735 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
82736 prot |= PROT_EXEC;
82737
82738@@ -1239,7 +1322,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
82739 /* Obtain the address to map to. we verify (or select) it and ensure
82740 * that it represents a valid section of the address space.
82741 */
82742- addr = get_unmapped_area(file, addr, len, pgoff, flags);
82743+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
82744 if (addr & ~PAGE_MASK)
82745 return addr;
82746
82747@@ -1250,6 +1333,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
82748 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
82749 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
82750
82751+#ifdef CONFIG_PAX_MPROTECT
82752+ if (mm->pax_flags & MF_PAX_MPROTECT) {
82753+#ifndef CONFIG_PAX_MPROTECT_COMPAT
82754+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
82755+ gr_log_rwxmmap(file);
82756+
82757+#ifdef CONFIG_PAX_EMUPLT
82758+ vm_flags &= ~VM_EXEC;
82759+#else
82760+ return -EPERM;
82761+#endif
82762+
82763+ }
82764+
82765+ if (!(vm_flags & VM_EXEC))
82766+ vm_flags &= ~VM_MAYEXEC;
82767+#else
82768+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
82769+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
82770+#endif
82771+ else
82772+ vm_flags &= ~VM_MAYWRITE;
82773+ }
82774+#endif
82775+
82776+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
82777+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
82778+ vm_flags &= ~VM_PAGEEXEC;
82779+#endif
82780+
82781 if (flags & MAP_LOCKED)
82782 if (!can_do_mlock())
82783 return -EPERM;
82784@@ -1261,6 +1374,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
82785 locked += mm->locked_vm;
82786 lock_limit = rlimit(RLIMIT_MEMLOCK);
82787 lock_limit >>= PAGE_SHIFT;
82788+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
82789 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
82790 return -EAGAIN;
82791 }
82792@@ -1341,6 +1455,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
82793 vm_flags |= VM_NORESERVE;
82794 }
82795
82796+ if (!gr_acl_handle_mmap(file, prot))
82797+ return -EACCES;
82798+
82799 addr = mmap_region(file, addr, len, vm_flags, pgoff);
82800 if (!IS_ERR_VALUE(addr) &&
82801 ((vm_flags & VM_LOCKED) ||
82802@@ -1432,7 +1549,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
82803 vm_flags_t vm_flags = vma->vm_flags;
82804
82805 /* If it was private or non-writable, the write bit is already clear */
82806- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
82807+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
82808 return 0;
82809
82810 /* The backer wishes to know when pages are first written to? */
82811@@ -1480,7 +1597,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
82812 unsigned long charged = 0;
82813 struct inode *inode = file ? file_inode(file) : NULL;
82814
82815+#ifdef CONFIG_PAX_SEGMEXEC
82816+ struct vm_area_struct *vma_m = NULL;
82817+#endif
82818+
82819+ /*
82820+ * mm->mmap_sem is required to protect against another thread
82821+ * changing the mappings in case we sleep.
82822+ */
82823+ verify_mm_writelocked(mm);
82824+
82825 /* Check against address space limit. */
82826+
82827+#ifdef CONFIG_PAX_RANDMMAP
82828+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
82829+#endif
82830+
82831 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
82832 unsigned long nr_pages;
82833
82834@@ -1499,11 +1631,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
82835
82836 /* Clear old maps */
82837 error = -ENOMEM;
82838-munmap_back:
82839 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
82840 if (do_munmap(mm, addr, len))
82841 return -ENOMEM;
82842- goto munmap_back;
82843+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
82844 }
82845
82846 /*
82847@@ -1534,6 +1665,16 @@ munmap_back:
82848 goto unacct_error;
82849 }
82850
82851+#ifdef CONFIG_PAX_SEGMEXEC
82852+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
82853+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
82854+ if (!vma_m) {
82855+ error = -ENOMEM;
82856+ goto free_vma;
82857+ }
82858+ }
82859+#endif
82860+
82861 vma->vm_mm = mm;
82862 vma->vm_start = addr;
82863 vma->vm_end = addr + len;
82864@@ -1558,6 +1699,13 @@ munmap_back:
82865 if (error)
82866 goto unmap_and_free_vma;
82867
82868+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
82869+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
82870+ vma->vm_flags |= VM_PAGEEXEC;
82871+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
82872+ }
82873+#endif
82874+
82875 /* Can addr have changed??
82876 *
82877 * Answer: Yes, several device drivers can do it in their
82878@@ -1596,6 +1744,11 @@ munmap_back:
82879 vma_link(mm, vma, prev, rb_link, rb_parent);
82880 file = vma->vm_file;
82881
82882+#ifdef CONFIG_PAX_SEGMEXEC
82883+ if (vma_m)
82884+ BUG_ON(pax_mirror_vma(vma_m, vma));
82885+#endif
82886+
82887 /* Once vma denies write, undo our temporary denial count */
82888 if (correct_wcount)
82889 atomic_inc(&inode->i_writecount);
82890@@ -1603,6 +1756,7 @@ out:
82891 perf_event_mmap(vma);
82892
82893 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
82894+ track_exec_limit(mm, addr, addr + len, vm_flags);
82895 if (vm_flags & VM_LOCKED) {
82896 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
82897 vma == get_gate_vma(current->mm)))
82898@@ -1626,6 +1780,12 @@ unmap_and_free_vma:
82899 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
82900 charged = 0;
82901 free_vma:
82902+
82903+#ifdef CONFIG_PAX_SEGMEXEC
82904+ if (vma_m)
82905+ kmem_cache_free(vm_area_cachep, vma_m);
82906+#endif
82907+
82908 kmem_cache_free(vm_area_cachep, vma);
82909 unacct_error:
82910 if (charged)
82911@@ -1633,7 +1793,63 @@ unacct_error:
82912 return error;
82913 }
82914
82915-unsigned long unmapped_area(struct vm_unmapped_area_info *info)
82916+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
82917+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
82918+{
82919+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
82920+ return ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT;
82921+
82922+ return 0;
82923+}
82924+#endif
82925+
82926+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
82927+{
82928+ if (!vma) {
82929+#ifdef CONFIG_STACK_GROWSUP
82930+ if (addr > sysctl_heap_stack_gap)
82931+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
82932+ else
82933+ vma = find_vma(current->mm, 0);
82934+ if (vma && (vma->vm_flags & VM_GROWSUP))
82935+ return false;
82936+#endif
82937+ return true;
82938+ }
82939+
82940+ if (addr + len > vma->vm_start)
82941+ return false;
82942+
82943+ if (vma->vm_flags & VM_GROWSDOWN)
82944+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
82945+#ifdef CONFIG_STACK_GROWSUP
82946+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
82947+ return addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap;
82948+#endif
82949+ else if (offset)
82950+ return offset <= vma->vm_start - addr - len;
82951+
82952+ return true;
82953+}
82954+
82955+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
82956+{
82957+ if (vma->vm_start < len)
82958+ return -ENOMEM;
82959+
82960+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
82961+ if (offset <= vma->vm_start - len)
82962+ return vma->vm_start - len - offset;
82963+ else
82964+ return -ENOMEM;
82965+ }
82966+
82967+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
82968+ return vma->vm_start - len - sysctl_heap_stack_gap;
82969+ return -ENOMEM;
82970+}
82971+
82972+unsigned long unmapped_area(const struct vm_unmapped_area_info *info)
82973 {
82974 /*
82975 * We implement the search by looking for an rbtree node that
82976@@ -1681,11 +1897,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
82977 }
82978 }
82979
82980- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
82981+ gap_start = vma->vm_prev ? vma->vm_prev->vm_end: 0;
82982 check_current:
82983 /* Check if current node has a suitable gap */
82984 if (gap_start > high_limit)
82985 return -ENOMEM;
82986+
82987+ if (gap_end - gap_start > info->threadstack_offset)
82988+ gap_start += info->threadstack_offset;
82989+ else
82990+ gap_start = gap_end;
82991+
82992+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
82993+ if (gap_end - gap_start > sysctl_heap_stack_gap)
82994+ gap_start += sysctl_heap_stack_gap;
82995+ else
82996+ gap_start = gap_end;
82997+ }
82998+ if (vma->vm_flags & VM_GROWSDOWN) {
82999+ if (gap_end - gap_start > sysctl_heap_stack_gap)
83000+ gap_end -= sysctl_heap_stack_gap;
83001+ else
83002+ gap_end = gap_start;
83003+ }
83004 if (gap_end >= low_limit && gap_end - gap_start >= length)
83005 goto found;
83006
83007@@ -1735,7 +1969,7 @@ found:
83008 return gap_start;
83009 }
83010
83011-unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
83012+unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info)
83013 {
83014 struct mm_struct *mm = current->mm;
83015 struct vm_area_struct *vma;
83016@@ -1789,6 +2023,24 @@ check_current:
83017 gap_end = vma->vm_start;
83018 if (gap_end < low_limit)
83019 return -ENOMEM;
83020+
83021+ if (gap_end - gap_start > info->threadstack_offset)
83022+ gap_end -= info->threadstack_offset;
83023+ else
83024+ gap_end = gap_start;
83025+
83026+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
83027+ if (gap_end - gap_start > sysctl_heap_stack_gap)
83028+ gap_start += sysctl_heap_stack_gap;
83029+ else
83030+ gap_start = gap_end;
83031+ }
83032+ if (vma->vm_flags & VM_GROWSDOWN) {
83033+ if (gap_end - gap_start > sysctl_heap_stack_gap)
83034+ gap_end -= sysctl_heap_stack_gap;
83035+ else
83036+ gap_end = gap_start;
83037+ }
83038 if (gap_start <= high_limit && gap_end - gap_start >= length)
83039 goto found;
83040
83041@@ -1852,6 +2104,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
83042 struct mm_struct *mm = current->mm;
83043 struct vm_area_struct *vma;
83044 struct vm_unmapped_area_info info;
83045+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
83046
83047 if (len > TASK_SIZE)
83048 return -ENOMEM;
83049@@ -1859,29 +2112,45 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
83050 if (flags & MAP_FIXED)
83051 return addr;
83052
83053+#ifdef CONFIG_PAX_RANDMMAP
83054+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
83055+#endif
83056+
83057 if (addr) {
83058 addr = PAGE_ALIGN(addr);
83059 vma = find_vma(mm, addr);
83060- if (TASK_SIZE - len >= addr &&
83061- (!vma || addr + len <= vma->vm_start))
83062+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
83063 return addr;
83064 }
83065
83066 info.flags = 0;
83067 info.length = len;
83068 info.low_limit = TASK_UNMAPPED_BASE;
83069+
83070+#ifdef CONFIG_PAX_RANDMMAP
83071+ if (mm->pax_flags & MF_PAX_RANDMMAP)
83072+ info.low_limit += mm->delta_mmap;
83073+#endif
83074+
83075 info.high_limit = TASK_SIZE;
83076 info.align_mask = 0;
83077+ info.threadstack_offset = offset;
83078 return vm_unmapped_area(&info);
83079 }
83080 #endif
83081
83082 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
83083 {
83084+
83085+#ifdef CONFIG_PAX_SEGMEXEC
83086+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
83087+ return;
83088+#endif
83089+
83090 /*
83091 * Is this a new hole at the lowest possible address?
83092 */
83093- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
83094+ if (addr >= mm->mmap_base && addr < mm->free_area_cache)
83095 mm->free_area_cache = addr;
83096 }
83097
83098@@ -1899,6 +2168,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
83099 struct mm_struct *mm = current->mm;
83100 unsigned long addr = addr0;
83101 struct vm_unmapped_area_info info;
83102+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
83103
83104 /* requested length too big for entire address space */
83105 if (len > TASK_SIZE)
83106@@ -1907,12 +2177,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
83107 if (flags & MAP_FIXED)
83108 return addr;
83109
83110+#ifdef CONFIG_PAX_RANDMMAP
83111+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
83112+#endif
83113+
83114 /* requesting a specific address */
83115 if (addr) {
83116 addr = PAGE_ALIGN(addr);
83117 vma = find_vma(mm, addr);
83118- if (TASK_SIZE - len >= addr &&
83119- (!vma || addr + len <= vma->vm_start))
83120+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
83121 return addr;
83122 }
83123
83124@@ -1921,6 +2194,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
83125 info.low_limit = PAGE_SIZE;
83126 info.high_limit = mm->mmap_base;
83127 info.align_mask = 0;
83128+ info.threadstack_offset = offset;
83129 addr = vm_unmapped_area(&info);
83130
83131 /*
83132@@ -1933,6 +2207,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
83133 VM_BUG_ON(addr != -ENOMEM);
83134 info.flags = 0;
83135 info.low_limit = TASK_UNMAPPED_BASE;
83136+
83137+#ifdef CONFIG_PAX_RANDMMAP
83138+ if (mm->pax_flags & MF_PAX_RANDMMAP)
83139+ info.low_limit += mm->delta_mmap;
83140+#endif
83141+
83142 info.high_limit = TASK_SIZE;
83143 addr = vm_unmapped_area(&info);
83144 }
83145@@ -1943,6 +2223,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
83146
83147 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
83148 {
83149+
83150+#ifdef CONFIG_PAX_SEGMEXEC
83151+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
83152+ return;
83153+#endif
83154+
83155 /*
83156 * Is this a new hole at the highest possible address?
83157 */
83158@@ -1950,8 +2236,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
83159 mm->free_area_cache = addr;
83160
83161 /* dont allow allocations above current base */
83162- if (mm->free_area_cache > mm->mmap_base)
83163+ if (mm->free_area_cache > mm->mmap_base) {
83164 mm->free_area_cache = mm->mmap_base;
83165+ mm->cached_hole_size = ~0UL;
83166+ }
83167 }
83168
83169 unsigned long
83170@@ -2047,6 +2335,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
83171 return vma;
83172 }
83173
83174+#ifdef CONFIG_PAX_SEGMEXEC
83175+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
83176+{
83177+ struct vm_area_struct *vma_m;
83178+
83179+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
83180+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
83181+ BUG_ON(vma->vm_mirror);
83182+ return NULL;
83183+ }
83184+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
83185+ vma_m = vma->vm_mirror;
83186+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
83187+ BUG_ON(vma->vm_file != vma_m->vm_file);
83188+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
83189+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
83190+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
83191+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
83192+ return vma_m;
83193+}
83194+#endif
83195+
83196 /*
83197 * Verify that the stack growth is acceptable and
83198 * update accounting. This is shared with both the
83199@@ -2063,6 +2373,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
83200 return -ENOMEM;
83201
83202 /* Stack limit test */
83203+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
83204 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
83205 return -ENOMEM;
83206
83207@@ -2073,6 +2384,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
83208 locked = mm->locked_vm + grow;
83209 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
83210 limit >>= PAGE_SHIFT;
83211+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
83212 if (locked > limit && !capable(CAP_IPC_LOCK))
83213 return -ENOMEM;
83214 }
83215@@ -2102,37 +2414,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
83216 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
83217 * vma is the last one with address > vma->vm_end. Have to extend vma.
83218 */
83219+#ifndef CONFIG_IA64
83220+static
83221+#endif
83222 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
83223 {
83224 int error;
83225+ bool locknext;
83226
83227 if (!(vma->vm_flags & VM_GROWSUP))
83228 return -EFAULT;
83229
83230+ /* Also guard against wrapping around to address 0. */
83231+ if (address < PAGE_ALIGN(address+1))
83232+ address = PAGE_ALIGN(address+1);
83233+ else
83234+ return -ENOMEM;
83235+
83236 /*
83237 * We must make sure the anon_vma is allocated
83238 * so that the anon_vma locking is not a noop.
83239 */
83240 if (unlikely(anon_vma_prepare(vma)))
83241 return -ENOMEM;
83242+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
83243+ if (locknext && anon_vma_prepare(vma->vm_next))
83244+ return -ENOMEM;
83245 vma_lock_anon_vma(vma);
83246+ if (locknext)
83247+ vma_lock_anon_vma(vma->vm_next);
83248
83249 /*
83250 * vma->vm_start/vm_end cannot change under us because the caller
83251 * is required to hold the mmap_sem in read mode. We need the
83252- * anon_vma lock to serialize against concurrent expand_stacks.
83253- * Also guard against wrapping around to address 0.
83254+ * anon_vma locks to serialize against concurrent expand_stacks
83255+ * and expand_upwards.
83256 */
83257- if (address < PAGE_ALIGN(address+4))
83258- address = PAGE_ALIGN(address+4);
83259- else {
83260- vma_unlock_anon_vma(vma);
83261- return -ENOMEM;
83262- }
83263 error = 0;
83264
83265 /* Somebody else might have raced and expanded it already */
83266- if (address > vma->vm_end) {
83267+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
83268+ error = -ENOMEM;
83269+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
83270 unsigned long size, grow;
83271
83272 size = address - vma->vm_start;
83273@@ -2167,6 +2490,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
83274 }
83275 }
83276 }
83277+ if (locknext)
83278+ vma_unlock_anon_vma(vma->vm_next);
83279 vma_unlock_anon_vma(vma);
83280 khugepaged_enter_vma_merge(vma);
83281 validate_mm(vma->vm_mm);
83282@@ -2181,6 +2506,8 @@ int expand_downwards(struct vm_area_struct *vma,
83283 unsigned long address)
83284 {
83285 int error;
83286+ bool lockprev = false;
83287+ struct vm_area_struct *prev;
83288
83289 /*
83290 * We must make sure the anon_vma is allocated
83291@@ -2194,6 +2521,15 @@ int expand_downwards(struct vm_area_struct *vma,
83292 if (error)
83293 return error;
83294
83295+ prev = vma->vm_prev;
83296+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
83297+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
83298+#endif
83299+ if (lockprev && anon_vma_prepare(prev))
83300+ return -ENOMEM;
83301+ if (lockprev)
83302+ vma_lock_anon_vma(prev);
83303+
83304 vma_lock_anon_vma(vma);
83305
83306 /*
83307@@ -2203,9 +2539,17 @@ int expand_downwards(struct vm_area_struct *vma,
83308 */
83309
83310 /* Somebody else might have raced and expanded it already */
83311- if (address < vma->vm_start) {
83312+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
83313+ error = -ENOMEM;
83314+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
83315 unsigned long size, grow;
83316
83317+#ifdef CONFIG_PAX_SEGMEXEC
83318+ struct vm_area_struct *vma_m;
83319+
83320+ vma_m = pax_find_mirror_vma(vma);
83321+#endif
83322+
83323 size = vma->vm_end - address;
83324 grow = (vma->vm_start - address) >> PAGE_SHIFT;
83325
83326@@ -2230,13 +2574,27 @@ int expand_downwards(struct vm_area_struct *vma,
83327 vma->vm_pgoff -= grow;
83328 anon_vma_interval_tree_post_update_vma(vma);
83329 vma_gap_update(vma);
83330+
83331+#ifdef CONFIG_PAX_SEGMEXEC
83332+ if (vma_m) {
83333+ anon_vma_interval_tree_pre_update_vma(vma_m);
83334+ vma_m->vm_start -= grow << PAGE_SHIFT;
83335+ vma_m->vm_pgoff -= grow;
83336+ anon_vma_interval_tree_post_update_vma(vma_m);
83337+ vma_gap_update(vma_m);
83338+ }
83339+#endif
83340+
83341 spin_unlock(&vma->vm_mm->page_table_lock);
83342
83343+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
83344 perf_event_mmap(vma);
83345 }
83346 }
83347 }
83348 vma_unlock_anon_vma(vma);
83349+ if (lockprev)
83350+ vma_unlock_anon_vma(prev);
83351 khugepaged_enter_vma_merge(vma);
83352 validate_mm(vma->vm_mm);
83353 return error;
83354@@ -2334,6 +2692,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
83355 do {
83356 long nrpages = vma_pages(vma);
83357
83358+#ifdef CONFIG_PAX_SEGMEXEC
83359+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
83360+ vma = remove_vma(vma);
83361+ continue;
83362+ }
83363+#endif
83364+
83365 if (vma->vm_flags & VM_ACCOUNT)
83366 nr_accounted += nrpages;
83367 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
83368@@ -2379,6 +2744,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
83369 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
83370 vma->vm_prev = NULL;
83371 do {
83372+
83373+#ifdef CONFIG_PAX_SEGMEXEC
83374+ if (vma->vm_mirror) {
83375+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
83376+ vma->vm_mirror->vm_mirror = NULL;
83377+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
83378+ vma->vm_mirror = NULL;
83379+ }
83380+#endif
83381+
83382 vma_rb_erase(vma, &mm->mm_rb);
83383 mm->map_count--;
83384 tail_vma = vma;
83385@@ -2410,14 +2785,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
83386 struct vm_area_struct *new;
83387 int err = -ENOMEM;
83388
83389+#ifdef CONFIG_PAX_SEGMEXEC
83390+ struct vm_area_struct *vma_m, *new_m = NULL;
83391+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
83392+#endif
83393+
83394 if (is_vm_hugetlb_page(vma) && (addr &
83395 ~(huge_page_mask(hstate_vma(vma)))))
83396 return -EINVAL;
83397
83398+#ifdef CONFIG_PAX_SEGMEXEC
83399+ vma_m = pax_find_mirror_vma(vma);
83400+#endif
83401+
83402 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
83403 if (!new)
83404 goto out_err;
83405
83406+#ifdef CONFIG_PAX_SEGMEXEC
83407+ if (vma_m) {
83408+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
83409+ if (!new_m) {
83410+ kmem_cache_free(vm_area_cachep, new);
83411+ goto out_err;
83412+ }
83413+ }
83414+#endif
83415+
83416 /* most fields are the same, copy all, and then fixup */
83417 *new = *vma;
83418
83419@@ -2430,6 +2824,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
83420 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
83421 }
83422
83423+#ifdef CONFIG_PAX_SEGMEXEC
83424+ if (vma_m) {
83425+ *new_m = *vma_m;
83426+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
83427+ new_m->vm_mirror = new;
83428+ new->vm_mirror = new_m;
83429+
83430+ if (new_below)
83431+ new_m->vm_end = addr_m;
83432+ else {
83433+ new_m->vm_start = addr_m;
83434+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
83435+ }
83436+ }
83437+#endif
83438+
83439 pol = mpol_dup(vma_policy(vma));
83440 if (IS_ERR(pol)) {
83441 err = PTR_ERR(pol);
83442@@ -2452,6 +2862,36 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
83443 else
83444 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
83445
83446+#ifdef CONFIG_PAX_SEGMEXEC
83447+ if (!err && vma_m) {
83448+ if (anon_vma_clone(new_m, vma_m))
83449+ goto out_free_mpol;
83450+
83451+ mpol_get(pol);
83452+ vma_set_policy(new_m, pol);
83453+
83454+ if (new_m->vm_file)
83455+ get_file(new_m->vm_file);
83456+
83457+ if (new_m->vm_ops && new_m->vm_ops->open)
83458+ new_m->vm_ops->open(new_m);
83459+
83460+ if (new_below)
83461+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
83462+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
83463+ else
83464+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
83465+
83466+ if (err) {
83467+ if (new_m->vm_ops && new_m->vm_ops->close)
83468+ new_m->vm_ops->close(new_m);
83469+ if (new_m->vm_file)
83470+ fput(new_m->vm_file);
83471+ mpol_put(pol);
83472+ }
83473+ }
83474+#endif
83475+
83476 /* Success. */
83477 if (!err)
83478 return 0;
83479@@ -2461,10 +2901,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
83480 new->vm_ops->close(new);
83481 if (new->vm_file)
83482 fput(new->vm_file);
83483- unlink_anon_vmas(new);
83484 out_free_mpol:
83485 mpol_put(pol);
83486 out_free_vma:
83487+
83488+#ifdef CONFIG_PAX_SEGMEXEC
83489+ if (new_m) {
83490+ unlink_anon_vmas(new_m);
83491+ kmem_cache_free(vm_area_cachep, new_m);
83492+ }
83493+#endif
83494+
83495+ unlink_anon_vmas(new);
83496 kmem_cache_free(vm_area_cachep, new);
83497 out_err:
83498 return err;
83499@@ -2477,6 +2925,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
83500 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
83501 unsigned long addr, int new_below)
83502 {
83503+
83504+#ifdef CONFIG_PAX_SEGMEXEC
83505+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
83506+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
83507+ if (mm->map_count >= sysctl_max_map_count-1)
83508+ return -ENOMEM;
83509+ } else
83510+#endif
83511+
83512 if (mm->map_count >= sysctl_max_map_count)
83513 return -ENOMEM;
83514
83515@@ -2488,11 +2945,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
83516 * work. This now handles partial unmappings.
83517 * Jeremy Fitzhardinge <jeremy@goop.org>
83518 */
83519+#ifdef CONFIG_PAX_SEGMEXEC
83520 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
83521 {
83522+ int ret = __do_munmap(mm, start, len);
83523+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
83524+ return ret;
83525+
83526+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
83527+}
83528+
83529+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
83530+#else
83531+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
83532+#endif
83533+{
83534 unsigned long end;
83535 struct vm_area_struct *vma, *prev, *last;
83536
83537+ /*
83538+ * mm->mmap_sem is required to protect against another thread
83539+ * changing the mappings in case we sleep.
83540+ */
83541+ verify_mm_writelocked(mm);
83542+
83543 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
83544 return -EINVAL;
83545
83546@@ -2567,6 +3043,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
83547 /* Fix up all other VM information */
83548 remove_vma_list(mm, vma);
83549
83550+ track_exec_limit(mm, start, end, 0UL);
83551+
83552 return 0;
83553 }
83554
83555@@ -2575,6 +3053,13 @@ int vm_munmap(unsigned long start, size_t len)
83556 int ret;
83557 struct mm_struct *mm = current->mm;
83558
83559+
83560+#ifdef CONFIG_PAX_SEGMEXEC
83561+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
83562+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
83563+ return -EINVAL;
83564+#endif
83565+
83566 down_write(&mm->mmap_sem);
83567 ret = do_munmap(mm, start, len);
83568 up_write(&mm->mmap_sem);
83569@@ -2588,16 +3073,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
83570 return vm_munmap(addr, len);
83571 }
83572
83573-static inline void verify_mm_writelocked(struct mm_struct *mm)
83574-{
83575-#ifdef CONFIG_DEBUG_VM
83576- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
83577- WARN_ON(1);
83578- up_read(&mm->mmap_sem);
83579- }
83580-#endif
83581-}
83582-
83583 /*
83584 * this is really a simplified "do_mmap". it only handles
83585 * anonymous maps. eventually we may be able to do some
83586@@ -2611,6 +3086,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
83587 struct rb_node ** rb_link, * rb_parent;
83588 pgoff_t pgoff = addr >> PAGE_SHIFT;
83589 int error;
83590+ unsigned long charged;
83591
83592 len = PAGE_ALIGN(len);
83593 if (!len)
83594@@ -2618,16 +3094,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
83595
83596 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
83597
83598+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
83599+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
83600+ flags &= ~VM_EXEC;
83601+
83602+#ifdef CONFIG_PAX_MPROTECT
83603+ if (mm->pax_flags & MF_PAX_MPROTECT)
83604+ flags &= ~VM_MAYEXEC;
83605+#endif
83606+
83607+ }
83608+#endif
83609+
83610 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
83611 if (error & ~PAGE_MASK)
83612 return error;
83613
83614+ charged = len >> PAGE_SHIFT;
83615+
83616 /*
83617 * mlock MCL_FUTURE?
83618 */
83619 if (mm->def_flags & VM_LOCKED) {
83620 unsigned long locked, lock_limit;
83621- locked = len >> PAGE_SHIFT;
83622+ locked = charged;
83623 locked += mm->locked_vm;
83624 lock_limit = rlimit(RLIMIT_MEMLOCK);
83625 lock_limit >>= PAGE_SHIFT;
83626@@ -2644,21 +3134,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
83627 /*
83628 * Clear old maps. this also does some error checking for us
83629 */
83630- munmap_back:
83631 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
83632 if (do_munmap(mm, addr, len))
83633 return -ENOMEM;
83634- goto munmap_back;
83635+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
83636 }
83637
83638 /* Check against address space limits *after* clearing old maps... */
83639- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
83640+ if (!may_expand_vm(mm, charged))
83641 return -ENOMEM;
83642
83643 if (mm->map_count > sysctl_max_map_count)
83644 return -ENOMEM;
83645
83646- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
83647+ if (security_vm_enough_memory_mm(mm, charged))
83648 return -ENOMEM;
83649
83650 /* Can we just expand an old private anonymous mapping? */
83651@@ -2672,7 +3161,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
83652 */
83653 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
83654 if (!vma) {
83655- vm_unacct_memory(len >> PAGE_SHIFT);
83656+ vm_unacct_memory(charged);
83657 return -ENOMEM;
83658 }
83659
83660@@ -2686,9 +3175,10 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
83661 vma_link(mm, vma, prev, rb_link, rb_parent);
83662 out:
83663 perf_event_mmap(vma);
83664- mm->total_vm += len >> PAGE_SHIFT;
83665+ mm->total_vm += charged;
83666 if (flags & VM_LOCKED)
83667- mm->locked_vm += (len >> PAGE_SHIFT);
83668+ mm->locked_vm += charged;
83669+ track_exec_limit(mm, addr, addr + len, flags);
83670 return addr;
83671 }
83672
83673@@ -2750,6 +3240,7 @@ void exit_mmap(struct mm_struct *mm)
83674 while (vma) {
83675 if (vma->vm_flags & VM_ACCOUNT)
83676 nr_accounted += vma_pages(vma);
83677+ vma->vm_mirror = NULL;
83678 vma = remove_vma(vma);
83679 }
83680 vm_unacct_memory(nr_accounted);
83681@@ -2766,6 +3257,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
83682 struct vm_area_struct *prev;
83683 struct rb_node **rb_link, *rb_parent;
83684
83685+#ifdef CONFIG_PAX_SEGMEXEC
83686+ struct vm_area_struct *vma_m = NULL;
83687+#endif
83688+
83689+ if (security_mmap_addr(vma->vm_start))
83690+ return -EPERM;
83691+
83692 /*
83693 * The vm_pgoff of a purely anonymous vma should be irrelevant
83694 * until its first write fault, when page's anon_vma and index
83695@@ -2789,7 +3287,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
83696 security_vm_enough_memory_mm(mm, vma_pages(vma)))
83697 return -ENOMEM;
83698
83699+#ifdef CONFIG_PAX_SEGMEXEC
83700+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
83701+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
83702+ if (!vma_m)
83703+ return -ENOMEM;
83704+ }
83705+#endif
83706+
83707 vma_link(mm, vma, prev, rb_link, rb_parent);
83708+
83709+#ifdef CONFIG_PAX_SEGMEXEC
83710+ if (vma_m)
83711+ BUG_ON(pax_mirror_vma(vma_m, vma));
83712+#endif
83713+
83714 return 0;
83715 }
83716
83717@@ -2809,6 +3321,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
83718 struct mempolicy *pol;
83719 bool faulted_in_anon_vma = true;
83720
83721+ BUG_ON(vma->vm_mirror);
83722+
83723 /*
83724 * If anonymous vma has not yet been faulted, update new pgoff
83725 * to match new location, to increase its chance of merging.
83726@@ -2875,6 +3389,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
83727 return NULL;
83728 }
83729
83730+#ifdef CONFIG_PAX_SEGMEXEC
83731+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
83732+{
83733+ struct vm_area_struct *prev_m;
83734+ struct rb_node **rb_link_m, *rb_parent_m;
83735+ struct mempolicy *pol_m;
83736+
83737+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
83738+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
83739+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
83740+ *vma_m = *vma;
83741+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
83742+ if (anon_vma_clone(vma_m, vma))
83743+ return -ENOMEM;
83744+ pol_m = vma_policy(vma_m);
83745+ mpol_get(pol_m);
83746+ vma_set_policy(vma_m, pol_m);
83747+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
83748+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
83749+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
83750+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
83751+ if (vma_m->vm_file)
83752+ get_file(vma_m->vm_file);
83753+ if (vma_m->vm_ops && vma_m->vm_ops->open)
83754+ vma_m->vm_ops->open(vma_m);
83755+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
83756+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
83757+ vma_m->vm_mirror = vma;
83758+ vma->vm_mirror = vma_m;
83759+ return 0;
83760+}
83761+#endif
83762+
83763 /*
83764 * Return true if the calling process may expand its vm space by the passed
83765 * number of pages
83766@@ -2886,6 +3433,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
83767
83768 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
83769
83770+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
83771 if (cur + npages > lim)
83772 return 0;
83773 return 1;
83774@@ -2956,6 +3504,22 @@ int install_special_mapping(struct mm_struct *mm,
83775 vma->vm_start = addr;
83776 vma->vm_end = addr + len;
83777
83778+#ifdef CONFIG_PAX_MPROTECT
83779+ if (mm->pax_flags & MF_PAX_MPROTECT) {
83780+#ifndef CONFIG_PAX_MPROTECT_COMPAT
83781+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
83782+ return -EPERM;
83783+ if (!(vm_flags & VM_EXEC))
83784+ vm_flags &= ~VM_MAYEXEC;
83785+#else
83786+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
83787+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
83788+#endif
83789+ else
83790+ vm_flags &= ~VM_MAYWRITE;
83791+ }
83792+#endif
83793+
83794 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
83795 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
83796
83797diff --git a/mm/mprotect.c b/mm/mprotect.c
83798index 94722a4..07d9926 100644
83799--- a/mm/mprotect.c
83800+++ b/mm/mprotect.c
83801@@ -23,10 +23,18 @@
83802 #include <linux/mmu_notifier.h>
83803 #include <linux/migrate.h>
83804 #include <linux/perf_event.h>
83805+#include <linux/sched/sysctl.h>
83806+
83807+#ifdef CONFIG_PAX_MPROTECT
83808+#include <linux/elf.h>
83809+#include <linux/binfmts.h>
83810+#endif
83811+
83812 #include <asm/uaccess.h>
83813 #include <asm/pgtable.h>
83814 #include <asm/cacheflush.h>
83815 #include <asm/tlbflush.h>
83816+#include <asm/mmu_context.h>
83817
83818 #ifndef pgprot_modify
83819 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
83820@@ -233,6 +241,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
83821 return pages;
83822 }
83823
83824+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
83825+/* called while holding the mmap semaphor for writing except stack expansion */
83826+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
83827+{
83828+ unsigned long oldlimit, newlimit = 0UL;
83829+
83830+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
83831+ return;
83832+
83833+ spin_lock(&mm->page_table_lock);
83834+ oldlimit = mm->context.user_cs_limit;
83835+ if ((prot & VM_EXEC) && oldlimit < end)
83836+ /* USER_CS limit moved up */
83837+ newlimit = end;
83838+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
83839+ /* USER_CS limit moved down */
83840+ newlimit = start;
83841+
83842+ if (newlimit) {
83843+ mm->context.user_cs_limit = newlimit;
83844+
83845+#ifdef CONFIG_SMP
83846+ wmb();
83847+ cpus_clear(mm->context.cpu_user_cs_mask);
83848+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
83849+#endif
83850+
83851+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
83852+ }
83853+ spin_unlock(&mm->page_table_lock);
83854+ if (newlimit == end) {
83855+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
83856+
83857+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
83858+ if (is_vm_hugetlb_page(vma))
83859+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
83860+ else
83861+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
83862+ }
83863+}
83864+#endif
83865+
83866 int
83867 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
83868 unsigned long start, unsigned long end, unsigned long newflags)
83869@@ -245,11 +295,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
83870 int error;
83871 int dirty_accountable = 0;
83872
83873+#ifdef CONFIG_PAX_SEGMEXEC
83874+ struct vm_area_struct *vma_m = NULL;
83875+ unsigned long start_m, end_m;
83876+
83877+ start_m = start + SEGMEXEC_TASK_SIZE;
83878+ end_m = end + SEGMEXEC_TASK_SIZE;
83879+#endif
83880+
83881 if (newflags == oldflags) {
83882 *pprev = vma;
83883 return 0;
83884 }
83885
83886+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
83887+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
83888+
83889+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
83890+ return -ENOMEM;
83891+
83892+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
83893+ return -ENOMEM;
83894+ }
83895+
83896 /*
83897 * If we make a private mapping writable we increase our commit;
83898 * but (without finer accounting) cannot reduce our commit if we
83899@@ -266,6 +334,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
83900 }
83901 }
83902
83903+#ifdef CONFIG_PAX_SEGMEXEC
83904+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
83905+ if (start != vma->vm_start) {
83906+ error = split_vma(mm, vma, start, 1);
83907+ if (error)
83908+ goto fail;
83909+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
83910+ *pprev = (*pprev)->vm_next;
83911+ }
83912+
83913+ if (end != vma->vm_end) {
83914+ error = split_vma(mm, vma, end, 0);
83915+ if (error)
83916+ goto fail;
83917+ }
83918+
83919+ if (pax_find_mirror_vma(vma)) {
83920+ error = __do_munmap(mm, start_m, end_m - start_m);
83921+ if (error)
83922+ goto fail;
83923+ } else {
83924+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
83925+ if (!vma_m) {
83926+ error = -ENOMEM;
83927+ goto fail;
83928+ }
83929+ vma->vm_flags = newflags;
83930+ error = pax_mirror_vma(vma_m, vma);
83931+ if (error) {
83932+ vma->vm_flags = oldflags;
83933+ goto fail;
83934+ }
83935+ }
83936+ }
83937+#endif
83938+
83939 /*
83940 * First try to merge with previous and/or next vma.
83941 */
83942@@ -296,9 +400,21 @@ success:
83943 * vm_flags and vm_page_prot are protected by the mmap_sem
83944 * held in write mode.
83945 */
83946+
83947+#ifdef CONFIG_PAX_SEGMEXEC
83948+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
83949+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
83950+#endif
83951+
83952 vma->vm_flags = newflags;
83953+
83954+#ifdef CONFIG_PAX_MPROTECT
83955+ if (mm->binfmt && mm->binfmt->handle_mprotect)
83956+ mm->binfmt->handle_mprotect(vma, newflags);
83957+#endif
83958+
83959 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
83960- vm_get_page_prot(newflags));
83961+ vm_get_page_prot(vma->vm_flags));
83962
83963 if (vma_wants_writenotify(vma)) {
83964 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
83965@@ -337,6 +453,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
83966 end = start + len;
83967 if (end <= start)
83968 return -ENOMEM;
83969+
83970+#ifdef CONFIG_PAX_SEGMEXEC
83971+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
83972+ if (end > SEGMEXEC_TASK_SIZE)
83973+ return -EINVAL;
83974+ } else
83975+#endif
83976+
83977+ if (end > TASK_SIZE)
83978+ return -EINVAL;
83979+
83980 if (!arch_validate_prot(prot))
83981 return -EINVAL;
83982
83983@@ -344,7 +471,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
83984 /*
83985 * Does the application expect PROT_READ to imply PROT_EXEC:
83986 */
83987- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
83988+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
83989 prot |= PROT_EXEC;
83990
83991 vm_flags = calc_vm_prot_bits(prot);
83992@@ -376,6 +503,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
83993 if (start > vma->vm_start)
83994 prev = vma;
83995
83996+#ifdef CONFIG_PAX_MPROTECT
83997+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
83998+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
83999+#endif
84000+
84001 for (nstart = start ; ; ) {
84002 unsigned long newflags;
84003
84004@@ -386,6 +518,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
84005
84006 /* newflags >> 4 shift VM_MAY% in place of VM_% */
84007 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
84008+ if (prot & (PROT_WRITE | PROT_EXEC))
84009+ gr_log_rwxmprotect(vma->vm_file);
84010+
84011+ error = -EACCES;
84012+ goto out;
84013+ }
84014+
84015+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
84016 error = -EACCES;
84017 goto out;
84018 }
84019@@ -400,6 +540,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
84020 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
84021 if (error)
84022 goto out;
84023+
84024+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
84025+
84026 nstart = tmp;
84027
84028 if (nstart < prev->vm_end)
84029diff --git a/mm/mremap.c b/mm/mremap.c
84030index 463a257..c0c7a92 100644
84031--- a/mm/mremap.c
84032+++ b/mm/mremap.c
84033@@ -126,6 +126,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
84034 continue;
84035 pte = ptep_get_and_clear(mm, old_addr, old_pte);
84036 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
84037+
84038+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
84039+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
84040+ pte = pte_exprotect(pte);
84041+#endif
84042+
84043 set_pte_at(mm, new_addr, new_pte, pte);
84044 }
84045
84046@@ -318,6 +324,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
84047 if (is_vm_hugetlb_page(vma))
84048 goto Einval;
84049
84050+#ifdef CONFIG_PAX_SEGMEXEC
84051+ if (pax_find_mirror_vma(vma))
84052+ goto Einval;
84053+#endif
84054+
84055 /* We can't remap across vm area boundaries */
84056 if (old_len > vma->vm_end - addr)
84057 goto Efault;
84058@@ -373,20 +384,25 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
84059 unsigned long ret = -EINVAL;
84060 unsigned long charged = 0;
84061 unsigned long map_flags;
84062+ unsigned long pax_task_size = TASK_SIZE;
84063
84064 if (new_addr & ~PAGE_MASK)
84065 goto out;
84066
84067- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
84068+#ifdef CONFIG_PAX_SEGMEXEC
84069+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
84070+ pax_task_size = SEGMEXEC_TASK_SIZE;
84071+#endif
84072+
84073+ pax_task_size -= PAGE_SIZE;
84074+
84075+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
84076 goto out;
84077
84078 /* Check if the location we're moving into overlaps the
84079 * old location at all, and fail if it does.
84080 */
84081- if ((new_addr <= addr) && (new_addr+new_len) > addr)
84082- goto out;
84083-
84084- if ((addr <= new_addr) && (addr+old_len) > new_addr)
84085+ if (addr + old_len > new_addr && new_addr + new_len > addr)
84086 goto out;
84087
84088 ret = do_munmap(mm, new_addr, new_len);
84089@@ -455,6 +471,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
84090 unsigned long ret = -EINVAL;
84091 unsigned long charged = 0;
84092 bool locked = false;
84093+ unsigned long pax_task_size = TASK_SIZE;
84094
84095 down_write(&current->mm->mmap_sem);
84096
84097@@ -475,6 +492,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
84098 if (!new_len)
84099 goto out;
84100
84101+#ifdef CONFIG_PAX_SEGMEXEC
84102+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
84103+ pax_task_size = SEGMEXEC_TASK_SIZE;
84104+#endif
84105+
84106+ pax_task_size -= PAGE_SIZE;
84107+
84108+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
84109+ old_len > pax_task_size || addr > pax_task_size-old_len)
84110+ goto out;
84111+
84112 if (flags & MREMAP_FIXED) {
84113 if (flags & MREMAP_MAYMOVE)
84114 ret = mremap_to(addr, old_len, new_addr, new_len,
84115@@ -524,6 +552,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
84116 new_addr = addr;
84117 }
84118 ret = addr;
84119+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
84120 goto out;
84121 }
84122 }
84123@@ -547,7 +576,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
84124 goto out;
84125 }
84126
84127+ map_flags = vma->vm_flags;
84128 ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
84129+ if (!(ret & ~PAGE_MASK)) {
84130+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
84131+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
84132+ }
84133 }
84134 out:
84135 if (ret & ~PAGE_MASK)
84136diff --git a/mm/nommu.c b/mm/nommu.c
84137index 298884d..5f74980 100644
84138--- a/mm/nommu.c
84139+++ b/mm/nommu.c
84140@@ -65,7 +65,6 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
84141 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
84142 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
84143 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
84144-int heap_stack_gap = 0;
84145
84146 atomic_long_t mmap_pages_allocated;
84147
84148@@ -842,15 +841,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
84149 EXPORT_SYMBOL(find_vma);
84150
84151 /*
84152- * find a VMA
84153- * - we don't extend stack VMAs under NOMMU conditions
84154- */
84155-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
84156-{
84157- return find_vma(mm, addr);
84158-}
84159-
84160-/*
84161 * expand a stack to a given address
84162 * - not supported under NOMMU conditions
84163 */
84164@@ -1561,6 +1551,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
84165
84166 /* most fields are the same, copy all, and then fixup */
84167 *new = *vma;
84168+ INIT_LIST_HEAD(&new->anon_vma_chain);
84169 *region = *vma->vm_region;
84170 new->vm_region = region;
84171
84172@@ -1995,8 +1986,8 @@ int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
84173 }
84174 EXPORT_SYMBOL(generic_file_remap_pages);
84175
84176-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
84177- unsigned long addr, void *buf, int len, int write)
84178+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
84179+ unsigned long addr, void *buf, size_t len, int write)
84180 {
84181 struct vm_area_struct *vma;
84182
84183@@ -2037,8 +2028,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
84184 *
84185 * The caller must hold a reference on @mm.
84186 */
84187-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
84188- void *buf, int len, int write)
84189+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
84190+ void *buf, size_t len, int write)
84191 {
84192 return __access_remote_vm(NULL, mm, addr, buf, len, write);
84193 }
84194@@ -2047,7 +2038,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
84195 * Access another process' address space.
84196 * - source/target buffer must be kernel space
84197 */
84198-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
84199+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
84200 {
84201 struct mm_struct *mm;
84202
84203diff --git a/mm/page-writeback.c b/mm/page-writeback.c
84204index 4514ad7..92eaa1c 100644
84205--- a/mm/page-writeback.c
84206+++ b/mm/page-writeback.c
84207@@ -659,7 +659,7 @@ unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
84208 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
84209 * - the bdi dirty thresh drops quickly due to change of JBOD workload
84210 */
84211-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
84212+static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
84213 unsigned long thresh,
84214 unsigned long bg_thresh,
84215 unsigned long dirty,
84216@@ -1634,7 +1634,7 @@ ratelimit_handler(struct notifier_block *self, unsigned long action,
84217 }
84218 }
84219
84220-static struct notifier_block __cpuinitdata ratelimit_nb = {
84221+static struct notifier_block ratelimit_nb = {
84222 .notifier_call = ratelimit_handler,
84223 .next = NULL,
84224 };
84225diff --git a/mm/page_alloc.c b/mm/page_alloc.c
84226index c3edb62..2d60097 100644
84227--- a/mm/page_alloc.c
84228+++ b/mm/page_alloc.c
84229@@ -60,6 +60,7 @@
84230 #include <linux/page-debug-flags.h>
84231 #include <linux/hugetlb.h>
84232 #include <linux/sched/rt.h>
84233+#include <linux/random.h>
84234
84235 #include <asm/tlbflush.h>
84236 #include <asm/div64.h>
84237@@ -345,7 +346,7 @@ out:
84238 * This usage means that zero-order pages may not be compound.
84239 */
84240
84241-static void free_compound_page(struct page *page)
84242+void free_compound_page(struct page *page)
84243 {
84244 __free_pages_ok(page, compound_order(page));
84245 }
84246@@ -702,6 +703,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
84247 int i;
84248 int bad = 0;
84249
84250+#ifdef CONFIG_PAX_MEMORY_SANITIZE
84251+ unsigned long index = 1UL << order;
84252+#endif
84253+
84254 trace_mm_page_free(page, order);
84255 kmemcheck_free_shadow(page, order);
84256
84257@@ -717,6 +722,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
84258 debug_check_no_obj_freed(page_address(page),
84259 PAGE_SIZE << order);
84260 }
84261+
84262+#ifdef CONFIG_PAX_MEMORY_SANITIZE
84263+ for (; index; --index)
84264+ sanitize_highpage(page + index - 1);
84265+#endif
84266+
84267 arch_free_page(page, order);
84268 kernel_map_pages(page, 1 << order, 0);
84269
84270@@ -739,6 +750,19 @@ static void __free_pages_ok(struct page *page, unsigned int order)
84271 local_irq_restore(flags);
84272 }
84273
84274+#ifdef CONFIG_PAX_LATENT_ENTROPY
84275+bool __meminitdata extra_latent_entropy;
84276+
84277+static int __init setup_pax_extra_latent_entropy(char *str)
84278+{
84279+ extra_latent_entropy = true;
84280+ return 0;
84281+}
84282+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
84283+
84284+volatile u64 latent_entropy;
84285+#endif
84286+
84287 /*
84288 * Read access to zone->managed_pages is safe because it's unsigned long,
84289 * but we still need to serialize writers. Currently all callers of
84290@@ -761,6 +785,19 @@ void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
84291 set_page_count(p, 0);
84292 }
84293
84294+#ifdef CONFIG_PAX_LATENT_ENTROPY
84295+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
84296+ u64 hash = 0;
84297+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
84298+ const u64 *data = lowmem_page_address(page);
84299+
84300+ for (index = 0; index < end; index++)
84301+ hash ^= hash + data[index];
84302+ latent_entropy ^= hash;
84303+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
84304+ }
84305+#endif
84306+
84307 page_zone(page)->managed_pages += 1 << order;
84308 set_page_refcounted(page);
84309 __free_pages(page, order);
84310@@ -870,8 +907,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
84311 arch_alloc_page(page, order);
84312 kernel_map_pages(page, 1 << order, 1);
84313
84314+#ifndef CONFIG_PAX_MEMORY_SANITIZE
84315 if (gfp_flags & __GFP_ZERO)
84316 prep_zero_page(page, order, gfp_flags);
84317+#endif
84318
84319 if (order && (gfp_flags & __GFP_COMP))
84320 prep_compound_page(page, order);
84321diff --git a/mm/page_io.c b/mm/page_io.c
84322index a8a3ef4..7260a60 100644
84323--- a/mm/page_io.c
84324+++ b/mm/page_io.c
84325@@ -214,7 +214,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
84326 struct file *swap_file = sis->swap_file;
84327 struct address_space *mapping = swap_file->f_mapping;
84328 struct iovec iov = {
84329- .iov_base = kmap(page),
84330+ .iov_base = (void __force_user *)kmap(page),
84331 .iov_len = PAGE_SIZE,
84332 };
84333
84334diff --git a/mm/percpu.c b/mm/percpu.c
84335index 8c8e08f..73a5cda 100644
84336--- a/mm/percpu.c
84337+++ b/mm/percpu.c
84338@@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
84339 static unsigned int pcpu_high_unit_cpu __read_mostly;
84340
84341 /* the address of the first chunk which starts with the kernel static area */
84342-void *pcpu_base_addr __read_mostly;
84343+void *pcpu_base_addr __read_only;
84344 EXPORT_SYMBOL_GPL(pcpu_base_addr);
84345
84346 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
84347diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
84348index fd26d04..0cea1b0 100644
84349--- a/mm/process_vm_access.c
84350+++ b/mm/process_vm_access.c
84351@@ -13,6 +13,7 @@
84352 #include <linux/uio.h>
84353 #include <linux/sched.h>
84354 #include <linux/highmem.h>
84355+#include <linux/security.h>
84356 #include <linux/ptrace.h>
84357 #include <linux/slab.h>
84358 #include <linux/syscalls.h>
84359@@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
84360 size_t iov_l_curr_offset = 0;
84361 ssize_t iov_len;
84362
84363+ return -ENOSYS; // PaX: until properly audited
84364+
84365 /*
84366 * Work out how many pages of struct pages we're going to need
84367 * when eventually calling get_user_pages
84368 */
84369 for (i = 0; i < riovcnt; i++) {
84370 iov_len = rvec[i].iov_len;
84371- if (iov_len > 0) {
84372- nr_pages_iov = ((unsigned long)rvec[i].iov_base
84373- + iov_len)
84374- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
84375- / PAGE_SIZE + 1;
84376- nr_pages = max(nr_pages, nr_pages_iov);
84377- }
84378+ if (iov_len <= 0)
84379+ continue;
84380+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
84381+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
84382+ nr_pages = max(nr_pages, nr_pages_iov);
84383 }
84384
84385 if (nr_pages == 0)
84386@@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
84387 goto free_proc_pages;
84388 }
84389
84390+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
84391+ rc = -EPERM;
84392+ goto put_task_struct;
84393+ }
84394+
84395 mm = mm_access(task, PTRACE_MODE_ATTACH);
84396 if (!mm || IS_ERR(mm)) {
84397 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
84398diff --git a/mm/rmap.c b/mm/rmap.c
84399index 6280da8..ecce194 100644
84400--- a/mm/rmap.c
84401+++ b/mm/rmap.c
84402@@ -163,6 +163,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
84403 struct anon_vma *anon_vma = vma->anon_vma;
84404 struct anon_vma_chain *avc;
84405
84406+#ifdef CONFIG_PAX_SEGMEXEC
84407+ struct anon_vma_chain *avc_m = NULL;
84408+#endif
84409+
84410 might_sleep();
84411 if (unlikely(!anon_vma)) {
84412 struct mm_struct *mm = vma->vm_mm;
84413@@ -172,6 +176,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
84414 if (!avc)
84415 goto out_enomem;
84416
84417+#ifdef CONFIG_PAX_SEGMEXEC
84418+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
84419+ if (!avc_m)
84420+ goto out_enomem_free_avc;
84421+#endif
84422+
84423 anon_vma = find_mergeable_anon_vma(vma);
84424 allocated = NULL;
84425 if (!anon_vma) {
84426@@ -185,6 +195,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
84427 /* page_table_lock to protect against threads */
84428 spin_lock(&mm->page_table_lock);
84429 if (likely(!vma->anon_vma)) {
84430+
84431+#ifdef CONFIG_PAX_SEGMEXEC
84432+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
84433+
84434+ if (vma_m) {
84435+ BUG_ON(vma_m->anon_vma);
84436+ vma_m->anon_vma = anon_vma;
84437+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
84438+ avc_m = NULL;
84439+ }
84440+#endif
84441+
84442 vma->anon_vma = anon_vma;
84443 anon_vma_chain_link(vma, avc, anon_vma);
84444 allocated = NULL;
84445@@ -195,12 +217,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
84446
84447 if (unlikely(allocated))
84448 put_anon_vma(allocated);
84449+
84450+#ifdef CONFIG_PAX_SEGMEXEC
84451+ if (unlikely(avc_m))
84452+ anon_vma_chain_free(avc_m);
84453+#endif
84454+
84455 if (unlikely(avc))
84456 anon_vma_chain_free(avc);
84457 }
84458 return 0;
84459
84460 out_enomem_free_avc:
84461+
84462+#ifdef CONFIG_PAX_SEGMEXEC
84463+ if (avc_m)
84464+ anon_vma_chain_free(avc_m);
84465+#endif
84466+
84467 anon_vma_chain_free(avc);
84468 out_enomem:
84469 return -ENOMEM;
84470@@ -236,7 +270,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
84471 * Attach the anon_vmas from src to dst.
84472 * Returns 0 on success, -ENOMEM on failure.
84473 */
84474-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
84475+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
84476 {
84477 struct anon_vma_chain *avc, *pavc;
84478 struct anon_vma *root = NULL;
84479@@ -269,7 +303,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
84480 * the corresponding VMA in the parent process is attached to.
84481 * Returns 0 on success, non-zero on failure.
84482 */
84483-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
84484+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
84485 {
84486 struct anon_vma_chain *avc;
84487 struct anon_vma *anon_vma;
84488diff --git a/mm/shmem.c b/mm/shmem.c
84489index 5e6a842..b41916e 100644
84490--- a/mm/shmem.c
84491+++ b/mm/shmem.c
84492@@ -33,7 +33,7 @@
84493 #include <linux/swap.h>
84494 #include <linux/aio.h>
84495
84496-static struct vfsmount *shm_mnt;
84497+struct vfsmount *shm_mnt;
84498
84499 #ifdef CONFIG_SHMEM
84500 /*
84501@@ -77,7 +77,7 @@ static struct vfsmount *shm_mnt;
84502 #define BOGO_DIRENT_SIZE 20
84503
84504 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
84505-#define SHORT_SYMLINK_LEN 128
84506+#define SHORT_SYMLINK_LEN 64
84507
84508 /*
84509 * shmem_fallocate and shmem_writepage communicate via inode->i_private
84510@@ -2203,6 +2203,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
84511 static int shmem_xattr_validate(const char *name)
84512 {
84513 struct { const char *prefix; size_t len; } arr[] = {
84514+
84515+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
84516+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
84517+#endif
84518+
84519 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
84520 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
84521 };
84522@@ -2258,6 +2263,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
84523 if (err)
84524 return err;
84525
84526+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
84527+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
84528+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
84529+ return -EOPNOTSUPP;
84530+ if (size > 8)
84531+ return -EINVAL;
84532+ }
84533+#endif
84534+
84535 return simple_xattr_set(&info->xattrs, name, value, size, flags);
84536 }
84537
84538@@ -2570,8 +2584,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
84539 int err = -ENOMEM;
84540
84541 /* Round up to L1_CACHE_BYTES to resist false sharing */
84542- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
84543- L1_CACHE_BYTES), GFP_KERNEL);
84544+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
84545 if (!sbinfo)
84546 return -ENOMEM;
84547
84548diff --git a/mm/slab.c b/mm/slab.c
84549index 8ccd296..012fe4e 100644
84550--- a/mm/slab.c
84551+++ b/mm/slab.c
84552@@ -366,10 +366,10 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
84553 if ((x)->max_freeable < i) \
84554 (x)->max_freeable = i; \
84555 } while (0)
84556-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
84557-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
84558-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
84559-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
84560+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
84561+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
84562+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
84563+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
84564 #else
84565 #define STATS_INC_ACTIVE(x) do { } while (0)
84566 #define STATS_DEC_ACTIVE(x) do { } while (0)
84567@@ -477,7 +477,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
84568 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
84569 */
84570 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
84571- const struct slab *slab, void *obj)
84572+ const struct slab *slab, const void *obj)
84573 {
84574 u32 offset = (obj - slab->s_mem);
84575 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
84576@@ -1384,7 +1384,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
84577 return notifier_from_errno(err);
84578 }
84579
84580-static struct notifier_block __cpuinitdata cpucache_notifier = {
84581+static struct notifier_block cpucache_notifier = {
84582 &cpuup_callback, NULL, 0
84583 };
84584
84585@@ -1565,12 +1565,12 @@ void __init kmem_cache_init(void)
84586 */
84587
84588 kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac",
84589- kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS);
84590+ kmalloc_size(INDEX_AC), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
84591
84592 if (INDEX_AC != INDEX_NODE)
84593 kmalloc_caches[INDEX_NODE] =
84594 create_kmalloc_cache("kmalloc-node",
84595- kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
84596+ kmalloc_size(INDEX_NODE), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
84597
84598 slab_early_init = 0;
84599
84600@@ -3800,6 +3800,7 @@ void kfree(const void *objp)
84601
84602 if (unlikely(ZERO_OR_NULL_PTR(objp)))
84603 return;
84604+ VM_BUG_ON(!virt_addr_valid(objp));
84605 local_irq_save(flags);
84606 kfree_debugcheck(objp);
84607 c = virt_to_cache(objp);
84608@@ -4241,10 +4242,10 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
84609 }
84610 /* cpu stats */
84611 {
84612- unsigned long allochit = atomic_read(&cachep->allochit);
84613- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
84614- unsigned long freehit = atomic_read(&cachep->freehit);
84615- unsigned long freemiss = atomic_read(&cachep->freemiss);
84616+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
84617+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
84618+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
84619+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
84620
84621 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
84622 allochit, allocmiss, freehit, freemiss);
84623@@ -4476,13 +4477,71 @@ static const struct file_operations proc_slabstats_operations = {
84624 static int __init slab_proc_init(void)
84625 {
84626 #ifdef CONFIG_DEBUG_SLAB_LEAK
84627- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
84628+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
84629 #endif
84630 return 0;
84631 }
84632 module_init(slab_proc_init);
84633 #endif
84634
84635+bool is_usercopy_object(const void *ptr)
84636+{
84637+ struct page *page;
84638+ struct kmem_cache *cachep;
84639+
84640+ if (ZERO_OR_NULL_PTR(ptr))
84641+ return false;
84642+
84643+ if (!slab_is_available())
84644+ return false;
84645+
84646+ if (!virt_addr_valid(ptr))
84647+ return false;
84648+
84649+ page = virt_to_head_page(ptr);
84650+
84651+ if (!PageSlab(page))
84652+ return false;
84653+
84654+ cachep = page->slab_cache;
84655+ return cachep->flags & SLAB_USERCOPY;
84656+}
84657+
84658+#ifdef CONFIG_PAX_USERCOPY
84659+const char *check_heap_object(const void *ptr, unsigned long n)
84660+{
84661+ struct page *page;
84662+ struct kmem_cache *cachep;
84663+ struct slab *slabp;
84664+ unsigned int objnr;
84665+ unsigned long offset;
84666+
84667+ if (ZERO_OR_NULL_PTR(ptr))
84668+ return "<null>";
84669+
84670+ if (!virt_addr_valid(ptr))
84671+ return NULL;
84672+
84673+ page = virt_to_head_page(ptr);
84674+
84675+ if (!PageSlab(page))
84676+ return NULL;
84677+
84678+ cachep = page->slab_cache;
84679+ if (!(cachep->flags & SLAB_USERCOPY))
84680+ return cachep->name;
84681+
84682+ slabp = page->slab_page;
84683+ objnr = obj_to_index(cachep, slabp, ptr);
84684+ BUG_ON(objnr >= cachep->num);
84685+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
84686+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
84687+ return NULL;
84688+
84689+ return cachep->name;
84690+}
84691+#endif
84692+
84693 /**
84694 * ksize - get the actual amount of memory allocated for a given object
84695 * @objp: Pointer to the object
84696diff --git a/mm/slab.h b/mm/slab.h
84697index f96b49e..5634e90 100644
84698--- a/mm/slab.h
84699+++ b/mm/slab.h
84700@@ -67,7 +67,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
84701
84702 /* Legal flag mask for kmem_cache_create(), for various configurations */
84703 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
84704- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
84705+ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | SLAB_USERCOPY)
84706
84707 #if defined(CONFIG_DEBUG_SLAB)
84708 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
84709@@ -229,6 +229,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
84710 return s;
84711
84712 page = virt_to_head_page(x);
84713+
84714+ BUG_ON(!PageSlab(page));
84715+
84716 cachep = page->slab_cache;
84717 if (slab_equal_or_root(cachep, s))
84718 return cachep;
84719diff --git a/mm/slab_common.c b/mm/slab_common.c
84720index 2d41450..e22088e 100644
84721--- a/mm/slab_common.c
84722+++ b/mm/slab_common.c
84723@@ -22,7 +22,7 @@
84724
84725 #include "slab.h"
84726
84727-enum slab_state slab_state;
84728+enum slab_state slab_state __read_only;
84729 LIST_HEAD(slab_caches);
84730 DEFINE_MUTEX(slab_mutex);
84731 struct kmem_cache *kmem_cache;
84732@@ -209,7 +209,7 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
84733
84734 err = __kmem_cache_create(s, flags);
84735 if (!err) {
84736- s->refcount = 1;
84737+ atomic_set(&s->refcount, 1);
84738 list_add(&s->list, &slab_caches);
84739 memcg_cache_list_add(memcg, s);
84740 } else {
84741@@ -255,8 +255,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
84742
84743 get_online_cpus();
84744 mutex_lock(&slab_mutex);
84745- s->refcount--;
84746- if (!s->refcount) {
84747+ if (atomic_dec_and_test(&s->refcount)) {
84748 list_del(&s->list);
84749
84750 if (!__kmem_cache_shutdown(s)) {
84751@@ -302,7 +301,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
84752 panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
84753 name, size, err);
84754
84755- s->refcount = -1; /* Exempt from merging for now */
84756+ atomic_set(&s->refcount, -1); /* Exempt from merging for now */
84757 }
84758
84759 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
84760@@ -315,7 +314,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
84761
84762 create_boot_cache(s, name, size, flags);
84763 list_add(&s->list, &slab_caches);
84764- s->refcount = 1;
84765+ atomic_set(&s->refcount, 1);
84766 return s;
84767 }
84768
84769@@ -327,6 +326,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
84770 EXPORT_SYMBOL(kmalloc_dma_caches);
84771 #endif
84772
84773+#ifdef CONFIG_PAX_USERCOPY_SLABS
84774+struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
84775+EXPORT_SYMBOL(kmalloc_usercopy_caches);
84776+#endif
84777+
84778 /*
84779 * Conversion table for small slabs sizes / 8 to the index in the
84780 * kmalloc array. This is necessary for slabs < 192 since we have non power
84781@@ -391,6 +395,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
84782 return kmalloc_dma_caches[index];
84783
84784 #endif
84785+
84786+#ifdef CONFIG_PAX_USERCOPY_SLABS
84787+ if (unlikely((flags & GFP_USERCOPY)))
84788+ return kmalloc_usercopy_caches[index];
84789+
84790+#endif
84791+
84792 return kmalloc_caches[index];
84793 }
84794
84795@@ -447,7 +458,7 @@ void __init create_kmalloc_caches(unsigned long flags)
84796 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
84797 if (!kmalloc_caches[i]) {
84798 kmalloc_caches[i] = create_kmalloc_cache(NULL,
84799- 1 << i, flags);
84800+ 1 << i, SLAB_USERCOPY | flags);
84801 }
84802
84803 /*
84804@@ -456,10 +467,10 @@ void __init create_kmalloc_caches(unsigned long flags)
84805 * earlier power of two caches
84806 */
84807 if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
84808- kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
84809+ kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, SLAB_USERCOPY | flags);
84810
84811 if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
84812- kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
84813+ kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, SLAB_USERCOPY | flags);
84814 }
84815
84816 /* Kmalloc array is now usable */
84817@@ -492,6 +503,23 @@ void __init create_kmalloc_caches(unsigned long flags)
84818 }
84819 }
84820 #endif
84821+
84822+#ifdef CONFIG_PAX_USERCOPY_SLABS
84823+ for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
84824+ struct kmem_cache *s = kmalloc_caches[i];
84825+
84826+ if (s) {
84827+ int size = kmalloc_size(i);
84828+ char *n = kasprintf(GFP_NOWAIT,
84829+ "usercopy-kmalloc-%d", size);
84830+
84831+ BUG_ON(!n);
84832+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(n,
84833+ size, SLAB_USERCOPY | flags);
84834+ }
84835+ }
84836+#endif
84837+
84838 }
84839 #endif /* !CONFIG_SLOB */
84840
84841diff --git a/mm/slob.c b/mm/slob.c
84842index eeed4a0..6ee34ec 100644
84843--- a/mm/slob.c
84844+++ b/mm/slob.c
84845@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
84846 /*
84847 * Return the size of a slob block.
84848 */
84849-static slobidx_t slob_units(slob_t *s)
84850+static slobidx_t slob_units(const slob_t *s)
84851 {
84852 if (s->units > 0)
84853 return s->units;
84854@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
84855 /*
84856 * Return the next free slob block pointer after this one.
84857 */
84858-static slob_t *slob_next(slob_t *s)
84859+static slob_t *slob_next(const slob_t *s)
84860 {
84861 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
84862 slobidx_t next;
84863@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
84864 /*
84865 * Returns true if s is the last free block in its page.
84866 */
84867-static int slob_last(slob_t *s)
84868+static int slob_last(const slob_t *s)
84869 {
84870 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
84871 }
84872
84873-static void *slob_new_pages(gfp_t gfp, int order, int node)
84874+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
84875 {
84876- void *page;
84877+ struct page *page;
84878
84879 #ifdef CONFIG_NUMA
84880 if (node != NUMA_NO_NODE)
84881@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
84882 if (!page)
84883 return NULL;
84884
84885- return page_address(page);
84886+ __SetPageSlab(page);
84887+ return page;
84888 }
84889
84890-static void slob_free_pages(void *b, int order)
84891+static void slob_free_pages(struct page *sp, int order)
84892 {
84893 if (current->reclaim_state)
84894 current->reclaim_state->reclaimed_slab += 1 << order;
84895- free_pages((unsigned long)b, order);
84896+ __ClearPageSlab(sp);
84897+ reset_page_mapcount(sp);
84898+ sp->private = 0;
84899+ __free_pages(sp, order);
84900 }
84901
84902 /*
84903@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
84904
84905 /* Not enough space: must allocate a new page */
84906 if (!b) {
84907- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
84908- if (!b)
84909+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
84910+ if (!sp)
84911 return NULL;
84912- sp = virt_to_page(b);
84913- __SetPageSlab(sp);
84914+ b = page_address(sp);
84915
84916 spin_lock_irqsave(&slob_lock, flags);
84917 sp->units = SLOB_UNITS(PAGE_SIZE);
84918 sp->freelist = b;
84919+ sp->private = 0;
84920 INIT_LIST_HEAD(&sp->list);
84921 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
84922 set_slob_page_free(sp, slob_list);
84923@@ -359,9 +363,7 @@ static void slob_free(void *block, int size)
84924 if (slob_page_free(sp))
84925 clear_slob_page_free(sp);
84926 spin_unlock_irqrestore(&slob_lock, flags);
84927- __ClearPageSlab(sp);
84928- page_mapcount_reset(sp);
84929- slob_free_pages(b, 0);
84930+ slob_free_pages(sp, 0);
84931 return;
84932 }
84933
84934@@ -424,11 +426,10 @@ out:
84935 */
84936
84937 static __always_inline void *
84938-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
84939+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
84940 {
84941- unsigned int *m;
84942- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
84943- void *ret;
84944+ slob_t *m;
84945+ void *ret = NULL;
84946
84947 gfp &= gfp_allowed_mask;
84948
84949@@ -442,23 +443,41 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
84950
84951 if (!m)
84952 return NULL;
84953- *m = size;
84954+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
84955+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
84956+ m[0].units = size;
84957+ m[1].units = align;
84958 ret = (void *)m + align;
84959
84960 trace_kmalloc_node(caller, ret,
84961 size, size + align, gfp, node);
84962 } else {
84963 unsigned int order = get_order(size);
84964+ struct page *page;
84965
84966 if (likely(order))
84967 gfp |= __GFP_COMP;
84968- ret = slob_new_pages(gfp, order, node);
84969+ page = slob_new_pages(gfp, order, node);
84970+ if (page) {
84971+ ret = page_address(page);
84972+ page->private = size;
84973+ }
84974
84975 trace_kmalloc_node(caller, ret,
84976 size, PAGE_SIZE << order, gfp, node);
84977 }
84978
84979- kmemleak_alloc(ret, size, 1, gfp);
84980+ return ret;
84981+}
84982+
84983+static __always_inline void *
84984+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
84985+{
84986+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
84987+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
84988+
84989+ if (!ZERO_OR_NULL_PTR(ret))
84990+ kmemleak_alloc(ret, size, 1, gfp);
84991 return ret;
84992 }
84993
84994@@ -493,34 +512,112 @@ void kfree(const void *block)
84995 return;
84996 kmemleak_free(block);
84997
84998+ VM_BUG_ON(!virt_addr_valid(block));
84999 sp = virt_to_page(block);
85000- if (PageSlab(sp)) {
85001+ VM_BUG_ON(!PageSlab(sp));
85002+ if (!sp->private) {
85003 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
85004- unsigned int *m = (unsigned int *)(block - align);
85005- slob_free(m, *m + align);
85006- } else
85007+ slob_t *m = (slob_t *)(block - align);
85008+ slob_free(m, m[0].units + align);
85009+ } else {
85010+ __ClearPageSlab(sp);
85011+ reset_page_mapcount(sp);
85012+ sp->private = 0;
85013 __free_pages(sp, compound_order(sp));
85014+ }
85015 }
85016 EXPORT_SYMBOL(kfree);
85017
85018+bool is_usercopy_object(const void *ptr)
85019+{
85020+ if (!slab_is_available())
85021+ return false;
85022+
85023+ // PAX: TODO
85024+
85025+ return false;
85026+}
85027+
85028+#ifdef CONFIG_PAX_USERCOPY
85029+const char *check_heap_object(const void *ptr, unsigned long n)
85030+{
85031+ struct page *page;
85032+ const slob_t *free;
85033+ const void *base;
85034+ unsigned long flags;
85035+
85036+ if (ZERO_OR_NULL_PTR(ptr))
85037+ return "<null>";
85038+
85039+ if (!virt_addr_valid(ptr))
85040+ return NULL;
85041+
85042+ page = virt_to_head_page(ptr);
85043+ if (!PageSlab(page))
85044+ return NULL;
85045+
85046+ if (page->private) {
85047+ base = page;
85048+ if (base <= ptr && n <= page->private - (ptr - base))
85049+ return NULL;
85050+ return "<slob>";
85051+ }
85052+
85053+ /* some tricky double walking to find the chunk */
85054+ spin_lock_irqsave(&slob_lock, flags);
85055+ base = (void *)((unsigned long)ptr & PAGE_MASK);
85056+ free = page->freelist;
85057+
85058+ while (!slob_last(free) && (void *)free <= ptr) {
85059+ base = free + slob_units(free);
85060+ free = slob_next(free);
85061+ }
85062+
85063+ while (base < (void *)free) {
85064+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
85065+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
85066+ int offset;
85067+
85068+ if (ptr < base + align)
85069+ break;
85070+
85071+ offset = ptr - base - align;
85072+ if (offset >= m) {
85073+ base += size;
85074+ continue;
85075+ }
85076+
85077+ if (n > m - offset)
85078+ break;
85079+
85080+ spin_unlock_irqrestore(&slob_lock, flags);
85081+ return NULL;
85082+ }
85083+
85084+ spin_unlock_irqrestore(&slob_lock, flags);
85085+ return "<slob>";
85086+}
85087+#endif
85088+
85089 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
85090 size_t ksize(const void *block)
85091 {
85092 struct page *sp;
85093 int align;
85094- unsigned int *m;
85095+ slob_t *m;
85096
85097 BUG_ON(!block);
85098 if (unlikely(block == ZERO_SIZE_PTR))
85099 return 0;
85100
85101 sp = virt_to_page(block);
85102- if (unlikely(!PageSlab(sp)))
85103- return PAGE_SIZE << compound_order(sp);
85104+ VM_BUG_ON(!PageSlab(sp));
85105+ if (sp->private)
85106+ return sp->private;
85107
85108 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
85109- m = (unsigned int *)(block - align);
85110- return SLOB_UNITS(*m) * SLOB_UNIT;
85111+ m = (slob_t *)(block - align);
85112+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
85113 }
85114 EXPORT_SYMBOL(ksize);
85115
85116@@ -536,23 +633,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
85117
85118 void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
85119 {
85120- void *b;
85121+ void *b = NULL;
85122
85123 flags &= gfp_allowed_mask;
85124
85125 lockdep_trace_alloc(flags);
85126
85127+#ifdef CONFIG_PAX_USERCOPY_SLABS
85128+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
85129+#else
85130 if (c->size < PAGE_SIZE) {
85131 b = slob_alloc(c->size, flags, c->align, node);
85132 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
85133 SLOB_UNITS(c->size) * SLOB_UNIT,
85134 flags, node);
85135 } else {
85136- b = slob_new_pages(flags, get_order(c->size), node);
85137+ struct page *sp;
85138+
85139+ sp = slob_new_pages(flags, get_order(c->size), node);
85140+ if (sp) {
85141+ b = page_address(sp);
85142+ sp->private = c->size;
85143+ }
85144 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
85145 PAGE_SIZE << get_order(c->size),
85146 flags, node);
85147 }
85148+#endif
85149
85150 if (c->ctor)
85151 c->ctor(b);
85152@@ -564,10 +671,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
85153
85154 static void __kmem_cache_free(void *b, int size)
85155 {
85156- if (size < PAGE_SIZE)
85157+ struct page *sp;
85158+
85159+ sp = virt_to_page(b);
85160+ BUG_ON(!PageSlab(sp));
85161+ if (!sp->private)
85162 slob_free(b, size);
85163 else
85164- slob_free_pages(b, get_order(size));
85165+ slob_free_pages(sp, get_order(size));
85166 }
85167
85168 static void kmem_rcu_free(struct rcu_head *head)
85169@@ -580,17 +691,31 @@ static void kmem_rcu_free(struct rcu_head *head)
85170
85171 void kmem_cache_free(struct kmem_cache *c, void *b)
85172 {
85173+ int size = c->size;
85174+
85175+#ifdef CONFIG_PAX_USERCOPY_SLABS
85176+ if (size + c->align < PAGE_SIZE) {
85177+ size += c->align;
85178+ b -= c->align;
85179+ }
85180+#endif
85181+
85182 kmemleak_free_recursive(b, c->flags);
85183 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
85184 struct slob_rcu *slob_rcu;
85185- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
85186- slob_rcu->size = c->size;
85187+ slob_rcu = b + (size - sizeof(struct slob_rcu));
85188+ slob_rcu->size = size;
85189 call_rcu(&slob_rcu->head, kmem_rcu_free);
85190 } else {
85191- __kmem_cache_free(b, c->size);
85192+ __kmem_cache_free(b, size);
85193 }
85194
85195+#ifdef CONFIG_PAX_USERCOPY_SLABS
85196+ trace_kfree(_RET_IP_, b);
85197+#else
85198 trace_kmem_cache_free(_RET_IP_, b);
85199+#endif
85200+
85201 }
85202 EXPORT_SYMBOL(kmem_cache_free);
85203
85204diff --git a/mm/slub.c b/mm/slub.c
85205index 57707f0..c28619b 100644
85206--- a/mm/slub.c
85207+++ b/mm/slub.c
85208@@ -198,7 +198,7 @@ struct track {
85209
85210 enum track_item { TRACK_ALLOC, TRACK_FREE };
85211
85212-#ifdef CONFIG_SYSFS
85213+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
85214 static int sysfs_slab_add(struct kmem_cache *);
85215 static int sysfs_slab_alias(struct kmem_cache *, const char *);
85216 static void sysfs_slab_remove(struct kmem_cache *);
85217@@ -519,7 +519,7 @@ static void print_track(const char *s, struct track *t)
85218 if (!t->addr)
85219 return;
85220
85221- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
85222+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
85223 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
85224 #ifdef CONFIG_STACKTRACE
85225 {
85226@@ -2661,7 +2661,7 @@ static int slub_min_objects;
85227 * Merge control. If this is set then no merging of slab caches will occur.
85228 * (Could be removed. This was introduced to pacify the merge skeptics.)
85229 */
85230-static int slub_nomerge;
85231+static int slub_nomerge = 1;
85232
85233 /*
85234 * Calculate the order of allocation given an slab object size.
85235@@ -3283,6 +3283,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
85236 EXPORT_SYMBOL(__kmalloc_node);
85237 #endif
85238
85239+bool is_usercopy_object(const void *ptr)
85240+{
85241+ struct page *page;
85242+ struct kmem_cache *s;
85243+
85244+ if (ZERO_OR_NULL_PTR(ptr))
85245+ return false;
85246+
85247+ if (!slab_is_available())
85248+ return false;
85249+
85250+ if (!virt_addr_valid(ptr))
85251+ return false;
85252+
85253+ page = virt_to_head_page(ptr);
85254+
85255+ if (!PageSlab(page))
85256+ return false;
85257+
85258+ s = page->slab_cache;
85259+ return s->flags & SLAB_USERCOPY;
85260+}
85261+
85262+#ifdef CONFIG_PAX_USERCOPY
85263+const char *check_heap_object(const void *ptr, unsigned long n)
85264+{
85265+ struct page *page;
85266+ struct kmem_cache *s;
85267+ unsigned long offset;
85268+
85269+ if (ZERO_OR_NULL_PTR(ptr))
85270+ return "<null>";
85271+
85272+ if (!virt_addr_valid(ptr))
85273+ return NULL;
85274+
85275+ page = virt_to_head_page(ptr);
85276+
85277+ if (!PageSlab(page))
85278+ return NULL;
85279+
85280+ s = page->slab_cache;
85281+ if (!(s->flags & SLAB_USERCOPY))
85282+ return s->name;
85283+
85284+ offset = (ptr - page_address(page)) % s->size;
85285+ if (offset <= s->object_size && n <= s->object_size - offset)
85286+ return NULL;
85287+
85288+ return s->name;
85289+}
85290+#endif
85291+
85292 size_t ksize(const void *object)
85293 {
85294 struct page *page;
85295@@ -3347,6 +3400,7 @@ void kfree(const void *x)
85296 if (unlikely(ZERO_OR_NULL_PTR(x)))
85297 return;
85298
85299+ VM_BUG_ON(!virt_addr_valid(x));
85300 page = virt_to_head_page(x);
85301 if (unlikely(!PageSlab(page))) {
85302 BUG_ON(!PageCompound(page));
85303@@ -3652,7 +3706,7 @@ static int slab_unmergeable(struct kmem_cache *s)
85304 /*
85305 * We may have set a slab to be unmergeable during bootstrap.
85306 */
85307- if (s->refcount < 0)
85308+ if (atomic_read(&s->refcount) < 0)
85309 return 1;
85310
85311 return 0;
85312@@ -3710,7 +3764,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
85313
85314 s = find_mergeable(memcg, size, align, flags, name, ctor);
85315 if (s) {
85316- s->refcount++;
85317+ atomic_inc(&s->refcount);
85318 /*
85319 * Adjust the object sizes so that we clear
85320 * the complete object on kzalloc.
85321@@ -3719,7 +3773,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
85322 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
85323
85324 if (sysfs_slab_alias(s, name)) {
85325- s->refcount--;
85326+ atomic_dec(&s->refcount);
85327 s = NULL;
85328 }
85329 }
85330@@ -3781,7 +3835,7 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
85331 return NOTIFY_OK;
85332 }
85333
85334-static struct notifier_block __cpuinitdata slab_notifier = {
85335+static struct notifier_block slab_notifier = {
85336 .notifier_call = slab_cpuup_callback
85337 };
85338
85339@@ -3839,7 +3893,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
85340 }
85341 #endif
85342
85343-#ifdef CONFIG_SYSFS
85344+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
85345 static int count_inuse(struct page *page)
85346 {
85347 return page->inuse;
85348@@ -4226,12 +4280,12 @@ static void resiliency_test(void)
85349 validate_slab_cache(kmalloc_caches[9]);
85350 }
85351 #else
85352-#ifdef CONFIG_SYSFS
85353+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
85354 static void resiliency_test(void) {};
85355 #endif
85356 #endif
85357
85358-#ifdef CONFIG_SYSFS
85359+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
85360 enum slab_stat_type {
85361 SL_ALL, /* All slabs */
85362 SL_PARTIAL, /* Only partially allocated slabs */
85363@@ -4475,7 +4529,7 @@ SLAB_ATTR_RO(ctor);
85364
85365 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
85366 {
85367- return sprintf(buf, "%d\n", s->refcount - 1);
85368+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
85369 }
85370 SLAB_ATTR_RO(aliases);
85371
85372@@ -4563,6 +4617,14 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
85373 SLAB_ATTR_RO(cache_dma);
85374 #endif
85375
85376+#ifdef CONFIG_PAX_USERCOPY_SLABS
85377+static ssize_t usercopy_show(struct kmem_cache *s, char *buf)
85378+{
85379+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_USERCOPY));
85380+}
85381+SLAB_ATTR_RO(usercopy);
85382+#endif
85383+
85384 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
85385 {
85386 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
85387@@ -4897,6 +4959,9 @@ static struct attribute *slab_attrs[] = {
85388 #ifdef CONFIG_ZONE_DMA
85389 &cache_dma_attr.attr,
85390 #endif
85391+#ifdef CONFIG_PAX_USERCOPY_SLABS
85392+ &usercopy_attr.attr,
85393+#endif
85394 #ifdef CONFIG_NUMA
85395 &remote_node_defrag_ratio_attr.attr,
85396 #endif
85397@@ -5128,6 +5193,7 @@ static char *create_unique_id(struct kmem_cache *s)
85398 return name;
85399 }
85400
85401+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
85402 static int sysfs_slab_add(struct kmem_cache *s)
85403 {
85404 int err;
85405@@ -5151,7 +5217,7 @@ static int sysfs_slab_add(struct kmem_cache *s)
85406 }
85407
85408 s->kobj.kset = slab_kset;
85409- err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
85410+ err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
85411 if (err) {
85412 kobject_put(&s->kobj);
85413 return err;
85414@@ -5185,6 +5251,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
85415 kobject_del(&s->kobj);
85416 kobject_put(&s->kobj);
85417 }
85418+#endif
85419
85420 /*
85421 * Need to buffer aliases during bootup until sysfs becomes
85422@@ -5198,6 +5265,7 @@ struct saved_alias {
85423
85424 static struct saved_alias *alias_list;
85425
85426+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
85427 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
85428 {
85429 struct saved_alias *al;
85430@@ -5220,6 +5288,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
85431 alias_list = al;
85432 return 0;
85433 }
85434+#endif
85435
85436 static int __init slab_sysfs_init(void)
85437 {
85438diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
85439index 27eeab3..7c3f7f2 100644
85440--- a/mm/sparse-vmemmap.c
85441+++ b/mm/sparse-vmemmap.c
85442@@ -130,7 +130,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
85443 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
85444 if (!p)
85445 return NULL;
85446- pud_populate(&init_mm, pud, p);
85447+ pud_populate_kernel(&init_mm, pud, p);
85448 }
85449 return pud;
85450 }
85451@@ -142,7 +142,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
85452 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
85453 if (!p)
85454 return NULL;
85455- pgd_populate(&init_mm, pgd, p);
85456+ pgd_populate_kernel(&init_mm, pgd, p);
85457 }
85458 return pgd;
85459 }
85460diff --git a/mm/sparse.c b/mm/sparse.c
85461index 1c91f0d3..485470a 100644
85462--- a/mm/sparse.c
85463+++ b/mm/sparse.c
85464@@ -761,7 +761,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
85465
85466 for (i = 0; i < PAGES_PER_SECTION; i++) {
85467 if (PageHWPoison(&memmap[i])) {
85468- atomic_long_sub(1, &num_poisoned_pages);
85469+ atomic_long_sub_unchecked(1, &num_poisoned_pages);
85470 ClearPageHWPoison(&memmap[i]);
85471 }
85472 }
85473diff --git a/mm/swap.c b/mm/swap.c
85474index dfd7d71..ccdf688 100644
85475--- a/mm/swap.c
85476+++ b/mm/swap.c
85477@@ -31,6 +31,7 @@
85478 #include <linux/memcontrol.h>
85479 #include <linux/gfp.h>
85480 #include <linux/uio.h>
85481+#include <linux/hugetlb.h>
85482
85483 #include "internal.h"
85484
85485@@ -73,6 +74,8 @@ static void __put_compound_page(struct page *page)
85486
85487 __page_cache_release(page);
85488 dtor = get_compound_page_dtor(page);
85489+ if (!PageHuge(page))
85490+ BUG_ON(dtor != free_compound_page);
85491 (*dtor)(page);
85492 }
85493
85494diff --git a/mm/swapfile.c b/mm/swapfile.c
85495index 746af55b..7ac94ae 100644
85496--- a/mm/swapfile.c
85497+++ b/mm/swapfile.c
85498@@ -66,7 +66,7 @@ static DEFINE_MUTEX(swapon_mutex);
85499
85500 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
85501 /* Activity counter to indicate that a swapon or swapoff has occurred */
85502-static atomic_t proc_poll_event = ATOMIC_INIT(0);
85503+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
85504
85505 static inline unsigned char swap_count(unsigned char ent)
85506 {
85507@@ -1684,7 +1684,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
85508 }
85509 filp_close(swap_file, NULL);
85510 err = 0;
85511- atomic_inc(&proc_poll_event);
85512+ atomic_inc_unchecked(&proc_poll_event);
85513 wake_up_interruptible(&proc_poll_wait);
85514
85515 out_dput:
85516@@ -1701,8 +1701,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
85517
85518 poll_wait(file, &proc_poll_wait, wait);
85519
85520- if (seq->poll_event != atomic_read(&proc_poll_event)) {
85521- seq->poll_event = atomic_read(&proc_poll_event);
85522+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
85523+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
85524 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
85525 }
85526
85527@@ -1800,7 +1800,7 @@ static int swaps_open(struct inode *inode, struct file *file)
85528 return ret;
85529
85530 seq = file->private_data;
85531- seq->poll_event = atomic_read(&proc_poll_event);
85532+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
85533 return 0;
85534 }
85535
85536@@ -2143,7 +2143,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
85537 (frontswap_map) ? "FS" : "");
85538
85539 mutex_unlock(&swapon_mutex);
85540- atomic_inc(&proc_poll_event);
85541+ atomic_inc_unchecked(&proc_poll_event);
85542 wake_up_interruptible(&proc_poll_wait);
85543
85544 if (S_ISREG(inode->i_mode))
85545diff --git a/mm/util.c b/mm/util.c
85546index ab1424d..7c5bd5a 100644
85547--- a/mm/util.c
85548+++ b/mm/util.c
85549@@ -294,6 +294,12 @@ done:
85550 void arch_pick_mmap_layout(struct mm_struct *mm)
85551 {
85552 mm->mmap_base = TASK_UNMAPPED_BASE;
85553+
85554+#ifdef CONFIG_PAX_RANDMMAP
85555+ if (mm->pax_flags & MF_PAX_RANDMMAP)
85556+ mm->mmap_base += mm->delta_mmap;
85557+#endif
85558+
85559 mm->get_unmapped_area = arch_get_unmapped_area;
85560 mm->unmap_area = arch_unmap_area;
85561 }
85562diff --git a/mm/vmalloc.c b/mm/vmalloc.c
85563index d365724..6cae7c2 100644
85564--- a/mm/vmalloc.c
85565+++ b/mm/vmalloc.c
85566@@ -59,8 +59,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
85567
85568 pte = pte_offset_kernel(pmd, addr);
85569 do {
85570- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
85571- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
85572+
85573+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
85574+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
85575+ BUG_ON(!pte_exec(*pte));
85576+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
85577+ continue;
85578+ }
85579+#endif
85580+
85581+ {
85582+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
85583+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
85584+ }
85585 } while (pte++, addr += PAGE_SIZE, addr != end);
85586 }
85587
85588@@ -120,16 +131,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
85589 pte = pte_alloc_kernel(pmd, addr);
85590 if (!pte)
85591 return -ENOMEM;
85592+
85593+ pax_open_kernel();
85594 do {
85595 struct page *page = pages[*nr];
85596
85597- if (WARN_ON(!pte_none(*pte)))
85598+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
85599+ if (pgprot_val(prot) & _PAGE_NX)
85600+#endif
85601+
85602+ if (!pte_none(*pte)) {
85603+ pax_close_kernel();
85604+ WARN_ON(1);
85605 return -EBUSY;
85606- if (WARN_ON(!page))
85607+ }
85608+ if (!page) {
85609+ pax_close_kernel();
85610+ WARN_ON(1);
85611 return -ENOMEM;
85612+ }
85613 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
85614 (*nr)++;
85615 } while (pte++, addr += PAGE_SIZE, addr != end);
85616+ pax_close_kernel();
85617 return 0;
85618 }
85619
85620@@ -139,7 +163,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
85621 pmd_t *pmd;
85622 unsigned long next;
85623
85624- pmd = pmd_alloc(&init_mm, pud, addr);
85625+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
85626 if (!pmd)
85627 return -ENOMEM;
85628 do {
85629@@ -156,7 +180,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
85630 pud_t *pud;
85631 unsigned long next;
85632
85633- pud = pud_alloc(&init_mm, pgd, addr);
85634+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
85635 if (!pud)
85636 return -ENOMEM;
85637 do {
85638@@ -216,6 +240,12 @@ int is_vmalloc_or_module_addr(const void *x)
85639 if (addr >= MODULES_VADDR && addr < MODULES_END)
85640 return 1;
85641 #endif
85642+
85643+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
85644+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
85645+ return 1;
85646+#endif
85647+
85648 return is_vmalloc_addr(x);
85649 }
85650
85651@@ -236,8 +266,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
85652
85653 if (!pgd_none(*pgd)) {
85654 pud_t *pud = pud_offset(pgd, addr);
85655+#ifdef CONFIG_X86
85656+ if (!pud_large(*pud))
85657+#endif
85658 if (!pud_none(*pud)) {
85659 pmd_t *pmd = pmd_offset(pud, addr);
85660+#ifdef CONFIG_X86
85661+ if (!pmd_large(*pmd))
85662+#endif
85663 if (!pmd_none(*pmd)) {
85664 pte_t *ptep, pte;
85665
85666@@ -339,7 +375,7 @@ static void purge_vmap_area_lazy(void);
85667 * Allocate a region of KVA of the specified size and alignment, within the
85668 * vstart and vend.
85669 */
85670-static struct vmap_area *alloc_vmap_area(unsigned long size,
85671+static __size_overflow(1) struct vmap_area *alloc_vmap_area(unsigned long size,
85672 unsigned long align,
85673 unsigned long vstart, unsigned long vend,
85674 int node, gfp_t gfp_mask)
85675@@ -1337,6 +1373,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
85676 struct vm_struct *area;
85677
85678 BUG_ON(in_interrupt());
85679+
85680+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
85681+ if (flags & VM_KERNEXEC) {
85682+ if (start != VMALLOC_START || end != VMALLOC_END)
85683+ return NULL;
85684+ start = (unsigned long)MODULES_EXEC_VADDR;
85685+ end = (unsigned long)MODULES_EXEC_END;
85686+ }
85687+#endif
85688+
85689 if (flags & VM_IOREMAP) {
85690 int bit = fls(size);
85691
85692@@ -1581,6 +1627,11 @@ void *vmap(struct page **pages, unsigned int count,
85693 if (count > totalram_pages)
85694 return NULL;
85695
85696+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
85697+ if (!(pgprot_val(prot) & _PAGE_NX))
85698+ flags |= VM_KERNEXEC;
85699+#endif
85700+
85701 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
85702 __builtin_return_address(0));
85703 if (!area)
85704@@ -1682,6 +1733,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
85705 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
85706 goto fail;
85707
85708+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
85709+ if (!(pgprot_val(prot) & _PAGE_NX))
85710+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
85711+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
85712+ else
85713+#endif
85714+
85715 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
85716 start, end, node, gfp_mask, caller);
85717 if (!area)
85718@@ -1858,10 +1916,9 @@ EXPORT_SYMBOL(vzalloc_node);
85719 * For tight control over page level allocator and protection flags
85720 * use __vmalloc() instead.
85721 */
85722-
85723 void *vmalloc_exec(unsigned long size)
85724 {
85725- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
85726+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
85727 NUMA_NO_NODE, __builtin_return_address(0));
85728 }
85729
85730@@ -2168,6 +2225,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
85731 unsigned long uaddr = vma->vm_start;
85732 unsigned long usize = vma->vm_end - vma->vm_start;
85733
85734+ BUG_ON(vma->vm_mirror);
85735+
85736 if ((PAGE_SIZE-1) & (unsigned long)addr)
85737 return -EINVAL;
85738
85739@@ -2629,7 +2688,11 @@ static int s_show(struct seq_file *m, void *p)
85740 v->addr, v->addr + v->size, v->size);
85741
85742 if (v->caller)
85743+#ifdef CONFIG_GRKERNSEC_HIDESYM
85744+ seq_printf(m, " %pK", v->caller);
85745+#else
85746 seq_printf(m, " %pS", v->caller);
85747+#endif
85748
85749 if (v->nr_pages)
85750 seq_printf(m, " pages=%d", v->nr_pages);
85751diff --git a/mm/vmstat.c b/mm/vmstat.c
85752index f42745e..62f8346 100644
85753--- a/mm/vmstat.c
85754+++ b/mm/vmstat.c
85755@@ -76,7 +76,7 @@ void vm_events_fold_cpu(int cpu)
85756 *
85757 * vm_stat contains the global counters
85758 */
85759-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
85760+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
85761 EXPORT_SYMBOL(vm_stat);
85762
85763 #ifdef CONFIG_SMP
85764@@ -452,7 +452,7 @@ void refresh_cpu_vm_stats(int cpu)
85765 v = p->vm_stat_diff[i];
85766 p->vm_stat_diff[i] = 0;
85767 local_irq_restore(flags);
85768- atomic_long_add(v, &zone->vm_stat[i]);
85769+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
85770 global_diff[i] += v;
85771 #ifdef CONFIG_NUMA
85772 /* 3 seconds idle till flush */
85773@@ -490,7 +490,7 @@ void refresh_cpu_vm_stats(int cpu)
85774
85775 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
85776 if (global_diff[i])
85777- atomic_long_add(global_diff[i], &vm_stat[i]);
85778+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
85779 }
85780
85781 /*
85782@@ -505,8 +505,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
85783 if (pset->vm_stat_diff[i]) {
85784 int v = pset->vm_stat_diff[i];
85785 pset->vm_stat_diff[i] = 0;
85786- atomic_long_add(v, &zone->vm_stat[i]);
85787- atomic_long_add(v, &vm_stat[i]);
85788+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
85789+ atomic_long_add_unchecked(v, &vm_stat[i]);
85790 }
85791 }
85792 #endif
85793@@ -1226,7 +1226,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
85794 return NOTIFY_OK;
85795 }
85796
85797-static struct notifier_block __cpuinitdata vmstat_notifier =
85798+static struct notifier_block vmstat_notifier =
85799 { &vmstat_cpuup_callback, NULL, 0 };
85800 #endif
85801
85802@@ -1241,10 +1241,20 @@ static int __init setup_vmstat(void)
85803 start_cpu_timer(cpu);
85804 #endif
85805 #ifdef CONFIG_PROC_FS
85806- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
85807- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
85808- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
85809- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
85810+ {
85811+ mode_t gr_mode = S_IRUGO;
85812+#ifdef CONFIG_GRKERNSEC_PROC_ADD
85813+ gr_mode = S_IRUSR;
85814+#endif
85815+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
85816+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
85817+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
85818+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
85819+#else
85820+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
85821+#endif
85822+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
85823+ }
85824 #endif
85825 return 0;
85826 }
85827diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
85828index 9424f37..6aabf19 100644
85829--- a/net/8021q/vlan.c
85830+++ b/net/8021q/vlan.c
85831@@ -469,7 +469,7 @@ out:
85832 return NOTIFY_DONE;
85833 }
85834
85835-static struct notifier_block vlan_notifier_block __read_mostly = {
85836+static struct notifier_block vlan_notifier_block = {
85837 .notifier_call = vlan_device_event,
85838 };
85839
85840@@ -544,8 +544,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
85841 err = -EPERM;
85842 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
85843 break;
85844- if ((args.u.name_type >= 0) &&
85845- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
85846+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
85847 struct vlan_net *vn;
85848
85849 vn = net_generic(net, vlan_net_id);
85850diff --git a/net/9p/mod.c b/net/9p/mod.c
85851index 6ab36ae..6f1841b 100644
85852--- a/net/9p/mod.c
85853+++ b/net/9p/mod.c
85854@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
85855 void v9fs_register_trans(struct p9_trans_module *m)
85856 {
85857 spin_lock(&v9fs_trans_lock);
85858- list_add_tail(&m->list, &v9fs_trans_list);
85859+ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
85860 spin_unlock(&v9fs_trans_lock);
85861 }
85862 EXPORT_SYMBOL(v9fs_register_trans);
85863@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
85864 void v9fs_unregister_trans(struct p9_trans_module *m)
85865 {
85866 spin_lock(&v9fs_trans_lock);
85867- list_del_init(&m->list);
85868+ pax_list_del_init((struct list_head *)&m->list);
85869 spin_unlock(&v9fs_trans_lock);
85870 }
85871 EXPORT_SYMBOL(v9fs_unregister_trans);
85872diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
85873index 02efb25..41541a9 100644
85874--- a/net/9p/trans_fd.c
85875+++ b/net/9p/trans_fd.c
85876@@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
85877 oldfs = get_fs();
85878 set_fs(get_ds());
85879 /* The cast to a user pointer is valid due to the set_fs() */
85880- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
85881+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
85882 set_fs(oldfs);
85883
85884 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
85885diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
85886index 876fbe8..8bbea9f 100644
85887--- a/net/atm/atm_misc.c
85888+++ b/net/atm/atm_misc.c
85889@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
85890 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
85891 return 1;
85892 atm_return(vcc, truesize);
85893- atomic_inc(&vcc->stats->rx_drop);
85894+ atomic_inc_unchecked(&vcc->stats->rx_drop);
85895 return 0;
85896 }
85897 EXPORT_SYMBOL(atm_charge);
85898@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
85899 }
85900 }
85901 atm_return(vcc, guess);
85902- atomic_inc(&vcc->stats->rx_drop);
85903+ atomic_inc_unchecked(&vcc->stats->rx_drop);
85904 return NULL;
85905 }
85906 EXPORT_SYMBOL(atm_alloc_charge);
85907@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
85908
85909 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
85910 {
85911-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
85912+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
85913 __SONET_ITEMS
85914 #undef __HANDLE_ITEM
85915 }
85916@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
85917
85918 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
85919 {
85920-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
85921+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
85922 __SONET_ITEMS
85923 #undef __HANDLE_ITEM
85924 }
85925diff --git a/net/atm/lec.h b/net/atm/lec.h
85926index 4149db1..f2ab682 100644
85927--- a/net/atm/lec.h
85928+++ b/net/atm/lec.h
85929@@ -48,7 +48,7 @@ struct lane2_ops {
85930 const u8 *tlvs, u32 sizeoftlvs);
85931 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
85932 const u8 *tlvs, u32 sizeoftlvs);
85933-};
85934+} __no_const;
85935
85936 /*
85937 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
85938diff --git a/net/atm/proc.c b/net/atm/proc.c
85939index bbb6461..cf04016 100644
85940--- a/net/atm/proc.c
85941+++ b/net/atm/proc.c
85942@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
85943 const struct k_atm_aal_stats *stats)
85944 {
85945 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
85946- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
85947- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
85948- atomic_read(&stats->rx_drop));
85949+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
85950+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
85951+ atomic_read_unchecked(&stats->rx_drop));
85952 }
85953
85954 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
85955diff --git a/net/atm/resources.c b/net/atm/resources.c
85956index 0447d5d..3cf4728 100644
85957--- a/net/atm/resources.c
85958+++ b/net/atm/resources.c
85959@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
85960 static void copy_aal_stats(struct k_atm_aal_stats *from,
85961 struct atm_aal_stats *to)
85962 {
85963-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
85964+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
85965 __AAL_STAT_ITEMS
85966 #undef __HANDLE_ITEM
85967 }
85968@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
85969 static void subtract_aal_stats(struct k_atm_aal_stats *from,
85970 struct atm_aal_stats *to)
85971 {
85972-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
85973+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
85974 __AAL_STAT_ITEMS
85975 #undef __HANDLE_ITEM
85976 }
85977diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
85978index d5744b7..506bae3 100644
85979--- a/net/ax25/sysctl_net_ax25.c
85980+++ b/net/ax25/sysctl_net_ax25.c
85981@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
85982 {
85983 char path[sizeof("net/ax25/") + IFNAMSIZ];
85984 int k;
85985- struct ctl_table *table;
85986+ ctl_table_no_const *table;
85987
85988 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
85989 if (!table)
85990diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
85991index f680ee1..97e3542 100644
85992--- a/net/batman-adv/bat_iv_ogm.c
85993+++ b/net/batman-adv/bat_iv_ogm.c
85994@@ -79,7 +79,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
85995
85996 /* randomize initial seqno to avoid collision */
85997 get_random_bytes(&random_seqno, sizeof(random_seqno));
85998- atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
85999+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
86000
86001 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
86002 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
86003@@ -627,9 +627,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
86004 batadv_ogm_packet = (struct batadv_ogm_packet *)(*ogm_buff);
86005
86006 /* change sequence number to network order */
86007- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
86008+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
86009 batadv_ogm_packet->seqno = htonl(seqno);
86010- atomic_inc(&hard_iface->bat_iv.ogm_seqno);
86011+ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
86012
86013 batadv_ogm_packet->ttvn = atomic_read(&bat_priv->tt.vn);
86014 batadv_ogm_packet->tt_crc = htons(bat_priv->tt.local_crc);
86015@@ -1037,7 +1037,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
86016 return;
86017
86018 /* could be changed by schedule_own_packet() */
86019- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
86020+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
86021
86022 if (batadv_ogm_packet->flags & BATADV_DIRECTLINK)
86023 has_directlink_flag = 1;
86024diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
86025index 522243a..b48c0ef 100644
86026--- a/net/batman-adv/hard-interface.c
86027+++ b/net/batman-adv/hard-interface.c
86028@@ -401,7 +401,7 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
86029 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
86030 dev_add_pack(&hard_iface->batman_adv_ptype);
86031
86032- atomic_set(&hard_iface->frag_seqno, 1);
86033+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
86034 batadv_info(hard_iface->soft_iface, "Adding interface: %s\n",
86035 hard_iface->net_dev->name);
86036
86037@@ -550,7 +550,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
86038 /* This can't be called via a bat_priv callback because
86039 * we have no bat_priv yet.
86040 */
86041- atomic_set(&hard_iface->bat_iv.ogm_seqno, 1);
86042+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, 1);
86043 hard_iface->bat_iv.ogm_buff = NULL;
86044
86045 return hard_iface;
86046diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
86047index 819dfb0..9a672d1 100644
86048--- a/net/batman-adv/soft-interface.c
86049+++ b/net/batman-adv/soft-interface.c
86050@@ -253,7 +253,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
86051 primary_if->net_dev->dev_addr, ETH_ALEN);
86052
86053 /* set broadcast sequence number */
86054- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
86055+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
86056 bcast_packet->seqno = htonl(seqno);
86057
86058 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
86059@@ -472,7 +472,7 @@ static int batadv_softif_init_late(struct net_device *dev)
86060 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
86061
86062 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
86063- atomic_set(&bat_priv->bcast_seqno, 1);
86064+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
86065 atomic_set(&bat_priv->tt.vn, 0);
86066 atomic_set(&bat_priv->tt.local_changes, 0);
86067 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
86068diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
86069index aba8364..50fcbb8 100644
86070--- a/net/batman-adv/types.h
86071+++ b/net/batman-adv/types.h
86072@@ -51,7 +51,7 @@
86073 struct batadv_hard_iface_bat_iv {
86074 unsigned char *ogm_buff;
86075 int ogm_buff_len;
86076- atomic_t ogm_seqno;
86077+ atomic_unchecked_t ogm_seqno;
86078 };
86079
86080 /**
86081@@ -75,7 +75,7 @@ struct batadv_hard_iface {
86082 int16_t if_num;
86083 char if_status;
86084 struct net_device *net_dev;
86085- atomic_t frag_seqno;
86086+ atomic_unchecked_t frag_seqno;
86087 struct kobject *hardif_obj;
86088 atomic_t refcount;
86089 struct packet_type batman_adv_ptype;
86090@@ -558,7 +558,7 @@ struct batadv_priv {
86091 #ifdef CONFIG_BATMAN_ADV_DEBUG
86092 atomic_t log_level;
86093 #endif
86094- atomic_t bcast_seqno;
86095+ atomic_unchecked_t bcast_seqno;
86096 atomic_t bcast_queue_left;
86097 atomic_t batman_queue_left;
86098 char num_ifaces;
86099diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
86100index 0bb3b59..ffcbf2f 100644
86101--- a/net/batman-adv/unicast.c
86102+++ b/net/batman-adv/unicast.c
86103@@ -270,7 +270,7 @@ int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
86104 frag1->flags = BATADV_UNI_FRAG_HEAD | large_tail;
86105 frag2->flags = large_tail;
86106
86107- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
86108+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
86109 frag1->seqno = htons(seqno - 1);
86110 frag2->seqno = htons(seqno);
86111
86112diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
86113index ace5e55..a65a1c0 100644
86114--- a/net/bluetooth/hci_core.c
86115+++ b/net/bluetooth/hci_core.c
86116@@ -2211,16 +2211,16 @@ int hci_register_dev(struct hci_dev *hdev)
86117 list_add(&hdev->list, &hci_dev_list);
86118 write_unlock(&hci_dev_list_lock);
86119
86120- hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
86121- WQ_MEM_RECLAIM, 1);
86122+ hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
86123+ WQ_MEM_RECLAIM, 1, hdev->name);
86124 if (!hdev->workqueue) {
86125 error = -ENOMEM;
86126 goto err;
86127 }
86128
86129- hdev->req_workqueue = alloc_workqueue(hdev->name,
86130+ hdev->req_workqueue = alloc_workqueue("%s",
86131 WQ_HIGHPRI | WQ_UNBOUND |
86132- WQ_MEM_RECLAIM, 1);
86133+ WQ_MEM_RECLAIM, 1, hdev->name);
86134 if (!hdev->req_workqueue) {
86135 destroy_workqueue(hdev->workqueue);
86136 error = -ENOMEM;
86137diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
86138index 9bd7d95..6c4884f 100644
86139--- a/net/bluetooth/hci_sock.c
86140+++ b/net/bluetooth/hci_sock.c
86141@@ -934,7 +934,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
86142 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
86143 }
86144
86145- len = min_t(unsigned int, len, sizeof(uf));
86146+ len = min((size_t)len, sizeof(uf));
86147 if (copy_from_user(&uf, optval, len)) {
86148 err = -EFAULT;
86149 break;
86150diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
86151index 68843a2..30e9342 100644
86152--- a/net/bluetooth/l2cap_core.c
86153+++ b/net/bluetooth/l2cap_core.c
86154@@ -3507,8 +3507,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
86155 break;
86156
86157 case L2CAP_CONF_RFC:
86158- if (olen == sizeof(rfc))
86159- memcpy(&rfc, (void *)val, olen);
86160+ if (olen != sizeof(rfc))
86161+ break;
86162+
86163+ memcpy(&rfc, (void *)val, olen);
86164
86165 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
86166 rfc.mode != chan->mode)
86167diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
86168index 36fed40..be2eeb2 100644
86169--- a/net/bluetooth/l2cap_sock.c
86170+++ b/net/bluetooth/l2cap_sock.c
86171@@ -485,7 +485,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
86172 struct sock *sk = sock->sk;
86173 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
86174 struct l2cap_options opts;
86175- int len, err = 0;
86176+ int err = 0;
86177+ size_t len = optlen;
86178 u32 opt;
86179
86180 BT_DBG("sk %p", sk);
86181@@ -507,7 +508,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
86182 opts.max_tx = chan->max_tx;
86183 opts.txwin_size = chan->tx_win;
86184
86185- len = min_t(unsigned int, sizeof(opts), optlen);
86186+ len = min(sizeof(opts), len);
86187 if (copy_from_user((char *) &opts, optval, len)) {
86188 err = -EFAULT;
86189 break;
86190@@ -587,7 +588,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
86191 struct bt_security sec;
86192 struct bt_power pwr;
86193 struct l2cap_conn *conn;
86194- int len, err = 0;
86195+ int err = 0;
86196+ size_t len = optlen;
86197 u32 opt;
86198
86199 BT_DBG("sk %p", sk);
86200@@ -610,7 +612,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
86201
86202 sec.level = BT_SECURITY_LOW;
86203
86204- len = min_t(unsigned int, sizeof(sec), optlen);
86205+ len = min(sizeof(sec), len);
86206 if (copy_from_user((char *) &sec, optval, len)) {
86207 err = -EFAULT;
86208 break;
86209@@ -707,7 +709,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
86210
86211 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
86212
86213- len = min_t(unsigned int, sizeof(pwr), optlen);
86214+ len = min(sizeof(pwr), len);
86215 if (copy_from_user((char *) &pwr, optval, len)) {
86216 err = -EFAULT;
86217 break;
86218diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
86219index 30b3721..c1bd0a0 100644
86220--- a/net/bluetooth/rfcomm/sock.c
86221+++ b/net/bluetooth/rfcomm/sock.c
86222@@ -666,7 +666,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
86223 struct sock *sk = sock->sk;
86224 struct bt_security sec;
86225 int err = 0;
86226- size_t len;
86227+ size_t len = optlen;
86228 u32 opt;
86229
86230 BT_DBG("sk %p", sk);
86231@@ -688,7 +688,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
86232
86233 sec.level = BT_SECURITY_LOW;
86234
86235- len = min_t(unsigned int, sizeof(sec), optlen);
86236+ len = min(sizeof(sec), len);
86237 if (copy_from_user((char *) &sec, optval, len)) {
86238 err = -EFAULT;
86239 break;
86240diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
86241index b6e44ad..5b0d514 100644
86242--- a/net/bluetooth/rfcomm/tty.c
86243+++ b/net/bluetooth/rfcomm/tty.c
86244@@ -309,7 +309,7 @@ static void rfcomm_dev_del(struct rfcomm_dev *dev)
86245 BUG_ON(test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags));
86246
86247 spin_lock_irqsave(&dev->port.lock, flags);
86248- if (dev->port.count > 0) {
86249+ if (atomic_read(&dev->port.count) > 0) {
86250 spin_unlock_irqrestore(&dev->port.lock, flags);
86251 return;
86252 }
86253@@ -659,10 +659,10 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
86254 return -ENODEV;
86255
86256 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
86257- dev->channel, dev->port.count);
86258+ dev->channel, atomic_read(&dev->port.count));
86259
86260 spin_lock_irqsave(&dev->port.lock, flags);
86261- if (++dev->port.count > 1) {
86262+ if (atomic_inc_return(&dev->port.count) > 1) {
86263 spin_unlock_irqrestore(&dev->port.lock, flags);
86264 return 0;
86265 }
86266@@ -727,10 +727,10 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
86267 return;
86268
86269 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
86270- dev->port.count);
86271+ atomic_read(&dev->port.count));
86272
86273 spin_lock_irqsave(&dev->port.lock, flags);
86274- if (!--dev->port.count) {
86275+ if (!atomic_dec_return(&dev->port.count)) {
86276 spin_unlock_irqrestore(&dev->port.lock, flags);
86277 if (dev->tty_dev->parent)
86278 device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST);
86279diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
86280index 3d110c4..4e1b2eb 100644
86281--- a/net/bridge/netfilter/ebtables.c
86282+++ b/net/bridge/netfilter/ebtables.c
86283@@ -1525,7 +1525,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
86284 tmp.valid_hooks = t->table->valid_hooks;
86285 }
86286 mutex_unlock(&ebt_mutex);
86287- if (copy_to_user(user, &tmp, *len) != 0){
86288+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
86289 BUGPRINT("c2u Didn't work\n");
86290 ret = -EFAULT;
86291 break;
86292@@ -2331,7 +2331,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
86293 goto out;
86294 tmp.valid_hooks = t->valid_hooks;
86295
86296- if (copy_to_user(user, &tmp, *len) != 0) {
86297+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
86298 ret = -EFAULT;
86299 break;
86300 }
86301@@ -2342,7 +2342,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
86302 tmp.entries_size = t->table->entries_size;
86303 tmp.valid_hooks = t->table->valid_hooks;
86304
86305- if (copy_to_user(user, &tmp, *len) != 0) {
86306+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
86307 ret = -EFAULT;
86308 break;
86309 }
86310diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
86311index 2bd4b58..0dc30a1 100644
86312--- a/net/caif/cfctrl.c
86313+++ b/net/caif/cfctrl.c
86314@@ -10,6 +10,7 @@
86315 #include <linux/spinlock.h>
86316 #include <linux/slab.h>
86317 #include <linux/pkt_sched.h>
86318+#include <linux/sched.h>
86319 #include <net/caif/caif_layer.h>
86320 #include <net/caif/cfpkt.h>
86321 #include <net/caif/cfctrl.h>
86322@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
86323 memset(&dev_info, 0, sizeof(dev_info));
86324 dev_info.id = 0xff;
86325 cfsrvl_init(&this->serv, 0, &dev_info, false);
86326- atomic_set(&this->req_seq_no, 1);
86327- atomic_set(&this->rsp_seq_no, 1);
86328+ atomic_set_unchecked(&this->req_seq_no, 1);
86329+ atomic_set_unchecked(&this->rsp_seq_no, 1);
86330 this->serv.layer.receive = cfctrl_recv;
86331 sprintf(this->serv.layer.name, "ctrl");
86332 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
86333@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
86334 struct cfctrl_request_info *req)
86335 {
86336 spin_lock_bh(&ctrl->info_list_lock);
86337- atomic_inc(&ctrl->req_seq_no);
86338- req->sequence_no = atomic_read(&ctrl->req_seq_no);
86339+ atomic_inc_unchecked(&ctrl->req_seq_no);
86340+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
86341 list_add_tail(&req->list, &ctrl->list);
86342 spin_unlock_bh(&ctrl->info_list_lock);
86343 }
86344@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
86345 if (p != first)
86346 pr_warn("Requests are not received in order\n");
86347
86348- atomic_set(&ctrl->rsp_seq_no,
86349+ atomic_set_unchecked(&ctrl->rsp_seq_no,
86350 p->sequence_no);
86351 list_del(&p->list);
86352 goto out;
86353diff --git a/net/can/af_can.c b/net/can/af_can.c
86354index c4e5085..aa9efdf 100644
86355--- a/net/can/af_can.c
86356+++ b/net/can/af_can.c
86357@@ -862,7 +862,7 @@ static const struct net_proto_family can_family_ops = {
86358 };
86359
86360 /* notifier block for netdevice event */
86361-static struct notifier_block can_netdev_notifier __read_mostly = {
86362+static struct notifier_block can_netdev_notifier = {
86363 .notifier_call = can_notifier,
86364 };
86365
86366diff --git a/net/can/gw.c b/net/can/gw.c
86367index 3ee690e..00d581b 100644
86368--- a/net/can/gw.c
86369+++ b/net/can/gw.c
86370@@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
86371 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
86372
86373 static HLIST_HEAD(cgw_list);
86374-static struct notifier_block notifier;
86375
86376 static struct kmem_cache *cgw_cache __read_mostly;
86377
86378@@ -927,6 +926,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
86379 return err;
86380 }
86381
86382+static struct notifier_block notifier = {
86383+ .notifier_call = cgw_notifier
86384+};
86385+
86386 static __init int cgw_module_init(void)
86387 {
86388 /* sanitize given module parameter */
86389@@ -942,7 +945,6 @@ static __init int cgw_module_init(void)
86390 return -ENOMEM;
86391
86392 /* set notifier */
86393- notifier.notifier_call = cgw_notifier;
86394 register_netdevice_notifier(&notifier);
86395
86396 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
86397diff --git a/net/compat.c b/net/compat.c
86398index f0a1ba6..0541331 100644
86399--- a/net/compat.c
86400+++ b/net/compat.c
86401@@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
86402 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
86403 __get_user(kmsg->msg_flags, &umsg->msg_flags))
86404 return -EFAULT;
86405- kmsg->msg_name = compat_ptr(tmp1);
86406- kmsg->msg_iov = compat_ptr(tmp2);
86407- kmsg->msg_control = compat_ptr(tmp3);
86408+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
86409+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
86410+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
86411 return 0;
86412 }
86413
86414@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
86415
86416 if (kern_msg->msg_namelen) {
86417 if (mode == VERIFY_READ) {
86418- int err = move_addr_to_kernel(kern_msg->msg_name,
86419+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
86420 kern_msg->msg_namelen,
86421 kern_address);
86422 if (err < 0)
86423@@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
86424 kern_msg->msg_name = NULL;
86425
86426 tot_len = iov_from_user_compat_to_kern(kern_iov,
86427- (struct compat_iovec __user *)kern_msg->msg_iov,
86428+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
86429 kern_msg->msg_iovlen);
86430 if (tot_len >= 0)
86431 kern_msg->msg_iov = kern_iov;
86432@@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
86433
86434 #define CMSG_COMPAT_FIRSTHDR(msg) \
86435 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
86436- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
86437+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
86438 (struct compat_cmsghdr __user *)NULL)
86439
86440 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
86441 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
86442 (ucmlen) <= (unsigned long) \
86443 ((mhdr)->msg_controllen - \
86444- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
86445+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
86446
86447 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
86448 struct compat_cmsghdr __user *cmsg, int cmsg_len)
86449 {
86450 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
86451- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
86452+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
86453 msg->msg_controllen)
86454 return NULL;
86455 return (struct compat_cmsghdr __user *)ptr;
86456@@ -219,7 +219,7 @@ Efault:
86457
86458 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
86459 {
86460- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
86461+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
86462 struct compat_cmsghdr cmhdr;
86463 struct compat_timeval ctv;
86464 struct compat_timespec cts[3];
86465@@ -275,7 +275,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
86466
86467 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
86468 {
86469- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
86470+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
86471 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
86472 int fdnum = scm->fp->count;
86473 struct file **fp = scm->fp->fp;
86474@@ -363,7 +363,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
86475 return -EFAULT;
86476 old_fs = get_fs();
86477 set_fs(KERNEL_DS);
86478- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
86479+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
86480 set_fs(old_fs);
86481
86482 return err;
86483@@ -424,7 +424,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
86484 len = sizeof(ktime);
86485 old_fs = get_fs();
86486 set_fs(KERNEL_DS);
86487- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
86488+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
86489 set_fs(old_fs);
86490
86491 if (!err) {
86492@@ -567,7 +567,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
86493 case MCAST_JOIN_GROUP:
86494 case MCAST_LEAVE_GROUP:
86495 {
86496- struct compat_group_req __user *gr32 = (void *)optval;
86497+ struct compat_group_req __user *gr32 = (void __user *)optval;
86498 struct group_req __user *kgr =
86499 compat_alloc_user_space(sizeof(struct group_req));
86500 u32 interface;
86501@@ -588,7 +588,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
86502 case MCAST_BLOCK_SOURCE:
86503 case MCAST_UNBLOCK_SOURCE:
86504 {
86505- struct compat_group_source_req __user *gsr32 = (void *)optval;
86506+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
86507 struct group_source_req __user *kgsr = compat_alloc_user_space(
86508 sizeof(struct group_source_req));
86509 u32 interface;
86510@@ -609,7 +609,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
86511 }
86512 case MCAST_MSFILTER:
86513 {
86514- struct compat_group_filter __user *gf32 = (void *)optval;
86515+ struct compat_group_filter __user *gf32 = (void __user *)optval;
86516 struct group_filter __user *kgf;
86517 u32 interface, fmode, numsrc;
86518
86519@@ -647,7 +647,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
86520 char __user *optval, int __user *optlen,
86521 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
86522 {
86523- struct compat_group_filter __user *gf32 = (void *)optval;
86524+ struct compat_group_filter __user *gf32 = (void __user *)optval;
86525 struct group_filter __user *kgf;
86526 int __user *koptlen;
86527 u32 interface, fmode, numsrc;
86528@@ -805,7 +805,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
86529
86530 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
86531 return -EINVAL;
86532- if (copy_from_user(a, args, nas[call]))
86533+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
86534 return -EFAULT;
86535 a0 = a[0];
86536 a1 = a[1];
86537diff --git a/net/core/datagram.c b/net/core/datagram.c
86538index b71423d..0360434 100644
86539--- a/net/core/datagram.c
86540+++ b/net/core/datagram.c
86541@@ -295,7 +295,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
86542 }
86543
86544 kfree_skb(skb);
86545- atomic_inc(&sk->sk_drops);
86546+ atomic_inc_unchecked(&sk->sk_drops);
86547 sk_mem_reclaim_partial(sk);
86548
86549 return err;
86550diff --git a/net/core/dev.c b/net/core/dev.c
86551index faebb39..a38fb42 100644
86552--- a/net/core/dev.c
86553+++ b/net/core/dev.c
86554@@ -1649,7 +1649,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
86555 {
86556 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
86557 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
86558- atomic_long_inc(&dev->rx_dropped);
86559+ atomic_long_inc_unchecked(&dev->rx_dropped);
86560 kfree_skb(skb);
86561 return NET_RX_DROP;
86562 }
86563@@ -1658,7 +1658,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
86564 skb_orphan(skb);
86565
86566 if (unlikely(!is_skb_forwardable(dev, skb))) {
86567- atomic_long_inc(&dev->rx_dropped);
86568+ atomic_long_inc_unchecked(&dev->rx_dropped);
86569 kfree_skb(skb);
86570 return NET_RX_DROP;
86571 }
86572@@ -2404,7 +2404,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
86573
86574 struct dev_gso_cb {
86575 void (*destructor)(struct sk_buff *skb);
86576-};
86577+} __no_const;
86578
86579 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
86580
86581@@ -3139,7 +3139,7 @@ enqueue:
86582
86583 local_irq_restore(flags);
86584
86585- atomic_long_inc(&skb->dev->rx_dropped);
86586+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
86587 kfree_skb(skb);
86588 return NET_RX_DROP;
86589 }
86590@@ -3211,7 +3211,7 @@ int netif_rx_ni(struct sk_buff *skb)
86591 }
86592 EXPORT_SYMBOL(netif_rx_ni);
86593
86594-static void net_tx_action(struct softirq_action *h)
86595+static void net_tx_action(void)
86596 {
86597 struct softnet_data *sd = &__get_cpu_var(softnet_data);
86598
86599@@ -3538,7 +3538,7 @@ ncls:
86600 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
86601 } else {
86602 drop:
86603- atomic_long_inc(&skb->dev->rx_dropped);
86604+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
86605 kfree_skb(skb);
86606 /* Jamal, now you will not able to escape explaining
86607 * me how you were going to use this. :-)
86608@@ -4146,7 +4146,7 @@ void netif_napi_del(struct napi_struct *napi)
86609 }
86610 EXPORT_SYMBOL(netif_napi_del);
86611
86612-static void net_rx_action(struct softirq_action *h)
86613+static void net_rx_action(void)
86614 {
86615 struct softnet_data *sd = &__get_cpu_var(softnet_data);
86616 unsigned long time_limit = jiffies + 2;
86617@@ -5583,7 +5583,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
86618 } else {
86619 netdev_stats_to_stats64(storage, &dev->stats);
86620 }
86621- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
86622+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
86623 return storage;
86624 }
86625 EXPORT_SYMBOL(dev_get_stats);
86626diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
86627index 5b7d0e1..cb960fc 100644
86628--- a/net/core/dev_ioctl.c
86629+++ b/net/core/dev_ioctl.c
86630@@ -365,9 +365,13 @@ void dev_load(struct net *net, const char *name)
86631 if (no_module && capable(CAP_NET_ADMIN))
86632 no_module = request_module("netdev-%s", name);
86633 if (no_module && capable(CAP_SYS_MODULE)) {
86634+#ifdef CONFIG_GRKERNSEC_MODHARDEN
86635+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
86636+#else
86637 if (!request_module("%s", name))
86638 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
86639 name);
86640+#endif
86641 }
86642 }
86643 EXPORT_SYMBOL(dev_load);
86644diff --git a/net/core/ethtool.c b/net/core/ethtool.c
86645index ce91766..3b71cdb 100644
86646--- a/net/core/ethtool.c
86647+++ b/net/core/ethtool.c
86648@@ -1319,10 +1319,19 @@ static int ethtool_get_dump_data(struct net_device *dev,
86649 if (ret)
86650 return ret;
86651
86652- len = (tmp.len > dump.len) ? dump.len : tmp.len;
86653+ len = min(tmp.len, dump.len);
86654 if (!len)
86655 return -EFAULT;
86656
86657+ /* Don't ever let the driver think there's more space available
86658+ * than it requested with .get_dump_flag().
86659+ */
86660+ dump.len = len;
86661+
86662+ /* Always allocate enough space to hold the whole thing so that the
86663+ * driver does not need to check the length and bother with partial
86664+ * dumping.
86665+ */
86666 data = vzalloc(tmp.len);
86667 if (!data)
86668 return -ENOMEM;
86669@@ -1330,6 +1339,16 @@ static int ethtool_get_dump_data(struct net_device *dev,
86670 if (ret)
86671 goto out;
86672
86673+ /* There are two sane possibilities:
86674+ * 1. The driver's .get_dump_data() does not touch dump.len.
86675+ * 2. Or it may set dump.len to how much it really writes, which
86676+ * should be tmp.len (or len if it can do a partial dump).
86677+ * In any case respond to userspace with the actual length of data
86678+ * it's receiving.
86679+ */
86680+ WARN_ON(dump.len != len && dump.len != tmp.len);
86681+ dump.len = len;
86682+
86683 if (copy_to_user(useraddr, &dump, sizeof(dump))) {
86684 ret = -EFAULT;
86685 goto out;
86686diff --git a/net/core/flow.c b/net/core/flow.c
86687index 7102f16..146b4bd 100644
86688--- a/net/core/flow.c
86689+++ b/net/core/flow.c
86690@@ -61,7 +61,7 @@ struct flow_cache {
86691 struct timer_list rnd_timer;
86692 };
86693
86694-atomic_t flow_cache_genid = ATOMIC_INIT(0);
86695+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
86696 EXPORT_SYMBOL(flow_cache_genid);
86697 static struct flow_cache flow_cache_global;
86698 static struct kmem_cache *flow_cachep __read_mostly;
86699@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
86700
86701 static int flow_entry_valid(struct flow_cache_entry *fle)
86702 {
86703- if (atomic_read(&flow_cache_genid) != fle->genid)
86704+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
86705 return 0;
86706 if (fle->object && !fle->object->ops->check(fle->object))
86707 return 0;
86708@@ -258,7 +258,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
86709 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
86710 fcp->hash_count++;
86711 }
86712- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
86713+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
86714 flo = fle->object;
86715 if (!flo)
86716 goto ret_object;
86717@@ -279,7 +279,7 @@ nocache:
86718 }
86719 flo = resolver(net, key, family, dir, flo, ctx);
86720 if (fle) {
86721- fle->genid = atomic_read(&flow_cache_genid);
86722+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
86723 if (!IS_ERR(flo))
86724 fle->object = flo;
86725 else
86726diff --git a/net/core/iovec.c b/net/core/iovec.c
86727index de178e4..1dabd8b 100644
86728--- a/net/core/iovec.c
86729+++ b/net/core/iovec.c
86730@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
86731 if (m->msg_namelen) {
86732 if (mode == VERIFY_READ) {
86733 void __user *namep;
86734- namep = (void __user __force *) m->msg_name;
86735+ namep = (void __force_user *) m->msg_name;
86736 err = move_addr_to_kernel(namep, m->msg_namelen,
86737 address);
86738 if (err < 0)
86739@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
86740 }
86741
86742 size = m->msg_iovlen * sizeof(struct iovec);
86743- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
86744+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
86745 return -EFAULT;
86746
86747 m->msg_iov = iov;
86748diff --git a/net/core/neighbour.c b/net/core/neighbour.c
86749index 5c56b21..8766fbf 100644
86750--- a/net/core/neighbour.c
86751+++ b/net/core/neighbour.c
86752@@ -2769,7 +2769,7 @@ static int proc_unres_qlen(ctl_table *ctl, int write, void __user *buffer,
86753 size_t *lenp, loff_t *ppos)
86754 {
86755 int size, ret;
86756- ctl_table tmp = *ctl;
86757+ ctl_table_no_const tmp = *ctl;
86758
86759 tmp.extra1 = &zero;
86760 tmp.extra2 = &unres_qlen_max;
86761diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
86762index 569d355..79cf2d0 100644
86763--- a/net/core/net-procfs.c
86764+++ b/net/core/net-procfs.c
86765@@ -271,8 +271,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
86766 else
86767 seq_printf(seq, "%04x", ntohs(pt->type));
86768
86769+#ifdef CONFIG_GRKERNSEC_HIDESYM
86770+ seq_printf(seq, " %-8s %pf\n",
86771+ pt->dev ? pt->dev->name : "", NULL);
86772+#else
86773 seq_printf(seq, " %-8s %pf\n",
86774 pt->dev ? pt->dev->name : "", pt->func);
86775+#endif
86776 }
86777
86778 return 0;
86779diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
86780index 981fed3..536af34 100644
86781--- a/net/core/net-sysfs.c
86782+++ b/net/core/net-sysfs.c
86783@@ -1311,7 +1311,7 @@ void netdev_class_remove_file(struct class_attribute *class_attr)
86784 }
86785 EXPORT_SYMBOL(netdev_class_remove_file);
86786
86787-int netdev_kobject_init(void)
86788+int __init netdev_kobject_init(void)
86789 {
86790 kobj_ns_type_register(&net_ns_type_operations);
86791 return class_register(&net_class);
86792diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
86793index f9765203..9feaef8 100644
86794--- a/net/core/net_namespace.c
86795+++ b/net/core/net_namespace.c
86796@@ -443,7 +443,7 @@ static int __register_pernet_operations(struct list_head *list,
86797 int error;
86798 LIST_HEAD(net_exit_list);
86799
86800- list_add_tail(&ops->list, list);
86801+ pax_list_add_tail((struct list_head *)&ops->list, list);
86802 if (ops->init || (ops->id && ops->size)) {
86803 for_each_net(net) {
86804 error = ops_init(ops, net);
86805@@ -456,7 +456,7 @@ static int __register_pernet_operations(struct list_head *list,
86806
86807 out_undo:
86808 /* If I have an error cleanup all namespaces I initialized */
86809- list_del(&ops->list);
86810+ pax_list_del((struct list_head *)&ops->list);
86811 ops_exit_list(ops, &net_exit_list);
86812 ops_free_list(ops, &net_exit_list);
86813 return error;
86814@@ -467,7 +467,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
86815 struct net *net;
86816 LIST_HEAD(net_exit_list);
86817
86818- list_del(&ops->list);
86819+ pax_list_del((struct list_head *)&ops->list);
86820 for_each_net(net)
86821 list_add_tail(&net->exit_list, &net_exit_list);
86822 ops_exit_list(ops, &net_exit_list);
86823@@ -601,7 +601,7 @@ int register_pernet_device(struct pernet_operations *ops)
86824 mutex_lock(&net_mutex);
86825 error = register_pernet_operations(&pernet_list, ops);
86826 if (!error && (first_device == &pernet_list))
86827- first_device = &ops->list;
86828+ first_device = (struct list_head *)&ops->list;
86829 mutex_unlock(&net_mutex);
86830 return error;
86831 }
86832diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
86833index a08bd2b..4e8f43c 100644
86834--- a/net/core/rtnetlink.c
86835+++ b/net/core/rtnetlink.c
86836@@ -58,7 +58,7 @@ struct rtnl_link {
86837 rtnl_doit_func doit;
86838 rtnl_dumpit_func dumpit;
86839 rtnl_calcit_func calcit;
86840-};
86841+} __no_const;
86842
86843 static DEFINE_MUTEX(rtnl_mutex);
86844
86845@@ -299,10 +299,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
86846 if (rtnl_link_ops_get(ops->kind))
86847 return -EEXIST;
86848
86849- if (!ops->dellink)
86850- ops->dellink = unregister_netdevice_queue;
86851+ if (!ops->dellink) {
86852+ pax_open_kernel();
86853+ *(void **)&ops->dellink = unregister_netdevice_queue;
86854+ pax_close_kernel();
86855+ }
86856
86857- list_add_tail(&ops->list, &link_ops);
86858+ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
86859 return 0;
86860 }
86861 EXPORT_SYMBOL_GPL(__rtnl_link_register);
86862@@ -349,7 +352,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
86863 for_each_net(net) {
86864 __rtnl_kill_links(net, ops);
86865 }
86866- list_del(&ops->list);
86867+ pax_list_del((struct list_head *)&ops->list);
86868 }
86869 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
86870
86871diff --git a/net/core/scm.c b/net/core/scm.c
86872index 03795d0..eaf7368 100644
86873--- a/net/core/scm.c
86874+++ b/net/core/scm.c
86875@@ -210,7 +210,7 @@ EXPORT_SYMBOL(__scm_send);
86876 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
86877 {
86878 struct cmsghdr __user *cm
86879- = (__force struct cmsghdr __user *)msg->msg_control;
86880+ = (struct cmsghdr __force_user *)msg->msg_control;
86881 struct cmsghdr cmhdr;
86882 int cmlen = CMSG_LEN(len);
86883 int err;
86884@@ -233,7 +233,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
86885 err = -EFAULT;
86886 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
86887 goto out;
86888- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
86889+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
86890 goto out;
86891 cmlen = CMSG_SPACE(len);
86892 if (msg->msg_controllen < cmlen)
86893@@ -249,7 +249,7 @@ EXPORT_SYMBOL(put_cmsg);
86894 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
86895 {
86896 struct cmsghdr __user *cm
86897- = (__force struct cmsghdr __user*)msg->msg_control;
86898+ = (struct cmsghdr __force_user *)msg->msg_control;
86899
86900 int fdmax = 0;
86901 int fdnum = scm->fp->count;
86902@@ -269,7 +269,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
86903 if (fdnum < fdmax)
86904 fdmax = fdnum;
86905
86906- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
86907+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
86908 i++, cmfptr++)
86909 {
86910 struct socket *sock;
86911diff --git a/net/core/sock.c b/net/core/sock.c
86912index d6d024c..6ea7ab4 100644
86913--- a/net/core/sock.c
86914+++ b/net/core/sock.c
86915@@ -390,7 +390,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
86916 struct sk_buff_head *list = &sk->sk_receive_queue;
86917
86918 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
86919- atomic_inc(&sk->sk_drops);
86920+ atomic_inc_unchecked(&sk->sk_drops);
86921 trace_sock_rcvqueue_full(sk, skb);
86922 return -ENOMEM;
86923 }
86924@@ -400,7 +400,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
86925 return err;
86926
86927 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
86928- atomic_inc(&sk->sk_drops);
86929+ atomic_inc_unchecked(&sk->sk_drops);
86930 return -ENOBUFS;
86931 }
86932
86933@@ -420,7 +420,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
86934 skb_dst_force(skb);
86935
86936 spin_lock_irqsave(&list->lock, flags);
86937- skb->dropcount = atomic_read(&sk->sk_drops);
86938+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
86939 __skb_queue_tail(list, skb);
86940 spin_unlock_irqrestore(&list->lock, flags);
86941
86942@@ -440,7 +440,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
86943 skb->dev = NULL;
86944
86945 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
86946- atomic_inc(&sk->sk_drops);
86947+ atomic_inc_unchecked(&sk->sk_drops);
86948 goto discard_and_relse;
86949 }
86950 if (nested)
86951@@ -458,7 +458,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
86952 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
86953 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
86954 bh_unlock_sock(sk);
86955- atomic_inc(&sk->sk_drops);
86956+ atomic_inc_unchecked(&sk->sk_drops);
86957 goto discard_and_relse;
86958 }
86959
86960@@ -933,12 +933,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
86961 struct timeval tm;
86962 } v;
86963
86964- int lv = sizeof(int);
86965- int len;
86966+ unsigned int lv = sizeof(int);
86967+ unsigned int len;
86968
86969 if (get_user(len, optlen))
86970 return -EFAULT;
86971- if (len < 0)
86972+ if (len > INT_MAX)
86973 return -EINVAL;
86974
86975 memset(&v, 0, sizeof(v));
86976@@ -1090,11 +1090,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
86977
86978 case SO_PEERNAME:
86979 {
86980- char address[128];
86981+ char address[_K_SS_MAXSIZE];
86982
86983 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
86984 return -ENOTCONN;
86985- if (lv < len)
86986+ if (lv < len || sizeof address < len)
86987 return -EINVAL;
86988 if (copy_to_user(optval, address, len))
86989 return -EFAULT;
86990@@ -1161,7 +1161,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
86991
86992 if (len > lv)
86993 len = lv;
86994- if (copy_to_user(optval, &v, len))
86995+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
86996 return -EFAULT;
86997 lenout:
86998 if (put_user(len, optlen))
86999@@ -2277,7 +2277,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
87000 */
87001 smp_wmb();
87002 atomic_set(&sk->sk_refcnt, 1);
87003- atomic_set(&sk->sk_drops, 0);
87004+ atomic_set_unchecked(&sk->sk_drops, 0);
87005 }
87006 EXPORT_SYMBOL(sock_init_data);
87007
87008diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
87009index a0e9cf6..ef7f9ed 100644
87010--- a/net/core/sock_diag.c
87011+++ b/net/core/sock_diag.c
87012@@ -9,26 +9,33 @@
87013 #include <linux/inet_diag.h>
87014 #include <linux/sock_diag.h>
87015
87016-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
87017+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
87018 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
87019 static DEFINE_MUTEX(sock_diag_table_mutex);
87020
87021 int sock_diag_check_cookie(void *sk, __u32 *cookie)
87022 {
87023+#ifndef CONFIG_GRKERNSEC_HIDESYM
87024 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
87025 cookie[1] != INET_DIAG_NOCOOKIE) &&
87026 ((u32)(unsigned long)sk != cookie[0] ||
87027 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
87028 return -ESTALE;
87029 else
87030+#endif
87031 return 0;
87032 }
87033 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
87034
87035 void sock_diag_save_cookie(void *sk, __u32 *cookie)
87036 {
87037+#ifdef CONFIG_GRKERNSEC_HIDESYM
87038+ cookie[0] = 0;
87039+ cookie[1] = 0;
87040+#else
87041 cookie[0] = (u32)(unsigned long)sk;
87042 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
87043+#endif
87044 }
87045 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
87046
87047@@ -113,8 +120,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
87048 mutex_lock(&sock_diag_table_mutex);
87049 if (sock_diag_handlers[hndl->family])
87050 err = -EBUSY;
87051- else
87052+ else {
87053+ pax_open_kernel();
87054 sock_diag_handlers[hndl->family] = hndl;
87055+ pax_close_kernel();
87056+ }
87057 mutex_unlock(&sock_diag_table_mutex);
87058
87059 return err;
87060@@ -130,7 +140,9 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
87061
87062 mutex_lock(&sock_diag_table_mutex);
87063 BUG_ON(sock_diag_handlers[family] != hnld);
87064+ pax_open_kernel();
87065 sock_diag_handlers[family] = NULL;
87066+ pax_close_kernel();
87067 mutex_unlock(&sock_diag_table_mutex);
87068 }
87069 EXPORT_SYMBOL_GPL(sock_diag_unregister);
87070diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
87071index cfdb46a..cef55e1 100644
87072--- a/net/core/sysctl_net_core.c
87073+++ b/net/core/sysctl_net_core.c
87074@@ -28,7 +28,7 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write,
87075 {
87076 unsigned int orig_size, size;
87077 int ret, i;
87078- ctl_table tmp = {
87079+ ctl_table_no_const tmp = {
87080 .data = &size,
87081 .maxlen = sizeof(size),
87082 .mode = table->mode
87083@@ -211,13 +211,12 @@ static struct ctl_table netns_core_table[] = {
87084
87085 static __net_init int sysctl_core_net_init(struct net *net)
87086 {
87087- struct ctl_table *tbl;
87088+ ctl_table_no_const *tbl = NULL;
87089
87090 net->core.sysctl_somaxconn = SOMAXCONN;
87091
87092- tbl = netns_core_table;
87093 if (!net_eq(net, &init_net)) {
87094- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
87095+ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
87096 if (tbl == NULL)
87097 goto err_dup;
87098
87099@@ -227,17 +226,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
87100 if (net->user_ns != &init_user_ns) {
87101 tbl[0].procname = NULL;
87102 }
87103- }
87104-
87105- net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
87106+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
87107+ } else
87108+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
87109 if (net->core.sysctl_hdr == NULL)
87110 goto err_reg;
87111
87112 return 0;
87113
87114 err_reg:
87115- if (tbl != netns_core_table)
87116- kfree(tbl);
87117+ kfree(tbl);
87118 err_dup:
87119 return -ENOMEM;
87120 }
87121@@ -252,7 +250,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
87122 kfree(tbl);
87123 }
87124
87125-static __net_initdata struct pernet_operations sysctl_core_ops = {
87126+static __net_initconst struct pernet_operations sysctl_core_ops = {
87127 .init = sysctl_core_net_init,
87128 .exit = sysctl_core_net_exit,
87129 };
87130diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
87131index c21f200..bc4565b 100644
87132--- a/net/decnet/af_decnet.c
87133+++ b/net/decnet/af_decnet.c
87134@@ -465,6 +465,7 @@ static struct proto dn_proto = {
87135 .sysctl_rmem = sysctl_decnet_rmem,
87136 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
87137 .obj_size = sizeof(struct dn_sock),
87138+ .slab_flags = SLAB_USERCOPY,
87139 };
87140
87141 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
87142diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
87143index a55eecc..dd8428c 100644
87144--- a/net/decnet/sysctl_net_decnet.c
87145+++ b/net/decnet/sysctl_net_decnet.c
87146@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
87147
87148 if (len > *lenp) len = *lenp;
87149
87150- if (copy_to_user(buffer, addr, len))
87151+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
87152 return -EFAULT;
87153
87154 *lenp = len;
87155@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
87156
87157 if (len > *lenp) len = *lenp;
87158
87159- if (copy_to_user(buffer, devname, len))
87160+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
87161 return -EFAULT;
87162
87163 *lenp = len;
87164diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
87165index d01be2a..8976537 100644
87166--- a/net/ipv4/af_inet.c
87167+++ b/net/ipv4/af_inet.c
87168@@ -1703,13 +1703,9 @@ static int __init inet_init(void)
87169
87170 BUILD_BUG_ON(sizeof(struct inet_skb_parm) > FIELD_SIZEOF(struct sk_buff, cb));
87171
87172- sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
87173- if (!sysctl_local_reserved_ports)
87174- goto out;
87175-
87176 rc = proto_register(&tcp_prot, 1);
87177 if (rc)
87178- goto out_free_reserved_ports;
87179+ goto out;
87180
87181 rc = proto_register(&udp_prot, 1);
87182 if (rc)
87183@@ -1818,8 +1814,6 @@ out_unregister_udp_proto:
87184 proto_unregister(&udp_prot);
87185 out_unregister_tcp_proto:
87186 proto_unregister(&tcp_prot);
87187-out_free_reserved_ports:
87188- kfree(sysctl_local_reserved_ports);
87189 goto out;
87190 }
87191
87192diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
87193index 2e7f194..0fa4d6d 100644
87194--- a/net/ipv4/ah4.c
87195+++ b/net/ipv4/ah4.c
87196@@ -420,7 +420,7 @@ static void ah4_err(struct sk_buff *skb, u32 info)
87197 return;
87198
87199 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
87200- atomic_inc(&flow_cache_genid);
87201+ atomic_inc_unchecked(&flow_cache_genid);
87202 rt_genid_bump(net);
87203
87204 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0);
87205diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
87206index dfc39d4..0b82c4d 100644
87207--- a/net/ipv4/devinet.c
87208+++ b/net/ipv4/devinet.c
87209@@ -1529,7 +1529,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
87210 idx = 0;
87211 head = &net->dev_index_head[h];
87212 rcu_read_lock();
87213- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
87214+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
87215 net->dev_base_seq;
87216 hlist_for_each_entry_rcu(dev, head, index_hlist) {
87217 if (idx < s_idx)
87218@@ -1840,7 +1840,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
87219 idx = 0;
87220 head = &net->dev_index_head[h];
87221 rcu_read_lock();
87222- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
87223+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
87224 net->dev_base_seq;
87225 hlist_for_each_entry_rcu(dev, head, index_hlist) {
87226 if (idx < s_idx)
87227@@ -2065,7 +2065,7 @@ static int ipv4_doint_and_flush(ctl_table *ctl, int write,
87228 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
87229 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
87230
87231-static struct devinet_sysctl_table {
87232+static const struct devinet_sysctl_table {
87233 struct ctl_table_header *sysctl_header;
87234 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
87235 } devinet_sysctl = {
87236@@ -2183,7 +2183,7 @@ static __net_init int devinet_init_net(struct net *net)
87237 int err;
87238 struct ipv4_devconf *all, *dflt;
87239 #ifdef CONFIG_SYSCTL
87240- struct ctl_table *tbl = ctl_forward_entry;
87241+ ctl_table_no_const *tbl = NULL;
87242 struct ctl_table_header *forw_hdr;
87243 #endif
87244
87245@@ -2201,7 +2201,7 @@ static __net_init int devinet_init_net(struct net *net)
87246 goto err_alloc_dflt;
87247
87248 #ifdef CONFIG_SYSCTL
87249- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
87250+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
87251 if (tbl == NULL)
87252 goto err_alloc_ctl;
87253
87254@@ -2221,7 +2221,10 @@ static __net_init int devinet_init_net(struct net *net)
87255 goto err_reg_dflt;
87256
87257 err = -ENOMEM;
87258- forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
87259+ if (!net_eq(net, &init_net))
87260+ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
87261+ else
87262+ forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
87263 if (forw_hdr == NULL)
87264 goto err_reg_ctl;
87265 net->ipv4.forw_hdr = forw_hdr;
87266@@ -2237,8 +2240,7 @@ err_reg_ctl:
87267 err_reg_dflt:
87268 __devinet_sysctl_unregister(all);
87269 err_reg_all:
87270- if (tbl != ctl_forward_entry)
87271- kfree(tbl);
87272+ kfree(tbl);
87273 err_alloc_ctl:
87274 #endif
87275 if (dflt != &ipv4_devconf_dflt)
87276diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
87277index 4cfe34d..a6ba66e 100644
87278--- a/net/ipv4/esp4.c
87279+++ b/net/ipv4/esp4.c
87280@@ -503,7 +503,7 @@ static void esp4_err(struct sk_buff *skb, u32 info)
87281 return;
87282
87283 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
87284- atomic_inc(&flow_cache_genid);
87285+ atomic_inc_unchecked(&flow_cache_genid);
87286 rt_genid_bump(net);
87287
87288 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
87289diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
87290index c7629a2..b62d139 100644
87291--- a/net/ipv4/fib_frontend.c
87292+++ b/net/ipv4/fib_frontend.c
87293@@ -1017,12 +1017,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
87294 #ifdef CONFIG_IP_ROUTE_MULTIPATH
87295 fib_sync_up(dev);
87296 #endif
87297- atomic_inc(&net->ipv4.dev_addr_genid);
87298+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
87299 rt_cache_flush(dev_net(dev));
87300 break;
87301 case NETDEV_DOWN:
87302 fib_del_ifaddr(ifa, NULL);
87303- atomic_inc(&net->ipv4.dev_addr_genid);
87304+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
87305 if (ifa->ifa_dev->ifa_list == NULL) {
87306 /* Last address was deleted from this interface.
87307 * Disable IP.
87308@@ -1058,7 +1058,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
87309 #ifdef CONFIG_IP_ROUTE_MULTIPATH
87310 fib_sync_up(dev);
87311 #endif
87312- atomic_inc(&net->ipv4.dev_addr_genid);
87313+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
87314 rt_cache_flush(net);
87315 break;
87316 case NETDEV_DOWN:
87317diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
87318index 8f6cb7a..34507f9 100644
87319--- a/net/ipv4/fib_semantics.c
87320+++ b/net/ipv4/fib_semantics.c
87321@@ -765,7 +765,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
87322 nh->nh_saddr = inet_select_addr(nh->nh_dev,
87323 nh->nh_gw,
87324 nh->nh_parent->fib_scope);
87325- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
87326+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
87327
87328 return nh->nh_saddr;
87329 }
87330diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
87331index 6acb541..9ea617d 100644
87332--- a/net/ipv4/inet_connection_sock.c
87333+++ b/net/ipv4/inet_connection_sock.c
87334@@ -37,7 +37,7 @@ struct local_ports sysctl_local_ports __read_mostly = {
87335 .range = { 32768, 61000 },
87336 };
87337
87338-unsigned long *sysctl_local_reserved_ports;
87339+unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
87340 EXPORT_SYMBOL(sysctl_local_reserved_ports);
87341
87342 void inet_get_local_port_range(int *low, int *high)
87343diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
87344index 6af375a..c493c74 100644
87345--- a/net/ipv4/inet_hashtables.c
87346+++ b/net/ipv4/inet_hashtables.c
87347@@ -18,12 +18,15 @@
87348 #include <linux/sched.h>
87349 #include <linux/slab.h>
87350 #include <linux/wait.h>
87351+#include <linux/security.h>
87352
87353 #include <net/inet_connection_sock.h>
87354 #include <net/inet_hashtables.h>
87355 #include <net/secure_seq.h>
87356 #include <net/ip.h>
87357
87358+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
87359+
87360 /*
87361 * Allocate and initialize a new local port bind bucket.
87362 * The bindhash mutex for snum's hash chain must be held here.
87363@@ -554,6 +557,8 @@ ok:
87364 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
87365 spin_unlock(&head->lock);
87366
87367+ gr_update_task_in_ip_table(current, inet_sk(sk));
87368+
87369 if (tw) {
87370 inet_twsk_deschedule(tw, death_row);
87371 while (twrefcnt) {
87372diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
87373index 000e3d2..5472da3 100644
87374--- a/net/ipv4/inetpeer.c
87375+++ b/net/ipv4/inetpeer.c
87376@@ -503,8 +503,8 @@ relookup:
87377 if (p) {
87378 p->daddr = *daddr;
87379 atomic_set(&p->refcnt, 1);
87380- atomic_set(&p->rid, 0);
87381- atomic_set(&p->ip_id_count,
87382+ atomic_set_unchecked(&p->rid, 0);
87383+ atomic_set_unchecked(&p->ip_id_count,
87384 (daddr->family == AF_INET) ?
87385 secure_ip_id(daddr->addr.a4) :
87386 secure_ipv6_id(daddr->addr.a6));
87387diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
87388index b66910a..cfe416e 100644
87389--- a/net/ipv4/ip_fragment.c
87390+++ b/net/ipv4/ip_fragment.c
87391@@ -282,7 +282,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
87392 return 0;
87393
87394 start = qp->rid;
87395- end = atomic_inc_return(&peer->rid);
87396+ end = atomic_inc_return_unchecked(&peer->rid);
87397 qp->rid = end;
87398
87399 rc = qp->q.fragments && (end - start) > max;
87400@@ -759,12 +759,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
87401
87402 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
87403 {
87404- struct ctl_table *table;
87405+ ctl_table_no_const *table = NULL;
87406 struct ctl_table_header *hdr;
87407
87408- table = ip4_frags_ns_ctl_table;
87409 if (!net_eq(net, &init_net)) {
87410- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
87411+ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
87412 if (table == NULL)
87413 goto err_alloc;
87414
87415@@ -775,9 +774,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
87416 /* Don't export sysctls to unprivileged users */
87417 if (net->user_ns != &init_user_ns)
87418 table[0].procname = NULL;
87419- }
87420+ hdr = register_net_sysctl(net, "net/ipv4", table);
87421+ } else
87422+ hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
87423
87424- hdr = register_net_sysctl(net, "net/ipv4", table);
87425 if (hdr == NULL)
87426 goto err_reg;
87427
87428@@ -785,8 +785,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
87429 return 0;
87430
87431 err_reg:
87432- if (!net_eq(net, &init_net))
87433- kfree(table);
87434+ kfree(table);
87435 err_alloc:
87436 return -ENOMEM;
87437 }
87438diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
87439index 2a83591..68e7458 100644
87440--- a/net/ipv4/ip_gre.c
87441+++ b/net/ipv4/ip_gre.c
87442@@ -115,7 +115,7 @@ static bool log_ecn_error = true;
87443 module_param(log_ecn_error, bool, 0644);
87444 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
87445
87446-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
87447+static struct rtnl_link_ops ipgre_link_ops;
87448 static int ipgre_tunnel_init(struct net_device *dev);
87449
87450 static int ipgre_net_id __read_mostly;
87451@@ -503,10 +503,11 @@ static int ipgre_tunnel_ioctl(struct net_device *dev,
87452
87453 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
87454 return -EFAULT;
87455- if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
87456- p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
87457- ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING))) {
87458- return -EINVAL;
87459+ if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
87460+ if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
87461+ p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
87462+ ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING)))
87463+ return -EINVAL;
87464 }
87465 p.i_flags = gre_flags_to_tnl_flags(p.i_flags);
87466 p.o_flags = gre_flags_to_tnl_flags(p.o_flags);
87467@@ -918,7 +919,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
87468 [IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
87469 };
87470
87471-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
87472+static struct rtnl_link_ops ipgre_link_ops = {
87473 .kind = "gre",
87474 .maxtype = IFLA_GRE_MAX,
87475 .policy = ipgre_policy,
87476@@ -932,7 +933,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
87477 .fill_info = ipgre_fill_info,
87478 };
87479
87480-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
87481+static struct rtnl_link_ops ipgre_tap_ops = {
87482 .kind = "gretap",
87483 .maxtype = IFLA_GRE_MAX,
87484 .policy = ipgre_policy,
87485diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
87486index d9c4f11..02b82dbc 100644
87487--- a/net/ipv4/ip_sockglue.c
87488+++ b/net/ipv4/ip_sockglue.c
87489@@ -1152,7 +1152,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
87490 len = min_t(unsigned int, len, opt->optlen);
87491 if (put_user(len, optlen))
87492 return -EFAULT;
87493- if (copy_to_user(optval, opt->__data, len))
87494+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
87495+ copy_to_user(optval, opt->__data, len))
87496 return -EFAULT;
87497 return 0;
87498 }
87499@@ -1283,7 +1284,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
87500 if (sk->sk_type != SOCK_STREAM)
87501 return -ENOPROTOOPT;
87502
87503- msg.msg_control = optval;
87504+ msg.msg_control = (void __force_kernel *)optval;
87505 msg.msg_controllen = len;
87506 msg.msg_flags = flags;
87507
87508diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
87509index c118f6b..63856c4 100644
87510--- a/net/ipv4/ip_vti.c
87511+++ b/net/ipv4/ip_vti.c
87512@@ -47,7 +47,7 @@
87513 #define HASH_SIZE 16
87514 #define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&(HASH_SIZE-1))
87515
87516-static struct rtnl_link_ops vti_link_ops __read_mostly;
87517+static struct rtnl_link_ops vti_link_ops;
87518
87519 static int vti_net_id __read_mostly;
87520 struct vti_net {
87521@@ -606,17 +606,10 @@ static int __net_init vti_fb_tunnel_init(struct net_device *dev)
87522 struct iphdr *iph = &tunnel->parms.iph;
87523 struct vti_net *ipn = net_generic(dev_net(dev), vti_net_id);
87524
87525- tunnel->dev = dev;
87526- strcpy(tunnel->parms.name, dev->name);
87527-
87528 iph->version = 4;
87529 iph->protocol = IPPROTO_IPIP;
87530 iph->ihl = 5;
87531
87532- dev->tstats = alloc_percpu(struct pcpu_tstats);
87533- if (!dev->tstats)
87534- return -ENOMEM;
87535-
87536 dev_hold(dev);
87537 rcu_assign_pointer(ipn->tunnels_wc[0], tunnel);
87538 return 0;
87539@@ -847,7 +840,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
87540 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
87541 };
87542
87543-static struct rtnl_link_ops vti_link_ops __read_mostly = {
87544+static struct rtnl_link_ops vti_link_ops = {
87545 .kind = "vti",
87546 .maxtype = IFLA_VTI_MAX,
87547 .policy = vti_policy,
87548diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
87549index 59cb8c7..a72160c 100644
87550--- a/net/ipv4/ipcomp.c
87551+++ b/net/ipv4/ipcomp.c
87552@@ -48,7 +48,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
87553 return;
87554
87555 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
87556- atomic_inc(&flow_cache_genid);
87557+ atomic_inc_unchecked(&flow_cache_genid);
87558 rt_genid_bump(net);
87559
87560 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_COMP, 0);
87561diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
87562index efa1138..20dbba0 100644
87563--- a/net/ipv4/ipconfig.c
87564+++ b/net/ipv4/ipconfig.c
87565@@ -334,7 +334,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
87566
87567 mm_segment_t oldfs = get_fs();
87568 set_fs(get_ds());
87569- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
87570+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
87571 set_fs(oldfs);
87572 return res;
87573 }
87574@@ -345,7 +345,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
87575
87576 mm_segment_t oldfs = get_fs();
87577 set_fs(get_ds());
87578- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
87579+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
87580 set_fs(oldfs);
87581 return res;
87582 }
87583@@ -356,7 +356,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
87584
87585 mm_segment_t oldfs = get_fs();
87586 set_fs(get_ds());
87587- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
87588+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
87589 set_fs(oldfs);
87590 return res;
87591 }
87592diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
87593index 77bfcce..64a55d4 100644
87594--- a/net/ipv4/ipip.c
87595+++ b/net/ipv4/ipip.c
87596@@ -124,7 +124,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
87597 static int ipip_net_id __read_mostly;
87598
87599 static int ipip_tunnel_init(struct net_device *dev);
87600-static struct rtnl_link_ops ipip_link_ops __read_mostly;
87601+static struct rtnl_link_ops ipip_link_ops;
87602
87603 static int ipip_err(struct sk_buff *skb, u32 info)
87604 {
87605@@ -404,7 +404,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
87606 [IFLA_IPTUN_PMTUDISC] = { .type = NLA_U8 },
87607 };
87608
87609-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
87610+static struct rtnl_link_ops ipip_link_ops = {
87611 .kind = "ipip",
87612 .maxtype = IFLA_IPTUN_MAX,
87613 .policy = ipip_policy,
87614diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
87615index 85a4f21..1beb1f5 100644
87616--- a/net/ipv4/netfilter/arp_tables.c
87617+++ b/net/ipv4/netfilter/arp_tables.c
87618@@ -880,14 +880,14 @@ static int compat_table_info(const struct xt_table_info *info,
87619 #endif
87620
87621 static int get_info(struct net *net, void __user *user,
87622- const int *len, int compat)
87623+ int len, int compat)
87624 {
87625 char name[XT_TABLE_MAXNAMELEN];
87626 struct xt_table *t;
87627 int ret;
87628
87629- if (*len != sizeof(struct arpt_getinfo)) {
87630- duprintf("length %u != %Zu\n", *len,
87631+ if (len != sizeof(struct arpt_getinfo)) {
87632+ duprintf("length %u != %Zu\n", len,
87633 sizeof(struct arpt_getinfo));
87634 return -EINVAL;
87635 }
87636@@ -924,7 +924,7 @@ static int get_info(struct net *net, void __user *user,
87637 info.size = private->size;
87638 strcpy(info.name, name);
87639
87640- if (copy_to_user(user, &info, *len) != 0)
87641+ if (copy_to_user(user, &info, len) != 0)
87642 ret = -EFAULT;
87643 else
87644 ret = 0;
87645@@ -1683,7 +1683,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
87646
87647 switch (cmd) {
87648 case ARPT_SO_GET_INFO:
87649- ret = get_info(sock_net(sk), user, len, 1);
87650+ ret = get_info(sock_net(sk), user, *len, 1);
87651 break;
87652 case ARPT_SO_GET_ENTRIES:
87653 ret = compat_get_entries(sock_net(sk), user, len);
87654@@ -1728,7 +1728,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
87655
87656 switch (cmd) {
87657 case ARPT_SO_GET_INFO:
87658- ret = get_info(sock_net(sk), user, len, 0);
87659+ ret = get_info(sock_net(sk), user, *len, 0);
87660 break;
87661
87662 case ARPT_SO_GET_ENTRIES:
87663diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
87664index d23118d..6ad7277 100644
87665--- a/net/ipv4/netfilter/ip_tables.c
87666+++ b/net/ipv4/netfilter/ip_tables.c
87667@@ -1068,14 +1068,14 @@ static int compat_table_info(const struct xt_table_info *info,
87668 #endif
87669
87670 static int get_info(struct net *net, void __user *user,
87671- const int *len, int compat)
87672+ int len, int compat)
87673 {
87674 char name[XT_TABLE_MAXNAMELEN];
87675 struct xt_table *t;
87676 int ret;
87677
87678- if (*len != sizeof(struct ipt_getinfo)) {
87679- duprintf("length %u != %zu\n", *len,
87680+ if (len != sizeof(struct ipt_getinfo)) {
87681+ duprintf("length %u != %zu\n", len,
87682 sizeof(struct ipt_getinfo));
87683 return -EINVAL;
87684 }
87685@@ -1112,7 +1112,7 @@ static int get_info(struct net *net, void __user *user,
87686 info.size = private->size;
87687 strcpy(info.name, name);
87688
87689- if (copy_to_user(user, &info, *len) != 0)
87690+ if (copy_to_user(user, &info, len) != 0)
87691 ret = -EFAULT;
87692 else
87693 ret = 0;
87694@@ -1966,7 +1966,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
87695
87696 switch (cmd) {
87697 case IPT_SO_GET_INFO:
87698- ret = get_info(sock_net(sk), user, len, 1);
87699+ ret = get_info(sock_net(sk), user, *len, 1);
87700 break;
87701 case IPT_SO_GET_ENTRIES:
87702 ret = compat_get_entries(sock_net(sk), user, len);
87703@@ -2013,7 +2013,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
87704
87705 switch (cmd) {
87706 case IPT_SO_GET_INFO:
87707- ret = get_info(sock_net(sk), user, len, 0);
87708+ ret = get_info(sock_net(sk), user, *len, 0);
87709 break;
87710
87711 case IPT_SO_GET_ENTRIES:
87712diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
87713index 7d93d62..cbbf2a3 100644
87714--- a/net/ipv4/ping.c
87715+++ b/net/ipv4/ping.c
87716@@ -843,7 +843,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
87717 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
87718 0, sock_i_ino(sp),
87719 atomic_read(&sp->sk_refcnt), sp,
87720- atomic_read(&sp->sk_drops), len);
87721+ atomic_read_unchecked(&sp->sk_drops), len);
87722 }
87723
87724 static int ping_seq_show(struct seq_file *seq, void *v)
87725diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
87726index dd44e0a..06dcca4 100644
87727--- a/net/ipv4/raw.c
87728+++ b/net/ipv4/raw.c
87729@@ -309,7 +309,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
87730 int raw_rcv(struct sock *sk, struct sk_buff *skb)
87731 {
87732 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
87733- atomic_inc(&sk->sk_drops);
87734+ atomic_inc_unchecked(&sk->sk_drops);
87735 kfree_skb(skb);
87736 return NET_RX_DROP;
87737 }
87738@@ -745,16 +745,20 @@ static int raw_init(struct sock *sk)
87739
87740 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
87741 {
87742+ struct icmp_filter filter;
87743+
87744 if (optlen > sizeof(struct icmp_filter))
87745 optlen = sizeof(struct icmp_filter);
87746- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
87747+ if (copy_from_user(&filter, optval, optlen))
87748 return -EFAULT;
87749+ raw_sk(sk)->filter = filter;
87750 return 0;
87751 }
87752
87753 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
87754 {
87755 int len, ret = -EFAULT;
87756+ struct icmp_filter filter;
87757
87758 if (get_user(len, optlen))
87759 goto out;
87760@@ -764,8 +768,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
87761 if (len > sizeof(struct icmp_filter))
87762 len = sizeof(struct icmp_filter);
87763 ret = -EFAULT;
87764- if (put_user(len, optlen) ||
87765- copy_to_user(optval, &raw_sk(sk)->filter, len))
87766+ filter = raw_sk(sk)->filter;
87767+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
87768 goto out;
87769 ret = 0;
87770 out: return ret;
87771@@ -994,7 +998,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
87772 0, 0L, 0,
87773 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
87774 0, sock_i_ino(sp),
87775- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
87776+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
87777 }
87778
87779 static int raw_seq_show(struct seq_file *seq, void *v)
87780diff --git a/net/ipv4/route.c b/net/ipv4/route.c
87781index d35bbf0..faa3ab8 100644
87782--- a/net/ipv4/route.c
87783+++ b/net/ipv4/route.c
87784@@ -2558,34 +2558,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
87785 .maxlen = sizeof(int),
87786 .mode = 0200,
87787 .proc_handler = ipv4_sysctl_rtcache_flush,
87788+ .extra1 = &init_net,
87789 },
87790 { },
87791 };
87792
87793 static __net_init int sysctl_route_net_init(struct net *net)
87794 {
87795- struct ctl_table *tbl;
87796+ ctl_table_no_const *tbl = NULL;
87797
87798- tbl = ipv4_route_flush_table;
87799 if (!net_eq(net, &init_net)) {
87800- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
87801+ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
87802 if (tbl == NULL)
87803 goto err_dup;
87804
87805 /* Don't export sysctls to unprivileged users */
87806 if (net->user_ns != &init_user_ns)
87807 tbl[0].procname = NULL;
87808- }
87809- tbl[0].extra1 = net;
87810+ tbl[0].extra1 = net;
87811+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
87812+ } else
87813+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
87814
87815- net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
87816 if (net->ipv4.route_hdr == NULL)
87817 goto err_reg;
87818 return 0;
87819
87820 err_reg:
87821- if (tbl != ipv4_route_flush_table)
87822- kfree(tbl);
87823+ kfree(tbl);
87824 err_dup:
87825 return -ENOMEM;
87826 }
87827@@ -2608,7 +2608,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
87828
87829 static __net_init int rt_genid_init(struct net *net)
87830 {
87831- atomic_set(&net->rt_genid, 0);
87832+ atomic_set_unchecked(&net->rt_genid, 0);
87833 get_random_bytes(&net->ipv4.dev_addr_genid,
87834 sizeof(net->ipv4.dev_addr_genid));
87835 return 0;
87836diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
87837index fa2f63f..6554815 100644
87838--- a/net/ipv4/sysctl_net_ipv4.c
87839+++ b/net/ipv4/sysctl_net_ipv4.c
87840@@ -55,7 +55,7 @@ static int ipv4_local_port_range(ctl_table *table, int write,
87841 {
87842 int ret;
87843 int range[2];
87844- ctl_table tmp = {
87845+ ctl_table_no_const tmp = {
87846 .data = &range,
87847 .maxlen = sizeof(range),
87848 .mode = table->mode,
87849@@ -108,7 +108,7 @@ static int ipv4_ping_group_range(ctl_table *table, int write,
87850 int ret;
87851 gid_t urange[2];
87852 kgid_t low, high;
87853- ctl_table tmp = {
87854+ ctl_table_no_const tmp = {
87855 .data = &urange,
87856 .maxlen = sizeof(urange),
87857 .mode = table->mode,
87858@@ -139,7 +139,7 @@ static int proc_tcp_congestion_control(ctl_table *ctl, int write,
87859 void __user *buffer, size_t *lenp, loff_t *ppos)
87860 {
87861 char val[TCP_CA_NAME_MAX];
87862- ctl_table tbl = {
87863+ ctl_table_no_const tbl = {
87864 .data = val,
87865 .maxlen = TCP_CA_NAME_MAX,
87866 };
87867@@ -158,7 +158,7 @@ static int proc_tcp_available_congestion_control(ctl_table *ctl,
87868 void __user *buffer, size_t *lenp,
87869 loff_t *ppos)
87870 {
87871- ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
87872+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
87873 int ret;
87874
87875 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
87876@@ -175,7 +175,7 @@ static int proc_allowed_congestion_control(ctl_table *ctl,
87877 void __user *buffer, size_t *lenp,
87878 loff_t *ppos)
87879 {
87880- ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
87881+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
87882 int ret;
87883
87884 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
87885@@ -201,15 +201,17 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
87886 struct mem_cgroup *memcg;
87887 #endif
87888
87889- ctl_table tmp = {
87890+ ctl_table_no_const tmp = {
87891 .data = &vec,
87892 .maxlen = sizeof(vec),
87893 .mode = ctl->mode,
87894 };
87895
87896 if (!write) {
87897- ctl->data = &net->ipv4.sysctl_tcp_mem;
87898- return proc_doulongvec_minmax(ctl, write, buffer, lenp, ppos);
87899+ ctl_table_no_const tcp_mem = *ctl;
87900+
87901+ tcp_mem.data = &net->ipv4.sysctl_tcp_mem;
87902+ return proc_doulongvec_minmax(&tcp_mem, write, buffer, lenp, ppos);
87903 }
87904
87905 ret = proc_doulongvec_minmax(&tmp, write, buffer, lenp, ppos);
87906@@ -236,7 +238,7 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
87907 static int proc_tcp_fastopen_key(ctl_table *ctl, int write, void __user *buffer,
87908 size_t *lenp, loff_t *ppos)
87909 {
87910- ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
87911+ ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
87912 struct tcp_fastopen_context *ctxt;
87913 int ret;
87914 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
87915@@ -477,7 +479,7 @@ static struct ctl_table ipv4_table[] = {
87916 },
87917 {
87918 .procname = "ip_local_reserved_ports",
87919- .data = NULL, /* initialized in sysctl_ipv4_init */
87920+ .data = sysctl_local_reserved_ports,
87921 .maxlen = 65536,
87922 .mode = 0644,
87923 .proc_handler = proc_do_large_bitmap,
87924@@ -842,11 +844,10 @@ static struct ctl_table ipv4_net_table[] = {
87925
87926 static __net_init int ipv4_sysctl_init_net(struct net *net)
87927 {
87928- struct ctl_table *table;
87929+ ctl_table_no_const *table = NULL;
87930
87931- table = ipv4_net_table;
87932 if (!net_eq(net, &init_net)) {
87933- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
87934+ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
87935 if (table == NULL)
87936 goto err_alloc;
87937
87938@@ -881,15 +882,17 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
87939
87940 tcp_init_mem(net);
87941
87942- net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
87943+ if (!net_eq(net, &init_net))
87944+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
87945+ else
87946+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
87947 if (net->ipv4.ipv4_hdr == NULL)
87948 goto err_reg;
87949
87950 return 0;
87951
87952 err_reg:
87953- if (!net_eq(net, &init_net))
87954- kfree(table);
87955+ kfree(table);
87956 err_alloc:
87957 return -ENOMEM;
87958 }
87959@@ -911,16 +914,6 @@ static __net_initdata struct pernet_operations ipv4_sysctl_ops = {
87960 static __init int sysctl_ipv4_init(void)
87961 {
87962 struct ctl_table_header *hdr;
87963- struct ctl_table *i;
87964-
87965- for (i = ipv4_table; i->procname; i++) {
87966- if (strcmp(i->procname, "ip_local_reserved_ports") == 0) {
87967- i->data = sysctl_local_reserved_ports;
87968- break;
87969- }
87970- }
87971- if (!i->procname)
87972- return -EINVAL;
87973
87974 hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table);
87975 if (hdr == NULL)
87976diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
87977index 9c62257..651cc27 100644
87978--- a/net/ipv4/tcp_input.c
87979+++ b/net/ipv4/tcp_input.c
87980@@ -4436,7 +4436,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
87981 * simplifies code)
87982 */
87983 static void
87984-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
87985+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
87986 struct sk_buff *head, struct sk_buff *tail,
87987 u32 start, u32 end)
87988 {
87989@@ -5522,6 +5522,7 @@ discard:
87990 tcp_paws_reject(&tp->rx_opt, 0))
87991 goto discard_and_undo;
87992
87993+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
87994 if (th->syn) {
87995 /* We see SYN without ACK. It is attempt of
87996 * simultaneous connect with crossed SYNs.
87997@@ -5572,6 +5573,7 @@ discard:
87998 goto discard;
87999 #endif
88000 }
88001+#endif
88002 /* "fifth, if neither of the SYN or RST bits is set then
88003 * drop the segment and return."
88004 */
88005@@ -5616,7 +5618,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
88006 goto discard;
88007
88008 if (th->syn) {
88009- if (th->fin)
88010+ if (th->fin || th->urg || th->psh)
88011 goto discard;
88012 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
88013 return 1;
88014diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
88015index 7999fc5..c812f42 100644
88016--- a/net/ipv4/tcp_ipv4.c
88017+++ b/net/ipv4/tcp_ipv4.c
88018@@ -90,6 +90,10 @@ int sysctl_tcp_low_latency __read_mostly;
88019 EXPORT_SYMBOL(sysctl_tcp_low_latency);
88020
88021
88022+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88023+extern int grsec_enable_blackhole;
88024+#endif
88025+
88026 #ifdef CONFIG_TCP_MD5SIG
88027 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
88028 __be32 daddr, __be32 saddr, const struct tcphdr *th);
88029@@ -1855,6 +1859,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
88030 return 0;
88031
88032 reset:
88033+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88034+ if (!grsec_enable_blackhole)
88035+#endif
88036 tcp_v4_send_reset(rsk, skb);
88037 discard:
88038 kfree_skb(skb);
88039@@ -2000,12 +2007,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
88040 TCP_SKB_CB(skb)->sacked = 0;
88041
88042 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
88043- if (!sk)
88044+ if (!sk) {
88045+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88046+ ret = 1;
88047+#endif
88048 goto no_tcp_socket;
88049-
88050+ }
88051 process:
88052- if (sk->sk_state == TCP_TIME_WAIT)
88053+ if (sk->sk_state == TCP_TIME_WAIT) {
88054+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88055+ ret = 2;
88056+#endif
88057 goto do_time_wait;
88058+ }
88059
88060 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
88061 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
88062@@ -2058,6 +2072,10 @@ csum_error:
88063 bad_packet:
88064 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
88065 } else {
88066+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88067+ if (!grsec_enable_blackhole || (ret == 1 &&
88068+ (skb->dev->flags & IFF_LOOPBACK)))
88069+#endif
88070 tcp_v4_send_reset(NULL, skb);
88071 }
88072
88073diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
88074index 0f01788..d52a859 100644
88075--- a/net/ipv4/tcp_minisocks.c
88076+++ b/net/ipv4/tcp_minisocks.c
88077@@ -27,6 +27,10 @@
88078 #include <net/inet_common.h>
88079 #include <net/xfrm.h>
88080
88081+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88082+extern int grsec_enable_blackhole;
88083+#endif
88084+
88085 int sysctl_tcp_syncookies __read_mostly = 1;
88086 EXPORT_SYMBOL(sysctl_tcp_syncookies);
88087
88088@@ -717,7 +721,10 @@ embryonic_reset:
88089 * avoid becoming vulnerable to outside attack aiming at
88090 * resetting legit local connections.
88091 */
88092- req->rsk_ops->send_reset(sk, skb);
88093+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88094+ if (!grsec_enable_blackhole)
88095+#endif
88096+ req->rsk_ops->send_reset(sk, skb);
88097 } else if (fastopen) { /* received a valid RST pkt */
88098 reqsk_fastopen_remove(sk, req, true);
88099 tcp_reset(sk);
88100diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
88101index d4943f6..e7a74a5 100644
88102--- a/net/ipv4/tcp_probe.c
88103+++ b/net/ipv4/tcp_probe.c
88104@@ -204,7 +204,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
88105 if (cnt + width >= len)
88106 break;
88107
88108- if (copy_to_user(buf + cnt, tbuf, width))
88109+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
88110 return -EFAULT;
88111 cnt += width;
88112 }
88113diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
88114index 4b85e6f..22f9ac9 100644
88115--- a/net/ipv4/tcp_timer.c
88116+++ b/net/ipv4/tcp_timer.c
88117@@ -22,6 +22,10 @@
88118 #include <linux/gfp.h>
88119 #include <net/tcp.h>
88120
88121+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88122+extern int grsec_lastack_retries;
88123+#endif
88124+
88125 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
88126 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
88127 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
88128@@ -185,6 +189,13 @@ static int tcp_write_timeout(struct sock *sk)
88129 }
88130 }
88131
88132+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88133+ if ((sk->sk_state == TCP_LAST_ACK) &&
88134+ (grsec_lastack_retries > 0) &&
88135+ (grsec_lastack_retries < retry_until))
88136+ retry_until = grsec_lastack_retries;
88137+#endif
88138+
88139 if (retransmits_timed_out(sk, retry_until,
88140 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
88141 /* Has it gone just too far? */
88142diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
88143index 0bf5d399..5a2dd92 100644
88144--- a/net/ipv4/udp.c
88145+++ b/net/ipv4/udp.c
88146@@ -87,6 +87,7 @@
88147 #include <linux/types.h>
88148 #include <linux/fcntl.h>
88149 #include <linux/module.h>
88150+#include <linux/security.h>
88151 #include <linux/socket.h>
88152 #include <linux/sockios.h>
88153 #include <linux/igmp.h>
88154@@ -111,6 +112,10 @@
88155 #include <trace/events/skb.h>
88156 #include "udp_impl.h"
88157
88158+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88159+extern int grsec_enable_blackhole;
88160+#endif
88161+
88162 struct udp_table udp_table __read_mostly;
88163 EXPORT_SYMBOL(udp_table);
88164
88165@@ -594,6 +599,9 @@ found:
88166 return s;
88167 }
88168
88169+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
88170+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
88171+
88172 /*
88173 * This routine is called by the ICMP module when it gets some
88174 * sort of error condition. If err < 0 then the socket should
88175@@ -799,7 +807,7 @@ send:
88176 /*
88177 * Push out all pending data as one UDP datagram. Socket is locked.
88178 */
88179-static int udp_push_pending_frames(struct sock *sk)
88180+int udp_push_pending_frames(struct sock *sk)
88181 {
88182 struct udp_sock *up = udp_sk(sk);
88183 struct inet_sock *inet = inet_sk(sk);
88184@@ -818,6 +826,7 @@ out:
88185 up->pending = 0;
88186 return err;
88187 }
88188+EXPORT_SYMBOL(udp_push_pending_frames);
88189
88190 int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
88191 size_t len)
88192@@ -889,9 +898,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
88193 dport = usin->sin_port;
88194 if (dport == 0)
88195 return -EINVAL;
88196+
88197+ err = gr_search_udp_sendmsg(sk, usin);
88198+ if (err)
88199+ return err;
88200 } else {
88201 if (sk->sk_state != TCP_ESTABLISHED)
88202 return -EDESTADDRREQ;
88203+
88204+ err = gr_search_udp_sendmsg(sk, NULL);
88205+ if (err)
88206+ return err;
88207+
88208 daddr = inet->inet_daddr;
88209 dport = inet->inet_dport;
88210 /* Open fast path for connected socket.
88211@@ -1135,7 +1153,7 @@ static unsigned int first_packet_length(struct sock *sk)
88212 IS_UDPLITE(sk));
88213 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
88214 IS_UDPLITE(sk));
88215- atomic_inc(&sk->sk_drops);
88216+ atomic_inc_unchecked(&sk->sk_drops);
88217 __skb_unlink(skb, rcvq);
88218 __skb_queue_tail(&list_kill, skb);
88219 }
88220@@ -1221,6 +1239,10 @@ try_again:
88221 if (!skb)
88222 goto out;
88223
88224+ err = gr_search_udp_recvmsg(sk, skb);
88225+ if (err)
88226+ goto out_free;
88227+
88228 ulen = skb->len - sizeof(struct udphdr);
88229 copied = len;
88230 if (copied > ulen)
88231@@ -1254,7 +1276,7 @@ try_again:
88232 if (unlikely(err)) {
88233 trace_kfree_skb(skb, udp_recvmsg);
88234 if (!peeked) {
88235- atomic_inc(&sk->sk_drops);
88236+ atomic_inc_unchecked(&sk->sk_drops);
88237 UDP_INC_STATS_USER(sock_net(sk),
88238 UDP_MIB_INERRORS, is_udplite);
88239 }
88240@@ -1541,7 +1563,7 @@ csum_error:
88241 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
88242 drop:
88243 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
88244- atomic_inc(&sk->sk_drops);
88245+ atomic_inc_unchecked(&sk->sk_drops);
88246 kfree_skb(skb);
88247 return -1;
88248 }
88249@@ -1560,7 +1582,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
88250 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
88251
88252 if (!skb1) {
88253- atomic_inc(&sk->sk_drops);
88254+ atomic_inc_unchecked(&sk->sk_drops);
88255 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
88256 IS_UDPLITE(sk));
88257 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
88258@@ -1729,6 +1751,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
88259 goto csum_error;
88260
88261 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
88262+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88263+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
88264+#endif
88265 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
88266
88267 /*
88268@@ -2159,7 +2184,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
88269 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
88270 0, sock_i_ino(sp),
88271 atomic_read(&sp->sk_refcnt), sp,
88272- atomic_read(&sp->sk_drops), len);
88273+ atomic_read_unchecked(&sp->sk_drops), len);
88274 }
88275
88276 int udp4_seq_show(struct seq_file *seq, void *v)
88277diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
88278index 9a459be..086b866 100644
88279--- a/net/ipv4/xfrm4_policy.c
88280+++ b/net/ipv4/xfrm4_policy.c
88281@@ -264,19 +264,18 @@ static struct ctl_table xfrm4_policy_table[] = {
88282
88283 static int __net_init xfrm4_net_init(struct net *net)
88284 {
88285- struct ctl_table *table;
88286+ ctl_table_no_const *table = NULL;
88287 struct ctl_table_header *hdr;
88288
88289- table = xfrm4_policy_table;
88290 if (!net_eq(net, &init_net)) {
88291- table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
88292+ table = kmemdup(xfrm4_policy_table, sizeof(xfrm4_policy_table), GFP_KERNEL);
88293 if (!table)
88294 goto err_alloc;
88295
88296 table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
88297- }
88298-
88299- hdr = register_net_sysctl(net, "net/ipv4", table);
88300+ hdr = register_net_sysctl(net, "net/ipv4", table);
88301+ } else
88302+ hdr = register_net_sysctl(net, "net/ipv4", xfrm4_policy_table);
88303 if (!hdr)
88304 goto err_reg;
88305
88306@@ -284,8 +283,7 @@ static int __net_init xfrm4_net_init(struct net *net)
88307 return 0;
88308
88309 err_reg:
88310- if (!net_eq(net, &init_net))
88311- kfree(table);
88312+ kfree(table);
88313 err_alloc:
88314 return -ENOMEM;
88315 }
88316diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
88317index 4ab4c38..1533b2d 100644
88318--- a/net/ipv6/addrconf.c
88319+++ b/net/ipv6/addrconf.c
88320@@ -621,7 +621,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
88321 idx = 0;
88322 head = &net->dev_index_head[h];
88323 rcu_read_lock();
88324- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
88325+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^
88326 net->dev_base_seq;
88327 hlist_for_each_entry_rcu(dev, head, index_hlist) {
88328 if (idx < s_idx)
88329@@ -2372,7 +2372,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
88330 p.iph.ihl = 5;
88331 p.iph.protocol = IPPROTO_IPV6;
88332 p.iph.ttl = 64;
88333- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
88334+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
88335
88336 if (ops->ndo_do_ioctl) {
88337 mm_segment_t oldfs = get_fs();
88338@@ -3994,7 +3994,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
88339 s_ip_idx = ip_idx = cb->args[2];
88340
88341 rcu_read_lock();
88342- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
88343+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
88344 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
88345 idx = 0;
88346 head = &net->dev_index_head[h];
88347@@ -4579,7 +4579,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
88348 dst_free(&ifp->rt->dst);
88349 break;
88350 }
88351- atomic_inc(&net->ipv6.dev_addr_genid);
88352+ atomic_inc_unchecked(&net->ipv6.dev_addr_genid);
88353 }
88354
88355 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
88356@@ -4599,7 +4599,7 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write,
88357 int *valp = ctl->data;
88358 int val = *valp;
88359 loff_t pos = *ppos;
88360- ctl_table lctl;
88361+ ctl_table_no_const lctl;
88362 int ret;
88363
88364 /*
88365@@ -4681,7 +4681,7 @@ int addrconf_sysctl_disable(ctl_table *ctl, int write,
88366 int *valp = ctl->data;
88367 int val = *valp;
88368 loff_t pos = *ppos;
88369- ctl_table lctl;
88370+ ctl_table_no_const lctl;
88371 int ret;
88372
88373 /*
88374diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
88375index b4ff0a4..db9b764 100644
88376--- a/net/ipv6/icmp.c
88377+++ b/net/ipv6/icmp.c
88378@@ -980,7 +980,7 @@ ctl_table ipv6_icmp_table_template[] = {
88379
88380 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
88381 {
88382- struct ctl_table *table;
88383+ ctl_table_no_const *table;
88384
88385 table = kmemdup(ipv6_icmp_table_template,
88386 sizeof(ipv6_icmp_table_template),
88387diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
88388index ecd6073..58162ae 100644
88389--- a/net/ipv6/ip6_gre.c
88390+++ b/net/ipv6/ip6_gre.c
88391@@ -74,7 +74,7 @@ struct ip6gre_net {
88392 struct net_device *fb_tunnel_dev;
88393 };
88394
88395-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
88396+static struct rtnl_link_ops ip6gre_link_ops;
88397 static int ip6gre_tunnel_init(struct net_device *dev);
88398 static void ip6gre_tunnel_setup(struct net_device *dev);
88399 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
88400@@ -1283,7 +1283,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
88401 }
88402
88403
88404-static struct inet6_protocol ip6gre_protocol __read_mostly = {
88405+static struct inet6_protocol ip6gre_protocol = {
88406 .handler = ip6gre_rcv,
88407 .err_handler = ip6gre_err,
88408 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
88409@@ -1617,7 +1617,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
88410 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
88411 };
88412
88413-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
88414+static struct rtnl_link_ops ip6gre_link_ops = {
88415 .kind = "ip6gre",
88416 .maxtype = IFLA_GRE_MAX,
88417 .policy = ip6gre_policy,
88418@@ -1630,7 +1630,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
88419 .fill_info = ip6gre_fill_info,
88420 };
88421
88422-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
88423+static struct rtnl_link_ops ip6gre_tap_ops = {
88424 .kind = "ip6gretap",
88425 .maxtype = IFLA_GRE_MAX,
88426 .policy = ip6gre_policy,
88427diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
88428index d5d20cd..6e3ddf8 100644
88429--- a/net/ipv6/ip6_output.c
88430+++ b/net/ipv6/ip6_output.c
88431@@ -1098,11 +1098,12 @@ static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
88432 return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
88433 }
88434
88435-static void ip6_append_data_mtu(int *mtu,
88436+static void ip6_append_data_mtu(unsigned int *mtu,
88437 int *maxfraglen,
88438 unsigned int fragheaderlen,
88439 struct sk_buff *skb,
88440- struct rt6_info *rt)
88441+ struct rt6_info *rt,
88442+ bool pmtuprobe)
88443 {
88444 if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
88445 if (skb == NULL) {
88446@@ -1114,7 +1115,9 @@ static void ip6_append_data_mtu(int *mtu,
88447 * this fragment is not first, the headers
88448 * space is regarded as data space.
88449 */
88450- *mtu = dst_mtu(rt->dst.path);
88451+ *mtu = min(*mtu, pmtuprobe ?
88452+ rt->dst.dev->mtu :
88453+ dst_mtu(rt->dst.path));
88454 }
88455 *maxfraglen = ((*mtu - fragheaderlen) & ~7)
88456 + fragheaderlen - sizeof(struct frag_hdr);
88457@@ -1131,11 +1134,10 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
88458 struct ipv6_pinfo *np = inet6_sk(sk);
88459 struct inet_cork *cork;
88460 struct sk_buff *skb, *skb_prev = NULL;
88461- unsigned int maxfraglen, fragheaderlen;
88462+ unsigned int maxfraglen, fragheaderlen, mtu;
88463 int exthdrlen;
88464 int dst_exthdrlen;
88465 int hh_len;
88466- int mtu;
88467 int copy;
88468 int err;
88469 int offset = 0;
88470@@ -1292,7 +1294,9 @@ alloc_new_skb:
88471 /* update mtu and maxfraglen if necessary */
88472 if (skb == NULL || skb_prev == NULL)
88473 ip6_append_data_mtu(&mtu, &maxfraglen,
88474- fragheaderlen, skb, rt);
88475+ fragheaderlen, skb, rt,
88476+ np->pmtudisc ==
88477+ IPV6_PMTUDISC_PROBE);
88478
88479 skb_prev = skb;
88480
88481diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
88482index 1e55866..b398dab 100644
88483--- a/net/ipv6/ip6_tunnel.c
88484+++ b/net/ipv6/ip6_tunnel.c
88485@@ -88,7 +88,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
88486
88487 static int ip6_tnl_dev_init(struct net_device *dev);
88488 static void ip6_tnl_dev_setup(struct net_device *dev);
88489-static struct rtnl_link_ops ip6_link_ops __read_mostly;
88490+static struct rtnl_link_ops ip6_link_ops;
88491
88492 static int ip6_tnl_net_id __read_mostly;
88493 struct ip6_tnl_net {
88494@@ -1672,7 +1672,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
88495 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
88496 };
88497
88498-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
88499+static struct rtnl_link_ops ip6_link_ops = {
88500 .kind = "ip6tnl",
88501 .maxtype = IFLA_IPTUN_MAX,
88502 .policy = ip6_tnl_policy,
88503diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
88504index d1e2e8e..51c19ae 100644
88505--- a/net/ipv6/ipv6_sockglue.c
88506+++ b/net/ipv6/ipv6_sockglue.c
88507@@ -991,7 +991,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
88508 if (sk->sk_type != SOCK_STREAM)
88509 return -ENOPROTOOPT;
88510
88511- msg.msg_control = optval;
88512+ msg.msg_control = (void __force_kernel *)optval;
88513 msg.msg_controllen = len;
88514 msg.msg_flags = flags;
88515
88516diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
88517index 44400c2..8e11f52 100644
88518--- a/net/ipv6/netfilter/ip6_tables.c
88519+++ b/net/ipv6/netfilter/ip6_tables.c
88520@@ -1078,14 +1078,14 @@ static int compat_table_info(const struct xt_table_info *info,
88521 #endif
88522
88523 static int get_info(struct net *net, void __user *user,
88524- const int *len, int compat)
88525+ int len, int compat)
88526 {
88527 char name[XT_TABLE_MAXNAMELEN];
88528 struct xt_table *t;
88529 int ret;
88530
88531- if (*len != sizeof(struct ip6t_getinfo)) {
88532- duprintf("length %u != %zu\n", *len,
88533+ if (len != sizeof(struct ip6t_getinfo)) {
88534+ duprintf("length %u != %zu\n", len,
88535 sizeof(struct ip6t_getinfo));
88536 return -EINVAL;
88537 }
88538@@ -1122,7 +1122,7 @@ static int get_info(struct net *net, void __user *user,
88539 info.size = private->size;
88540 strcpy(info.name, name);
88541
88542- if (copy_to_user(user, &info, *len) != 0)
88543+ if (copy_to_user(user, &info, len) != 0)
88544 ret = -EFAULT;
88545 else
88546 ret = 0;
88547@@ -1976,7 +1976,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
88548
88549 switch (cmd) {
88550 case IP6T_SO_GET_INFO:
88551- ret = get_info(sock_net(sk), user, len, 1);
88552+ ret = get_info(sock_net(sk), user, *len, 1);
88553 break;
88554 case IP6T_SO_GET_ENTRIES:
88555 ret = compat_get_entries(sock_net(sk), user, len);
88556@@ -2023,7 +2023,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
88557
88558 switch (cmd) {
88559 case IP6T_SO_GET_INFO:
88560- ret = get_info(sock_net(sk), user, len, 0);
88561+ ret = get_info(sock_net(sk), user, *len, 0);
88562 break;
88563
88564 case IP6T_SO_GET_ENTRIES:
88565diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
88566index dffdc1a..ccc6678 100644
88567--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
88568+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
88569@@ -90,12 +90,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
88570
88571 static int nf_ct_frag6_sysctl_register(struct net *net)
88572 {
88573- struct ctl_table *table;
88574+ ctl_table_no_const *table = NULL;
88575 struct ctl_table_header *hdr;
88576
88577- table = nf_ct_frag6_sysctl_table;
88578 if (!net_eq(net, &init_net)) {
88579- table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
88580+ table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
88581 GFP_KERNEL);
88582 if (table == NULL)
88583 goto err_alloc;
88584@@ -103,9 +102,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
88585 table[0].data = &net->nf_frag.frags.timeout;
88586 table[1].data = &net->nf_frag.frags.low_thresh;
88587 table[2].data = &net->nf_frag.frags.high_thresh;
88588- }
88589-
88590- hdr = register_net_sysctl(net, "net/netfilter", table);
88591+ hdr = register_net_sysctl(net, "net/netfilter", table);
88592+ } else
88593+ hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
88594 if (hdr == NULL)
88595 goto err_reg;
88596
88597@@ -113,8 +112,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
88598 return 0;
88599
88600 err_reg:
88601- if (!net_eq(net, &init_net))
88602- kfree(table);
88603+ kfree(table);
88604 err_alloc:
88605 return -ENOMEM;
88606 }
88607diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
88608index eedff8c..6e13a47 100644
88609--- a/net/ipv6/raw.c
88610+++ b/net/ipv6/raw.c
88611@@ -378,7 +378,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
88612 {
88613 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
88614 skb_checksum_complete(skb)) {
88615- atomic_inc(&sk->sk_drops);
88616+ atomic_inc_unchecked(&sk->sk_drops);
88617 kfree_skb(skb);
88618 return NET_RX_DROP;
88619 }
88620@@ -406,7 +406,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
88621 struct raw6_sock *rp = raw6_sk(sk);
88622
88623 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
88624- atomic_inc(&sk->sk_drops);
88625+ atomic_inc_unchecked(&sk->sk_drops);
88626 kfree_skb(skb);
88627 return NET_RX_DROP;
88628 }
88629@@ -430,7 +430,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
88630
88631 if (inet->hdrincl) {
88632 if (skb_checksum_complete(skb)) {
88633- atomic_inc(&sk->sk_drops);
88634+ atomic_inc_unchecked(&sk->sk_drops);
88635 kfree_skb(skb);
88636 return NET_RX_DROP;
88637 }
88638@@ -602,7 +602,7 @@ out:
88639 return err;
88640 }
88641
88642-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
88643+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
88644 struct flowi6 *fl6, struct dst_entry **dstp,
88645 unsigned int flags)
88646 {
88647@@ -914,12 +914,15 @@ do_confirm:
88648 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
88649 char __user *optval, int optlen)
88650 {
88651+ struct icmp6_filter filter;
88652+
88653 switch (optname) {
88654 case ICMPV6_FILTER:
88655 if (optlen > sizeof(struct icmp6_filter))
88656 optlen = sizeof(struct icmp6_filter);
88657- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
88658+ if (copy_from_user(&filter, optval, optlen))
88659 return -EFAULT;
88660+ raw6_sk(sk)->filter = filter;
88661 return 0;
88662 default:
88663 return -ENOPROTOOPT;
88664@@ -932,6 +935,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
88665 char __user *optval, int __user *optlen)
88666 {
88667 int len;
88668+ struct icmp6_filter filter;
88669
88670 switch (optname) {
88671 case ICMPV6_FILTER:
88672@@ -943,7 +947,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
88673 len = sizeof(struct icmp6_filter);
88674 if (put_user(len, optlen))
88675 return -EFAULT;
88676- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
88677+ filter = raw6_sk(sk)->filter;
88678+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
88679 return -EFAULT;
88680 return 0;
88681 default:
88682@@ -1251,7 +1256,7 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
88683 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
88684 0,
88685 sock_i_ino(sp),
88686- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
88687+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
88688 }
88689
88690 static int raw6_seq_show(struct seq_file *seq, void *v)
88691diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
88692index 790d9f4..68ae078 100644
88693--- a/net/ipv6/reassembly.c
88694+++ b/net/ipv6/reassembly.c
88695@@ -621,12 +621,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
88696
88697 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
88698 {
88699- struct ctl_table *table;
88700+ ctl_table_no_const *table = NULL;
88701 struct ctl_table_header *hdr;
88702
88703- table = ip6_frags_ns_ctl_table;
88704 if (!net_eq(net, &init_net)) {
88705- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
88706+ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
88707 if (table == NULL)
88708 goto err_alloc;
88709
88710@@ -637,9 +636,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
88711 /* Don't export sysctls to unprivileged users */
88712 if (net->user_ns != &init_user_ns)
88713 table[0].procname = NULL;
88714- }
88715+ hdr = register_net_sysctl(net, "net/ipv6", table);
88716+ } else
88717+ hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
88718
88719- hdr = register_net_sysctl(net, "net/ipv6", table);
88720 if (hdr == NULL)
88721 goto err_reg;
88722
88723@@ -647,8 +647,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
88724 return 0;
88725
88726 err_reg:
88727- if (!net_eq(net, &init_net))
88728- kfree(table);
88729+ kfree(table);
88730 err_alloc:
88731 return -ENOMEM;
88732 }
88733diff --git a/net/ipv6/route.c b/net/ipv6/route.c
88734index ad0aa6b..beaef03 100644
88735--- a/net/ipv6/route.c
88736+++ b/net/ipv6/route.c
88737@@ -2881,7 +2881,7 @@ ctl_table ipv6_route_table_template[] = {
88738
88739 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
88740 {
88741- struct ctl_table *table;
88742+ ctl_table_no_const *table;
88743
88744 table = kmemdup(ipv6_route_table_template,
88745 sizeof(ipv6_route_table_template),
88746diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
88747index 3353634..3d5084a 100644
88748--- a/net/ipv6/sit.c
88749+++ b/net/ipv6/sit.c
88750@@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct net_device *dev);
88751 static void ipip6_dev_free(struct net_device *dev);
88752 static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
88753 __be32 *v4dst);
88754-static struct rtnl_link_ops sit_link_ops __read_mostly;
88755+static struct rtnl_link_ops sit_link_ops;
88756
88757 static int sit_net_id __read_mostly;
88758 struct sit_net {
88759@@ -1453,7 +1453,7 @@ static const struct nla_policy ipip6_policy[IFLA_IPTUN_MAX + 1] = {
88760 #endif
88761 };
88762
88763-static struct rtnl_link_ops sit_link_ops __read_mostly = {
88764+static struct rtnl_link_ops sit_link_ops = {
88765 .kind = "sit",
88766 .maxtype = IFLA_IPTUN_MAX,
88767 .policy = ipip6_policy,
88768diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
88769index e85c48b..b8268d3 100644
88770--- a/net/ipv6/sysctl_net_ipv6.c
88771+++ b/net/ipv6/sysctl_net_ipv6.c
88772@@ -40,7 +40,7 @@ static ctl_table ipv6_rotable[] = {
88773
88774 static int __net_init ipv6_sysctl_net_init(struct net *net)
88775 {
88776- struct ctl_table *ipv6_table;
88777+ ctl_table_no_const *ipv6_table;
88778 struct ctl_table *ipv6_route_table;
88779 struct ctl_table *ipv6_icmp_table;
88780 int err;
88781diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
88782index 0a17ed9..2526cc3 100644
88783--- a/net/ipv6/tcp_ipv6.c
88784+++ b/net/ipv6/tcp_ipv6.c
88785@@ -103,6 +103,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
88786 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
88787 }
88788
88789+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88790+extern int grsec_enable_blackhole;
88791+#endif
88792+
88793 static void tcp_v6_hash(struct sock *sk)
88794 {
88795 if (sk->sk_state != TCP_CLOSE) {
88796@@ -1398,6 +1402,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
88797 return 0;
88798
88799 reset:
88800+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88801+ if (!grsec_enable_blackhole)
88802+#endif
88803 tcp_v6_send_reset(sk, skb);
88804 discard:
88805 if (opt_skb)
88806@@ -1480,12 +1487,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
88807 TCP_SKB_CB(skb)->sacked = 0;
88808
88809 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
88810- if (!sk)
88811+ if (!sk) {
88812+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88813+ ret = 1;
88814+#endif
88815 goto no_tcp_socket;
88816+ }
88817
88818 process:
88819- if (sk->sk_state == TCP_TIME_WAIT)
88820+ if (sk->sk_state == TCP_TIME_WAIT) {
88821+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88822+ ret = 2;
88823+#endif
88824 goto do_time_wait;
88825+ }
88826
88827 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
88828 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
88829@@ -1536,6 +1551,10 @@ csum_error:
88830 bad_packet:
88831 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
88832 } else {
88833+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88834+ if (!grsec_enable_blackhole || (ret == 1 &&
88835+ (skb->dev->flags & IFF_LOOPBACK)))
88836+#endif
88837 tcp_v6_send_reset(NULL, skb);
88838 }
88839
88840diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
88841index 42923b1..d09c290 100644
88842--- a/net/ipv6/udp.c
88843+++ b/net/ipv6/udp.c
88844@@ -52,6 +52,10 @@
88845 #include <trace/events/skb.h>
88846 #include "udp_impl.h"
88847
88848+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88849+extern int grsec_enable_blackhole;
88850+#endif
88851+
88852 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
88853 {
88854 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
88855@@ -419,7 +423,7 @@ try_again:
88856 if (unlikely(err)) {
88857 trace_kfree_skb(skb, udpv6_recvmsg);
88858 if (!peeked) {
88859- atomic_inc(&sk->sk_drops);
88860+ atomic_inc_unchecked(&sk->sk_drops);
88861 if (is_udp4)
88862 UDP_INC_STATS_USER(sock_net(sk),
88863 UDP_MIB_INERRORS,
88864@@ -665,7 +669,7 @@ csum_error:
88865 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
88866 drop:
88867 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
88868- atomic_inc(&sk->sk_drops);
88869+ atomic_inc_unchecked(&sk->sk_drops);
88870 kfree_skb(skb);
88871 return -1;
88872 }
88873@@ -723,7 +727,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
88874 if (likely(skb1 == NULL))
88875 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
88876 if (!skb1) {
88877- atomic_inc(&sk->sk_drops);
88878+ atomic_inc_unchecked(&sk->sk_drops);
88879 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
88880 IS_UDPLITE(sk));
88881 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
88882@@ -860,6 +864,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
88883 goto csum_error;
88884
88885 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
88886+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88887+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
88888+#endif
88889 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
88890
88891 kfree_skb(skb);
88892@@ -955,11 +962,16 @@ static int udp_v6_push_pending_frames(struct sock *sk)
88893 struct udphdr *uh;
88894 struct udp_sock *up = udp_sk(sk);
88895 struct inet_sock *inet = inet_sk(sk);
88896- struct flowi6 *fl6 = &inet->cork.fl.u.ip6;
88897+ struct flowi6 *fl6;
88898 int err = 0;
88899 int is_udplite = IS_UDPLITE(sk);
88900 __wsum csum = 0;
88901
88902+ if (up->pending == AF_INET)
88903+ return udp_push_pending_frames(sk);
88904+
88905+ fl6 = &inet->cork.fl.u.ip6;
88906+
88907 /* Grab the skbuff where UDP header space exists. */
88908 if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
88909 goto out;
88910@@ -1387,7 +1399,7 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
88911 0,
88912 sock_i_ino(sp),
88913 atomic_read(&sp->sk_refcnt), sp,
88914- atomic_read(&sp->sk_drops));
88915+ atomic_read_unchecked(&sp->sk_drops));
88916 }
88917
88918 int udp6_seq_show(struct seq_file *seq, void *v)
88919diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
88920index 23ed03d..465a71d 100644
88921--- a/net/ipv6/xfrm6_policy.c
88922+++ b/net/ipv6/xfrm6_policy.c
88923@@ -324,19 +324,19 @@ static struct ctl_table xfrm6_policy_table[] = {
88924
88925 static int __net_init xfrm6_net_init(struct net *net)
88926 {
88927- struct ctl_table *table;
88928+ ctl_table_no_const *table = NULL;
88929 struct ctl_table_header *hdr;
88930
88931- table = xfrm6_policy_table;
88932 if (!net_eq(net, &init_net)) {
88933- table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
88934+ table = kmemdup(xfrm6_policy_table, sizeof(xfrm6_policy_table), GFP_KERNEL);
88935 if (!table)
88936 goto err_alloc;
88937
88938 table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
88939- }
88940+ hdr = register_net_sysctl(net, "net/ipv6", table);
88941+ } else
88942+ hdr = register_net_sysctl(net, "net/ipv6", xfrm6_policy_table);
88943
88944- hdr = register_net_sysctl(net, "net/ipv6", table);
88945 if (!hdr)
88946 goto err_reg;
88947
88948@@ -344,8 +344,7 @@ static int __net_init xfrm6_net_init(struct net *net)
88949 return 0;
88950
88951 err_reg:
88952- if (!net_eq(net, &init_net))
88953- kfree(table);
88954+ kfree(table);
88955 err_alloc:
88956 return -ENOMEM;
88957 }
88958diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
88959index 41ac7938..75e3bb1 100644
88960--- a/net/irda/ircomm/ircomm_tty.c
88961+++ b/net/irda/ircomm/ircomm_tty.c
88962@@ -319,11 +319,11 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
88963 add_wait_queue(&port->open_wait, &wait);
88964
88965 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
88966- __FILE__, __LINE__, tty->driver->name, port->count);
88967+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
88968
88969 spin_lock_irqsave(&port->lock, flags);
88970 if (!tty_hung_up_p(filp))
88971- port->count--;
88972+ atomic_dec(&port->count);
88973 port->blocked_open++;
88974 spin_unlock_irqrestore(&port->lock, flags);
88975
88976@@ -358,7 +358,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
88977 }
88978
88979 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
88980- __FILE__, __LINE__, tty->driver->name, port->count);
88981+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
88982
88983 schedule();
88984 }
88985@@ -368,12 +368,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
88986
88987 spin_lock_irqsave(&port->lock, flags);
88988 if (!tty_hung_up_p(filp))
88989- port->count++;
88990+ atomic_inc(&port->count);
88991 port->blocked_open--;
88992 spin_unlock_irqrestore(&port->lock, flags);
88993
88994 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
88995- __FILE__, __LINE__, tty->driver->name, port->count);
88996+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
88997
88998 if (!retval)
88999 port->flags |= ASYNC_NORMAL_ACTIVE;
89000@@ -447,12 +447,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
89001
89002 /* ++ is not atomic, so this should be protected - Jean II */
89003 spin_lock_irqsave(&self->port.lock, flags);
89004- self->port.count++;
89005+ atomic_inc(&self->port.count);
89006 spin_unlock_irqrestore(&self->port.lock, flags);
89007 tty_port_tty_set(&self->port, tty);
89008
89009 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
89010- self->line, self->port.count);
89011+ self->line, atomic_read(&self->port.count));
89012
89013 /* Not really used by us, but lets do it anyway */
89014 self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
89015@@ -989,7 +989,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
89016 tty_kref_put(port->tty);
89017 }
89018 port->tty = NULL;
89019- port->count = 0;
89020+ atomic_set(&port->count, 0);
89021 spin_unlock_irqrestore(&port->lock, flags);
89022
89023 wake_up_interruptible(&port->open_wait);
89024@@ -1346,7 +1346,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
89025 seq_putc(m, '\n');
89026
89027 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
89028- seq_printf(m, "Open count: %d\n", self->port.count);
89029+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
89030 seq_printf(m, "Max data size: %d\n", self->max_data_size);
89031 seq_printf(m, "Max header size: %d\n", self->max_header_size);
89032
89033diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
89034index ae69165..c8b82d8 100644
89035--- a/net/iucv/af_iucv.c
89036+++ b/net/iucv/af_iucv.c
89037@@ -773,10 +773,10 @@ static int iucv_sock_autobind(struct sock *sk)
89038
89039 write_lock_bh(&iucv_sk_list.lock);
89040
89041- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
89042+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
89043 while (__iucv_get_sock_by_name(name)) {
89044 sprintf(name, "%08x",
89045- atomic_inc_return(&iucv_sk_list.autobind_name));
89046+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
89047 }
89048
89049 write_unlock_bh(&iucv_sk_list.lock);
89050diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
89051index 4fe76ff..426a904 100644
89052--- a/net/iucv/iucv.c
89053+++ b/net/iucv/iucv.c
89054@@ -690,7 +690,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
89055 return NOTIFY_OK;
89056 }
89057
89058-static struct notifier_block __refdata iucv_cpu_notifier = {
89059+static struct notifier_block iucv_cpu_notifier = {
89060 .notifier_call = iucv_cpu_notify,
89061 };
89062
89063diff --git a/net/key/af_key.c b/net/key/af_key.c
89064index 9da8620..97070ad 100644
89065--- a/net/key/af_key.c
89066+++ b/net/key/af_key.c
89067@@ -3047,10 +3047,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
89068 static u32 get_acqseq(void)
89069 {
89070 u32 res;
89071- static atomic_t acqseq;
89072+ static atomic_unchecked_t acqseq;
89073
89074 do {
89075- res = atomic_inc_return(&acqseq);
89076+ res = atomic_inc_return_unchecked(&acqseq);
89077 } while (!res);
89078 return res;
89079 }
89080diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
89081index 8dec687..5ebee2d 100644
89082--- a/net/l2tp/l2tp_ppp.c
89083+++ b/net/l2tp/l2tp_ppp.c
89084@@ -1793,7 +1793,8 @@ static const struct proto_ops pppol2tp_ops = {
89085
89086 static const struct pppox_proto pppol2tp_proto = {
89087 .create = pppol2tp_create,
89088- .ioctl = pppol2tp_ioctl
89089+ .ioctl = pppol2tp_ioctl,
89090+ .owner = THIS_MODULE,
89091 };
89092
89093 #ifdef CONFIG_L2TP_V3
89094diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
89095index 4fdb306e..920086a 100644
89096--- a/net/mac80211/cfg.c
89097+++ b/net/mac80211/cfg.c
89098@@ -804,7 +804,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
89099 ret = ieee80211_vif_use_channel(sdata, chandef,
89100 IEEE80211_CHANCTX_EXCLUSIVE);
89101 }
89102- } else if (local->open_count == local->monitors) {
89103+ } else if (local_read(&local->open_count) == local->monitors) {
89104 local->_oper_chandef = *chandef;
89105 ieee80211_hw_config(local, 0);
89106 }
89107@@ -2920,7 +2920,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
89108 else
89109 local->probe_req_reg--;
89110
89111- if (!local->open_count)
89112+ if (!local_read(&local->open_count))
89113 break;
89114
89115 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
89116@@ -3383,8 +3383,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
89117 if (chanctx_conf) {
89118 *chandef = chanctx_conf->def;
89119 ret = 0;
89120- } else if (local->open_count > 0 &&
89121- local->open_count == local->monitors &&
89122+ } else if (local_read(&local->open_count) > 0 &&
89123+ local_read(&local->open_count) == local->monitors &&
89124 sdata->vif.type == NL80211_IFTYPE_MONITOR) {
89125 if (local->use_chanctx)
89126 *chandef = local->monitor_chandef;
89127diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
89128index 9ca8e32..48e4a9b 100644
89129--- a/net/mac80211/ieee80211_i.h
89130+++ b/net/mac80211/ieee80211_i.h
89131@@ -28,6 +28,7 @@
89132 #include <net/ieee80211_radiotap.h>
89133 #include <net/cfg80211.h>
89134 #include <net/mac80211.h>
89135+#include <asm/local.h>
89136 #include "key.h"
89137 #include "sta_info.h"
89138 #include "debug.h"
89139@@ -891,7 +892,7 @@ struct ieee80211_local {
89140 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
89141 spinlock_t queue_stop_reason_lock;
89142
89143- int open_count;
89144+ local_t open_count;
89145 int monitors, cooked_mntrs;
89146 /* number of interfaces with corresponding FIF_ flags */
89147 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
89148diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
89149index 98d20c0..586675b 100644
89150--- a/net/mac80211/iface.c
89151+++ b/net/mac80211/iface.c
89152@@ -502,7 +502,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
89153 break;
89154 }
89155
89156- if (local->open_count == 0) {
89157+ if (local_read(&local->open_count) == 0) {
89158 res = drv_start(local);
89159 if (res)
89160 goto err_del_bss;
89161@@ -545,7 +545,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
89162 break;
89163 }
89164
89165- if (local->monitors == 0 && local->open_count == 0) {
89166+ if (local->monitors == 0 && local_read(&local->open_count) == 0) {
89167 res = ieee80211_add_virtual_monitor(local);
89168 if (res)
89169 goto err_stop;
89170@@ -653,7 +653,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
89171 atomic_inc(&local->iff_promiscs);
89172
89173 if (coming_up)
89174- local->open_count++;
89175+ local_inc(&local->open_count);
89176
89177 if (hw_reconf_flags)
89178 ieee80211_hw_config(local, hw_reconf_flags);
89179@@ -691,7 +691,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
89180 err_del_interface:
89181 drv_remove_interface(local, sdata);
89182 err_stop:
89183- if (!local->open_count)
89184+ if (!local_read(&local->open_count))
89185 drv_stop(local);
89186 err_del_bss:
89187 sdata->bss = NULL;
89188@@ -828,7 +828,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
89189 }
89190
89191 if (going_down)
89192- local->open_count--;
89193+ local_dec(&local->open_count);
89194
89195 switch (sdata->vif.type) {
89196 case NL80211_IFTYPE_AP_VLAN:
89197@@ -895,7 +895,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
89198 }
89199 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
89200
89201- if (local->open_count == 0)
89202+ if (local_read(&local->open_count) == 0)
89203 ieee80211_clear_tx_pending(local);
89204
89205 /*
89206@@ -931,7 +931,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
89207
89208 ieee80211_recalc_ps(local, -1);
89209
89210- if (local->open_count == 0) {
89211+ if (local_read(&local->open_count) == 0) {
89212 ieee80211_stop_device(local);
89213
89214 /* no reconfiguring after stop! */
89215@@ -942,7 +942,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
89216 ieee80211_configure_filter(local);
89217 ieee80211_hw_config(local, hw_reconf_flags);
89218
89219- if (local->monitors == local->open_count)
89220+ if (local->monitors == local_read(&local->open_count))
89221 ieee80211_add_virtual_monitor(local);
89222 }
89223
89224diff --git a/net/mac80211/main.c b/net/mac80211/main.c
89225index 8a7bfc4..4407cd0 100644
89226--- a/net/mac80211/main.c
89227+++ b/net/mac80211/main.c
89228@@ -181,7 +181,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
89229 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
89230 IEEE80211_CONF_CHANGE_POWER);
89231
89232- if (changed && local->open_count) {
89233+ if (changed && local_read(&local->open_count)) {
89234 ret = drv_config(local, changed);
89235 /*
89236 * Goal:
89237diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
89238index 7fc5d0d..07ea536 100644
89239--- a/net/mac80211/pm.c
89240+++ b/net/mac80211/pm.c
89241@@ -12,7 +12,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
89242 struct ieee80211_sub_if_data *sdata;
89243 struct sta_info *sta;
89244
89245- if (!local->open_count)
89246+ if (!local_read(&local->open_count))
89247 goto suspend;
89248
89249 ieee80211_scan_cancel(local);
89250@@ -59,7 +59,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
89251 cancel_work_sync(&local->dynamic_ps_enable_work);
89252 del_timer_sync(&local->dynamic_ps_timer);
89253
89254- local->wowlan = wowlan && local->open_count;
89255+ local->wowlan = wowlan && local_read(&local->open_count);
89256 if (local->wowlan) {
89257 int err = drv_suspend(local, wowlan);
89258 if (err < 0) {
89259@@ -113,7 +113,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
89260 WARN_ON(!list_empty(&local->chanctx_list));
89261
89262 /* stop hardware - this must stop RX */
89263- if (local->open_count)
89264+ if (local_read(&local->open_count))
89265 ieee80211_stop_device(local);
89266
89267 suspend:
89268diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
89269index a02bef3..f2f38dd 100644
89270--- a/net/mac80211/rate.c
89271+++ b/net/mac80211/rate.c
89272@@ -712,7 +712,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
89273
89274 ASSERT_RTNL();
89275
89276- if (local->open_count)
89277+ if (local_read(&local->open_count))
89278 return -EBUSY;
89279
89280 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
89281diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
89282index c97a065..ff61928 100644
89283--- a/net/mac80211/rc80211_pid_debugfs.c
89284+++ b/net/mac80211/rc80211_pid_debugfs.c
89285@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
89286
89287 spin_unlock_irqrestore(&events->lock, status);
89288
89289- if (copy_to_user(buf, pb, p))
89290+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
89291 return -EFAULT;
89292
89293 return p;
89294diff --git a/net/mac80211/util.c b/net/mac80211/util.c
89295index 72e6292..e6319eb 100644
89296--- a/net/mac80211/util.c
89297+++ b/net/mac80211/util.c
89298@@ -1472,7 +1472,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
89299 }
89300 #endif
89301 /* everything else happens only if HW was up & running */
89302- if (!local->open_count)
89303+ if (!local_read(&local->open_count))
89304 goto wake_up;
89305
89306 /*
89307@@ -1696,7 +1696,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
89308 local->in_reconfig = false;
89309 barrier();
89310
89311- if (local->monitors == local->open_count && local->monitors > 0)
89312+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
89313 ieee80211_add_virtual_monitor(local);
89314
89315 /*
89316diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
89317index 56d22ca..87c778f 100644
89318--- a/net/netfilter/Kconfig
89319+++ b/net/netfilter/Kconfig
89320@@ -958,6 +958,16 @@ config NETFILTER_XT_MATCH_ESP
89321
89322 To compile it as a module, choose M here. If unsure, say N.
89323
89324+config NETFILTER_XT_MATCH_GRADM
89325+ tristate '"gradm" match support'
89326+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
89327+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
89328+ ---help---
89329+ The gradm match allows to match on grsecurity RBAC being enabled.
89330+ It is useful when iptables rules are applied early on bootup to
89331+ prevent connections to the machine (except from a trusted host)
89332+ while the RBAC system is disabled.
89333+
89334 config NETFILTER_XT_MATCH_HASHLIMIT
89335 tristate '"hashlimit" match support'
89336 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
89337diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
89338index a1abf87..dbcb7ee 100644
89339--- a/net/netfilter/Makefile
89340+++ b/net/netfilter/Makefile
89341@@ -112,6 +112,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
89342 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
89343 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
89344 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
89345+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
89346 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
89347 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
89348 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
89349diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
89350index f771390..145b765 100644
89351--- a/net/netfilter/ipset/ip_set_core.c
89352+++ b/net/netfilter/ipset/ip_set_core.c
89353@@ -1820,7 +1820,7 @@ done:
89354 return ret;
89355 }
89356
89357-static struct nf_sockopt_ops so_set __read_mostly = {
89358+static struct nf_sockopt_ops so_set = {
89359 .pf = PF_INET,
89360 .get_optmin = SO_IP_SET,
89361 .get_optmax = SO_IP_SET + 1,
89362diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
89363index a083bda..da661c3 100644
89364--- a/net/netfilter/ipvs/ip_vs_conn.c
89365+++ b/net/netfilter/ipvs/ip_vs_conn.c
89366@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
89367 /* Increase the refcnt counter of the dest */
89368 ip_vs_dest_hold(dest);
89369
89370- conn_flags = atomic_read(&dest->conn_flags);
89371+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
89372 if (cp->protocol != IPPROTO_UDP)
89373 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
89374 flags = cp->flags;
89375@@ -900,7 +900,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
89376
89377 cp->control = NULL;
89378 atomic_set(&cp->n_control, 0);
89379- atomic_set(&cp->in_pkts, 0);
89380+ atomic_set_unchecked(&cp->in_pkts, 0);
89381
89382 cp->packet_xmit = NULL;
89383 cp->app = NULL;
89384@@ -1190,7 +1190,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
89385
89386 /* Don't drop the entry if its number of incoming packets is not
89387 located in [0, 8] */
89388- i = atomic_read(&cp->in_pkts);
89389+ i = atomic_read_unchecked(&cp->in_pkts);
89390 if (i > 8 || i < 0) return 0;
89391
89392 if (!todrop_rate[i]) return 0;
89393diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
89394index 23b8eb5..48a8959 100644
89395--- a/net/netfilter/ipvs/ip_vs_core.c
89396+++ b/net/netfilter/ipvs/ip_vs_core.c
89397@@ -559,7 +559,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
89398 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
89399 /* do not touch skb anymore */
89400
89401- atomic_inc(&cp->in_pkts);
89402+ atomic_inc_unchecked(&cp->in_pkts);
89403 ip_vs_conn_put(cp);
89404 return ret;
89405 }
89406@@ -1711,7 +1711,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
89407 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
89408 pkts = sysctl_sync_threshold(ipvs);
89409 else
89410- pkts = atomic_add_return(1, &cp->in_pkts);
89411+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
89412
89413 if (ipvs->sync_state & IP_VS_STATE_MASTER)
89414 ip_vs_sync_conn(net, cp, pkts);
89415diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
89416index 9e6c2a0..28552e2 100644
89417--- a/net/netfilter/ipvs/ip_vs_ctl.c
89418+++ b/net/netfilter/ipvs/ip_vs_ctl.c
89419@@ -789,7 +789,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
89420 */
89421 ip_vs_rs_hash(ipvs, dest);
89422 }
89423- atomic_set(&dest->conn_flags, conn_flags);
89424+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
89425
89426 /* bind the service */
89427 if (!dest->svc) {
89428@@ -1657,7 +1657,7 @@ proc_do_sync_ports(ctl_table *table, int write,
89429 * align with netns init in ip_vs_control_net_init()
89430 */
89431
89432-static struct ctl_table vs_vars[] = {
89433+static ctl_table_no_const vs_vars[] __read_only = {
89434 {
89435 .procname = "amemthresh",
89436 .maxlen = sizeof(int),
89437@@ -2060,7 +2060,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
89438 " %-7s %-6d %-10d %-10d\n",
89439 &dest->addr.in6,
89440 ntohs(dest->port),
89441- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
89442+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
89443 atomic_read(&dest->weight),
89444 atomic_read(&dest->activeconns),
89445 atomic_read(&dest->inactconns));
89446@@ -2071,7 +2071,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
89447 "%-7s %-6d %-10d %-10d\n",
89448 ntohl(dest->addr.ip),
89449 ntohs(dest->port),
89450- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
89451+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
89452 atomic_read(&dest->weight),
89453 atomic_read(&dest->activeconns),
89454 atomic_read(&dest->inactconns));
89455@@ -2549,7 +2549,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
89456
89457 entry.addr = dest->addr.ip;
89458 entry.port = dest->port;
89459- entry.conn_flags = atomic_read(&dest->conn_flags);
89460+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
89461 entry.weight = atomic_read(&dest->weight);
89462 entry.u_threshold = dest->u_threshold;
89463 entry.l_threshold = dest->l_threshold;
89464@@ -3092,7 +3092,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
89465 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
89466 nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
89467 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
89468- (atomic_read(&dest->conn_flags) &
89469+ (atomic_read_unchecked(&dest->conn_flags) &
89470 IP_VS_CONN_F_FWD_MASK)) ||
89471 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
89472 atomic_read(&dest->weight)) ||
89473@@ -3682,7 +3682,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
89474 {
89475 int idx;
89476 struct netns_ipvs *ipvs = net_ipvs(net);
89477- struct ctl_table *tbl;
89478+ ctl_table_no_const *tbl;
89479
89480 atomic_set(&ipvs->dropentry, 0);
89481 spin_lock_init(&ipvs->dropentry_lock);
89482diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
89483index 5ea26bd..c9bc65f 100644
89484--- a/net/netfilter/ipvs/ip_vs_lblc.c
89485+++ b/net/netfilter/ipvs/ip_vs_lblc.c
89486@@ -118,7 +118,7 @@ struct ip_vs_lblc_table {
89487 * IPVS LBLC sysctl table
89488 */
89489 #ifdef CONFIG_SYSCTL
89490-static ctl_table vs_vars_table[] = {
89491+static ctl_table_no_const vs_vars_table[] __read_only = {
89492 {
89493 .procname = "lblc_expiration",
89494 .data = NULL,
89495diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
89496index 50123c2..067c773 100644
89497--- a/net/netfilter/ipvs/ip_vs_lblcr.c
89498+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
89499@@ -299,7 +299,7 @@ struct ip_vs_lblcr_table {
89500 * IPVS LBLCR sysctl table
89501 */
89502
89503-static ctl_table vs_vars_table[] = {
89504+static ctl_table_no_const vs_vars_table[] __read_only = {
89505 {
89506 .procname = "lblcr_expiration",
89507 .data = NULL,
89508diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
89509index f6046d9..4f10cfd 100644
89510--- a/net/netfilter/ipvs/ip_vs_sync.c
89511+++ b/net/netfilter/ipvs/ip_vs_sync.c
89512@@ -596,7 +596,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
89513 cp = cp->control;
89514 if (cp) {
89515 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
89516- pkts = atomic_add_return(1, &cp->in_pkts);
89517+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
89518 else
89519 pkts = sysctl_sync_threshold(ipvs);
89520 ip_vs_sync_conn(net, cp->control, pkts);
89521@@ -758,7 +758,7 @@ control:
89522 if (!cp)
89523 return;
89524 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
89525- pkts = atomic_add_return(1, &cp->in_pkts);
89526+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
89527 else
89528 pkts = sysctl_sync_threshold(ipvs);
89529 goto sloop;
89530@@ -882,7 +882,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
89531
89532 if (opt)
89533 memcpy(&cp->in_seq, opt, sizeof(*opt));
89534- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
89535+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
89536 cp->state = state;
89537 cp->old_state = cp->state;
89538 /*
89539diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
89540index b75ff64..0c51bbe 100644
89541--- a/net/netfilter/ipvs/ip_vs_xmit.c
89542+++ b/net/netfilter/ipvs/ip_vs_xmit.c
89543@@ -1102,7 +1102,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
89544 else
89545 rc = NF_ACCEPT;
89546 /* do not touch skb anymore */
89547- atomic_inc(&cp->in_pkts);
89548+ atomic_inc_unchecked(&cp->in_pkts);
89549 goto out;
89550 }
89551
89552@@ -1194,7 +1194,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
89553 else
89554 rc = NF_ACCEPT;
89555 /* do not touch skb anymore */
89556- atomic_inc(&cp->in_pkts);
89557+ atomic_inc_unchecked(&cp->in_pkts);
89558 goto out;
89559 }
89560
89561diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
89562index 2d3030a..7ba1c0a 100644
89563--- a/net/netfilter/nf_conntrack_acct.c
89564+++ b/net/netfilter/nf_conntrack_acct.c
89565@@ -60,7 +60,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
89566 #ifdef CONFIG_SYSCTL
89567 static int nf_conntrack_acct_init_sysctl(struct net *net)
89568 {
89569- struct ctl_table *table;
89570+ ctl_table_no_const *table;
89571
89572 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
89573 GFP_KERNEL);
89574diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
89575index 0283bae..5febcb0 100644
89576--- a/net/netfilter/nf_conntrack_core.c
89577+++ b/net/netfilter/nf_conntrack_core.c
89578@@ -1614,6 +1614,10 @@ void nf_conntrack_init_end(void)
89579 #define DYING_NULLS_VAL ((1<<30)+1)
89580 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
89581
89582+#ifdef CONFIG_GRKERNSEC_HIDESYM
89583+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
89584+#endif
89585+
89586 int nf_conntrack_init_net(struct net *net)
89587 {
89588 int ret;
89589@@ -1628,7 +1632,11 @@ int nf_conntrack_init_net(struct net *net)
89590 goto err_stat;
89591 }
89592
89593+#ifdef CONFIG_GRKERNSEC_HIDESYM
89594+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08lx", atomic_inc_return_unchecked(&conntrack_cache_id));
89595+#else
89596 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
89597+#endif
89598 if (!net->ct.slabname) {
89599 ret = -ENOMEM;
89600 goto err_slabname;
89601diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
89602index 1df1761..ce8b88a 100644
89603--- a/net/netfilter/nf_conntrack_ecache.c
89604+++ b/net/netfilter/nf_conntrack_ecache.c
89605@@ -188,7 +188,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
89606 #ifdef CONFIG_SYSCTL
89607 static int nf_conntrack_event_init_sysctl(struct net *net)
89608 {
89609- struct ctl_table *table;
89610+ ctl_table_no_const *table;
89611
89612 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
89613 GFP_KERNEL);
89614diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
89615index 974a2a4..52cc6ff 100644
89616--- a/net/netfilter/nf_conntrack_helper.c
89617+++ b/net/netfilter/nf_conntrack_helper.c
89618@@ -57,7 +57,7 @@ static struct ctl_table helper_sysctl_table[] = {
89619
89620 static int nf_conntrack_helper_init_sysctl(struct net *net)
89621 {
89622- struct ctl_table *table;
89623+ ctl_table_no_const *table;
89624
89625 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
89626 GFP_KERNEL);
89627diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
89628index 0ab9636..cea3c6a 100644
89629--- a/net/netfilter/nf_conntrack_proto.c
89630+++ b/net/netfilter/nf_conntrack_proto.c
89631@@ -52,7 +52,7 @@ nf_ct_register_sysctl(struct net *net,
89632
89633 static void
89634 nf_ct_unregister_sysctl(struct ctl_table_header **header,
89635- struct ctl_table **table,
89636+ ctl_table_no_const **table,
89637 unsigned int users)
89638 {
89639 if (users > 0)
89640diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
89641index a99b6c3..3841268 100644
89642--- a/net/netfilter/nf_conntrack_proto_dccp.c
89643+++ b/net/netfilter/nf_conntrack_proto_dccp.c
89644@@ -457,7 +457,7 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
89645 out_invalid:
89646 if (LOG_INVALID(net, IPPROTO_DCCP))
89647 nf_log_packet(net, nf_ct_l3num(ct), 0, skb, NULL, NULL,
89648- NULL, msg);
89649+ NULL, "%s", msg);
89650 return false;
89651 }
89652
89653@@ -614,7 +614,7 @@ static int dccp_error(struct net *net, struct nf_conn *tmpl,
89654
89655 out_invalid:
89656 if (LOG_INVALID(net, IPPROTO_DCCP))
89657- nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, msg);
89658+ nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "%s", msg);
89659 return -NF_ACCEPT;
89660 }
89661
89662diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
89663index bd700b4..4a3dc61 100644
89664--- a/net/netfilter/nf_conntrack_standalone.c
89665+++ b/net/netfilter/nf_conntrack_standalone.c
89666@@ -471,7 +471,7 @@ static ctl_table nf_ct_netfilter_table[] = {
89667
89668 static int nf_conntrack_standalone_init_sysctl(struct net *net)
89669 {
89670- struct ctl_table *table;
89671+ ctl_table_no_const *table;
89672
89673 table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
89674 GFP_KERNEL);
89675diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
89676index 902fb0a..87f7fdb 100644
89677--- a/net/netfilter/nf_conntrack_timestamp.c
89678+++ b/net/netfilter/nf_conntrack_timestamp.c
89679@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
89680 #ifdef CONFIG_SYSCTL
89681 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
89682 {
89683- struct ctl_table *table;
89684+ ctl_table_no_const *table;
89685
89686 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
89687 GFP_KERNEL);
89688diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
89689index 3b18dd1..f79e0ca 100644
89690--- a/net/netfilter/nf_log.c
89691+++ b/net/netfilter/nf_log.c
89692@@ -243,7 +243,7 @@ static const struct file_operations nflog_file_ops = {
89693
89694 #ifdef CONFIG_SYSCTL
89695 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
89696-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
89697+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
89698
89699 static int nf_log_proc_dostring(ctl_table *table, int write,
89700 void __user *buffer, size_t *lenp, loff_t *ppos)
89701@@ -274,14 +274,16 @@ static int nf_log_proc_dostring(ctl_table *table, int write,
89702 rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
89703 mutex_unlock(&nf_log_mutex);
89704 } else {
89705+ ctl_table_no_const nf_log_table = *table;
89706+
89707 mutex_lock(&nf_log_mutex);
89708 logger = rcu_dereference_protected(net->nf.nf_loggers[tindex],
89709 lockdep_is_held(&nf_log_mutex));
89710 if (!logger)
89711- table->data = "NONE";
89712+ nf_log_table.data = "NONE";
89713 else
89714- table->data = logger->name;
89715- r = proc_dostring(table, write, buffer, lenp, ppos);
89716+ nf_log_table.data = logger->name;
89717+ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
89718 mutex_unlock(&nf_log_mutex);
89719 }
89720
89721diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
89722index f042ae5..30ea486 100644
89723--- a/net/netfilter/nf_sockopt.c
89724+++ b/net/netfilter/nf_sockopt.c
89725@@ -45,7 +45,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
89726 }
89727 }
89728
89729- list_add(&reg->list, &nf_sockopts);
89730+ pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
89731 out:
89732 mutex_unlock(&nf_sockopt_mutex);
89733 return ret;
89734@@ -55,7 +55,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
89735 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
89736 {
89737 mutex_lock(&nf_sockopt_mutex);
89738- list_del(&reg->list);
89739+ pax_list_del((struct list_head *)&reg->list);
89740 mutex_unlock(&nf_sockopt_mutex);
89741 }
89742 EXPORT_SYMBOL(nf_unregister_sockopt);
89743diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
89744index 962e979..d4ae2e9 100644
89745--- a/net/netfilter/nfnetlink_log.c
89746+++ b/net/netfilter/nfnetlink_log.c
89747@@ -82,7 +82,7 @@ static int nfnl_log_net_id __read_mostly;
89748 struct nfnl_log_net {
89749 spinlock_t instances_lock;
89750 struct hlist_head instance_table[INSTANCE_BUCKETS];
89751- atomic_t global_seq;
89752+ atomic_unchecked_t global_seq;
89753 };
89754
89755 static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
89756@@ -559,7 +559,7 @@ __build_packet_message(struct nfnl_log_net *log,
89757 /* global sequence number */
89758 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
89759 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
89760- htonl(atomic_inc_return(&log->global_seq))))
89761+ htonl(atomic_inc_return_unchecked(&log->global_seq))))
89762 goto nla_put_failure;
89763
89764 if (data_len) {
89765diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
89766new file mode 100644
89767index 0000000..c566332
89768--- /dev/null
89769+++ b/net/netfilter/xt_gradm.c
89770@@ -0,0 +1,51 @@
89771+/*
89772+ * gradm match for netfilter
89773