diff --git a/Documentation/dontdiff b/Documentation/dontdiff
index b89a739..79768fb 100644
--- a/Documentation/dontdiff
+++ b/Documentation/dontdiff
@@ -2,9 +2,11 @@
 *.aux
 *.bin
 *.bz2
+*.c.[012]*.*
 *.cis
 *.cpio
 *.csp
+*.dbg
 *.dsp
 *.dvi
 *.elf
@@ -14,6 +16,7 @@
 *.gcov
 *.gen.S
 *.gif
+*.gmo
 *.grep
 *.grp
 *.gz
@@ -48,14 +51,17 @@
 *.tab.h
 *.tex
 *.ver
+*.vim
 *.xml
 *.xz
 *_MODULES
+*_reg_safe.h
 *_vga16.c
 *~
 \#*#
 *.9
-.*
+.[^g]*
+.gen*
 .*.d
 .mm
 53c700_d.h
@@ -69,9 +75,11 @@ Image
 Module.markers
 Module.symvers
 PENDING
+PERF*
 SCCS
 System.map*
 TAGS
+TRACEEVENT-CFLAGS
 aconf
 af_names.h
 aic7*reg.h*
@@ -80,6 +88,7 @@ aic7*seq.h*
 aicasm
 aicdb.h*
 altivec*.c
+ashldi3.S
 asm-offsets.h
 asm_offsets.h
 autoconf.h*
@@ -92,19 +101,24 @@ bounds.h
 bsetup
 btfixupprep
 build
+builtin-policy.h
 bvmlinux
 bzImage*
 capability_names.h
 capflags.c
 classlist.h*
+clut_vga16.c
+common-cmds.h
 comp*.log
 compile.h*
 conf
 config
 config-*
 config_data.h*
+config.c
 config.mak
 config.mak.autogen
+config.tmp
 conmakehash
 consolemap_deftbl.c*
 cpustr.h
@@ -115,9 +129,11 @@ devlist.h*
 dnotify_test
 docproc
 dslm
+dtc-lexer.lex.c
 elf2ecoff
 elfconfig.h*
 evergreen_reg_safe.h
+exception_policy.conf
 fixdep
 flask.h
 fore200e_mkfirm
@@ -125,12 +141,15 @@ fore200e_pca_fw.c*
 gconf
 gconf.glade.h
 gen-devlist
+gen-kdb_cmds.c
 gen_crc32table
 gen_init_cpio
 generated
 genheaders
 genksyms
 *_gray256.c
+hash
+hid-example
 hpet_example
 hugepage-mmap
 hugepage-shm
@@ -145,14 +164,14 @@ int32.c
 int4.c
 int8.c
 kallsyms
-kconfig
+kern_constants.h
 keywords.c
 ksym.c*
 ksym.h*
 kxgettext
 lex.c
 lex.*.c
-linux
+lib1funcs.S
 logo_*.c
 logo_*_clut224.c
 logo_*_mono.c
@@ -162,14 +181,15 @@ mach-types.h
 machtypes.h
 map
 map_hugetlb
-media
 mconf
+mdp
 miboot*
 mk_elfconfig
 mkboot
 mkbugboot
 mkcpustr
 mkdep
+mkpiggy
 mkprep
 mkregtable
 mktables
@@ -185,6 +205,8 @@ oui.c*
 page-types
 parse.c
 parse.h
+parse-events*
+pasyms.h
 patches*
 pca200e.bin
 pca200e_ecd.bin2
@@ -194,6 +216,7 @@ perf-archive
 piggyback
 piggy.gzip
 piggy.S
+pmu-*
 pnmtologo
 ppc_defs.h*
 pss_boot.h
@@ -203,7 +226,10 @@ r200_reg_safe.h
 r300_reg_safe.h
 r420_reg_safe.h
 r600_reg_safe.h
+realmode.lds
+realmode.relocs
 recordmcount
+regdb.c
 relocs
 rlim_names.h
 rn50_reg_safe.h
@@ -213,8 +239,12 @@ series
 setup
 setup.bin
 setup.elf
+signing_key*
+size_overflow_hash.h
 sImage
+slabinfo
 sm_tbl*
+sortextable
 split-include
 syscalltab.h
 tables.c
@@ -224,6 +254,7 @@ tftpboot.img
 timeconst.h
 times.h*
 trix_boot.h
+user_constants.h
 utsrelease.h*
 vdso-syms.lds
 vdso.lds
@@ -235,13 +266,17 @@ vdso32.lds
 vdso32.so.dbg
 vdso64.lds
 vdso64.so.dbg
+vdsox32.lds
+vdsox32-syms.lds
 version.h*
 vmImage
 vmlinux
 vmlinux-*
 vmlinux.aout
 vmlinux.bin.all
+vmlinux.bin.bz2
 vmlinux.lds
+vmlinux.relocs
 vmlinuz
 voffset.h
 vsyscall.lds
@@ -249,9 +284,12 @@ vsyscall_32.lds
 wanxlfw.inc
 uImage
 unifdef
+utsrelease.h
 wakeup.bin
 wakeup.elf
 wakeup.lds
+x509*
 zImage*
 zconf.hash.c
+zconf.lex.c
 zoffset.h
diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 2fe6e76..df58221 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -976,6 +976,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 Format: <unsigned int> such that (rxsize & ~0x1fffc0) == 0.
 Default: 1024

+ grsec_proc_gid= [GRKERNSEC_PROC_USERGROUP] Chooses GID to
+ ignore grsecurity's /proc restrictions
+
+
 hashdist= [KNL,NUMA] Large hashes allocated during boot
 are distributed across NUMA nodes. Defaults on
 for 64-bit NUMA, off otherwise.
@@ -2195,6 +2199,22 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
 the specified number of seconds. This is to be used if
 your oopses keep scrolling off the screen.

+ pax_nouderef [X86] disables UDEREF. Most likely needed under certain
+ virtualization environments that don't cope well with the
+ expand down segment used by UDEREF on X86-32 or the frequent
+ page table updates on X86-64.
+
+ pax_sanitize_slab=
+ 0/1 to disable/enable slab object sanitization (enabled by
+ default).
+
+ pax_softmode= 0/1 to disable/enable PaX softmode on boot already.
+
+ pax_extra_latent_entropy
+ Enable a very simple form of latent entropy extraction
+ from the first 4GB of memory as the bootmem allocator
+ passes the memory pages to the buddy allocator.
+
 pcbit= [HW,ISDN]

 pcd. [PARIDE]
diff --git a/Makefile b/Makefile
index b548552..6e18246 100644
--- a/Makefile
+++ b/Makefile
@@ -241,8 +241,9 @@ CONFIG_SHELL := $(shell if [ -x "$$BASH" ]; then echo $$BASH; \

 HOSTCC = gcc
 HOSTCXX = g++
-HOSTCFLAGS = -Wall -Wmissing-prototypes -Wstrict-prototypes -O2 -fomit-frame-pointer
-HOSTCXXFLAGS = -O2
+HOSTCFLAGS = -Wall -W -Wmissing-prototypes -Wstrict-prototypes -Wno-unused-parameter -Wno-missing-field-initializers -O2 -fomit-frame-pointer -fno-delete-null-pointer-checks
+HOSTCFLAGS += $(call cc-option, -Wno-empty-body)
+HOSTCXXFLAGS = -O2 -Wall -W -fno-delete-null-pointer-checks

 # Decide whether to build built-in, modular, or both.
 # Normally, just do built-in.
@@ -414,8 +415,8 @@ export RCS_TAR_IGNORE := --exclude SCCS --exclude BitKeeper --exclude .svn \
 # Rules shared between *config targets and build targets

 # Basic helpers built in scripts/
-PHONY += scripts_basic
-scripts_basic:
+PHONY += scripts_basic gcc-plugins
+scripts_basic: gcc-plugins
 $(Q)$(MAKE) $(build)=scripts/basic
 $(Q)rm -f .tmp_quiet_recordmcount

@@ -576,6 +577,65 @@ else
 KBUILD_CFLAGS += -O2
 endif

+ifndef DISABLE_PAX_PLUGINS
+ifeq ($(call cc-ifversion, -ge, 0408, y), y)
+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCXX)" "$(HOSTCXX)" "$(CC)")
+else
+PLUGINCC := $(shell $(CONFIG_SHELL) $(srctree)/scripts/gcc-plugin.sh "$(HOSTCC)" "$(HOSTCXX)" "$(CC)")
+endif
+ifneq ($(PLUGINCC),)
+ifdef CONFIG_PAX_CONSTIFY_PLUGIN
+CONSTIFY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/constify_plugin.so -DCONSTIFY_PLUGIN
+endif
+ifdef CONFIG_PAX_MEMORY_STACKLEAK
+STACKLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/stackleak_plugin.so -DSTACKLEAK_PLUGIN
+STACKLEAK_PLUGIN_CFLAGS += -fplugin-arg-stackleak_plugin-track-lowest-sp=100
+endif
+ifdef CONFIG_KALLOCSTAT_PLUGIN
+KALLOCSTAT_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kallocstat_plugin.so
+endif
+ifdef CONFIG_PAX_KERNEXEC_PLUGIN
+KERNEXEC_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/kernexec_plugin.so
+KERNEXEC_PLUGIN_CFLAGS += -fplugin-arg-kernexec_plugin-method=$(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD) -DKERNEXEC_PLUGIN
+KERNEXEC_PLUGIN_AFLAGS := -DKERNEXEC_PLUGIN
+endif
+ifdef CONFIG_CHECKER_PLUGIN
+ifeq ($(call cc-ifversion, -ge, 0406, y), y)
+CHECKER_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/checker_plugin.so -DCHECKER_PLUGIN
+endif
+endif
+COLORIZE_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/colorize_plugin.so
+ifdef CONFIG_PAX_SIZE_OVERFLOW
+SIZE_OVERFLOW_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/size_overflow_plugin.so -DSIZE_OVERFLOW_PLUGIN
+endif
+ifdef CONFIG_PAX_LATENT_ENTROPY
+LATENT_ENTROPY_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/latent_entropy_plugin.so -DLATENT_ENTROPY_PLUGIN
+endif
+ifdef CONFIG_PAX_MEMORY_STRUCTLEAK
+STRUCTLEAK_PLUGIN_CFLAGS := -fplugin=$(objtree)/tools/gcc/structleak_plugin.so -DSTRUCTLEAK_PLUGIN
+endif
+GCC_PLUGINS_CFLAGS := $(CONSTIFY_PLUGIN_CFLAGS) $(STACKLEAK_PLUGIN_CFLAGS) $(KALLOCSTAT_PLUGIN_CFLAGS)
+GCC_PLUGINS_CFLAGS += $(KERNEXEC_PLUGIN_CFLAGS) $(CHECKER_PLUGIN_CFLAGS) $(COLORIZE_PLUGIN_CFLAGS)
+GCC_PLUGINS_CFLAGS += $(SIZE_OVERFLOW_PLUGIN_CFLAGS) $(LATENT_ENTROPY_PLUGIN_CFLAGS) $(STRUCTLEAK_PLUGIN_CFLAGS)
+GCC_PLUGINS_AFLAGS := $(KERNEXEC_PLUGIN_AFLAGS)
+export PLUGINCC GCC_PLUGINS_CFLAGS GCC_PLUGINS_AFLAGS CONSTIFY_PLUGIN
+ifeq ($(KBUILD_EXTMOD),)
+gcc-plugins:
+ $(Q)$(MAKE) $(build)=tools/gcc
+else
+gcc-plugins: ;
+endif
+else
+gcc-plugins:
+ifeq ($(call cc-ifversion, -ge, 0405, y), y)
+ $(error Your gcc installation does not support plugins. If the necessary headers for plugin support are missing, they should be installed. On Debian, apt-get install gcc-<ver>-plugin-dev. If you choose to ignore this error and lessen the improvements provided by this patch, re-run make with the DISABLE_PAX_PLUGINS=y argument.)
+else
+ $(Q)echo "warning, your gcc version does not support plugins, you should upgrade it to gcc 4.5 at least"
+endif
+ $(Q)echo "PAX_MEMORY_STACKLEAK, constification, PAX_LATENT_ENTROPY and other features will be less secure. PAX_SIZE_OVERFLOW will not be active."
+endif
+endif
+
 include $(srctree)/arch/$(SRCARCH)/Makefile

 ifdef CONFIG_READABLE_ASM
@@ -733,7 +793,7 @@ export mod_sign_cmd


 ifeq ($(KBUILD_EXTMOD),)
-core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/
+core-y += kernel/ mm/ fs/ ipc/ security/ crypto/ block/ grsecurity/

 vmlinux-dirs := $(patsubst %/,%,$(filter %/, $(init-y) $(init-m) \
 $(core-y) $(core-m) $(drivers-y) $(drivers-m) \
@@ -782,6 +842,8 @@ endif

 # The actual objects are generated when descending,
 # make sure no implicit rule kicks in
+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+$(filter-out $(init-y),$(vmlinux-deps)): KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;

 # Handle descending into subdirectories listed in $(vmlinux-dirs)
@@ -791,7 +853,7 @@ $(sort $(vmlinux-deps)): $(vmlinux-dirs) ;
 # Error messages still appears in the original language

 PHONY += $(vmlinux-dirs)
-$(vmlinux-dirs): prepare scripts
+$(vmlinux-dirs): gcc-plugins prepare scripts
 $(Q)$(MAKE) $(build)=$@

 # Store (new) KERNELRELASE string in include/config/kernel.release
@@ -835,6 +897,7 @@ prepare0: archprepare FORCE
 $(Q)$(MAKE) $(build)=.

 # All the preparing..
+prepare: KBUILD_CFLAGS := $(filter-out $(GCC_PLUGINS_CFLAGS),$(KBUILD_CFLAGS))
 prepare: prepare0

 # Generate some files
@@ -942,6 +1005,8 @@ all: modules
 # using awk while concatenating to the final file.

 PHONY += modules
+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 modules: $(vmlinux-dirs) $(if $(KBUILD_BUILTIN),vmlinux) modules.builtin
 $(Q)$(AWK) '!x[$$0]++' $(vmlinux-dirs:%=$(objtree)/%/modules.order) > $(objtree)/modules.order
 @$(kecho) ' Building modules, stage 2.';
@@ -957,7 +1022,7 @@ modules.builtin: $(vmlinux-dirs:%=%/modules.builtin)

 # Target to prepare building external modules
 PHONY += modules_prepare
-modules_prepare: prepare scripts
+modules_prepare: gcc-plugins prepare scripts

 # Target to install modules
 PHONY += modules_install
@@ -1023,7 +1088,7 @@ MRPROPER_FILES += .config .config.old .version .old_version $(version_h) \
 Module.symvers tags TAGS cscope* GPATH GTAGS GRTAGS GSYMS \
 signing_key.priv signing_key.x509 x509.genkey \
 extra_certificates signing_key.x509.keyid \
- signing_key.x509.signer
+ signing_key.x509.signer tools/gcc/size_overflow_hash.h

 # clean - Delete most, but leave enough to build external modules
 #
@@ -1063,6 +1128,7 @@ distclean: mrproper
 \( -name '*.orig' -o -name '*.rej' -o -name '*~' \
 -o -name '*.bak' -o -name '#*#' -o -name '.*.orig' \
 -o -name '.*.rej' \
+ -o -name '.*.rej' -o -name '*.so' \
 -o -name '*%' -o -name '.*.cmd' -o -name 'core' \) \
 -type f -print | xargs rm -f

@@ -1223,6 +1289,8 @@ PHONY += $(module-dirs) modules
 $(module-dirs): crmodverdir $(objtree)/Module.symvers
 $(Q)$(MAKE) $(build)=$(patsubst _module_%,%,$@)

+modules: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+modules: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
 modules: $(module-dirs)
 @$(kecho) ' Building modules, stage 2.';
 $(Q)$(MAKE) -f $(srctree)/scripts/Makefile.modpost
@@ -1359,17 +1427,21 @@ else
 target-dir = $(if $(KBUILD_EXTMOD),$(dir $<),$(dir $@))
 endif

-%.s: %.c prepare scripts FORCE
+%.s: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%.s: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%.s: %.c gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
 %.i: %.c prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.o: %.c prepare scripts FORCE
+%.o: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%.o: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%.o: %.c gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
 %.lst: %.c prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.s: %.S prepare scripts FORCE
+%.s: %.S gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
-%.o: %.S prepare scripts FORCE
+%.o: %.S gcc-plugins prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
 %.symtypes: %.c prepare scripts FORCE
 $(Q)$(MAKE) $(build)=$(build-dir) $(target-dir)$(notdir $@)
@@ -1379,11 +1451,15 @@ endif
 $(cmd_crmodverdir)
 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
 $(build)=$(build-dir)
-%/: prepare scripts FORCE
+%/: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%/: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%/: gcc-plugins prepare scripts FORCE
 $(cmd_crmodverdir)
 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
 $(build)=$(build-dir)
-%.ko: prepare scripts FORCE
+%.ko: KBUILD_CFLAGS += $(GCC_PLUGINS_CFLAGS)
+%.ko: KBUILD_AFLAGS += $(GCC_PLUGINS_AFLAGS)
+%.ko: gcc-plugins prepare scripts FORCE
 $(cmd_crmodverdir)
 $(Q)$(MAKE) KBUILD_MODULES=$(if $(CONFIG_MODULES),1) \
 $(build)=$(build-dir) $(@:.ko=.o)
diff --git a/arch/alpha/include/asm/atomic.h b/arch/alpha/include/asm/atomic.h
index c2cbe4f..f7264b4 100644
--- a/arch/alpha/include/asm/atomic.h
+++ b/arch/alpha/include/asm/atomic.h
@@ -250,6 +250,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 #define atomic_dec(v) atomic_sub(1,(v))
 #define atomic64_dec(v) atomic64_sub(1,(v))

+#define atomic64_read_unchecked(v) atomic64_read(v)
+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
+#define atomic64_inc_unchecked(v) atomic64_inc(v)
+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
+#define atomic64_dec_unchecked(v) atomic64_dec(v)
+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
+
 #define smp_mb__before_atomic_dec() smp_mb()
 #define smp_mb__after_atomic_dec() smp_mb()
 #define smp_mb__before_atomic_inc() smp_mb()
diff --git a/arch/alpha/include/asm/cache.h b/arch/alpha/include/asm/cache.h
index ad368a9..fbe0f25 100644
--- a/arch/alpha/include/asm/cache.h
+++ b/arch/alpha/include/asm/cache.h
@@ -4,19 +4,19 @@
 #ifndef __ARCH_ALPHA_CACHE_H
 #define __ARCH_ALPHA_CACHE_H

+#include <linux/const.h>

 /* Bytes per L1 (data) cache line. */
 #if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_EV6)
-# define L1_CACHE_BYTES 64
 # define L1_CACHE_SHIFT 6
 #else
 /* Both EV4 and EV5 are write-through, read-allocate,
 direct-mapped, physical.
 */
-# define L1_CACHE_BYTES 32
 # define L1_CACHE_SHIFT 5
 #endif

+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
 #define SMP_CACHE_BYTES L1_CACHE_BYTES

 #endif
diff --git a/arch/alpha/include/asm/elf.h b/arch/alpha/include/asm/elf.h
index 968d999..d36b2df 100644
--- a/arch/alpha/include/asm/elf.h
+++ b/arch/alpha/include/asm/elf.h
@@ -91,6 +91,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];

 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x1000000)

+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE (current->personality & ADDR_LIMIT_32BIT ? 0x10000 : 0x120000000UL)
+
+#define PAX_DELTA_MMAP_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 28)
+#define PAX_DELTA_STACK_LEN (current->personality & ADDR_LIMIT_32BIT ? 14 : 19)
+#endif
+
 /* $0 is set by ld.so to a pointer to a function which might be
 registered using atexit. This provides a mean for the dynamic
 linker to call DT_FINI functions for shared libraries that have
diff --git a/arch/alpha/include/asm/pgalloc.h b/arch/alpha/include/asm/pgalloc.h
index bc2a0da..8ad11ee 100644
--- a/arch/alpha/include/asm/pgalloc.h
+++ b/arch/alpha/include/asm/pgalloc.h
@@ -29,6 +29,12 @@ pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
 pgd_set(pgd, pmd);
 }

+static inline void
+pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
+{
+ pgd_populate(mm, pgd, pmd);
+}
+
 extern pgd_t *pgd_alloc(struct mm_struct *mm);

 static inline void
diff --git a/arch/alpha/include/asm/pgtable.h b/arch/alpha/include/asm/pgtable.h
index 81a4342..348b927 100644
--- a/arch/alpha/include/asm/pgtable.h
+++ b/arch/alpha/include/asm/pgtable.h
@@ -102,6 +102,17 @@ struct vm_area_struct;
 #define PAGE_SHARED __pgprot(_PAGE_VALID | __ACCESS_BITS)
 #define PAGE_COPY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
 #define PAGE_READONLY __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW)
+
+#ifdef CONFIG_PAX_PAGEEXEC
+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOE)
+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_VALID | __ACCESS_BITS | _PAGE_FOW | _PAGE_FOE)
+#else
+# define PAGE_SHARED_NOEXEC PAGE_SHARED
+# define PAGE_COPY_NOEXEC PAGE_COPY
+# define PAGE_READONLY_NOEXEC PAGE_READONLY
+#endif
+
 #define PAGE_KERNEL __pgprot(_PAGE_VALID | _PAGE_ASM | _PAGE_KRE | _PAGE_KWE)

 #define _PAGE_NORMAL(x) __pgprot(_PAGE_VALID | __ACCESS_BITS | (x))
diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
index 2fd00b7..cfd5069 100644
--- a/arch/alpha/kernel/module.c
+++ b/arch/alpha/kernel/module.c
@@ -160,7 +160,7 @@ apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,

 /* The small sections were sorted to the end of the segment.
 The following should definitely cover them. */
- gp = (u64)me->module_core + me->core_size - 0x8000;
+ gp = (u64)me->module_core_rw + me->core_size_rw - 0x8000;
 got = sechdrs[me->arch.gotsecindex].sh_addr;

 for (i = 0; i < n; i++) {
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
index b9e37ad..44c24e7 100644
--- a/arch/alpha/kernel/osf_sys.c
+++ b/arch/alpha/kernel/osf_sys.c
@@ -1297,10 +1297,11 @@ SYSCALL_DEFINE1(old_adjtimex, struct timex32 __user *, txc_p)
 generic version except that we know how to honor ADDR_LIMIT_32BIT. */

 static unsigned long
-arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
- unsigned long limit)
+arch_get_unmapped_area_1(struct file *filp, unsigned long addr, unsigned long len,
+ unsigned long limit, unsigned long flags)
 {
 struct vm_unmapped_area_info info;
+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);

 info.flags = 0;
 info.length = len;
@@ -1308,6 +1309,7 @@ arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
 info.high_limit = limit;
 info.align_mask = 0;
 info.align_offset = 0;
+ info.threadstack_offset = offset;
 return vm_unmapped_area(&info);
 }

@@ -1340,20 +1342,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 merely specific addresses, but regions of memory -- perhaps
 this feature should be incorporated into all ports? */

+#ifdef CONFIG_PAX_RANDMMAP
+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
 if (addr) {
- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(addr), len, limit, flags);
 if (addr != (unsigned long) -ENOMEM)
 return addr;
 }

 /* Next, try allocating at TASK_UNMAPPED_BASE. */
- addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
- len, limit);
+ addr = arch_get_unmapped_area_1 (filp, PAGE_ALIGN(current->mm->mmap_base), len, limit, flags);
+
 if (addr != (unsigned long) -ENOMEM)
 return addr;

 /* Finally, try allocating in low memory. */
- addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
+ addr = arch_get_unmapped_area_1 (filp, PAGE_SIZE, len, limit, flags);

 return addr;
 }
diff --git a/arch/alpha/mm/fault.c b/arch/alpha/mm/fault.c
index 0c4132d..88f0d53 100644
--- a/arch/alpha/mm/fault.c
+++ b/arch/alpha/mm/fault.c
@@ -53,6 +53,124 @@ __load_new_mm_context(struct mm_struct *next_mm)
 __reload_thread(pcb);
 }

+#ifdef CONFIG_PAX_PAGEEXEC
+/*
+ * PaX: decide what to do with offenders (regs->pc = fault address)
+ *
+ * returns 1 when task should be killed
+ * 2 when patched PLT trampoline was detected
+ * 3 when unpatched PLT trampoline was detected
+ */
+static int pax_handle_fetch_fault(struct pt_regs *regs)
+{
+
+#ifdef CONFIG_PAX_EMUPLT
+ int err;
+
+ do { /* PaX: patched PLT emulation #1 */
+ unsigned int ldah, ldq, jmp;
+
+ err = get_user(ldah, (unsigned int *)regs->pc);
+ err |= get_user(ldq, (unsigned int *)(regs->pc+4));
+ err |= get_user(jmp, (unsigned int *)(regs->pc+8));
+
+ if (err)
+ break;
+
+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
+ (ldq & 0xFFFF0000U) == 0xA77B0000U &&
+ jmp == 0x6BFB0000U)
+ {
+ unsigned long r27, addr;
+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
+ unsigned long addrl = ldq | 0xFFFFFFFFFFFF0000UL;
+
+ addr = regs->r27 + ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
+ err = get_user(r27, (unsigned long *)addr);
+ if (err)
+ break;
+
+ regs->r27 = r27;
+ regs->pc = r27;
+ return 2;
+ }
+ } while (0);
+
+ do { /* PaX: patched PLT emulation #2 */
+ unsigned int ldah, lda, br;
+
+ err = get_user(ldah, (unsigned int *)regs->pc);
+ err |= get_user(lda, (unsigned int *)(regs->pc+4));
+ err |= get_user(br, (unsigned int *)(regs->pc+8));
+
+ if (err)
+ break;
+
+ if ((ldah & 0xFFFF0000U) == 0x277B0000U &&
+ (lda & 0xFFFF0000U) == 0xA77B0000U &&
+ (br & 0xFFE00000U) == 0xC3E00000U)
+ {
+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL;
+ unsigned long addrh = (ldah | 0xFFFFFFFFFFFF0000UL) << 16;
+ unsigned long addrl = lda | 0xFFFFFFFFFFFF0000UL;
+
+ regs->r27 += ((addrh ^ 0x80000000UL) + 0x80000000UL) + ((addrl ^ 0x8000UL) + 0x8000UL);
+ regs->pc += 12 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
+ return 2;
+ }
+ } while (0);
+
+ do { /* PaX: unpatched PLT emulation */
+ unsigned int br;
+
+ err = get_user(br, (unsigned int *)regs->pc);
+
+ if (!err && (br & 0xFFE00000U) == 0xC3800000U) {
+ unsigned int br2, ldq, nop, jmp;
+ unsigned long addr = br | 0xFFFFFFFFFFE00000UL, resolver;
+
+ addr = regs->pc + 4 + (((addr ^ 0x00100000UL) + 0x00100000UL) << 2);
+ err = get_user(br2, (unsigned int *)addr);
+ err |= get_user(ldq, (unsigned int *)(addr+4));
+ err |= get_user(nop, (unsigned int *)(addr+8));
+ err |= get_user(jmp, (unsigned int *)(addr+12));
+ err |= get_user(resolver, (unsigned long *)(addr+16));
+
+ if (err)
+ break;
+
+ if (br2 == 0xC3600000U &&
+ ldq == 0xA77B000CU &&
+ nop == 0x47FF041FU &&
+ jmp == 0x6B7B0000U)
+ {
+ regs->r28 = regs->pc+4;
+ regs->r27 = addr+16;
+ regs->pc = resolver;
+ return 3;
+ }
+ }
+ } while (0);
+#endif
+
+ return 1;
+}
+
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
+{
+ unsigned long i;
+
+ printk(KERN_ERR "PAX: bytes at PC: ");
+ for (i = 0; i < 5; i++) {
+ unsigned int c;
+ if (get_user(c, (unsigned int *)pc+i))
+ printk(KERN_CONT "???????? ");
+ else
+ printk(KERN_CONT "%08x ", c);
+ }
+ printk("\n");
+}
+#endif

 /*
 * This routine handles page faults. It determines the address,
@@ -133,8 +251,29 @@ retry:
 good_area:
 si_code = SEGV_ACCERR;
 if (cause < 0) {
- if (!(vma->vm_flags & VM_EXEC))
+ if (!(vma->vm_flags & VM_EXEC)) {
+
+#ifdef CONFIG_PAX_PAGEEXEC
+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->pc)
+ goto bad_area;
+
+ up_read(&mm->mmap_sem);
+ switch (pax_handle_fetch_fault(regs)) {
+
+#ifdef CONFIG_PAX_EMUPLT
+ case 2:
+ case 3:
+ return;
+#endif
+
+ }
+ pax_report_fault(regs, (void *)regs->pc, (void *)rdusp());
+ do_group_exit(SIGKILL);
+#else
 goto bad_area;
+#endif
+
+ }
 } else if (!cause) {
 /* Allow reads even for write-only mappings */
 if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 136f263..f471277 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1766,7 +1766,7 @@ config ALIGNMENT_TRAP

 config UACCESS_WITH_MEMCPY
 bool "Use kernel mem{cpy,set}() for {copy_to,clear}_user()"
- depends on MMU
+ depends on MMU && !PAX_MEMORY_UDEREF
 default y if CPU_FEROCEON
 help
 Implement faster copy_to_user and clear_user methods for CPU
diff --git a/arch/arm/include/asm/atomic.h b/arch/arm/include/asm/atomic.h
index da1c77d..2ee6056 100644
--- a/arch/arm/include/asm/atomic.h
+++ b/arch/arm/include/asm/atomic.h
@@ -17,17 +17,35 @@
 #include <asm/barrier.h>
 #include <asm/cmpxchg.h>

+#ifdef CONFIG_GENERIC_ATOMIC64
+#include <asm-generic/atomic64.h>
+#endif
+
 #define ATOMIC_INIT(i) { (i) }

 #ifdef __KERNEL__

+#define _ASM_EXTABLE(from, to) \
+" .pushsection __ex_table,\"a\"\n"\
+" .align 3\n" \
+" .long " #from ", " #to"\n" \
+" .popsection"
+
 /*
 * On ARM, ordinary assignment (str instruction) doesn't clear the local
 * strex/ldrex monitor on some implementations. The reason we can use it for
 * atomic_set() is the clrex or dummy strex done on every exception return.
 */
 #define atomic_read(v) (*(volatile int *)&(v)->counter)
+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
+{
+ return v->counter;
+}
 #define atomic_set(v,i) (((v)->counter) = (i))
+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
+{
+ v->counter = i;
+}

 #if __LINUX_ARM_ARCH__ >= 6

@@ -42,6 +60,35 @@ static inline void atomic_add(int i, atomic_t *v)
 int result;

 __asm__ __volatile__("@ atomic_add\n"
+"1: ldrex %1, [%3]\n"
+" adds %0, %1, %4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strex %1, %0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "Ir" (i)
+ : "cc");
+}
+
+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
+{
+ unsigned long tmp;
+ int result;
+
+ __asm__ __volatile__("@ atomic_add_unchecked\n"
 "1: ldrex %0, [%3]\n"
 " add %0, %0, %4\n"
 " strex %1, %0, [%3]\n"
@@ -60,6 +107,42 @@ static inline int atomic_add_return(int i, atomic_t *v)
 smp_mb();

 __asm__ __volatile__("@ atomic_add_return\n"
+"1: ldrex %1, [%3]\n"
+" adds %0, %1, %4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %0, %1\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strex %1, %0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "Ir" (i)
+ : "cc");
+
+ smp_mb();
+
+ return result;
+}
+
+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
+{
+ unsigned long tmp;
+ int result;
+
+ smp_mb();
+
+ __asm__ __volatile__("@ atomic_add_return_unchecked\n"
 "1: ldrex %0, [%3]\n"
 " add %0, %0, %4\n"
 " strex %1, %0, [%3]\n"
@@ -80,6 +163,35 @@ static inline void atomic_sub(int i, atomic_t *v)
 int result;

 __asm__ __volatile__("@ atomic_sub\n"
+"1: ldrex %1, [%3]\n"
+" subs %0, %1, %4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strex %1, %0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "Ir" (i)
+ : "cc");
+}
+
+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
+{
+ unsigned long tmp;
+ int result;
+
+ __asm__ __volatile__("@ atomic_sub_unchecked\n"
 "1: ldrex %0, [%3]\n"
 " sub %0, %0, %4\n"
 " strex %1, %0, [%3]\n"
@@ -98,11 +210,25 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 smp_mb();

 __asm__ __volatile__("@ atomic_sub_return\n"
-"1: ldrex %0, [%3]\n"
-" sub %0, %0, %4\n"
+"1: ldrex %1, [%3]\n"
+" subs %0, %1, %4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %0, %1\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
 " strex %1, %0, [%3]\n"
 " teq %1, #0\n"
 " bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 : "r" (&v->counter), "Ir" (i)
 : "cc");
@@ -134,6 +260,28 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
 return oldval;
 }

+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *ptr, int old, int new)
+{
+ unsigned long oldval, res;
+
+ smp_mb();
+
+ do {
+ __asm__ __volatile__("@ atomic_cmpxchg_unchecked\n"
+ "ldrex %1, [%3]\n"
+ "mov %0, #0\n"
+ "teq %1, %4\n"
+ "strexeq %0, %5, [%3]\n"
+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
+ : "r" (&ptr->counter), "Ir" (old), "r" (new)
+ : "cc");
+ } while (res);
+
+ smp_mb();
+
+ return oldval;
+}
+
 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 {
 unsigned long tmp, tmp2;
@@ -167,7 +315,17 @@ static inline int atomic_add_return(int i, atomic_t *v)

 return val;
 }
+
+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
+{
+ return atomic_add_return(i, v);
+}
+
 #define atomic_add(i, v) (void) atomic_add_return(i, v)
+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
+{
+ (void) atomic_add_return(i, v);
+}

 static inline int atomic_sub_return(int i, atomic_t *v)
 {
@@ -182,6 +340,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 return val;
 }
 #define atomic_sub(i, v) (void) atomic_sub_return(i, v)
+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
+{
+ (void) atomic_sub_return(i, v);
+}

 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
@@ -197,6 +359,11 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 return ret;
 }

+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
+{
+ return atomic_cmpxchg(v, old, new);
+}
+
 static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 {
 unsigned long flags;
@@ -209,6 +376,10 @@ static inline void atomic_clear_mask(unsigned long mask, unsigned long *addr)
 #endif /* __LINUX_ARM_ARCH__ */

 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
+{
+ return xchg(&v->counter, new);
+}

 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
@@ -221,11 +392,27 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
 }

 #define atomic_inc(v) atomic_add(1, v)
+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
+{
+ atomic_add_unchecked(1, v);
+}
 #define atomic_dec(v) atomic_sub(1, v)
+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
+{
+ atomic_sub_unchecked(1, v);
+}

 #define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
+{
+ return atomic_add_return_unchecked(1, v) == 0;
+}
 #define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
 #define atomic_inc_return(v) (atomic_add_return(1, v))
+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
+{
+ return atomic_add_return_unchecked(1, v);
+}
 #define atomic_dec_return(v) (atomic_sub_return(1, v))
 #define atomic_sub_and_test(i, v) (atomic_sub_return(i, v) == 0)

@@ -241,6 +428,14 @@ typedef struct {
 u64 __aligned(8) counter;
 } atomic64_t;

+#ifdef CONFIG_PAX_REFCOUNT
+typedef struct {
+ u64 __aligned(8) counter;
+} atomic64_unchecked_t;
+#else
+typedef atomic64_t atomic64_unchecked_t;
+#endif
+
 #define ATOMIC64_INIT(i) { (i) }

 #ifdef CONFIG_ARM_LPAE
@@ -257,6 +452,19 @@ static inline u64 atomic64_read(const atomic64_t *v)
 return result;
 }

+static inline u64 atomic64_read_unchecked(const atomic64_unchecked_t *v)
+{
+ u64 result;
+
+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
+" ldrd %0, %H0, [%1]"
+ : "=&r" (result)
+ : "r" (&v->counter), "Qo" (v->counter)
+ );
+
+ return result;
+}
+
 static inline void atomic64_set(atomic64_t *v, u64 i)
 {
 __asm__ __volatile__("@ atomic64_set\n"
@@ -265,6 +473,15 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
 : "r" (&v->counter), "r" (i)
 );
 }
+
+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
+{
+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
+" strd %2, %H2, [%1]"
+ : "=Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ );
+}
 #else
 static inline u64 atomic64_read(const atomic64_t *v)
 {
@@ -279,6 +496,19 @@ static inline u64 atomic64_read(const atomic64_t *v)
 return result;
 }

+static inline u64 atomic64_read_unchecked(atomic64_unchecked_t *v)
+{
+ u64 result;
+
+ __asm__ __volatile__("@ atomic64_read_unchecked\n"
+" ldrexd %0, %H0, [%1]"
+ : "=&r" (result)
+ : "r" (&v->counter), "Qo" (v->counter)
+ );
+
+ return result;
+}
+
 static inline void atomic64_set(atomic64_t *v, u64 i)
 {
 u64 tmp;
@@ -292,6 +522,21 @@ static inline void atomic64_set(atomic64_t *v, u64 i)
 : "r" (&v->counter), "r" (i)
 : "cc");
 }
+
+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, u64 i)
+{
+ u64 tmp;
+
+ __asm__ __volatile__("@ atomic64_set_unchecked\n"
+"1: ldrexd %0, %H0, [%2]\n"
+" strexd %0, %3, %H3, [%2]\n"
+" teq %0, #0\n"
+" bne 1b"
+ : "=&r" (tmp), "=Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+}
+
 #endif

 static inline void atomic64_add(u64 i, atomic64_t *v)
@@ -302,6 +547,36 @@ static inline void atomic64_add(u64 i, atomic64_t *v)
 __asm__ __volatile__("@ atomic64_add\n"
 "1: ldrexd %0, %H0, [%3]\n"
 " adds %0, %0, %4\n"
+" adcs %H0, %H0, %H4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strexd %1, %0, %H0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+}
+
+static inline void atomic64_add_unchecked(u64 i, atomic64_unchecked_t *v)
+{
+ u64 result;
+ unsigned long tmp;
+
+ __asm__ __volatile__("@ atomic64_add_unchecked\n"
+"1: ldrexd %0, %H0, [%3]\n"
+" adds %0, %0, %4\n"
 " adc %H0, %H0, %H4\n"
 " strexd %1, %0, %H0, [%3]\n"
 " teq %1, #0\n"
@@ -313,12 +588,49 @@ static inline void atomic64_add(u64 i, atomic64_t *v)

 static inline u64 atomic64_add_return(u64 i, atomic64_t *v)
 {
- u64 result;
- unsigned long tmp;
+ u64 result, tmp;

 smp_mb();

 __asm__ __volatile__("@ atomic64_add_return\n"
+"1: ldrexd %1, %H1, [%3]\n"
+" adds %0, %1, %4\n"
+" adcs %H0, %H1, %H4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %0, %1\n"
+" mov %H0, %H1\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strexd %1, %0, %H0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+
+ smp_mb();
+
+ return result;
+}
+
+static inline u64 atomic64_add_return_unchecked(u64 i, atomic64_unchecked_t *v)
+{
+ u64 result;
+ unsigned long tmp;
+
+ smp_mb();
+
+ __asm__ __volatile__("@ atomic64_add_return_unchecked\n"
 "1: ldrexd %0, %H0, [%3]\n"
 " adds %0, %0, %4\n"
 " adc %H0, %H0, %H4\n"
@@ -342,6 +654,36 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)
 __asm__ __volatile__("@ atomic64_sub\n"
 "1: ldrexd %0, %H0, [%3]\n"
 " subs %0, %0, %4\n"
+" sbcs %H0, %H0, %H4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
+" strexd %1, %0, %H0, [%3]\n"
+" teq %1, #0\n"
+" bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
+ : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
+ : "r" (&v->counter), "r" (i)
+ : "cc");
+}
+
+static inline void atomic64_sub_unchecked(u64 i, atomic64_unchecked_t *v)
+{
+ u64 result;
+ unsigned long tmp;
+
+ __asm__ __volatile__("@ atomic64_sub_unchecked\n"
+"1: ldrexd %0, %H0, [%3]\n"
+" subs %0, %0, %4\n"
 " sbc %H0, %H0, %H4\n"
 " strexd %1, %0, %H0, [%3]\n"
 " teq %1, #0\n"
@@ -353,18 +695,32 @@ static inline void atomic64_sub(u64 i, atomic64_t *v)

 static inline u64 atomic64_sub_return(u64 i, atomic64_t *v)
 {
- u64 result;
- unsigned long tmp;
+ u64 result, tmp;

 smp_mb();

 __asm__ __volatile__("@ atomic64_sub_return\n"
-"1: ldrexd %0, %H0, [%3]\n"
-" subs %0, %0, %4\n"
-" sbc %H0, %H0, %H4\n"
+"1: ldrexd %1, %H1, [%3]\n"
+" subs %0, %1, %4\n"
+" sbcs %H0, %H1, %H4\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %0, %1\n"
+" mov %H0, %H1\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
 " strexd %1, %0, %H0, [%3]\n"
 " teq %1, #0\n"
 " bne 1b"
+
+#ifdef CONFIG_PAX_REFCOUNT
+"\n4:\n"
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 : "r" (&v->counter), "r" (i)
 : "cc");
@@ -398,6 +754,30 @@ static inline u64 atomic64_cmpxchg(atomic64_t *ptr, u64 old, u64 new)
 return oldval;
 }

+static inline u64 atomic64_cmpxchg_unchecked(atomic64_unchecked_t *ptr, u64 old, u64 new)
+{
+ u64 oldval;
+ unsigned long res;
+
+ smp_mb();
+
+ do {
+ __asm__ __volatile__("@ atomic64_cmpxchg_unchecked\n"
+ "ldrexd %1, %H1, [%3]\n"
+ "mov %0, #0\n"
+ "teq %1, %4\n"
+ "teqeq %H1, %H4\n"
+ "strexdeq %0, %5, %H5, [%3]"
+ : "=&r" (res), "=&r" (oldval), "+Qo" (ptr->counter)
+ : "r" (&ptr->counter), "r" (old), "r" (new)
+ : "cc");
+ } while (res);
+
+ smp_mb();
+
+ return oldval;
+}
+
 static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)
 {
 u64 result;
@@ -421,21 +801,34 @@ static inline u64 atomic64_xchg(atomic64_t *ptr, u64 new)

 static inline u64 atomic64_dec_if_positive(atomic64_t *v)
 {
- u64 result;
- unsigned long tmp;
+ u64 result, tmp;

 smp_mb();

 __asm__ __volatile__("@ atomic64_dec_if_positive\n"
-"1: ldrexd %0, %H0, [%3]\n"
-" subs %0, %0, #1\n"
-" sbc %H0, %H0, #0\n"
+"1: ldrexd %1, %H1, [%3]\n"
+" subs %0, %1, #1\n"
+" sbcs %H0, %H1, #0\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+" mov %0, %1\n"
+" mov %H0, %H1\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
 " teq %H0, #0\n"
-" bmi 2f\n"
+" bmi 4f\n"
 " strexd %1, %0, %H0, [%3]\n"
 " teq %1, #0\n"
 " bne 1b\n"
-"2:"
+"4:\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
 : "=&r" (result), "=&r" (tmp), "+Qo" (v->counter)
 : "r" (&v->counter)
 : "cc");
@@ -458,13 +851,25 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)
 " teq %0, %5\n"
 " teqeq %H0, %H5\n"
 " moveq %1, #0\n"
-" beq 2f\n"
+" beq 4f\n"
 " adds %0, %0, %6\n"
-" adc %H0, %H0, %H6\n"
+" adcs %H0, %H0, %H6\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+" bvc 3f\n"
+"2: bkpt 0xf103\n"
+"3:\n"
+#endif
+
 " strexd %2, %0, %H0, [%4]\n"
 " teq %2, #0\n"
 " bne 1b\n"
-"2:"
+"4:\n"
+
+#ifdef CONFIG_PAX_REFCOUNT
+ _ASM_EXTABLE(2b, 4b)
+#endif
+
 : "=&r" (val), "+r" (ret), "=&r" (tmp), "+Qo" (v->counter)
 : "r" (&v->counter), "r" (u), "r" (a)
 : "cc");
@@ -477,10 +882,13 @@ static inline int atomic64_add_unless(atomic64_t *v, u64 a, u64 u)

 #define atomic64_add_negative(a, v) (atomic64_add_return((a), (v)) < 0)
 #define atomic64_inc(v) atomic64_add(1LL, (v))
+#define atomic64_inc_unchecked(v) atomic64_add_unchecked(1LL, (v))
 #define atomic64_inc_return(v) atomic64_add_return(1LL, (v))
+#define atomic64_inc_return_unchecked(v) atomic64_add_return_unchecked(1LL, (v))
 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
 #define atomic64_sub_and_test(a, v) (atomic64_sub_return((a), (v)) == 0)
 #define atomic64_dec(v) atomic64_sub(1LL, (v))
+#define atomic64_dec_unchecked(v) atomic64_sub_unchecked(1LL, (v))
 #define atomic64_dec_return(v) atomic64_sub_return(1LL, (v))
 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
diff --git a/arch/arm/include/asm/cache.h b/arch/arm/include/asm/cache.h
index 75fe66b..ba3dee4 100644
--- a/arch/arm/include/asm/cache.h
+++ b/arch/arm/include/asm/cache.h
@@ -4,8 +4,10 @@
 #ifndef __ASMARM_CACHE_H
 #define __ASMARM_CACHE_H

+#include <linux/const.h>
+
 #define L1_CACHE_SHIFT CONFIG_ARM_L1_CACHE_SHIFT
-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)

 /*
 * Memory returned by kmalloc() may be used for DMA, so we must make
@@ -24,5 +26,6 @@
 #endif

 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
+#define __read_only __attribute__ ((__section__(".data..read_only")))

 #endif
diff --git a/arch/arm/include/asm/cacheflush.h b/arch/arm/include/asm/cacheflush.h
index 17d0ae8..014e350 100644
--- a/arch/arm/include/asm/cacheflush.h
+++ b/arch/arm/include/asm/cacheflush.h
@@ -116,7 +116,7 @@ struct cpu_cache_fns {
 void (*dma_unmap_area)(const void *, size_t, int);

 void (*dma_flush_range)(const void *, const void *);
-};
+} __no_const;

 /*
 * Select the calling method
diff --git a/arch/arm/include/asm/checksum.h b/arch/arm/include/asm/checksum.h
index 6dcc164..b14d917 100644
--- a/arch/arm/include/asm/checksum.h
+++ b/arch/arm/include/asm/checksum.h
@@ -37,7 +37,19 @@ __wsum
 csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum);

 __wsum
-csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
+__csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr);
+
+static inline __wsum
+csum_partial_copy_from_user(const void __user *src, void *dst, int len, __wsum sum, int *err_ptr)
+{
+ __wsum ret;
+ pax_open_userland();
+ ret = __csum_partial_copy_from_user(src, dst, len, sum, err_ptr);
+ pax_close_userland();
+ return ret;
+}
+
+

 /*
 * Fold a partial checksum without adding pseudo headers
diff --git a/arch/arm/include/asm/cmpxchg.h b/arch/arm/include/asm/cmpxchg.h
index 4f009c1..466c59b 100644
--- a/arch/arm/include/asm/cmpxchg.h
+++ b/arch/arm/include/asm/cmpxchg.h
@@ -102,6 +102,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size

 #define xchg(ptr,x) \
 ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
+#define xchg_unchecked(ptr,x) \
+ ((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))

 #include <asm-generic/cmpxchg-local.h>

diff --git a/arch/arm/include/asm/domain.h b/arch/arm/include/asm/domain.h
index 6ddbe44..b5e38b1 100644
--- a/arch/arm/include/asm/domain.h
+++ b/arch/arm/include/asm/domain.h
@@ -48,18 +48,37 @@
 * Domain types
 */
 #define DOMAIN_NOACCESS 0
-#define DOMAIN_CLIENT 1
 #ifdef CONFIG_CPU_USE_DOMAINS
+#define DOMAIN_USERCLIENT 1
+#define DOMAIN_KERNELCLIENT 1
 #define DOMAIN_MANAGER 3
+#define DOMAIN_VECTORS DOMAIN_USER
 #else
+
+#ifdef CONFIG_PAX_KERNEXEC
 #define DOMAIN_MANAGER 1
+#define DOMAIN_KERNEXEC 3
+#else
+#define DOMAIN_MANAGER 1
+#endif
+
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+#define DOMAIN_USERCLIENT 0
+#define DOMAIN_UDEREF 1
+#define DOMAIN_VECTORS DOMAIN_KERNEL
+#else
+#define DOMAIN_USERCLIENT 1
+#define DOMAIN_VECTORS DOMAIN_USER
+#endif
+#define DOMAIN_KERNELCLIENT 1
+
 #endif

 #define domain_val(dom,type) ((type) << (2*(dom)))

 #ifndef __ASSEMBLY__

-#ifdef CONFIG_CPU_USE_DOMAINS
+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
 static inline void set_domain(unsigned val)
 {
 asm volatile(
@@ -68,15 +87,7 @@ static inline void set_domain(unsigned val)
 isb();
 }

-#define modify_domain(dom,type) \
- do { \
- struct thread_info *thread = current_thread_info(); \
- unsigned int domain = thread->cpu_domain; \
- domain &= ~domain_val(dom, DOMAIN_MANAGER); \
- thread->cpu_domain = domain | domain_val(dom, type); \
- set_domain(thread->cpu_domain); \
- } while (0)
-
+extern void modify_domain(unsigned int dom, unsigned int type);
 #else
 static inline void set_domain(unsigned val) { }
 static inline void modify_domain(unsigned dom, unsigned type) { }
diff --git a/arch/arm/include/asm/elf.h b/arch/arm/include/asm/elf.h
index 38050b1..9d90e8b 100644
--- a/arch/arm/include/asm/elf.h
+++ b/arch/arm/include/asm/elf.h
@@ -116,7 +116,14 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
 the loader. We need to make sure that it is out of the way of the program
 that it will "exec", and that there is sufficient room for the brk. */

-#define ELF_ET_DYN_BASE (2 * TASK_SIZE / 3)
+#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
+
+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE 0x00008000UL
+
+#define PAX_DELTA_MMAP_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
+#define PAX_DELTA_STACK_LEN ((current->personality == PER_LINUX_32BIT) ? 16 : 10)
+#endif

 /* When the program starts, a1 contains a pointer to a function to be
 registered with atexit, as per the SVR4 ABI. A value of 0 means we
@@ -126,8 +133,4 @@ int dump_task_regs(struct task_struct *t, elf_gregset_t *elfregs);
 extern void elf_set_personality(const struct elf32_hdr *);
 #define SET_PERSONALITY(ex) elf_set_personality(&(ex))

-struct mm_struct;
-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
-#define arch_randomize_brk arch_randomize_brk
-
 #endif
diff --git a/arch/arm/include/asm/fncpy.h b/arch/arm/include/asm/fncpy.h
index de53547..52b9a28 100644
--- a/arch/arm/include/asm/fncpy.h
+++ b/arch/arm/include/asm/fncpy.h
@@ -81,7 +81,9 @@
 BUG_ON((uintptr_t)(dest_buf) & (FNCPY_ALIGN - 1) || \
 (__funcp_address & ~(uintptr_t)1 & (FNCPY_ALIGN - 1))); \
 \
+ pax_open_kernel(); \
 memcpy(dest_buf, (void const *)(__funcp_address & ~1), size); \
+ pax_close_kernel(); \
 flush_icache_range((unsigned long)(dest_buf), \
 (unsigned long)(dest_buf) + (size)); \
 \
diff --git a/arch/arm/include/asm/futex.h b/arch/arm/include/asm/futex.h
index e42cf59..7b94b8f 100644
--- a/arch/arm/include/asm/futex.h
+++ b/arch/arm/include/asm/futex.h
@@ -50,6 +50,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 return -EFAULT;

+ pax_open_userland();
+
 smp_mb();
 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 "1: ldrex %1, [%4]\n"
@@ -65,6 +67,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 : "cc", "memory");
 smp_mb();

+ pax_close_userland();
+
 *uval = val;
 return ret;
 }
@@ -95,6 +99,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
 return -EFAULT;

+ pax_open_userland();
+
 __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
 "1: " TUSER(ldr) " %1, [%4]\n"
 " teq %1, %2\n"
@@ -105,6 +111,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT)
 : "cc", "memory");

+ pax_close_userland();
+
 *uval = val;
 return ret;
 }
@@ -127,6 +135,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 return -EFAULT;

 pagefault_disable(); /* implies preempt_disable() */
+ pax_open_userland();

 switch (op) {
 case FUTEX_OP_SET:
@@ -148,6 +157,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
 ret = -ENOSYS;
 }

+ pax_close_userland();
 pagefault_enable(); /* subsumes preempt_enable() */

 if (!ret) {
diff --git a/arch/arm/include/asm/kmap_types.h b/arch/arm/include/asm/kmap_types.h
index 83eb2f7..ed77159 100644
--- a/arch/arm/include/asm/kmap_types.h
+++ b/arch/arm/include/asm/kmap_types.h
@@ -4,6 +4,6 @@
 /*
 * This is the "bare minimum". AIO seems to require this.
 */
-#define KM_TYPE_NR 16
+#define KM_TYPE_NR 17

 #endif
diff --git a/arch/arm/include/asm/mach/dma.h b/arch/arm/include/asm/mach/dma.h
index 9e614a1..3302cca 100644
--- a/arch/arm/include/asm/mach/dma.h
+++ b/arch/arm/include/asm/mach/dma.h
@@ -22,7 +22,7 @@ struct dma_ops {
 int (*residue)(unsigned int, dma_t *); /* optional */
 int (*setspeed)(unsigned int, dma_t *, int); /* optional */
 const char *type;
-};
+} __do_const;

 struct dma_struct {
 void *addr; /* single DMA address */
diff --git a/arch/arm/include/asm/mach/map.h b/arch/arm/include/asm/mach/map.h
index 2fe141f..192dc01 100644
--- a/arch/arm/include/asm/mach/map.h
+++ b/arch/arm/include/asm/mach/map.h
@@ -27,13 +27,16 @@ struct map_desc {
 #define MT_MINICLEAN 6
 #define MT_LOW_VECTORS 7
 #define MT_HIGH_VECTORS 8
-#define MT_MEMORY 9
+#define MT_MEMORY_RWX 9
 #define MT_ROM 10
-#define MT_MEMORY_NONCACHED 11
+#define MT_MEMORY_NONCACHED_RX 11
 #define MT_MEMORY_DTCM 12
 #define MT_MEMORY_ITCM 13
 #define MT_MEMORY_SO 14
 #define MT_MEMORY_DMA_READY 15
+#define MT_MEMORY_RW 16
+#define MT_MEMORY_RX 17
+#define MT_MEMORY_NONCACHED_RW 18

 #ifdef CONFIG_MMU
 extern void iotable_init(struct map_desc *, int);
diff --git a/arch/arm/include/asm/outercache.h b/arch/arm/include/asm/outercache.h
index 12f71a1..04e063c 100644
--- a/arch/arm/include/asm/outercache.h
+++ b/arch/arm/include/asm/outercache.h
@@ -35,7 +35,7 @@ struct outer_cache_fns {
 #endif
 void (*set_debug)(unsigned long);
 void (*resume)(void);
-};
+} __no_const;

 #ifdef CONFIG_OUTER_CACHE

diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
index 812a494..71fc0b6 100644
--- a/arch/arm/include/asm/page.h
+++ b/arch/arm/include/asm/page.h
@@ -114,7 +114,7 @@ struct cpu_user_fns {
 void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
 void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
 unsigned long vaddr, struct vm_area_struct *vma);
-};
+} __no_const;

 #ifdef MULTI_USER
 extern struct cpu_user_fns cpu_user;
1803diff --git a/arch/arm/include/asm/pgalloc.h b/arch/arm/include/asm/pgalloc.h
1804index 943504f..c37a730 100644
1805--- a/arch/arm/include/asm/pgalloc.h
1806+++ b/arch/arm/include/asm/pgalloc.h
1807@@ -17,6 +17,7 @@
1808 #include <asm/processor.h>
1809 #include <asm/cacheflush.h>
1810 #include <asm/tlbflush.h>
1811+#include <asm/system_info.h>
1812
1813 #define check_pgt_cache() do { } while (0)
1814
1815@@ -43,6 +44,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1816 set_pud(pud, __pud(__pa(pmd) | PMD_TYPE_TABLE));
1817 }
1818
1819+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1820+{
1821+ pud_populate(mm, pud, pmd);
1822+}
1823+
1824 #else /* !CONFIG_ARM_LPAE */
1825
1826 /*
1827@@ -51,6 +57,7 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
1828 #define pmd_alloc_one(mm,addr) ({ BUG(); ((pmd_t *)2); })
1829 #define pmd_free(mm, pmd) do { } while (0)
1830 #define pud_populate(mm,pmd,pte) BUG()
1831+#define pud_populate_kernel(mm,pmd,pte) BUG()
1832
1833 #endif /* CONFIG_ARM_LPAE */
1834
1835@@ -126,6 +133,19 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
1836 __free_page(pte);
1837 }
1838
1839+static inline void __section_update(pmd_t *pmdp, unsigned long addr, pmdval_t prot)
1840+{
1841+#ifdef CONFIG_ARM_LPAE
1842+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1843+#else
1844+ if (addr & SECTION_SIZE)
1845+ pmdp[1] = __pmd(pmd_val(pmdp[1]) | prot);
1846+ else
1847+ pmdp[0] = __pmd(pmd_val(pmdp[0]) | prot);
1848+#endif
1849+ flush_pmd_entry(pmdp);
1850+}
1851+
1852 static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
1853 pmdval_t prot)
1854 {
1855@@ -155,7 +175,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
1856 static inline void
1857 pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
1858 {
1859- __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
1860+ __pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE | __supported_pmd_mask);
1861 }
1862 #define pmd_pgtable(pmd) pmd_page(pmd)
1863
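
A note on the __section_update() helper added above: with the classic 2-level page tables a Linux pmd_t covers two 1 MiB hardware section entries (the Linux PGDIR unit is 2 MiB), so the helper picks pmdp[0] or pmdp[1] by testing addr & SECTION_SIZE, whereas under LPAE a pmd is a single entry. The index selection in isolation, assuming the usual 1 MiB SECTION_SIZE:

    #define SECTION_SIZE 0x00100000UL  /* 1 MiB, the non-LPAE section size */

    /* which of the two section entries inside a 2 MiB pmd covers addr */
    static int section_index(unsigned long addr)
    {
            return (addr & SECTION_SIZE) ? 1 : 0;
    }
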
1864diff --git a/arch/arm/include/asm/pgtable-2level-hwdef.h b/arch/arm/include/asm/pgtable-2level-hwdef.h
1865index 5cfba15..f415e1a 100644
1866--- a/arch/arm/include/asm/pgtable-2level-hwdef.h
1867+++ b/arch/arm/include/asm/pgtable-2level-hwdef.h
1868@@ -20,12 +20,15 @@
1869 #define PMD_TYPE_FAULT (_AT(pmdval_t, 0) << 0)
1870 #define PMD_TYPE_TABLE (_AT(pmdval_t, 1) << 0)
1871 #define PMD_TYPE_SECT (_AT(pmdval_t, 2) << 0)
1872+#define PMD_PXNTABLE (_AT(pmdval_t, 1) << 2) /* v7 */
1873 #define PMD_BIT4 (_AT(pmdval_t, 1) << 4)
1874 #define PMD_DOMAIN(x) (_AT(pmdval_t, (x)) << 5)
1875 #define PMD_PROTECTION (_AT(pmdval_t, 1) << 9) /* v5 */
1876+
1877 /*
1878 * - section
1879 */
1880+#define PMD_SECT_PXN (_AT(pmdval_t, 1) << 0) /* v7 */
1881 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1882 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1883 #define PMD_SECT_XN (_AT(pmdval_t, 1) << 4) /* v6 */
1884@@ -37,6 +40,7 @@
1885 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 17) /* v6 */
1886 #define PMD_SECT_SUPER (_AT(pmdval_t, 1) << 18) /* v6 */
1887 #define PMD_SECT_AF (_AT(pmdval_t, 0))
1888+#define PMD_SECT_RDONLY (_AT(pmdval_t, 0))
1889
1890 #define PMD_SECT_UNCACHED (_AT(pmdval_t, 0))
1891 #define PMD_SECT_BUFFERED (PMD_SECT_BUFFERABLE)
1892@@ -66,6 +70,7 @@
1893 * - extended small page/tiny page
1894 */
1895 #define PTE_EXT_XN (_AT(pteval_t, 1) << 0) /* v6 */
1896+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 2) /* v7 */
1897 #define PTE_EXT_AP_MASK (_AT(pteval_t, 3) << 4)
1898 #define PTE_EXT_AP0 (_AT(pteval_t, 1) << 4)
1899 #define PTE_EXT_AP1 (_AT(pteval_t, 2) << 4)
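
The PXN definitions added to the 2-level descriptor layout above (bit 2 of a first-level table entry, bit 0 of a section entry, bit 2 of an extended small page) implement ARMv7 Privileged eXecute Never: with the bit set, the CPU refuses to fetch instructions from that mapping while in a privileged mode, which stops the kernel from being redirected into user-controlled pages. A hypothetical helper showing how a first-level entry for a user second-level table would carry the bit (the names and the 1 KiB table alignment mask are illustrative):

    typedef unsigned int pmdval_t;   /* 32-bit short descriptor */
    #define PMD_TYPE_TABLE (1u << 0)
    #define PMD_PXNTABLE   (1u << 2)

    /* everything mapped through this second-level table becomes PXN */
    static pmdval_t user_table_desc(unsigned int table_pa)
    {
            return (table_pa & ~0x3ffu) | PMD_TYPE_TABLE | PMD_PXNTABLE;
    }
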
1900diff --git a/arch/arm/include/asm/pgtable-2level.h b/arch/arm/include/asm/pgtable-2level.h
1901index f97ee02..07f1be5 100644
1902--- a/arch/arm/include/asm/pgtable-2level.h
1903+++ b/arch/arm/include/asm/pgtable-2level.h
1904@@ -125,6 +125,7 @@
1905 #define L_PTE_XN (_AT(pteval_t, 1) << 9)
1906 #define L_PTE_SHARED (_AT(pteval_t, 1) << 10) /* shared(v6), coherent(xsc3) */
1907 #define L_PTE_NONE (_AT(pteval_t, 1) << 11)
1908+#define L_PTE_PXN (_AT(pteval_t, 1) << 12) /* v7 */
1909
1910 /*
1911 * These are the memory types, defined to be compatible with
1912diff --git a/arch/arm/include/asm/pgtable-3level-hwdef.h b/arch/arm/include/asm/pgtable-3level-hwdef.h
1913index 18f5cef..25b8f43 100644
1914--- a/arch/arm/include/asm/pgtable-3level-hwdef.h
1915+++ b/arch/arm/include/asm/pgtable-3level-hwdef.h
1916@@ -41,6 +41,7 @@
1917 */
1918 #define PMD_SECT_BUFFERABLE (_AT(pmdval_t, 1) << 2)
1919 #define PMD_SECT_CACHEABLE (_AT(pmdval_t, 1) << 3)
1920+#define PMD_SECT_RDONLY (_AT(pmdval_t, 1) << 7)
1921 #define PMD_SECT_S (_AT(pmdval_t, 3) << 8)
1922 #define PMD_SECT_AF (_AT(pmdval_t, 1) << 10)
1923 #define PMD_SECT_nG (_AT(pmdval_t, 1) << 11)
1924@@ -71,6 +72,7 @@
1925 #define PTE_EXT_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1926 #define PTE_EXT_AF (_AT(pteval_t, 1) << 10) /* Access Flag */
1927 #define PTE_EXT_NG (_AT(pteval_t, 1) << 11) /* nG */
1928+#define PTE_EXT_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1929 #define PTE_EXT_XN (_AT(pteval_t, 1) << 54) /* XN */
1930
1931 /*
1932diff --git a/arch/arm/include/asm/pgtable-3level.h b/arch/arm/include/asm/pgtable-3level.h
1933index 86b8fe3..e25f975 100644
1934--- a/arch/arm/include/asm/pgtable-3level.h
1935+++ b/arch/arm/include/asm/pgtable-3level.h
1936@@ -74,6 +74,7 @@
1937 #define L_PTE_RDONLY (_AT(pteval_t, 1) << 7) /* AP[2] */
1938 #define L_PTE_SHARED (_AT(pteval_t, 3) << 8) /* SH[1:0], inner shareable */
1939 #define L_PTE_YOUNG (_AT(pteval_t, 1) << 10) /* AF */
1940+#define L_PTE_PXN (_AT(pteval_t, 1) << 53) /* PXN */
1941 #define L_PTE_XN (_AT(pteval_t, 1) << 54) /* XN */
1942 #define L_PTE_DIRTY (_AT(pteval_t, 1) << 55) /* unused */
1943 #define L_PTE_SPECIAL (_AT(pteval_t, 1) << 56) /* unused */
1944@@ -82,6 +83,7 @@
1945 /*
1946 * To be used in assembly code with the upper page attributes.
1947 */
1948+#define L_PTE_PXN_HIGH (1 << (53 - 32))
1949 #define L_PTE_XN_HIGH (1 << (54 - 32))
1950 #define L_PTE_DIRTY_HIGH (1 << (55 - 32))
1951
1952diff --git a/arch/arm/include/asm/pgtable.h b/arch/arm/include/asm/pgtable.h
1953index 9bcd262..fba731c 100644
1954--- a/arch/arm/include/asm/pgtable.h
1955+++ b/arch/arm/include/asm/pgtable.h
1956@@ -30,6 +30,9 @@
1957 #include <asm/pgtable-2level.h>
1958 #endif
1959
1960+#define ktla_ktva(addr) (addr)
1961+#define ktva_ktla(addr) (addr)
1962+
1963 /*
1964 * Just any arbitrary offset to the start of the vmalloc VM area: the
1965 * current 8MB value just means that there will be a 8MB "hole" after the
1966@@ -45,6 +48,9 @@
1967 #define LIBRARY_TEXT_START 0x0c000000
1968
1969 #ifndef __ASSEMBLY__
1970+extern pteval_t __supported_pte_mask;
1971+extern pmdval_t __supported_pmd_mask;
1972+
1973 extern void __pte_error(const char *file, int line, pte_t);
1974 extern void __pmd_error(const char *file, int line, pmd_t);
1975 extern void __pgd_error(const char *file, int line, pgd_t);
1976@@ -53,6 +59,50 @@ extern void __pgd_error(const char *file, int line, pgd_t);
1977 #define pmd_ERROR(pmd) __pmd_error(__FILE__, __LINE__, pmd)
1978 #define pgd_ERROR(pgd) __pgd_error(__FILE__, __LINE__, pgd)
1979
1980+#define __HAVE_ARCH_PAX_OPEN_KERNEL
1981+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
1982+
1983+#ifdef CONFIG_PAX_KERNEXEC
1984+#include <asm/domain.h>
1985+#include <linux/thread_info.h>
1986+#include <linux/preempt.h>
1987+#endif
1988+
1989+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
1990+static inline int test_domain(int domain, int domaintype)
1991+{
1992+ return ((current_thread_info()->cpu_domain) & domain_val(domain, 3)) == domain_val(domain, domaintype);
1993+}
1994+#endif
1995+
1996+#ifdef CONFIG_PAX_KERNEXEC
1997+static inline unsigned long pax_open_kernel(void) {
1998+#ifdef CONFIG_ARM_LPAE
1999+ /* TODO */
2000+#else
2001+ preempt_disable();
2002+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC));
2003+ modify_domain(DOMAIN_KERNEL, DOMAIN_KERNEXEC);
2004+#endif
2005+ return 0;
2006+}
2007+
2008+static inline unsigned long pax_close_kernel(void) {
2009+#ifdef CONFIG_ARM_LPAE
2010+ /* TODO */
2011+#else
2012+ BUG_ON(test_domain(DOMAIN_KERNEL, DOMAIN_MANAGER));
2013+ /* DOMAIN_MANAGER = "client" under KERNEXEC */
2014+ modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
2015+ preempt_enable_no_resched();
2016+#endif
2017+ return 0;
2018+}
2019+#else
2020+static inline unsigned long pax_open_kernel(void) { return 0; }
2021+static inline unsigned long pax_close_kernel(void) { return 0; }
2022+#endif
2023+
2024 /*
2025 * This is the lowest virtual address we can permit any user space
2026 * mapping to be mapped at. This is particularly important for
2027@@ -72,8 +122,8 @@ extern void __pgd_error(const char *file, int line, pgd_t);
2028 /*
2029 * The pgprot_* and protection_map entries will be fixed up in runtime
2030 * to include the cachable and bufferable bits based on memory policy,
2031- * as well as any architecture dependent bits like global/ASID and SMP
2032- * shared mapping bits.
2033+ * as well as any architecture dependent bits like global/ASID, PXN,
2034+ * and SMP shared mapping bits.
2035 */
2036 #define _L_PTE_DEFAULT L_PTE_PRESENT | L_PTE_YOUNG
2037
2038@@ -257,7 +307,7 @@ static inline pte_t pte_mkspecial(pte_t pte) { return pte; }
2039 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
2040 {
2041 const pteval_t mask = L_PTE_XN | L_PTE_RDONLY | L_PTE_USER |
2042- L_PTE_NONE | L_PTE_VALID;
2043+ L_PTE_NONE | L_PTE_VALID | __supported_pte_mask;
2044 pte_val(pte) = (pte_val(pte) & ~mask) | (pgprot_val(newprot) & mask);
2045 return pte;
2046 }
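
The pax_open_kernel()/pax_close_kernel() pair introduced above is the write gate KERNEXEC uses wherever the kernel must modify otherwise read-only kernel memory; the fiq.c, patch.c and traps.c hunks later in this patch all follow the same pattern. A hypothetical caller, to show the shape:

    /* illustrative only: addr points into a region KERNEXEC keeps RO */
    static void patch_kernel_word(unsigned int *addr, unsigned int val)
    {
            pax_open_kernel();   /* DOMAIN_KERNEL -> KERNEXEC, writes allowed */
            *addr = val;
            pax_close_kernel();  /* back to client, region read-only again */
    }
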
2047diff --git a/arch/arm/include/asm/proc-fns.h b/arch/arm/include/asm/proc-fns.h
2048index f3628fb..a0672dd 100644
2049--- a/arch/arm/include/asm/proc-fns.h
2050+++ b/arch/arm/include/asm/proc-fns.h
2051@@ -75,7 +75,7 @@ extern struct processor {
2052 unsigned int suspend_size;
2053 void (*do_suspend)(void *);
2054 void (*do_resume)(void *);
2055-} processor;
2056+} __do_const processor;
2057
2058 #ifndef MULTI_CPU
2059 extern void cpu_proc_init(void);
2060diff --git a/arch/arm/include/asm/processor.h b/arch/arm/include/asm/processor.h
2061index 06e7d50..8a8e251 100644
2062--- a/arch/arm/include/asm/processor.h
2063+++ b/arch/arm/include/asm/processor.h
2064@@ -65,9 +65,8 @@ struct thread_struct {
2065 regs->ARM_cpsr |= PSR_ENDSTATE; \
2066 regs->ARM_pc = pc & ~1; /* pc */ \
2067 regs->ARM_sp = sp; /* sp */ \
2068- regs->ARM_r2 = stack[2]; /* r2 (envp) */ \
2069- regs->ARM_r1 = stack[1]; /* r1 (argv) */ \
2070- regs->ARM_r0 = stack[0]; /* r0 (argc) */ \
2071+ /* r2 (envp), r1 (argv), r0 (argc) */ \
2072+ (void)copy_from_user(&regs->ARM_r0, (const char __user *)stack, 3 * sizeof(unsigned long)); \
2073 nommu_start_thread(regs); \
2074 })
2075
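
Why the start_thread() change above is safe: under UDEREF the kernel may no longer dereference the userland stack pointer directly, so the three registers are filled with a single copy_from_user() of three words starting at &regs->ARM_r0, which only works because r0, r1 and r2 sit adjacently in pt_regs. A compile-time sketch of that layout assumption (the struct name is invented for illustration):

    #include <stddef.h>

    struct pt_regs_sketch { unsigned long uregs[18]; }; /* r0..r15, cpsr, orig_r0 */

    _Static_assert(offsetof(struct pt_regs_sketch, uregs[2]) ==
                   offsetof(struct pt_regs_sketch, uregs[0]) + 2 * sizeof(unsigned long),
                   "r0..r2 contiguous: one 3-word copy fills argc/argv/envp");
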
2076diff --git a/arch/arm/include/asm/psci.h b/arch/arm/include/asm/psci.h
2077index ce0dbe7..c085b6f 100644
2078--- a/arch/arm/include/asm/psci.h
2079+++ b/arch/arm/include/asm/psci.h
2080@@ -29,7 +29,7 @@ struct psci_operations {
2081 int (*cpu_off)(struct psci_power_state state);
2082 int (*cpu_on)(unsigned long cpuid, unsigned long entry_point);
2083 int (*migrate)(unsigned long cpuid);
2084-};
2085+} __no_const;
2086
2087 extern struct psci_operations psci_ops;
2088
2089diff --git a/arch/arm/include/asm/smp.h b/arch/arm/include/asm/smp.h
2090index d3a22be..3a69ad5 100644
2091--- a/arch/arm/include/asm/smp.h
2092+++ b/arch/arm/include/asm/smp.h
2093@@ -107,7 +107,7 @@ struct smp_operations {
2094 int (*cpu_disable)(unsigned int cpu);
2095 #endif
2096 #endif
2097-};
2098+} __no_const;
2099
2100 /*
2101 * set platform specific SMP operations
2102diff --git a/arch/arm/include/asm/thread_info.h b/arch/arm/include/asm/thread_info.h
2103index 1995d1a..76693a2 100644
2104--- a/arch/arm/include/asm/thread_info.h
2105+++ b/arch/arm/include/asm/thread_info.h
2106@@ -77,9 +77,9 @@ struct thread_info {
2107 .flags = 0, \
2108 .preempt_count = INIT_PREEMPT_COUNT, \
2109 .addr_limit = KERNEL_DS, \
2110- .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2111- domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2112- domain_val(DOMAIN_IO, DOMAIN_CLIENT), \
2113+ .cpu_domain = domain_val(DOMAIN_USER, DOMAIN_USERCLIENT) | \
2114+ domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT) | \
2115+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT), \
2116 .restart_block = { \
2117 .fn = do_no_restart_syscall, \
2118 }, \
2119@@ -152,7 +152,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2120 #define TIF_SYSCALL_AUDIT 9
2121 #define TIF_SYSCALL_TRACEPOINT 10
2122 #define TIF_SECCOMP 11 /* seccomp syscall filtering active */
2123-#define TIF_NOHZ 12 /* in adaptive nohz mode */
2124+/* within 8 bits of TIF_SYSCALL_TRACE
2125+ * to meet flexible second operand requirements
2126+ */
2127+#define TIF_GRSEC_SETXID 12
2128+#define TIF_NOHZ 13 /* in adaptive nohz mode */
2129 #define TIF_USING_IWMMXT 17
2130 #define TIF_MEMDIE 18 /* is terminating due to OOM killer */
2131 #define TIF_RESTORE_SIGMASK 20
2132@@ -166,10 +170,11 @@ extern int vfp_restore_user_hwstate(struct user_vfp __user *,
2133 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
2134 #define _TIF_SECCOMP (1 << TIF_SECCOMP)
2135 #define _TIF_USING_IWMMXT (1 << TIF_USING_IWMMXT)
2136+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
2137
2138 /* Checks for any syscall work in entry-common.S */
2139 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
2140- _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP)
2141+ _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | _TIF_GRSEC_SETXID)
2142
2143 /*
2144 * Change these and you break ASM code in entry-common.S
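
The placement comment for TIF_GRSEC_SETXID above refers to the ARM "flexible second operand": a data-processing immediate is an 8-bit value rotated right by an even amount, so the combined _TIF_SYSCALL_WORK mask can be tested with a single tst instruction only if all of its bits fit inside one such rotated 8-bit window. With TIF_SYSCALL_TRACE at bit 8 in this kernel and the new flag at bit 12, the mask spans bits 8..12 and still encodes. A compile-time check of that property, with the mask reconstructed from the TIF values above:

    #define WORK_MASK ((1u << 8) | (1u << 9) | (1u << 10) | (1u << 11) | (1u << 12))

    _Static_assert((WORK_MASK & ~(0xffu << 8)) == 0,
                   "all syscall-work bits fit one rotated 8-bit ARM immediate");
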
2145diff --git a/arch/arm/include/asm/uaccess.h b/arch/arm/include/asm/uaccess.h
2146index 7e1f760..d42d7f8 100644
2147--- a/arch/arm/include/asm/uaccess.h
2148+++ b/arch/arm/include/asm/uaccess.h
2149@@ -18,6 +18,7 @@
2150 #include <asm/domain.h>
2151 #include <asm/unified.h>
2152 #include <asm/compiler.h>
2153+#include <asm/pgtable.h>
2154
2155 #define VERIFY_READ 0
2156 #define VERIFY_WRITE 1
2157@@ -63,11 +64,35 @@ extern int __put_user_bad(void);
2158 static inline void set_fs(mm_segment_t fs)
2159 {
2160 current_thread_info()->addr_limit = fs;
2161- modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_CLIENT : DOMAIN_MANAGER);
2162+ modify_domain(DOMAIN_KERNEL, fs ? DOMAIN_KERNELCLIENT : DOMAIN_MANAGER);
2163 }
2164
2165 #define segment_eq(a,b) ((a) == (b))
2166
2167+static inline void pax_open_userland(void)
2168+{
2169+
2170+#ifdef CONFIG_PAX_MEMORY_UDEREF
2171+ if (segment_eq(get_fs(), USER_DS)) {
2172+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_UDEREF));
2173+ modify_domain(DOMAIN_USER, DOMAIN_UDEREF);
2174+ }
2175+#endif
2176+
2177+}
2178+
2179+static inline void pax_close_userland(void)
2180+{
2181+
2182+#ifdef CONFIG_PAX_MEMORY_UDEREF
2183+ if (segment_eq(get_fs(), USER_DS)) {
2184+ BUG_ON(test_domain(DOMAIN_USER, DOMAIN_NOACCESS));
2185+ modify_domain(DOMAIN_USER, DOMAIN_NOACCESS);
2186+ }
2187+#endif
2188+
2189+}
2190+
2191 #define __addr_ok(addr) ({ \
2192 unsigned long flag; \
2193 __asm__("cmp %2, %0; movlo %0, #0" \
2194@@ -143,8 +168,12 @@ extern int __get_user_4(void *);
2195
2196 #define get_user(x,p) \
2197 ({ \
2198+ int __e; \
2199 might_fault(); \
2200- __get_user_check(x,p); \
2201+ pax_open_userland(); \
2202+ __e = __get_user_check(x,p); \
2203+ pax_close_userland(); \
2204+ __e; \
2205 })
2206
2207 extern int __put_user_1(void *, unsigned int);
2208@@ -188,8 +217,12 @@ extern int __put_user_8(void *, unsigned long long);
2209
2210 #define put_user(x,p) \
2211 ({ \
2212+ int __e; \
2213 might_fault(); \
2214- __put_user_check(x,p); \
2215+ pax_open_userland(); \
2216+ __e = __put_user_check(x,p); \
2217+ pax_close_userland(); \
2218+ __e; \
2219 })
2220
2221 #else /* CONFIG_MMU */
2222@@ -230,13 +263,17 @@ static inline void set_fs(mm_segment_t fs)
2223 #define __get_user(x,ptr) \
2224 ({ \
2225 long __gu_err = 0; \
2226+ pax_open_userland(); \
2227 __get_user_err((x),(ptr),__gu_err); \
2228+ pax_close_userland(); \
2229 __gu_err; \
2230 })
2231
2232 #define __get_user_error(x,ptr,err) \
2233 ({ \
2234+ pax_open_userland(); \
2235 __get_user_err((x),(ptr),err); \
2236+ pax_close_userland(); \
2237 (void) 0; \
2238 })
2239
2240@@ -312,13 +349,17 @@ do { \
2241 #define __put_user(x,ptr) \
2242 ({ \
2243 long __pu_err = 0; \
2244+ pax_open_userland(); \
2245 __put_user_err((x),(ptr),__pu_err); \
2246+ pax_close_userland(); \
2247 __pu_err; \
2248 })
2249
2250 #define __put_user_error(x,ptr,err) \
2251 ({ \
2252+ pax_open_userland(); \
2253 __put_user_err((x),(ptr),err); \
2254+ pax_close_userland(); \
2255 (void) 0; \
2256 })
2257
2258@@ -418,11 +459,44 @@ do { \
2259
2260
2261 #ifdef CONFIG_MMU
2262-extern unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n);
2263-extern unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n);
2264+extern unsigned long __must_check ___copy_from_user(void *to, const void __user *from, unsigned long n);
2265+extern unsigned long __must_check ___copy_to_user(void __user *to, const void *from, unsigned long n);
2266+
2267+static inline unsigned long __must_check __copy_from_user(void *to, const void __user *from, unsigned long n)
2268+{
2269+ unsigned long ret;
2270+
2271+ check_object_size(to, n, false);
2272+ pax_open_userland();
2273+ ret = ___copy_from_user(to, from, n);
2274+ pax_close_userland();
2275+ return ret;
2276+}
2277+
2278+static inline unsigned long __must_check __copy_to_user(void __user *to, const void *from, unsigned long n)
2279+{
2280+ unsigned long ret;
2281+
2282+ check_object_size(from, n, true);
2283+ pax_open_userland();
2284+ ret = ___copy_to_user(to, from, n);
2285+ pax_close_userland();
2286+ return ret;
2287+}
2288+
2289 extern unsigned long __must_check __copy_to_user_std(void __user *to, const void *from, unsigned long n);
2290-extern unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
2291+extern unsigned long __must_check ___clear_user(void __user *addr, unsigned long n);
2292 extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned long n);
2293+
2294+static inline unsigned long __must_check __clear_user(void __user *addr, unsigned long n)
2295+{
2296+ unsigned long ret;
2297+ pax_open_userland();
2298+ ret = ___clear_user(addr, n);
2299+ pax_close_userland();
2300+ return ret;
2301+}
2302+
2303 #else
2304 #define __copy_from_user(to,from,n) (memcpy(to, (void __force *)from, n), 0)
2305 #define __copy_to_user(to,from,n) (memcpy((void __force *)to, from, n), 0)
2306@@ -431,6 +505,9 @@ extern unsigned long __must_check __clear_user_std(void __user *addr, unsigned l
2307
2308 static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
2309 {
2310+ if ((long)n < 0)
2311+ return n;
2312+
2313 if (access_ok(VERIFY_READ, from, n))
2314 n = __copy_from_user(to, from, n);
2315 else /* security hole - plug it */
2316@@ -440,6 +517,9 @@ static inline unsigned long __must_check copy_from_user(void *to, const void __u
2317
2318 static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
2319 {
2320+ if ((long)n < 0)
2321+ return n;
2322+
2323 if (access_ok(VERIFY_WRITE, to, n))
2324 n = __copy_to_user(to, from, n);
2325 return n;
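
The (long)n < 0 guard added to copy_from_user() and copy_to_user() above catches lengths that have wrapped: a size computed as a - b with a < b becomes a huge size_t, which reinterpreted as long is negative, and returning n untouched then reads as "nothing copied" instead of feeding an absurd length to access_ok() and the copy routines. The check in isolation:

    #include <stddef.h>

    /* mirrors the guard: nonzero when a size_t has wrapped "negative" */
    static int size_wrapped(size_t n)
    {
            return (long)n < 0;
    }
    /* e.g. size_wrapped((size_t)0 - 1) is nonzero wherever
     * sizeof(size_t) == sizeof(long), as on 32- and 64-bit Linux */
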
2326diff --git a/arch/arm/include/uapi/asm/ptrace.h b/arch/arm/include/uapi/asm/ptrace.h
2327index 96ee092..37f1844 100644
2328--- a/arch/arm/include/uapi/asm/ptrace.h
2329+++ b/arch/arm/include/uapi/asm/ptrace.h
2330@@ -73,7 +73,7 @@
2331 * ARMv7 groups of PSR bits
2332 */
2333 #define APSR_MASK 0xf80f0000 /* N, Z, C, V, Q and GE flags */
2334-#define PSR_ISET_MASK 0x01000010 /* ISA state (J, T) mask */
2335+#define PSR_ISET_MASK 0x01000020 /* ISA state (J, T) mask */
2336 #define PSR_IT_MASK 0x0600fc00 /* If-Then execution state mask */
2337 #define PSR_ENDIAN_MASK 0x00000200 /* Endianness state mask */
2338
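
The PSR_ISET_MASK correction above is plain bit arithmetic: in the CPSR the J bit is bit 24 and the T bit is bit 5, so the ISA-state mask is (1 << 24) | (1 << 5) = 0x01000020, and the old value 0x01000010 had bit 4 set in place of the T bit. As a compile-time check:

    _Static_assert(((1u << 24) | (1u << 5)) == 0x01000020u,
                   "J (bit 24) | T (bit 5) matches the corrected PSR_ISET_MASK");
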
2339diff --git a/arch/arm/kernel/armksyms.c b/arch/arm/kernel/armksyms.c
2340index 60d3b73..e5a0f22 100644
2341--- a/arch/arm/kernel/armksyms.c
2342+++ b/arch/arm/kernel/armksyms.c
2343@@ -53,7 +53,7 @@ EXPORT_SYMBOL(arm_delay_ops);
2344
2345 /* networking */
2346 EXPORT_SYMBOL(csum_partial);
2347-EXPORT_SYMBOL(csum_partial_copy_from_user);
2348+EXPORT_SYMBOL(__csum_partial_copy_from_user);
2349 EXPORT_SYMBOL(csum_partial_copy_nocheck);
2350 EXPORT_SYMBOL(__csum_ipv6_magic);
2351
2352@@ -89,9 +89,9 @@ EXPORT_SYMBOL(__memzero);
2353 #ifdef CONFIG_MMU
2354 EXPORT_SYMBOL(copy_page);
2355
2356-EXPORT_SYMBOL(__copy_from_user);
2357-EXPORT_SYMBOL(__copy_to_user);
2358-EXPORT_SYMBOL(__clear_user);
2359+EXPORT_SYMBOL(___copy_from_user);
2360+EXPORT_SYMBOL(___copy_to_user);
2361+EXPORT_SYMBOL(___clear_user);
2362
2363 EXPORT_SYMBOL(__get_user_1);
2364 EXPORT_SYMBOL(__get_user_2);
2365diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
2366index 582b405..50351b7 100644
2367--- a/arch/arm/kernel/entry-armv.S
2368+++ b/arch/arm/kernel/entry-armv.S
2369@@ -47,6 +47,87 @@
2370 9997:
2371 .endm
2372
2373+ .macro pax_enter_kernel
2374+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2375+ @ make aligned space for saved DACR
2376+ sub sp, sp, #8
2377+ @ save regs
2378+ stmdb sp!, {r1, r2}
2379+ @ read DACR from cpu_domain into r1
2380+ mov r2, sp
2381+ @ assume 8K stacks, since we have to split the immediate in two
2382+ bic r2, r2, #(0x1fc0)
2383+ bic r2, r2, #(0x3f)
2384+ ldr r1, [r2, #TI_CPU_DOMAIN]
2385+ @ store old DACR on stack
2386+ str r1, [sp, #8]
2387+#ifdef CONFIG_PAX_KERNEXEC
2388+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2389+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2390+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2391+#endif
2392+#ifdef CONFIG_PAX_MEMORY_UDEREF
2393+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2394+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2395+#endif
2396+ @ write r1 to current_thread_info()->cpu_domain
2397+ str r1, [r2, #TI_CPU_DOMAIN]
2398+ @ write r1 to DACR
2399+ mcr p15, 0, r1, c3, c0, 0
2400+ @ instruction sync
2401+ instr_sync
2402+ @ restore regs
2403+ ldmia sp!, {r1, r2}
2404+#endif
2405+ .endm
2406+
2407+ .macro pax_open_userland
2408+#ifdef CONFIG_PAX_MEMORY_UDEREF
2409+ @ save regs
2410+ stmdb sp!, {r0, r1}
2411+ @ read DACR from cpu_domain into r1
2412+ mov r0, sp
2413+ @ assume 8K stacks, since we have to split the immediate in two
2414+ bic r0, r0, #(0x1fc0)
2415+ bic r0, r0, #(0x3f)
2416+ ldr r1, [r0, #TI_CPU_DOMAIN]
2417+ @ set current DOMAIN_USER to DOMAIN_CLIENT
2418+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2419+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2420+ @ write r1 to current_thread_info()->cpu_domain
2421+ str r1, [r0, #TI_CPU_DOMAIN]
2422+ @ write r1 to DACR
2423+ mcr p15, 0, r1, c3, c0, 0
2424+ @ instruction sync
2425+ instr_sync
2426+ @ restore regs
2427+ ldmia sp!, {r0, r1}
2428+#endif
2429+ .endm
2430+
2431+ .macro pax_close_userland
2432+#ifdef CONFIG_PAX_MEMORY_UDEREF
2433+ @ save regs
2434+ stmdb sp!, {r0, r1}
2435+ @ read DACR from cpu_domain into r1
2436+ mov r0, sp
2437+ @ assume 8K stacks, since we have to split the immediate in two
2438+ bic r0, r0, #(0x1fc0)
2439+ bic r0, r0, #(0x3f)
2440+ ldr r1, [r0, #TI_CPU_DOMAIN]
2441+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2442+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2443+ @ write r1 to current_thread_info()->cpu_domain
2444+ str r1, [r0, #TI_CPU_DOMAIN]
2445+ @ write r1 to DACR
2446+ mcr p15, 0, r1, c3, c0, 0
2447+ @ instruction sync
2448+ instr_sync
2449+ @ restore regs
2450+ ldmia sp!, {r0, r1}
2451+#endif
2452+ .endm
2453+
2454 .macro pabt_helper
2455 @ PABORT handler takes pt_regs in r2, fault address in r4 and psr in r5
2456 #ifdef MULTI_PABORT
2457@@ -89,11 +170,15 @@
2458 * Invalid mode handlers
2459 */
2460 .macro inv_entry, reason
2461+
2462+ pax_enter_kernel
2463+
2464 sub sp, sp, #S_FRAME_SIZE
2465 ARM( stmib sp, {r1 - lr} )
2466 THUMB( stmia sp, {r0 - r12} )
2467 THUMB( str sp, [sp, #S_SP] )
2468 THUMB( str lr, [sp, #S_LR] )
2469+
2470 mov r1, #\reason
2471 .endm
2472
2473@@ -149,7 +234,11 @@ ENDPROC(__und_invalid)
2474 .macro svc_entry, stack_hole=0
2475 UNWIND(.fnstart )
2476 UNWIND(.save {r0 - pc} )
2477+
2478+ pax_enter_kernel
2479+
2480 sub sp, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2481+
2482 #ifdef CONFIG_THUMB2_KERNEL
2483 SPFIX( str r0, [sp] ) @ temporarily saved
2484 SPFIX( mov r0, sp )
2485@@ -164,7 +253,12 @@ ENDPROC(__und_invalid)
2486 ldmia r0, {r3 - r5}
2487 add r7, sp, #S_SP - 4 @ here for interlock avoidance
2488 mov r6, #-1 @ "" "" "" ""
2489+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2490+ @ offset sp by 8 as done in pax_enter_kernel
2491+ add r2, sp, #(S_FRAME_SIZE + \stack_hole + 4)
2492+#else
2493 add r2, sp, #(S_FRAME_SIZE + \stack_hole - 4)
2494+#endif
2495 SPFIX( addeq r2, r2, #4 )
2496 str r3, [sp, #-4]! @ save the "real" r0 copied
2497 @ from the exception stack
2498@@ -316,6 +410,9 @@ ENDPROC(__pabt_svc)
2499 .macro usr_entry
2500 UNWIND(.fnstart )
2501 UNWIND(.cantunwind ) @ don't unwind the user space
2502+
2503+ pax_enter_kernel_user
2504+
2505 sub sp, sp, #S_FRAME_SIZE
2506 ARM( stmib sp, {r1 - r12} )
2507 THUMB( stmia sp, {r0 - r12} )
2508@@ -414,7 +511,9 @@ __und_usr:
2509 tst r3, #PSR_T_BIT @ Thumb mode?
2510 bne __und_usr_thumb
2511 sub r4, r2, #4 @ ARM instr at LR - 4
2512+ pax_open_userland
2513 1: ldrt r0, [r4]
2514+ pax_close_userland
2515 #ifdef CONFIG_CPU_ENDIAN_BE8
2516 rev r0, r0 @ little endian instruction
2517 #endif
2518@@ -449,10 +548,14 @@ __und_usr_thumb:
2519 */
2520 .arch armv6t2
2521 #endif
2522+ pax_open_userland
2523 2: ldrht r5, [r4]
2524+ pax_close_userland
2525 cmp r5, #0xe800 @ 32bit instruction if xx != 0
2526 blo __und_usr_fault_16 @ 16bit undefined instruction
2527+ pax_open_userland
2528 3: ldrht r0, [r2]
2529+ pax_close_userland
2530 add r2, r2, #2 @ r2 is PC + 2, make it PC + 4
2531 str r2, [sp, #S_PC] @ it's a 2x16bit instr, update
2532 orr r0, r0, r5, lsl #16
2533@@ -690,7 +793,7 @@ ENTRY(__switch_to)
2534 THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack
2535 THUMB( str sp, [ip], #4 )
2536 THUMB( str lr, [ip], #4 )
2537-#ifdef CONFIG_CPU_USE_DOMAINS
2538+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2539 ldr r6, [r2, #TI_CPU_DOMAIN]
2540 #endif
2541 set_tls r3, r4, r5
2542@@ -699,7 +802,7 @@ ENTRY(__switch_to)
2543 ldr r8, =__stack_chk_guard
2544 ldr r7, [r7, #TSK_STACK_CANARY]
2545 #endif
2546-#ifdef CONFIG_CPU_USE_DOMAINS
2547+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2548 mcr p15, 0, r6, c3, c0, 0 @ Set domain register
2549 #endif
2550 mov r5, r0
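
All of the DACR macros above (pax_enter_kernel, pax_open_userland, pax_close_userland) locate current_thread_info() by masking the stack pointer: with 8 KiB kernel stacks, thread_info sits at the stack base, so clearing the low 13 bits of sp yields its address. The two bic instructions (#0x1fc0 and #0x3f) together clear 0x1fff; the constant is split only because of the rotated-immediate encoding limits noted earlier. The C equivalent, assuming THREAD_SIZE is 8192:

    #define THREAD_SIZE 8192UL   /* assumed: two 4 KiB pages per kernel stack */

    struct thread_info;          /* opaque for this sketch */

    static inline struct thread_info *ti_from_sp(unsigned long sp)
    {
            return (struct thread_info *)(sp & ~(THREAD_SIZE - 1));
    }
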
2551diff --git a/arch/arm/kernel/entry-common.S b/arch/arm/kernel/entry-common.S
2552index bc5bc0a..d0998ca 100644
2553--- a/arch/arm/kernel/entry-common.S
2554+++ b/arch/arm/kernel/entry-common.S
2555@@ -10,18 +10,46 @@
2556
2557 #include <asm/unistd.h>
2558 #include <asm/ftrace.h>
2559+#include <asm/domain.h>
2560 #include <asm/unwind.h>
2561
2562+#include "entry-header.S"
2563+
2564 #ifdef CONFIG_NEED_RET_TO_USER
2565 #include <mach/entry-macro.S>
2566 #else
2567 .macro arch_ret_to_user, tmp1, tmp2
2568+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2569+ @ save regs
2570+ stmdb sp!, {r1, r2}
2571+ @ read DACR from cpu_domain into r1
2572+ mov r2, sp
2573+ @ assume 8K stacks, since we have to split the immediate in two
2574+ bic r2, r2, #(0x1fc0)
2575+ bic r2, r2, #(0x3f)
2576+ ldr r1, [r2, #TI_CPU_DOMAIN]
2577+#ifdef CONFIG_PAX_KERNEXEC
2578+ @ set type of DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2579+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2580+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2581+#endif
2582+#ifdef CONFIG_PAX_MEMORY_UDEREF
2583+ @ set current DOMAIN_USER to DOMAIN_UDEREF
2584+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2585+ orr r1, r1, #(domain_val(DOMAIN_USER, DOMAIN_UDEREF))
2586+#endif
2587+ @ write r1 to current_thread_info()->cpu_domain
2588+ str r1, [r2, #TI_CPU_DOMAIN]
2589+ @ write r1 to DACR
2590+ mcr p15, 0, r1, c3, c0, 0
2591+ @ instruction sync
2592+ instr_sync
2593+ @ restore regs
2594+ ldmia sp!, {r1, r2}
2595+#endif
2596 .endm
2597 #endif
2598
2599-#include "entry-header.S"
2600-
2601-
2602 .align 5
2603 /*
2604 * This is the fast syscall return path. We do as little as
2605@@ -350,6 +378,7 @@ ENDPROC(ftrace_stub)
2606
2607 .align 5
2608 ENTRY(vector_swi)
2609+
2610 sub sp, sp, #S_FRAME_SIZE
2611 stmia sp, {r0 - r12} @ Calling r0 - r12
2612 ARM( add r8, sp, #S_PC )
2613@@ -399,6 +428,12 @@ ENTRY(vector_swi)
2614 ldr scno, [lr, #-4] @ get SWI instruction
2615 #endif
2616
2617+ /*
2618+ * do this here to avoid the performance hit of wrapping the code above
2619+ * that directly dereferences userland to parse the SWI instruction
2620+ */
2621+ pax_enter_kernel_user
2622+
2623 #ifdef CONFIG_ALIGNMENT_TRAP
2624 ldr ip, __cr_alignment
2625 ldr ip, [ip]
2626diff --git a/arch/arm/kernel/entry-header.S b/arch/arm/kernel/entry-header.S
2627index 160f337..db67ee4 100644
2628--- a/arch/arm/kernel/entry-header.S
2629+++ b/arch/arm/kernel/entry-header.S
2630@@ -73,6 +73,60 @@
2631 msr cpsr_c, \rtemp @ switch back to the SVC mode
2632 .endm
2633
2634+ .macro pax_enter_kernel_user
2635+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2636+ @ save regs
2637+ stmdb sp!, {r0, r1}
2638+ @ read DACR from cpu_domain into r1
2639+ mov r0, sp
2640+ @ assume 8K stacks, since we have to split the immediate in two
2641+ bic r0, r0, #(0x1fc0)
2642+ bic r0, r0, #(0x3f)
2643+ ldr r1, [r0, #TI_CPU_DOMAIN]
2644+#ifdef CONFIG_PAX_MEMORY_UDEREF
2645+ @ set current DOMAIN_USER to DOMAIN_NOACCESS
2646+ bic r1, r1, #(domain_val(DOMAIN_USER, 3))
2647+#endif
2648+#ifdef CONFIG_PAX_KERNEXEC
2649+ @ set current DOMAIN_KERNEL to DOMAIN_KERNELCLIENT
2650+ bic r1, r1, #(domain_val(DOMAIN_KERNEL, 3))
2651+ orr r1, r1, #(domain_val(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT))
2652+#endif
2653+ @ write r1 to current_thread_info()->cpu_domain
2654+ str r1, [r0, #TI_CPU_DOMAIN]
2655+ @ write r1 to DACR
2656+ mcr p15, 0, r1, c3, c0, 0
2657+ @ instruction sync
2658+ instr_sync
2659+ @ restore regs
2660+ ldmia sp!, {r0, r1}
2661+#endif
2662+ .endm
2663+
2664+ .macro pax_exit_kernel
2665+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
2666+ @ save regs
2667+ stmdb sp!, {r0, r1}
2668+ @ read old DACR from stack into r1
2669+ ldr r1, [sp, #(8 + S_SP)]
2670+ sub r1, r1, #8
2671+ ldr r1, [r1]
2672+
2673+ @ write r1 to current_thread_info()->cpu_domain
2674+ mov r0, sp
2675+ @ assume 8K stacks, since we have to split the immediate in two
2676+ bic r0, r0, #(0x1fc0)
2677+ bic r0, r0, #(0x3f)
2678+ str r1, [r0, #TI_CPU_DOMAIN]
2679+ @ write r1 to DACR
2680+ mcr p15, 0, r1, c3, c0, 0
2681+ @ instruction sync
2682+ instr_sync
2683+ @ restore regs
2684+ ldmia sp!, {r0, r1}
2685+#endif
2686+ .endm
2687+
2688 #ifndef CONFIG_THUMB2_KERNEL
2689 .macro svc_exit, rpsr, irq = 0
2690 .if \irq != 0
2691@@ -92,6 +146,9 @@
2692 blne trace_hardirqs_off
2693 #endif
2694 .endif
2695+
2696+ pax_exit_kernel
2697+
2698 msr spsr_cxsf, \rpsr
2699 #if defined(CONFIG_CPU_V6)
2700 ldr r0, [sp]
2701@@ -155,6 +212,9 @@
2702 blne trace_hardirqs_off
2703 #endif
2704 .endif
2705+
2706+ pax_exit_kernel
2707+
2708 ldr lr, [sp, #S_SP] @ top of the stack
2709 ldrd r0, r1, [sp, #S_LR] @ calling lr and pc
2710 clrex @ clear the exclusive monitor
2711diff --git a/arch/arm/kernel/fiq.c b/arch/arm/kernel/fiq.c
2712index 2adda11..7fbe958 100644
2713--- a/arch/arm/kernel/fiq.c
2714+++ b/arch/arm/kernel/fiq.c
2715@@ -82,7 +82,9 @@ void set_fiq_handler(void *start, unsigned int length)
2716 #if defined(CONFIG_CPU_USE_DOMAINS)
2717 memcpy((void *)0xffff001c, start, length);
2718 #else
2719+ pax_open_kernel();
2720 memcpy(vectors_page + 0x1c, start, length);
2721+ pax_close_kernel();
2722 #endif
2723 flush_icache_range(0xffff001c, 0xffff001c + length);
2724 if (!vectors_high())
2725diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S
2726index 8bac553..caee108 100644
2727--- a/arch/arm/kernel/head.S
2728+++ b/arch/arm/kernel/head.S
2729@@ -52,7 +52,9 @@
2730 .equ swapper_pg_dir, KERNEL_RAM_VADDR - PG_DIR_SIZE
2731
2732 .macro pgtbl, rd, phys
2733- add \rd, \phys, #TEXT_OFFSET - PG_DIR_SIZE
2734+ mov \rd, #TEXT_OFFSET
2735+ sub \rd, #PG_DIR_SIZE
2736+ add \rd, \rd, \phys
2737 .endm
2738
2739 /*
2740@@ -434,7 +436,7 @@ __enable_mmu:
2741 mov r5, #(domain_val(DOMAIN_USER, DOMAIN_MANAGER) | \
2742 domain_val(DOMAIN_KERNEL, DOMAIN_MANAGER) | \
2743 domain_val(DOMAIN_TABLE, DOMAIN_MANAGER) | \
2744- domain_val(DOMAIN_IO, DOMAIN_CLIENT))
2745+ domain_val(DOMAIN_IO, DOMAIN_KERNELCLIENT))
2746 mcr p15, 0, r5, c3, c0, 0 @ load domain access register
2747 mcr p15, 0, r4, c2, c0, 0 @ load page table pointer
2748 #endif
2749diff --git a/arch/arm/kernel/hw_breakpoint.c b/arch/arm/kernel/hw_breakpoint.c
2750index 1fd749e..47adb08 100644
2751--- a/arch/arm/kernel/hw_breakpoint.c
2752+++ b/arch/arm/kernel/hw_breakpoint.c
2753@@ -1029,7 +1029,7 @@ static int __cpuinit dbg_reset_notify(struct notifier_block *self,
2754 return NOTIFY_OK;
2755 }
2756
2757-static struct notifier_block __cpuinitdata dbg_reset_nb = {
2758+static struct notifier_block dbg_reset_nb = {
2759 .notifier_call = dbg_reset_notify,
2760 };
2761
2762diff --git a/arch/arm/kernel/module.c b/arch/arm/kernel/module.c
2763index 1e9be5d..03edbc2 100644
2764--- a/arch/arm/kernel/module.c
2765+++ b/arch/arm/kernel/module.c
2766@@ -37,12 +37,37 @@
2767 #endif
2768
2769 #ifdef CONFIG_MMU
2770-void *module_alloc(unsigned long size)
2771+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
2772 {
2773+ if (!size || PAGE_ALIGN(size) > MODULES_END - MODULES_VADDR)
2774+ return NULL;
2775 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
2776- GFP_KERNEL, PAGE_KERNEL_EXEC, -1,
2777+ GFP_KERNEL, prot, -1,
2778 __builtin_return_address(0));
2779 }
2780+
2781+void *module_alloc(unsigned long size)
2782+{
2783+
2784+#ifdef CONFIG_PAX_KERNEXEC
2785+ return __module_alloc(size, PAGE_KERNEL);
2786+#else
2787+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2788+#endif
2789+
2790+}
2791+
2792+#ifdef CONFIG_PAX_KERNEXEC
2793+void module_free_exec(struct module *mod, void *module_region)
2794+{
2795+ module_free(mod, module_region);
2796+}
2797+
2798+void *module_alloc_exec(unsigned long size)
2799+{
2800+ return __module_alloc(size, PAGE_KERNEL_EXEC);
2801+}
2802+#endif
2803 #endif
2804
2805 int
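
The module_alloc() rework above is the KERNEXEC W^X split for modules: the default allocation drops to PAGE_KERNEL (writable but non-executable) and a dedicated module_alloc_exec()/module_free_exec() pair supplies PAGE_KERNEL_EXEC memory for code, so no module mapping is ever writable and executable at the same time. A reduced sketch of the resulting policy (the _SKETCH names are placeholders, not kernel symbols):

    #include <stdbool.h>

    typedef int pgprot_sketch;
    #define PAGE_KERNEL_SKETCH      1  /* RW, non-executable */
    #define PAGE_KERNEL_EXEC_SKETCH 2  /* executable */

    static pgprot_sketch module_prot(bool kernexec, bool for_text)
    {
            if (!kernexec)
                    return PAGE_KERNEL_EXEC_SKETCH; /* single mapping, as before */
            return for_text ? PAGE_KERNEL_EXEC_SKETCH : PAGE_KERNEL_SKETCH;
    }
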
2806diff --git a/arch/arm/kernel/patch.c b/arch/arm/kernel/patch.c
2807index 07314af..c46655c 100644
2808--- a/arch/arm/kernel/patch.c
2809+++ b/arch/arm/kernel/patch.c
2810@@ -18,6 +18,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2811 bool thumb2 = IS_ENABLED(CONFIG_THUMB2_KERNEL);
2812 int size;
2813
2814+ pax_open_kernel();
2815 if (thumb2 && __opcode_is_thumb16(insn)) {
2816 *(u16 *)addr = __opcode_to_mem_thumb16(insn);
2817 size = sizeof(u16);
2818@@ -39,6 +40,7 @@ void __kprobes __patch_text(void *addr, unsigned int insn)
2819 *(u32 *)addr = insn;
2820 size = sizeof(u32);
2821 }
2822+ pax_close_kernel();
2823
2824 flush_icache_range((uintptr_t)(addr),
2825 (uintptr_t)(addr) + size);
2826diff --git a/arch/arm/kernel/perf_event_cpu.c b/arch/arm/kernel/perf_event_cpu.c
2827index 1f2740e..b36e225 100644
2828--- a/arch/arm/kernel/perf_event_cpu.c
2829+++ b/arch/arm/kernel/perf_event_cpu.c
2830@@ -171,7 +171,7 @@ static int __cpuinit cpu_pmu_notify(struct notifier_block *b,
2831 return NOTIFY_OK;
2832 }
2833
2834-static struct notifier_block __cpuinitdata cpu_pmu_hotplug_notifier = {
2835+static struct notifier_block cpu_pmu_hotplug_notifier = {
2836 .notifier_call = cpu_pmu_notify,
2837 };
2838
2839diff --git a/arch/arm/kernel/process.c b/arch/arm/kernel/process.c
2840index 6e8931c..82ec6a5 100644
2841--- a/arch/arm/kernel/process.c
2842+++ b/arch/arm/kernel/process.c
2843@@ -28,7 +28,6 @@
2844 #include <linux/tick.h>
2845 #include <linux/utsname.h>
2846 #include <linux/uaccess.h>
2847-#include <linux/random.h>
2848 #include <linux/hw_breakpoint.h>
2849 #include <linux/cpuidle.h>
2850 #include <linux/leds.h>
2851@@ -223,6 +222,7 @@ void machine_power_off(void)
2852
2853 if (pm_power_off)
2854 pm_power_off();
2855+ BUG();
2856 }
2857
2858 /*
2859@@ -236,7 +236,7 @@ void machine_power_off(void)
2860 * executing pre-reset code, and using RAM that the primary CPU's code wishes
2861 * to use. Implementing such co-ordination would be essentially impossible.
2862 */
2863-void machine_restart(char *cmd)
2864+__noreturn void machine_restart(char *cmd)
2865 {
2866 smp_send_stop();
2867
2868@@ -258,8 +258,8 @@ void __show_regs(struct pt_regs *regs)
2869
2870 show_regs_print_info(KERN_DEFAULT);
2871
2872- print_symbol("PC is at %s\n", instruction_pointer(regs));
2873- print_symbol("LR is at %s\n", regs->ARM_lr);
2874+ printk("PC is at %pA\n", instruction_pointer(regs));
2875+ printk("LR is at %pA\n", regs->ARM_lr);
2876 printk("pc : [<%08lx>] lr : [<%08lx>] psr: %08lx\n"
2877 "sp : %08lx ip : %08lx fp : %08lx\n",
2878 regs->ARM_pc, regs->ARM_lr, regs->ARM_cpsr,
2879@@ -426,12 +426,6 @@ unsigned long get_wchan(struct task_struct *p)
2880 return 0;
2881 }
2882
2883-unsigned long arch_randomize_brk(struct mm_struct *mm)
2884-{
2885- unsigned long range_end = mm->brk + 0x02000000;
2886- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
2887-}
2888-
2889 #ifdef CONFIG_MMU
2890 /*
2891 * The vectors page is always readable from user space for the
2892@@ -441,12 +435,12 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
2893 static struct vm_area_struct gate_vma = {
2894 .vm_start = 0xffff0000,
2895 .vm_end = 0xffff0000 + PAGE_SIZE,
2896- .vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC,
2897+ .vm_flags = VM_NONE,
2898 };
2899
2900 static int __init gate_vma_init(void)
2901 {
2902- gate_vma.vm_page_prot = PAGE_READONLY_EXEC;
2903+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
2904 return 0;
2905 }
2906 arch_initcall(gate_vma_init);
2907diff --git a/arch/arm/kernel/psci.c b/arch/arm/kernel/psci.c
2908index 3653164..d83e55d 100644
2909--- a/arch/arm/kernel/psci.c
2910+++ b/arch/arm/kernel/psci.c
2911@@ -24,7 +24,7 @@
2912 #include <asm/opcodes-virt.h>
2913 #include <asm/psci.h>
2914
2915-struct psci_operations psci_ops;
2916+struct psci_operations psci_ops __read_only;
2917
2918 static int (*invoke_psci_fn)(u32, u32, u32, u32);
2919
2920diff --git a/arch/arm/kernel/ptrace.c b/arch/arm/kernel/ptrace.c
2921index 03deeff..741ce88 100644
2922--- a/arch/arm/kernel/ptrace.c
2923+++ b/arch/arm/kernel/ptrace.c
2924@@ -937,10 +937,19 @@ static int tracehook_report_syscall(struct pt_regs *regs,
2925 return current_thread_info()->syscall;
2926 }
2927
2928+#ifdef CONFIG_GRKERNSEC_SETXID
2929+extern void gr_delayed_cred_worker(void);
2930+#endif
2931+
2932 asmlinkage int syscall_trace_enter(struct pt_regs *regs, int scno)
2933 {
2934 current_thread_info()->syscall = scno;
2935
2936+#ifdef CONFIG_GRKERNSEC_SETXID
2937+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
2938+ gr_delayed_cred_worker();
2939+#endif
2940+
2941 /* Do the secure computing check first; failures should be fast. */
2942 if (secure_computing(scno) == -1)
2943 return -1;
2944diff --git a/arch/arm/kernel/setup.c b/arch/arm/kernel/setup.c
2945index b4b1d39..efdc9be 100644
2946--- a/arch/arm/kernel/setup.c
2947+++ b/arch/arm/kernel/setup.c
2948@@ -97,21 +97,23 @@ EXPORT_SYMBOL(system_serial_high);
2949 unsigned int elf_hwcap __read_mostly;
2950 EXPORT_SYMBOL(elf_hwcap);
2951
2952+pteval_t __supported_pte_mask __read_only;
2953+pmdval_t __supported_pmd_mask __read_only;
2954
2955 #ifdef MULTI_CPU
2956-struct processor processor __read_mostly;
2957+struct processor processor;
2958 #endif
2959 #ifdef MULTI_TLB
2960-struct cpu_tlb_fns cpu_tlb __read_mostly;
2961+struct cpu_tlb_fns cpu_tlb __read_only;
2962 #endif
2963 #ifdef MULTI_USER
2964-struct cpu_user_fns cpu_user __read_mostly;
2965+struct cpu_user_fns cpu_user __read_only;
2966 #endif
2967 #ifdef MULTI_CACHE
2968-struct cpu_cache_fns cpu_cache __read_mostly;
2969+struct cpu_cache_fns cpu_cache __read_only;
2970 #endif
2971 #ifdef CONFIG_OUTER_CACHE
2972-struct outer_cache_fns outer_cache __read_mostly;
2973+struct outer_cache_fns outer_cache __read_only;
2974 EXPORT_SYMBOL(outer_cache);
2975 #endif
2976
2977@@ -236,9 +238,13 @@ static int __get_cpu_architecture(void)
2978 asm("mrc p15, 0, %0, c0, c1, 4"
2979 : "=r" (mmfr0));
2980 if ((mmfr0 & 0x0000000f) >= 0x00000003 ||
2981- (mmfr0 & 0x000000f0) >= 0x00000030)
2982+ (mmfr0 & 0x000000f0) >= 0x00000030) {
2983 cpu_arch = CPU_ARCH_ARMv7;
2984- else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
2985+ if ((mmfr0 & 0x0000000f) == 0x00000005 || (mmfr0 & 0x0000000f) == 0x00000004) {
2986+ __supported_pte_mask |= L_PTE_PXN;
2987+ __supported_pmd_mask |= PMD_PXNTABLE;
2988+ }
2989+ } else if ((mmfr0 & 0x0000000f) == 0x00000002 ||
2990 (mmfr0 & 0x000000f0) == 0x00000020)
2991 cpu_arch = CPU_ARCH_ARMv6;
2992 else
2993@@ -479,7 +485,7 @@ static void __init setup_processor(void)
2994 __cpu_architecture = __get_cpu_architecture();
2995
2996 #ifdef MULTI_CPU
2997- processor = *list->proc;
2998+ memcpy((void *)&processor, list->proc, sizeof processor);
2999 #endif
3000 #ifdef MULTI_TLB
3001 cpu_tlb = *list->tlb;
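
The ID_MMFR0 test added to __get_cpu_architecture() above gates PXN detection: bits [3:0] of ID_MMFR0 report the VMSA support level, and values 4 (VMSAv7 with PXN) and 5 (VMSAv7 with the long-descriptor format, which also has PXN) are the ones where the descriptor bits defined earlier actually exist, so only then are L_PTE_PXN and PMD_PXNTABLE folded into the supported masks. The decode in isolation:

    /* bits [3:0] of ID_MMFR0: VMSA support level */
    static int vmsa_has_pxn(unsigned int mmfr0)
    {
            unsigned int vmsa = mmfr0 & 0xfu;

            return vmsa == 4 || vmsa == 5;  /* VMSAv7 + PXN; 5 adds LPAE */
    }
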
3002diff --git a/arch/arm/kernel/signal.c b/arch/arm/kernel/signal.c
3003index 296786b..a8d4dd5 100644
3004--- a/arch/arm/kernel/signal.c
3005+++ b/arch/arm/kernel/signal.c
3006@@ -396,22 +396,14 @@ setup_return(struct pt_regs *regs, struct ksignal *ksig,
3007 __put_user(sigreturn_codes[idx+1], rc+1))
3008 return 1;
3009
3010- if (cpsr & MODE32_BIT) {
3011- /*
3012- * 32-bit code can use the new high-page
3013- * signal return code support.
3014- */
3015- retcode = KERN_SIGRETURN_CODE + (idx << 2) + thumb;
3016- } else {
3017- /*
3018- * Ensure that the instruction cache sees
3019- * the return code written onto the stack.
3020- */
3021- flush_icache_range((unsigned long)rc,
3022- (unsigned long)(rc + 2));
3023+ /*
3024+ * Ensure that the instruction cache sees
3025+ * the return code written onto the stack.
3026+ */
3027+ flush_icache_range((unsigned long)rc,
3028+ (unsigned long)(rc + 2));
3029
3030- retcode = ((unsigned long)rc) + thumb;
3031- }
3032+ retcode = ((unsigned long)rc) + thumb;
3033 }
3034
3035 regs->ARM_r0 = map_sig(ksig->sig);
3036diff --git a/arch/arm/kernel/smp.c b/arch/arm/kernel/smp.c
3037index 5919eb4..b5d6dfe 100644
3038--- a/arch/arm/kernel/smp.c
3039+++ b/arch/arm/kernel/smp.c
3040@@ -70,7 +70,7 @@ enum ipi_msg_type {
3041
3042 static DECLARE_COMPLETION(cpu_running);
3043
3044-static struct smp_operations smp_ops;
3045+static struct smp_operations smp_ops __read_only;
3046
3047 void __init smp_set_ops(struct smp_operations *ops)
3048 {
3049diff --git a/arch/arm/kernel/traps.c b/arch/arm/kernel/traps.c
3050index 18b32e8..b0c8dca 100644
3051--- a/arch/arm/kernel/traps.c
3052+++ b/arch/arm/kernel/traps.c
3053@@ -57,7 +57,7 @@ static void dump_mem(const char *, const char *, unsigned long, unsigned long);
3054 void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
3055 {
3056 #ifdef CONFIG_KALLSYMS
3057- printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
3058+ printk("[<%08lx>] (%pA) from [<%08lx>] (%pA)\n", where, (void *)where, from, (void *)from);
3059 #else
3060 printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
3061 #endif
3062@@ -259,6 +259,8 @@ static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
3063 static int die_owner = -1;
3064 static unsigned int die_nest_count;
3065
3066+extern void gr_handle_kernel_exploit(void);
3067+
3068 static unsigned long oops_begin(void)
3069 {
3070 int cpu;
3071@@ -301,6 +303,9 @@ static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
3072 panic("Fatal exception in interrupt");
3073 if (panic_on_oops)
3074 panic("Fatal exception");
3075+
3076+ gr_handle_kernel_exploit();
3077+
3078 if (signr)
3079 do_exit(signr);
3080 }
3081@@ -594,7 +599,9 @@ asmlinkage int arm_syscall(int no, struct pt_regs *regs)
3082 * The user helper at 0xffff0fe0 must be used instead.
3083 * (see entry-armv.S for details)
3084 */
3085+ pax_open_kernel();
3086 *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
3087+ pax_close_kernel();
3088 }
3089 return 0;
3090
3091@@ -834,13 +841,10 @@ void __init early_trap_init(void *vectors_base)
3092 */
3093 kuser_get_tls_init(vectors);
3094
3095- /*
3096- * Copy signal return handlers into the vector page, and
3097- * set sigreturn to be a pointer to these.
3098- */
3099- memcpy((void *)(vectors + KERN_SIGRETURN_CODE - CONFIG_VECTORS_BASE),
3100- sigreturn_codes, sizeof(sigreturn_codes));
3101-
3102 flush_icache_range(vectors, vectors + PAGE_SIZE);
3103- modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
3104+
3105+#ifndef CONFIG_PAX_MEMORY_UDEREF
3106+ modify_domain(DOMAIN_USER, DOMAIN_USERCLIENT);
3107+#endif
3108+
3109 }
3110diff --git a/arch/arm/kernel/vmlinux.lds.S b/arch/arm/kernel/vmlinux.lds.S
3111index a871b8e..123b00a 100644
3112--- a/arch/arm/kernel/vmlinux.lds.S
3113+++ b/arch/arm/kernel/vmlinux.lds.S
3114@@ -8,7 +8,11 @@
3115 #include <asm/thread_info.h>
3116 #include <asm/memory.h>
3117 #include <asm/page.h>
3118-
3119+
3120+#ifdef CONFIG_PAX_KERNEXEC
3121+#include <asm/pgtable.h>
3122+#endif
3123+
3124 #define PROC_INFO \
3125 . = ALIGN(4); \
3126 VMLINUX_SYMBOL(__proc_info_begin) = .; \
3127@@ -94,6 +98,11 @@ SECTIONS
3128 _text = .;
3129 HEAD_TEXT
3130 }
3131+
3132+#ifdef CONFIG_PAX_KERNEXEC
3133+ . = ALIGN(1<<SECTION_SHIFT);
3134+#endif
3135+
3136 .text : { /* Real text segment */
3137 _stext = .; /* Text and read-only data */
3138 __exception_text_start = .;
3139@@ -116,6 +125,8 @@ SECTIONS
3140 ARM_CPU_KEEP(PROC_INFO)
3141 }
3142
3143+ _etext = .; /* End of text section */
3144+
3145 RO_DATA(PAGE_SIZE)
3146
3147 . = ALIGN(4);
3148@@ -146,7 +157,9 @@ SECTIONS
3149
3150 NOTES
3151
3152- _etext = .; /* End of text and rodata section */
3153+#ifdef CONFIG_PAX_KERNEXEC
3154+ . = ALIGN(1<<SECTION_SHIFT);
3155+#endif
3156
3157 #ifndef CONFIG_XIP_KERNEL
3158 . = ALIGN(PAGE_SIZE);
3159@@ -207,6 +220,11 @@ SECTIONS
3160 . = PAGE_OFFSET + TEXT_OFFSET;
3161 #else
3162 __init_end = .;
3163+
3164+#ifdef CONFIG_PAX_KERNEXEC
3165+ . = ALIGN(1<<SECTION_SHIFT);
3166+#endif
3167+
3168 . = ALIGN(THREAD_SIZE);
3169 __data_loc = .;
3170 #endif
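
The ALIGN(1<<SECTION_SHIFT) padding inserted at the text, rodata and data boundaries above lets KERNEXEC enforce permissions with whole section-granular page-table entries: on classic 2-level ARM, SECTION_SHIFT is 20, so each boundary rounds up to 1 MiB and an entire hardware section can be marked RX or RW without splitting. The rounding itself:

    #define SECTION_SHIFT 20  /* 1 MiB sections on classic 2-level ARM */

    static unsigned long section_align(unsigned long addr)
    {
            unsigned long mask = (1UL << SECTION_SHIFT) - 1;

            return (addr + mask) & ~mask;
    }
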
3171diff --git a/arch/arm/lib/clear_user.S b/arch/arm/lib/clear_user.S
3172index 14a0d98..7771a7d 100644
3173--- a/arch/arm/lib/clear_user.S
3174+++ b/arch/arm/lib/clear_user.S
3175@@ -12,14 +12,14 @@
3176
3177 .text
3178
3179-/* Prototype: int __clear_user(void *addr, size_t sz)
3180+/* Prototype: int ___clear_user(void *addr, size_t sz)
3181 * Purpose : clear some user memory
3182 * Params : addr - user memory address to clear
3183 * : sz - number of bytes to clear
3184 * Returns : number of bytes NOT cleared
3185 */
3186 ENTRY(__clear_user_std)
3187-WEAK(__clear_user)
3188+WEAK(___clear_user)
3189 stmfd sp!, {r1, lr}
3190 mov r2, #0
3191 cmp r1, #4
3192@@ -44,7 +44,7 @@ WEAK(__clear_user)
3193 USER( strnebt r2, [r0])
3194 mov r0, #0
3195 ldmfd sp!, {r1, pc}
3196-ENDPROC(__clear_user)
3197+ENDPROC(___clear_user)
3198 ENDPROC(__clear_user_std)
3199
3200 .pushsection .fixup,"ax"
3201diff --git a/arch/arm/lib/copy_from_user.S b/arch/arm/lib/copy_from_user.S
3202index 66a477a..bee61d3 100644
3203--- a/arch/arm/lib/copy_from_user.S
3204+++ b/arch/arm/lib/copy_from_user.S
3205@@ -16,7 +16,7 @@
3206 /*
3207 * Prototype:
3208 *
3209- * size_t __copy_from_user(void *to, const void *from, size_t n)
3210+ * size_t ___copy_from_user(void *to, const void *from, size_t n)
3211 *
3212 * Purpose:
3213 *
3214@@ -84,11 +84,11 @@
3215
3216 .text
3217
3218-ENTRY(__copy_from_user)
3219+ENTRY(___copy_from_user)
3220
3221 #include "copy_template.S"
3222
3223-ENDPROC(__copy_from_user)
3224+ENDPROC(___copy_from_user)
3225
3226 .pushsection .fixup,"ax"
3227 .align 0
3228diff --git a/arch/arm/lib/copy_page.S b/arch/arm/lib/copy_page.S
3229index 6ee2f67..d1cce76 100644
3230--- a/arch/arm/lib/copy_page.S
3231+++ b/arch/arm/lib/copy_page.S
3232@@ -10,6 +10,7 @@
3233 * ASM optimised string functions
3234 */
3235 #include <linux/linkage.h>
3236+#include <linux/const.h>
3237 #include <asm/assembler.h>
3238 #include <asm/asm-offsets.h>
3239 #include <asm/cache.h>
3240diff --git a/arch/arm/lib/copy_to_user.S b/arch/arm/lib/copy_to_user.S
3241index d066df6..df28194 100644
3242--- a/arch/arm/lib/copy_to_user.S
3243+++ b/arch/arm/lib/copy_to_user.S
3244@@ -16,7 +16,7 @@
3245 /*
3246 * Prototype:
3247 *
3248- * size_t __copy_to_user(void *to, const void *from, size_t n)
3249+ * size_t ___copy_to_user(void *to, const void *from, size_t n)
3250 *
3251 * Purpose:
3252 *
3253@@ -88,11 +88,11 @@
3254 .text
3255
3256 ENTRY(__copy_to_user_std)
3257-WEAK(__copy_to_user)
3258+WEAK(___copy_to_user)
3259
3260 #include "copy_template.S"
3261
3262-ENDPROC(__copy_to_user)
3263+ENDPROC(___copy_to_user)
3264 ENDPROC(__copy_to_user_std)
3265
3266 .pushsection .fixup,"ax"
3267diff --git a/arch/arm/lib/csumpartialcopyuser.S b/arch/arm/lib/csumpartialcopyuser.S
3268index 7d08b43..f7ca7ea 100644
3269--- a/arch/arm/lib/csumpartialcopyuser.S
3270+++ b/arch/arm/lib/csumpartialcopyuser.S
3271@@ -57,8 +57,8 @@
3272 * Returns : r0 = checksum, [[sp, #0], #0] = 0 or -EFAULT
3273 */
3274
3275-#define FN_ENTRY ENTRY(csum_partial_copy_from_user)
3276-#define FN_EXIT ENDPROC(csum_partial_copy_from_user)
3277+#define FN_ENTRY ENTRY(__csum_partial_copy_from_user)
3278+#define FN_EXIT ENDPROC(__csum_partial_copy_from_user)
3279
3280 #include "csumpartialcopygeneric.S"
3281
3282diff --git a/arch/arm/lib/delay.c b/arch/arm/lib/delay.c
3283index 64dbfa5..84a3fd9 100644
3284--- a/arch/arm/lib/delay.c
3285+++ b/arch/arm/lib/delay.c
3286@@ -28,7 +28,7 @@
3287 /*
3288 * Default to the loop-based delay implementation.
3289 */
3290-struct arm_delay_ops arm_delay_ops = {
3291+struct arm_delay_ops arm_delay_ops __read_only = {
3292 .delay = __loop_delay,
3293 .const_udelay = __loop_const_udelay,
3294 .udelay = __loop_udelay,
3295diff --git a/arch/arm/lib/uaccess_with_memcpy.c b/arch/arm/lib/uaccess_with_memcpy.c
3296index 025f742..8432b08 100644
3297--- a/arch/arm/lib/uaccess_with_memcpy.c
3298+++ b/arch/arm/lib/uaccess_with_memcpy.c
3299@@ -104,7 +104,7 @@ out:
3300 }
3301
3302 unsigned long
3303-__copy_to_user(void __user *to, const void *from, unsigned long n)
3304+___copy_to_user(void __user *to, const void *from, unsigned long n)
3305 {
3306 /*
3307 * This test is stubbed out of the main function above to keep
3308diff --git a/arch/arm/mach-kirkwood/common.c b/arch/arm/mach-kirkwood/common.c
3309index f389228..592ef66 100644
3310--- a/arch/arm/mach-kirkwood/common.c
3311+++ b/arch/arm/mach-kirkwood/common.c
3312@@ -149,7 +149,16 @@ static void clk_gate_fn_disable(struct clk_hw *hw)
3313 clk_gate_ops.disable(hw);
3314 }
3315
3316-static struct clk_ops clk_gate_fn_ops;
3317+static int clk_gate_fn_is_enabled(struct clk_hw *hw)
3318+{
3319+ return clk_gate_ops.is_enabled(hw);
3320+}
3321+
3322+static struct clk_ops clk_gate_fn_ops = {
3323+ .enable = clk_gate_fn_enable,
3324+ .disable = clk_gate_fn_disable,
3325+ .is_enabled = clk_gate_fn_is_enabled,
3326+};
3327
3328 static struct clk __init *clk_register_gate_fn(struct device *dev,
3329 const char *name,
3330@@ -183,14 +192,6 @@ static struct clk __init *clk_register_gate_fn(struct device *dev,
3331 gate_fn->fn_en = fn_en;
3332 gate_fn->fn_dis = fn_dis;
3333
3334- /* ops is the gate ops, but with our enable/disable functions */
3335- if (clk_gate_fn_ops.enable != clk_gate_fn_enable ||
3336- clk_gate_fn_ops.disable != clk_gate_fn_disable) {
3337- clk_gate_fn_ops = clk_gate_ops;
3338- clk_gate_fn_ops.enable = clk_gate_fn_enable;
3339- clk_gate_fn_ops.disable = clk_gate_fn_disable;
3340- }
3341-
3342 clk = clk_register(dev, &gate_fn->gate.hw);
3343
3344 if (IS_ERR(clk))
3345diff --git a/arch/arm/mach-omap2/board-n8x0.c b/arch/arm/mach-omap2/board-n8x0.c
3346index f6eeb87..cc90868 100644
3347--- a/arch/arm/mach-omap2/board-n8x0.c
3348+++ b/arch/arm/mach-omap2/board-n8x0.c
3349@@ -631,7 +631,7 @@ static int n8x0_menelaus_late_init(struct device *dev)
3350 }
3351 #endif
3352
3353-static struct menelaus_platform_data n8x0_menelaus_platform_data __initdata = {
3354+static struct menelaus_platform_data n8x0_menelaus_platform_data __initconst = {
3355 .late_init = n8x0_menelaus_late_init,
3356 };
3357
3358diff --git a/arch/arm/mach-omap2/gpmc.c b/arch/arm/mach-omap2/gpmc.c
3359index 6c4da12..d9ca72d 100644
3360--- a/arch/arm/mach-omap2/gpmc.c
3361+++ b/arch/arm/mach-omap2/gpmc.c
3362@@ -147,7 +147,6 @@ struct omap3_gpmc_regs {
3363 };
3364
3365 static struct gpmc_client_irq gpmc_client_irq[GPMC_NR_IRQ];
3366-static struct irq_chip gpmc_irq_chip;
3367 static unsigned gpmc_irq_start;
3368
3369 static struct resource gpmc_mem_root;
3370@@ -711,6 +710,18 @@ static void gpmc_irq_noop(struct irq_data *data) { }
3371
3372 static unsigned int gpmc_irq_noop_ret(struct irq_data *data) { return 0; }
3373
3374+static struct irq_chip gpmc_irq_chip = {
3375+ .name = "gpmc",
3376+ .irq_startup = gpmc_irq_noop_ret,
3377+ .irq_enable = gpmc_irq_enable,
3378+ .irq_disable = gpmc_irq_disable,
3379+ .irq_shutdown = gpmc_irq_noop,
3380+ .irq_ack = gpmc_irq_noop,
3381+ .irq_mask = gpmc_irq_noop,
3382+ .irq_unmask = gpmc_irq_noop,
3383+
3384+};
3385+
3386 static int gpmc_setup_irq(void)
3387 {
3388 int i;
3389@@ -725,15 +736,6 @@ static int gpmc_setup_irq(void)
3390 return gpmc_irq_start;
3391 }
3392
3393- gpmc_irq_chip.name = "gpmc";
3394- gpmc_irq_chip.irq_startup = gpmc_irq_noop_ret;
3395- gpmc_irq_chip.irq_enable = gpmc_irq_enable;
3396- gpmc_irq_chip.irq_disable = gpmc_irq_disable;
3397- gpmc_irq_chip.irq_shutdown = gpmc_irq_noop;
3398- gpmc_irq_chip.irq_ack = gpmc_irq_noop;
3399- gpmc_irq_chip.irq_mask = gpmc_irq_noop;
3400- gpmc_irq_chip.irq_unmask = gpmc_irq_noop;
3401-
3402 gpmc_client_irq[0].bitmask = GPMC_IRQ_FIFOEVENTENABLE;
3403 gpmc_client_irq[1].bitmask = GPMC_IRQ_COUNT_EVENT;
3404
3405diff --git a/arch/arm/mach-omap2/omap-wakeupgen.c b/arch/arm/mach-omap2/omap-wakeupgen.c
3406index f8bb3b9..831e7b8 100644
3407--- a/arch/arm/mach-omap2/omap-wakeupgen.c
3408+++ b/arch/arm/mach-omap2/omap-wakeupgen.c
3409@@ -339,7 +339,7 @@ static int __cpuinit irq_cpu_hotplug_notify(struct notifier_block *self,
3410 return NOTIFY_OK;
3411 }
3412
3413-static struct notifier_block __refdata irq_hotplug_notifier = {
3414+static struct notifier_block irq_hotplug_notifier = {
3415 .notifier_call = irq_cpu_hotplug_notify,
3416 };
3417
3418diff --git a/arch/arm/mach-omap2/omap_device.c b/arch/arm/mach-omap2/omap_device.c
3419index e6d2307..d057195 100644
3420--- a/arch/arm/mach-omap2/omap_device.c
3421+++ b/arch/arm/mach-omap2/omap_device.c
3422@@ -499,7 +499,7 @@ void omap_device_delete(struct omap_device *od)
3423 struct platform_device __init *omap_device_build(const char *pdev_name,
3424 int pdev_id,
3425 struct omap_hwmod *oh,
3426- void *pdata, int pdata_len)
3427+ const void *pdata, int pdata_len)
3428 {
3429 struct omap_hwmod *ohs[] = { oh };
3430
3431@@ -527,7 +527,7 @@ struct platform_device __init *omap_device_build(const char *pdev_name,
3432 struct platform_device __init *omap_device_build_ss(const char *pdev_name,
3433 int pdev_id,
3434 struct omap_hwmod **ohs,
3435- int oh_cnt, void *pdata,
3436+ int oh_cnt, const void *pdata,
3437 int pdata_len)
3438 {
3439 int ret = -ENOMEM;
3440diff --git a/arch/arm/mach-omap2/omap_device.h b/arch/arm/mach-omap2/omap_device.h
3441index 044c31d..2ee0861 100644
3442--- a/arch/arm/mach-omap2/omap_device.h
3443+++ b/arch/arm/mach-omap2/omap_device.h
3444@@ -72,12 +72,12 @@ int omap_device_idle(struct platform_device *pdev);
3445 /* Core code interface */
3446
3447 struct platform_device *omap_device_build(const char *pdev_name, int pdev_id,
3448- struct omap_hwmod *oh, void *pdata,
3449+ struct omap_hwmod *oh, const void *pdata,
3450 int pdata_len);
3451
3452 struct platform_device *omap_device_build_ss(const char *pdev_name, int pdev_id,
3453 struct omap_hwmod **oh, int oh_cnt,
3454- void *pdata, int pdata_len);
3455+ const void *pdata, int pdata_len);
3456
3457 struct omap_device *omap_device_alloc(struct platform_device *pdev,
3458 struct omap_hwmod **ohs, int oh_cnt);
3459diff --git a/arch/arm/mach-omap2/omap_hwmod.c b/arch/arm/mach-omap2/omap_hwmod.c
3460index 7341eff..fd75e34 100644
3461--- a/arch/arm/mach-omap2/omap_hwmod.c
3462+++ b/arch/arm/mach-omap2/omap_hwmod.c
3463@@ -194,10 +194,10 @@ struct omap_hwmod_soc_ops {
3464 int (*init_clkdm)(struct omap_hwmod *oh);
3465 void (*update_context_lost)(struct omap_hwmod *oh);
3466 int (*get_context_lost)(struct omap_hwmod *oh);
3467-};
3468+} __no_const;
3469
3470 /* soc_ops: adapts the omap_hwmod code to the currently-booted SoC */
3471-static struct omap_hwmod_soc_ops soc_ops;
3472+static struct omap_hwmod_soc_ops soc_ops __read_only;
3473
3474 /* omap_hwmod_list contains all registered struct omap_hwmods */
3475 static LIST_HEAD(omap_hwmod_list);
diff --git a/arch/arm/mach-omap2/wd_timer.c b/arch/arm/mach-omap2/wd_timer.c
index d15c7bb..b2d1f0c 100644
--- a/arch/arm/mach-omap2/wd_timer.c
+++ b/arch/arm/mach-omap2/wd_timer.c
@@ -110,7 +110,9 @@ static int __init omap_init_wdt(void)
 	struct omap_hwmod *oh;
 	char *oh_name = "wd_timer2";
 	char *dev_name = "omap_wdt";
-	struct omap_wd_timer_platform_data pdata;
+	static struct omap_wd_timer_platform_data pdata = {
+		.read_reset_sources = prm_read_reset_sources
+	};
 
 	if (!cpu_class_is_omap2() || of_have_populated_dt())
 		return 0;
@@ -121,8 +123,6 @@ static int __init omap_init_wdt(void)
 		return -EINVAL;
 	}
 
-	pdata.read_reset_sources = prm_read_reset_sources;
-
 	pdev = omap_device_build(dev_name, id, oh, &pdata,
 				 sizeof(struct omap_wd_timer_platform_data));
 	WARN(IS_ERR(pdev), "Can't build omap_device for %s:%s.\n",
diff --git a/arch/arm/mach-tegra/cpuidle-tegra20.c b/arch/arm/mach-tegra/cpuidle-tegra20.c
index 0cdba8d..297993e 100644
--- a/arch/arm/mach-tegra/cpuidle-tegra20.c
+++ b/arch/arm/mach-tegra/cpuidle-tegra20.c
@@ -181,7 +181,7 @@ static int tegra20_idle_lp2_coupled(struct cpuidle_device *dev,
 	bool entered_lp2 = false;
 
 	if (tegra_pending_sgi())
-		ACCESS_ONCE(abort_flag) = true;
+		ACCESS_ONCE_RW(abort_flag) = true;
 
 	cpuidle_coupled_parallel_barrier(dev, &abort_barrier);
 
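PaX redefines ACCESS_ONCE() with a const-qualified cast so that, once objects are constified, stray writes through it fail to compile; deliberate writes are spelled ACCESS_ONCE_RW(). A minimal sketch of the assumed pair (read semantics are unchanged from mainline):

	#define ACCESS_ONCE(x)		(*(const volatile typeof(x) *)&(x))	/* read-only view */
	#define ACCESS_ONCE_RW(x)	(*(volatile typeof(x) *)&(x))		/* mainline definition */
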
diff --git a/arch/arm/mach-ux500/setup.h b/arch/arm/mach-ux500/setup.h
index cad3ca86..1d79e0f 100644
--- a/arch/arm/mach-ux500/setup.h
+++ b/arch/arm/mach-ux500/setup.h
@@ -37,13 +37,6 @@ extern void ux500_timer_init(void);
 	.type		= MT_DEVICE,		\
 }
 
-#define __MEM_DEV_DESC(x, sz)	{		\
-	.virtual	= IO_ADDRESS(x),	\
-	.pfn		= __phys_to_pfn(x),	\
-	.length		= sz,			\
-	.type		= MT_MEMORY,		\
-}
-
 extern struct smp_operations ux500_smp_ops;
 extern void ux500_cpu_die(unsigned int cpu);
 
diff --git a/arch/arm/mm/Kconfig b/arch/arm/mm/Kconfig
index 35955b5..b475042 100644
--- a/arch/arm/mm/Kconfig
+++ b/arch/arm/mm/Kconfig
@@ -432,7 +432,7 @@ config CPU_32v5
 
 config CPU_32v6
 	bool
-	select CPU_USE_DOMAINS if CPU_V6 && MMU
+	select CPU_USE_DOMAINS if CPU_V6 && MMU && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
 	select TLS_REG_EMUL if !CPU_32v6K && !MMU
 
 config CPU_32v6K
@@ -581,6 +581,7 @@ config CPU_CP15_MPU
 
 config CPU_USE_DOMAINS
 	bool
+	depends on !ARM_LPAE && !PAX_KERNEXEC
 	help
 	  This option enables or disables the use of domain switching
 	  via the set_fs() function.
diff --git a/arch/arm/mm/alignment.c b/arch/arm/mm/alignment.c
index 6f4585b..7b6f52b 100644
--- a/arch/arm/mm/alignment.c
+++ b/arch/arm/mm/alignment.c
@@ -211,10 +211,12 @@ union offset_union {
 #define __get16_unaligned_check(ins,val,addr)			\
 	do {							\
 		unsigned int err = 0, v, a = addr;		\
+		pax_open_userland();				\
 		__get8_unaligned_check(ins,v,a,err);		\
 		val = v << ((BE) ? 8 : 0);			\
 		__get8_unaligned_check(ins,v,a,err);		\
 		val |= v << ((BE) ? 0 : 8);			\
+		pax_close_userland();				\
 		if (err)					\
 			goto fault;				\
 	} while (0)
@@ -228,6 +230,7 @@ union offset_union {
 #define __get32_unaligned_check(ins,val,addr)			\
 	do {							\
 		unsigned int err = 0, v, a = addr;		\
+		pax_open_userland();				\
 		__get8_unaligned_check(ins,v,a,err);		\
 		val = v << ((BE) ? 24 : 0);			\
 		__get8_unaligned_check(ins,v,a,err);		\
@@ -236,6 +239,7 @@ union offset_union {
 		val |= v << ((BE) ? 8 : 16);			\
 		__get8_unaligned_check(ins,v,a,err);		\
 		val |= v << ((BE) ? 0 : 24);			\
+		pax_close_userland();				\
 		if (err)					\
 			goto fault;				\
 	} while (0)
@@ -249,6 +253,7 @@ union offset_union {
 #define __put16_unaligned_check(ins,val,addr)			\
 	do {							\
 		unsigned int err = 0, v = val, a = addr;	\
+		pax_open_userland();				\
 		__asm__( FIRST_BYTE_16				\
 	 ARM(	"1:	"ins"	%1, [%2], #1\n"	)		\
 	 THUMB(	"1:	"ins"	%1, [%2]\n"	)		\
@@ -268,6 +273,7 @@ union offset_union {
 		"	.popsection\n"				\
 		: "=r" (err), "=&r" (v), "=&r" (a)		\
 		: "0" (err), "1" (v), "2" (a));			\
+		pax_close_userland();				\
 		if (err)					\
 			goto fault;				\
 	} while (0)
@@ -281,6 +287,7 @@ union offset_union {
 #define __put32_unaligned_check(ins,val,addr)			\
 	do {							\
 		unsigned int err = 0, v = val, a = addr;	\
+		pax_open_userland();				\
 		__asm__( FIRST_BYTE_32				\
 	 ARM(	"1:	"ins"	%1, [%2], #1\n"	)		\
 	 THUMB(	"1:	"ins"	%1, [%2]\n"	)		\
@@ -310,6 +317,7 @@ union offset_union {
 		"	.popsection\n"				\
 		: "=r" (err), "=&r" (v), "=&r" (a)		\
 		: "0" (err), "1" (v), "2" (a));			\
+		pax_close_userland();				\
 		if (err)					\
 			goto fault;				\
 	} while (0)
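The alignment fixup handler dereferences the faulting (normally userland) address on purpose, so under PAX_MEMORY_UDEREF each access window is bracketed by pax_open_userland()/pax_close_userland(), much like stac()/clac() under x86 SMAP. Illustrative sketch only; the real helpers in this patch also handle kernel-mode faults and nesting, and DOMAIN_UDEREF is an assumed constant name:

	/* userland is kept DOMAIN_NOACCESS while executing in the kernel */
	#define pax_open_userland()	modify_domain(DOMAIN_USER, DOMAIN_UDEREF)
	#define pax_close_userland()	modify_domain(DOMAIN_USER, DOMAIN_NOACCESS)
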
diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 5dbf13f..1a60561 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -25,6 +25,7 @@
 #include <asm/system_misc.h>
 #include <asm/system_info.h>
 #include <asm/tlbflush.h>
+#include <asm/sections.h>
 
 #include "fault.h"
 
@@ -138,6 +139,20 @@ __do_kernel_fault(struct mm_struct *mm, unsigned long addr, unsigned int fsr,
 	if (fixup_exception(regs))
 		return;
 
+#ifdef CONFIG_PAX_KERNEXEC
+	if ((fsr & FSR_WRITE) &&
+	    (((unsigned long)_stext <= addr && addr < init_mm.end_code) ||
+	     (MODULES_VADDR <= addr && addr < MODULES_END)))
+	{
+		if (current->signal->curr_ip)
+			printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
+					from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
+		else
+			printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
+					from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
+	}
+#endif
+
 	/*
 	 * No handler, we'll have to terminate things with extreme prejudice.
 	 */
@@ -174,6 +189,13 @@ __do_user_fault(struct task_struct *tsk, unsigned long addr,
 	}
 #endif
 
+#ifdef CONFIG_PAX_PAGEEXEC
+	if (fsr & FSR_LNX_PF) {
+		pax_report_fault(regs, (void *)regs->ARM_pc, (void *)regs->ARM_sp);
+		do_group_exit(SIGKILL);
+	}
+#endif
+
 	tsk->thread.address = addr;
 	tsk->thread.error_code = fsr;
 	tsk->thread.trap_no = 14;
@@ -398,6 +420,33 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 }
 #endif					/* CONFIG_MMU */
 
+#ifdef CONFIG_PAX_PAGEEXEC
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
+{
+	long i;
+
+	printk(KERN_ERR "PAX: bytes at PC: ");
+	for (i = 0; i < 20; i++) {
+		unsigned char c;
+		if (get_user(c, (__force unsigned char __user *)pc+i))
+			printk(KERN_CONT "?? ");
+		else
+			printk(KERN_CONT "%02x ", c);
+	}
+	printk("\n");
+
+	printk(KERN_ERR "PAX: bytes at SP-4: ");
+	for (i = -1; i < 20; i++) {
+		unsigned long c;
+		if (get_user(c, (__force unsigned long __user *)sp+i))
+			printk(KERN_CONT "???????? ");
+		else
+			printk(KERN_CONT "%08lx ", c);
+	}
+	printk("\n");
+}
+#endif
+
 /*
  * First Level Translation Fault Handler
  *
@@ -543,9 +592,22 @@ do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
 	struct siginfo info;
 
+#ifdef CONFIG_PAX_MEMORY_UDEREF
+	if (addr < TASK_SIZE && is_domain_fault(fsr)) {
+		if (current->signal->curr_ip)
+			printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
+					from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
+		else
+			printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to access userland memory at %08lx\n", current->comm, task_pid_nr(current),
+					from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()), addr);
+		goto die;
+	}
+#endif
+
 	if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
 		return;
 
+die:
 	printk(KERN_ALERT "Unhandled fault: %s (0x%03x) at 0x%08lx\n",
 		inf->name, fsr, addr);
 
@@ -575,9 +637,49 @@ do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
 	const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
 	struct siginfo info;
 
+	if (user_mode(regs)) {
+		if (addr == 0xffff0fe0UL) {
+			/*
+			 * PaX: __kuser_get_tls emulation
+			 */
+			regs->ARM_r0 = current_thread_info()->tp_value;
+			regs->ARM_pc = regs->ARM_lr;
+			return;
+		}
+	}
+
+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+	else if (is_domain_fault(ifsr) || is_xn_fault(ifsr)) {
+		if (current->signal->curr_ip)
+			printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", &current->signal->curr_ip, current->comm, task_pid_nr(current),
+					from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
+					addr >= TASK_SIZE ? "non-executable kernel" : "userland", addr);
+		else
+			printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to execute %s memory at %08lx\n", current->comm, task_pid_nr(current),
+					from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()),
+					addr >= TASK_SIZE ? "non-executable kernel" : "userland", addr);
+		goto die;
+	}
+#endif
+
+#ifdef CONFIG_PAX_REFCOUNT
+	if (fsr_fs(ifsr) == FAULT_CODE_DEBUG) {
+		unsigned int bkpt;
+
+		if (!probe_kernel_address((unsigned int *)addr, bkpt) && bkpt == 0xe12f1073) {
+			current->thread.error_code = ifsr;
+			current->thread.trap_no = 0;
+			pax_report_refcount_overflow(regs);
+			fixup_exception(regs);
+			return;
+		}
+	}
+#endif
+
 	if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
 		return;
 
+die:
 	printk(KERN_ALERT "Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
 		inf->name, ifsr, addr);
 
diff --git a/arch/arm/mm/fault.h b/arch/arm/mm/fault.h
index cf08bdf..772656c 100644
--- a/arch/arm/mm/fault.h
+++ b/arch/arm/mm/fault.h
@@ -3,6 +3,7 @@
 
 /*
  * Fault status register encodings.  We steal bit 31 for our own purposes.
+ * Set when the FSR value is from an instruction fault.
 */
 #define FSR_LNX_PF		(1 << 31)
 #define FSR_WRITE		(1 << 11)
@@ -22,6 +23,17 @@ static inline int fsr_fs(unsigned int fsr)
 }
 #endif
 
+/* valid for LPAE and !LPAE */
+static inline int is_xn_fault(unsigned int fsr)
+{
+	return ((fsr_fs(fsr) & 0x3c) == 0xc);
+}
+
+static inline int is_domain_fault(unsigned int fsr)
+{
+	return ((fsr_fs(fsr) & 0xD) == 0x9);
+}
+
 void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs);
 unsigned long search_exception_table(unsigned long addr);
 
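Both helpers classify aborts by the 5-bit fault-status code that fsr_fs() assembles from the FSR. On the short-descriptor format the masks decode as follows (worked out bit by bit; LPAE uses a different but compatible encoding, per the comment above):

	/* (fs & 0xD) == 0x9 matches 0b01001 (section) and 0b01011 (page),
	 * i.e. exactly the two domain-fault codes */
	/* (fs & 0x3c) == 0xc matches 0b01100..0b01111, the permission-fault
	 * codes, which is where XN violations are reported */
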
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 0ecc43f..190b956 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -30,6 +30,8 @@
 #include <asm/setup.h>
 #include <asm/tlb.h>
 #include <asm/fixmap.h>
+#include <asm/system_info.h>
+#include <asm/cp15.h>
 
 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
@@ -726,7 +728,46 @@ void free_initmem(void)
 {
 #ifdef CONFIG_HAVE_TCM
 	extern char __tcm_start, __tcm_end;
+#endif
 
+#ifdef CONFIG_PAX_KERNEXEC
+	unsigned long addr;
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	int cpu_arch = cpu_architecture();
+	unsigned int cr = get_cr();
+
+	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
+		/* make pages tables, etc before .text NX */
+		for (addr = PAGE_OFFSET; addr < (unsigned long)_stext; addr += SECTION_SIZE) {
+			pgd = pgd_offset_k(addr);
+			pud = pud_offset(pgd, addr);
+			pmd = pmd_offset(pud, addr);
+			__section_update(pmd, addr, PMD_SECT_XN);
+		}
+		/* make init NX */
+		for (addr = (unsigned long)__init_begin; addr < (unsigned long)_sdata; addr += SECTION_SIZE) {
+			pgd = pgd_offset_k(addr);
+			pud = pud_offset(pgd, addr);
+			pmd = pmd_offset(pud, addr);
+			__section_update(pmd, addr, PMD_SECT_XN);
+		}
+		/* make kernel code/rodata RX */
+		for (addr = (unsigned long)_stext; addr < (unsigned long)__init_begin; addr += SECTION_SIZE) {
+			pgd = pgd_offset_k(addr);
+			pud = pud_offset(pgd, addr);
+			pmd = pmd_offset(pud, addr);
+#ifdef CONFIG_ARM_LPAE
+			__section_update(pmd, addr, PMD_SECT_RDONLY);
+#else
+			__section_update(pmd, addr, PMD_SECT_APX|PMD_SECT_AP_WRITE);
+#endif
+		}
+	}
+#endif
+
+#ifdef CONFIG_HAVE_TCM
 	poison_init_mem(&__tcm_start, &__tcm_end - &__tcm_start);
 	free_reserved_area(&__tcm_start, &__tcm_end, 0, "TCM link");
 #endif
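free_initmem() is where KERNEXEC settles the final section permissions: everything below _stext and the freed init region become XN, and kernel text/rodata become read-only. __section_update() is assumed to patch the section descriptor in place, roughly as sketched below (hedged; the in-patch helper also deals with the second entry of a 2-level section pair):

	static inline void __section_update(pmd_t *pmd, unsigned long addr, pmdval_t prot)
	{
	#ifdef CONFIG_ARM_LPAE
		pmd[0] = __pmd(pmd_val(pmd[0]) | prot);
	#else
		if (addr & SECTION_SIZE)	/* odd 1MB section of the pair */
			pmd[1] = __pmd(pmd_val(pmd[1]) | prot);
		else
			pmd[0] = __pmd(pmd_val(pmd[0]) | prot);
	#endif
		flush_pmd_entry(pmd);		/* clean the updated descriptor */
	}
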
diff --git a/arch/arm/mm/ioremap.c b/arch/arm/mm/ioremap.c
index 04d9006..c547d85 100644
--- a/arch/arm/mm/ioremap.c
+++ b/arch/arm/mm/ioremap.c
@@ -392,9 +392,9 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
 	unsigned int mtype;
 
 	if (cached)
-		mtype = MT_MEMORY;
+		mtype = MT_MEMORY_RX;
 	else
-		mtype = MT_MEMORY_NONCACHED;
+		mtype = MT_MEMORY_NONCACHED_RX;
 
 	return __arm_ioremap_caller(phys_addr, size, mtype,
 			__builtin_return_address(0));
diff --git a/arch/arm/mm/mmap.c b/arch/arm/mm/mmap.c
index 10062ce..8695745 100644
--- a/arch/arm/mm/mmap.c
+++ b/arch/arm/mm/mmap.c
@@ -59,6 +59,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	struct vm_area_struct *vma;
 	int do_align = 0;
 	int aliasing = cache_is_vipt_aliasing();
+	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
 	struct vm_unmapped_area_info info;
 
 	/*
@@ -81,6 +82,10 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	if (len > TASK_SIZE)
 		return -ENOMEM;
 
+#ifdef CONFIG_PAX_RANDMMAP
+	if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
 	if (addr) {
 		if (do_align)
 			addr = COLOUR_ALIGN(addr, pgoff);
@@ -88,8 +93,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 			addr = PAGE_ALIGN(addr);
 
 		vma = find_vma(mm, addr);
-		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
 			return addr;
 	}
 
@@ -99,6 +103,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	info.high_limit = TASK_SIZE;
 	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
 	info.align_offset = pgoff << PAGE_SHIFT;
+	info.threadstack_offset = offset;
 	return vm_unmapped_area(&info);
 }
 
@@ -112,6 +117,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	unsigned long addr = addr0;
 	int do_align = 0;
 	int aliasing = cache_is_vipt_aliasing();
+	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
 	struct vm_unmapped_area_info info;
 
 	/*
@@ -132,6 +138,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		return addr;
 	}
 
+#ifdef CONFIG_PAX_RANDMMAP
+	if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
 	/* requesting a specific address */
 	if (addr) {
 		if (do_align)
@@ -139,8 +149,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 		else
 			addr = PAGE_ALIGN(addr);
 		vma = find_vma(mm, addr);
-		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
 			return addr;
 	}
 
@@ -150,6 +159,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
 	info.high_limit = mm->mmap_base;
 	info.align_mask = do_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
 	info.align_offset = pgoff << PAGE_SHIFT;
+	info.threadstack_offset = offset;
 	addr = vm_unmapped_area(&info);
 
 	/*
@@ -173,6 +183,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 {
 	unsigned long random_factor = 0UL;
 
+#ifdef CONFIG_PAX_RANDMMAP
+	if (!(mm->pax_flags & MF_PAX_RANDMMAP))
+#endif
+
 	/* 8 bits of randomness in 20 address space bits */
 	if ((current->flags & PF_RANDOMIZE) &&
 	    !(current->personality & ADDR_NO_RANDOMIZE))
@@ -180,10 +194,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
 
 	if (mmap_is_legacy()) {
 		mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
+
+#ifdef CONFIG_PAX_RANDMMAP
+		if (mm->pax_flags & MF_PAX_RANDMMAP)
+			mm->mmap_base += mm->delta_mmap;
+#endif
+
 		mm->get_unmapped_area = arch_get_unmapped_area;
 		mm->unmap_area = arch_unmap_area;
 	} else {
 		mm->mmap_base = mmap_base(random_factor);
+
+#ifdef CONFIG_PAX_RANDMMAP
+		if (mm->pax_flags & MF_PAX_RANDMMAP)
+			mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
+#endif
+
 		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
 		mm->unmap_area = arch_unmap_area_topdown;
 	}
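Both allocators stop testing vma->vm_start directly and defer to check_heap_stack_gap(), which additionally enforces a guard gap below stack-like VMAs plus the per-thread random offset computed above. A hedged approximation of the semantics only (the real helper is added to include/linux/mm.h by this patch, and sysctl_heap_stack_gap is a grsecurity tunable; treat the exact arithmetic as an assumption):

	static inline int check_heap_stack_gap(const struct vm_area_struct *vma,
					       unsigned long addr, unsigned long len,
					       unsigned long offset)
	{
		if (!vma)
			return 1;
		if (addr + len > vma->vm_start)
			return 0;	/* would overlap the next mapping */
		/* keep a configurable gap clear below a downward-growing stack */
		if (vma->vm_flags & VM_GROWSDOWN)
			return vma->vm_start - (addr + len) >=
			       sysctl_heap_stack_gap + offset;
		return 1;
	}
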
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index 4d409e6..f375351 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -36,6 +36,22 @@
 #include "mm.h"
 #include "tcm.h"
 
+#if defined(CONFIG_CPU_USE_DOMAINS) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
+void modify_domain(unsigned int dom, unsigned int type)
+{
+	struct thread_info *thread = current_thread_info();
+	unsigned int domain = thread->cpu_domain;
+	/*
+	 * DOMAIN_MANAGER might be defined to some other value,
+	 * use the arch-defined constant
+	 */
+	domain &= ~domain_val(dom, 3);
+	thread->cpu_domain = domain | domain_val(dom, type);
+	set_domain(thread->cpu_domain);
+}
+EXPORT_SYMBOL(modify_domain);
+#endif
+
 /*
  * empty_zero_page is a special page that is used for
  * zero-initialized data and COW.
@@ -228,10 +244,18 @@ __setup("noalign", noalign_setup);
 
 #endif /* ifdef CONFIG_CPU_CP15 / else */
 
-#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
+#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY
 #define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE
 
-static struct mem_type mem_types[] = {
+#ifdef CONFIG_PAX_KERNEXEC
+#define L_PTE_KERNEXEC		L_PTE_RDONLY
+#define PMD_SECT_KERNEXEC	PMD_SECT_RDONLY
+#else
+#define L_PTE_KERNEXEC		L_PTE_DIRTY
+#define PMD_SECT_KERNEXEC	PMD_SECT_AP_WRITE
+#endif
+
+static struct mem_type mem_types[] __read_only = {
 	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
 		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
 				  L_PTE_SHARED,
@@ -260,16 +284,16 @@ static struct mem_type mem_types[] = {
 	[MT_UNCACHED] = {
 		.prot_pte	= PROT_PTE_DEVICE,
 		.prot_l1	= PMD_TYPE_TABLE,
-		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
+		.prot_sect	= PROT_SECT_DEVICE,
 		.domain		= DOMAIN_IO,
 	},
 	[MT_CACHECLEAN] = {
-		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
 		.domain    = DOMAIN_KERNEL,
 	},
 #ifndef CONFIG_ARM_LPAE
 	[MT_MINICLEAN] = {
-		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE | PMD_SECT_RDONLY,
 		.domain    = DOMAIN_KERNEL,
 	},
 #endif
@@ -277,36 +301,65 @@ static struct mem_type mem_types[] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 				L_PTE_RDONLY,
 		.prot_l1   = PMD_TYPE_TABLE,
-		.domain    = DOMAIN_USER,
+		.domain    = DOMAIN_VECTORS,
 	},
 	[MT_HIGH_VECTORS] = {
-		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_USER | L_PTE_RDONLY,
+		/* we always want the vector page to be noaccess for userland on archs with
+		   XN where we can enforce some reasonable measure of security
+		   therefore, when kernexec is disabled, instead of L_PTE_USER | L_PTE_RDONLY
+		   which turns into supervisor rwx, userland rx, we instead omit that entirely,
+		   leaving it as supervisor rwx only
+		*/
+#ifdef CONFIG_PAX_KERNEXEC
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_RDONLY,
+#elif __LINUX_ARM_ARCH__ >= 6
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
+#else
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY | L_PTE_USER | L_PTE_RDONLY,
+#endif
 		.prot_l1   = PMD_TYPE_TABLE,
-		.domain    = DOMAIN_USER,
+		.domain    = DOMAIN_VECTORS,
 	},
-	[MT_MEMORY] = {
+	[MT_MEMORY_RWX] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
 		.prot_l1   = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 		.domain    = DOMAIN_KERNEL,
 	},
+	[MT_MEMORY_RW] = {
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
+		.prot_l1   = PMD_TYPE_TABLE,
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
+		.domain    = DOMAIN_KERNEL,
+	},
+	[MT_MEMORY_RX] = {
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC,
+		.prot_l1   = PMD_TYPE_TABLE,
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
+		.domain    = DOMAIN_KERNEL,
+	},
 	[MT_ROM] = {
-		.prot_sect = PMD_TYPE_SECT,
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
 		.domain    = DOMAIN_KERNEL,
 	},
-	[MT_MEMORY_NONCACHED] = {
+	[MT_MEMORY_NONCACHED_RW] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
 				L_PTE_MT_BUFFERABLE,
 		.prot_l1   = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
 		.domain    = DOMAIN_KERNEL,
 	},
+	[MT_MEMORY_NONCACHED_RX] = {
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_KERNEXEC |
+				L_PTE_MT_BUFFERABLE,
+		.prot_l1   = PMD_TYPE_TABLE,
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_KERNEXEC,
+		.domain    = DOMAIN_KERNEL,
+	},
 	[MT_MEMORY_DTCM] = {
-		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_XN,
+		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
 		.prot_l1   = PMD_TYPE_TABLE,
-		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
+		.prot_sect = PMD_TYPE_SECT | PMD_SECT_RDONLY,
 		.domain    = DOMAIN_KERNEL,
 	},
 	[MT_MEMORY_ITCM] = {
@@ -316,10 +369,10 @@ static struct mem_type mem_types[] = {
 	},
 	[MT_MEMORY_SO] = {
 		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
-				L_PTE_MT_UNCACHED | L_PTE_XN,
+				L_PTE_MT_UNCACHED,
 		.prot_l1   = PMD_TYPE_TABLE,
 		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
-				PMD_SECT_UNCACHED | PMD_SECT_XN,
+				PMD_SECT_UNCACHED,
 		.domain    = DOMAIN_KERNEL,
 	},
 	[MT_MEMORY_DMA_READY] = {
@@ -405,9 +458,35 @@ static void __init build_mem_type_table(void)
 		 * to prevent speculative instruction fetches.
 		 */
 		mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
+		mem_types[MT_DEVICE].prot_pte |= L_PTE_XN;
 		mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
+		mem_types[MT_DEVICE_NONSHARED].prot_pte |= L_PTE_XN;
 		mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
+		mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_XN;
 		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
+		mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_XN;
+
+		/* Mark other regions on ARMv6+ as execute-never */
+
+#ifdef CONFIG_PAX_KERNEXEC
+		mem_types[MT_UNCACHED].prot_sect |= PMD_SECT_XN;
+		mem_types[MT_UNCACHED].prot_pte |= L_PTE_XN;
+		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_XN;
+		mem_types[MT_CACHECLEAN].prot_pte |= L_PTE_XN;
+#ifndef CONFIG_ARM_LPAE
+		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_XN;
+		mem_types[MT_MINICLEAN].prot_pte |= L_PTE_XN;
+#endif
+		mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_XN;
+		mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_XN;
+		mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_XN;
+		mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= PMD_SECT_XN;
+		mem_types[MT_MEMORY_DTCM].prot_sect |= PMD_SECT_XN;
+		mem_types[MT_MEMORY_DTCM].prot_pte |= L_PTE_XN;
+#endif
+
+		mem_types[MT_MEMORY_SO].prot_sect |= PMD_SECT_XN;
+		mem_types[MT_MEMORY_SO].prot_pte |= L_PTE_XN;
 	}
 	if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
 		/*
@@ -468,6 +547,9 @@ static void __init build_mem_type_table(void)
 		 * from SVC mode and no access from userspace.
 		 */
 		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+#ifdef CONFIG_PAX_KERNEXEC
+		mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
+#endif
 		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
 #endif
@@ -485,11 +567,17 @@ static void __init build_mem_type_table(void)
 			mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
 			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
 			mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
-			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
-			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_MEMORY_RWX].prot_sect |= PMD_SECT_S;
+			mem_types[MT_MEMORY_RWX].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
+			mem_types[MT_MEMORY_RW].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
+			mem_types[MT_MEMORY_RX].prot_pte |= L_PTE_SHARED;
 			mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
-			mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
-			mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_S;
+			mem_types[MT_MEMORY_NONCACHED_RW].prot_pte |= L_PTE_SHARED;
+			mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_S;
+			mem_types[MT_MEMORY_NONCACHED_RX].prot_pte |= L_PTE_SHARED;
 		}
 	}
 
@@ -500,15 +588,20 @@ static void __init build_mem_type_table(void)
 	if (cpu_arch >= CPU_ARCH_ARMv6) {
 		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
 			/* Non-cacheable Normal is XCB = 001 */
-			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
+			mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
+				PMD_SECT_BUFFERED;
+			mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
 				PMD_SECT_BUFFERED;
 		} else {
 			/* For both ARMv6 and non-TEX-remapping ARMv7 */
-			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
+			mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |=
+				PMD_SECT_TEX(1);
+			mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |=
 				PMD_SECT_TEX(1);
 		}
 	} else {
-		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
+		mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= PMD_SECT_BUFFERABLE;
+		mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= PMD_SECT_BUFFERABLE;
 	}
 
 #ifdef CONFIG_ARM_LPAE
@@ -524,6 +617,8 @@ static void __init build_mem_type_table(void)
 	vecs_pgprot |= PTE_EXT_AF;
 #endif
 
+	user_pgprot |= __supported_pte_mask;
+
 	for (i = 0; i < 16; i++) {
 		pteval_t v = pgprot_val(protection_map[i]);
 		protection_map[i] = __pgprot(v | user_pgprot);
@@ -541,10 +636,15 @@ static void __init build_mem_type_table(void)
 
 	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
 	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
-	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
-	mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
+	mem_types[MT_MEMORY_RWX].prot_sect |= ecc_mask | cp->pmd;
+	mem_types[MT_MEMORY_RWX].prot_pte |= kern_pgprot;
+	mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
+	mem_types[MT_MEMORY_RW].prot_pte |= kern_pgprot;
+	mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
+	mem_types[MT_MEMORY_RX].prot_pte |= kern_pgprot;
 	mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
-	mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
+	mem_types[MT_MEMORY_NONCACHED_RW].prot_sect |= ecc_mask;
+	mem_types[MT_MEMORY_NONCACHED_RX].prot_sect |= ecc_mask;
 	mem_types[MT_ROM].prot_sect |= cp->pmd;
 
 	switch (cp->pmd) {
@@ -1166,18 +1266,15 @@ void __init arm_mm_memblock_reserve(void)
  * called function.  This means you can't use any function or debugging
  * method which may touch any device, otherwise the kernel _will_ crash.
  */
+
+static char vectors[PAGE_SIZE] __read_only __aligned(PAGE_SIZE);
+
 static void __init devicemaps_init(struct machine_desc *mdesc)
 {
 	struct map_desc map;
 	unsigned long addr;
-	void *vectors;
 
-	/*
-	 * Allocate the vector page early.
-	 */
-	vectors = early_alloc(PAGE_SIZE);
-
-	early_trap_init(vectors);
+	early_trap_init(&vectors);
 
 	for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
 		pmd_clear(pmd_off_k(addr));
@@ -1217,7 +1314,7 @@ static void __init devicemaps_init(struct machine_desc *mdesc)
 	 * location (0xffff0000).  If we aren't using high-vectors, also
 	 * create a mapping at the low-vectors virtual address.
 	 */
-	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
+	map.pfn = __phys_to_pfn(virt_to_phys(&vectors));
 	map.virtual = 0xffff0000;
 	map.length = PAGE_SIZE;
 	map.type = MT_HIGH_VECTORS;
@@ -1275,8 +1372,39 @@ static void __init map_lowmem(void)
 		map.pfn = __phys_to_pfn(start);
 		map.virtual = __phys_to_virt(start);
 		map.length = end - start;
-		map.type = MT_MEMORY;
 
+#ifdef CONFIG_PAX_KERNEXEC
+		if (map.virtual <= (unsigned long)_stext && ((unsigned long)_end < (map.virtual + map.length))) {
+			struct map_desc kernel;
+			struct map_desc initmap;
+
+			/* when freeing initmem we will make this RW */
+			initmap.pfn = __phys_to_pfn(__pa(__init_begin));
+			initmap.virtual = (unsigned long)__init_begin;
+			initmap.length = _sdata - __init_begin;
+			initmap.type = MT_MEMORY_RWX;
+			create_mapping(&initmap);
+
+			/* when freeing initmem we will make this RX */
+			kernel.pfn = __phys_to_pfn(__pa(_stext));
+			kernel.virtual = (unsigned long)_stext;
+			kernel.length = __init_begin - _stext;
+			kernel.type = MT_MEMORY_RWX;
+			create_mapping(&kernel);
+
+			if (map.virtual < (unsigned long)_stext) {
+				map.length = (unsigned long)_stext - map.virtual;
+				map.type = MT_MEMORY_RWX;
+				create_mapping(&map);
+			}
+
+			map.pfn = __phys_to_pfn(__pa(_sdata));
+			map.virtual = (unsigned long)_sdata;
+			map.length = end - __pa(_sdata);
+		}
+#endif
+
+		map.type = MT_MEMORY_RW;
 		create_mapping(&map);
 	}
 }
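This is the heart of ARM KERNEXEC: the single MT_MEMORY/MT_MEMORY_NONCACHED types split into RWX/RW/RX variants so lowmem, kernel text and module space can carry distinct W^X permissions, the vectors page becomes a static __read_only object instead of an early_alloc() page, and modify_domain() lets the rest of the patch retune DACR per thread. One line above is worth flagging: MT_MEMORY_NONCACHED_RW's prot_pte is OR'ed with the section-level PMD_SECT_XN constant, where the PTE-level L_PTE_XN was presumably intended. The DACR packing behind domain_val() is two bits per domain:

	/* arch/arm/include/asm/domain.h (mainline) */
	#define domain_val(dom, type)	((type) << (2 * (dom)))

	/* usage: grant this thread manager rights over the kernel domain */
	modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);
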
diff --git a/arch/arm/mm/proc-v7-2level.S b/arch/arm/mm/proc-v7-2level.S
index 9704097..3e36dde 100644
--- a/arch/arm/mm/proc-v7-2level.S
+++ b/arch/arm/mm/proc-v7-2level.S
@@ -99,6 +99,9 @@ ENTRY(cpu_v7_set_pte_ext)
 	tst	r1, #L_PTE_XN
 	orrne	r3, r3, #PTE_EXT_XN
 
+	tst	r1, #L_PTE_PXN
+	orrne	r3, r3, #PTE_EXT_PXN
+
 	tst	r1, #L_PTE_YOUNG
 	tstne	r1, #L_PTE_VALID
 #ifndef CONFIG_CPU_USE_DOMAINS
diff --git a/arch/arm/plat-omap/sram.c b/arch/arm/plat-omap/sram.c
index a5bc92d..0bb4730 100644
--- a/arch/arm/plat-omap/sram.c
+++ b/arch/arm/plat-omap/sram.c
@@ -93,6 +93,8 @@ void __init omap_map_sram(unsigned long start, unsigned long size,
 	 * Looks like we need to preserve some bootloader code at the
 	 * beginning of SRAM for jumping to flash for reboot to work...
 	 */
+	pax_open_kernel();
 	memset_io(omap_sram_base + omap_sram_skip, 0,
 		  omap_sram_size - omap_sram_skip);
+	pax_close_kernel();
 }
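omap_map_sram() writes into SRAM that KERNEXEC maps read-only and executable, hence the pax_open_kernel()/pax_close_kernel() bracket around memset_io(). On ARM these are assumed to ride the domain machinery shown in the mmu.c hunk above; a hedged sketch (DOMAIN_KERNELCLIENT is an assumed constant name):

	static inline unsigned long pax_open_kernel(void)
	{
	#ifdef CONFIG_PAX_KERNEXEC
		preempt_disable();
		modify_domain(DOMAIN_KERNEL, DOMAIN_MANAGER);	/* bypass RO permissions */
	#endif
		return 0;
	}

	static inline unsigned long pax_close_kernel(void)
	{
	#ifdef CONFIG_PAX_KERNEXEC
		modify_domain(DOMAIN_KERNEL, DOMAIN_KERNELCLIENT);
		preempt_enable_no_resched();
	#endif
		return 0;
	}
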
diff --git a/arch/arm/plat-samsung/include/plat/dma-ops.h b/arch/arm/plat-samsung/include/plat/dma-ops.h
index ce6d763..cfea917 100644
--- a/arch/arm/plat-samsung/include/plat/dma-ops.h
+++ b/arch/arm/plat-samsung/include/plat/dma-ops.h
@@ -47,7 +47,7 @@ struct samsung_dma_ops {
 	int (*started)(unsigned ch);
 	int (*flush)(unsigned ch);
 	int (*stop)(unsigned ch);
-};
+} __no_const;
 
 extern void *samsung_dmadev_get_ops(void);
 extern void *s3c_dma_get_ops(void);
diff --git a/arch/arm64/kernel/debug-monitors.c b/arch/arm64/kernel/debug-monitors.c
index f4726dc..39ed646 100644
--- a/arch/arm64/kernel/debug-monitors.c
+++ b/arch/arm64/kernel/debug-monitors.c
@@ -149,7 +149,7 @@ static int __cpuinit os_lock_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata os_lock_nb = {
+static struct notifier_block os_lock_nb = {
 	.notifier_call = os_lock_notify,
 };
 
diff --git a/arch/arm64/kernel/hw_breakpoint.c b/arch/arm64/kernel/hw_breakpoint.c
index 5ab825c..96aaec8 100644
--- a/arch/arm64/kernel/hw_breakpoint.c
+++ b/arch/arm64/kernel/hw_breakpoint.c
@@ -831,7 +831,7 @@ static int __cpuinit hw_breakpoint_reset_notify(struct notifier_block *self,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata hw_breakpoint_reset_nb = {
+static struct notifier_block hw_breakpoint_reset_nb = {
 	.notifier_call = hw_breakpoint_reset_notify,
 };
 
diff --git a/arch/avr32/include/asm/cache.h b/arch/avr32/include/asm/cache.h
index c3a58a1..78fbf54 100644
--- a/arch/avr32/include/asm/cache.h
+++ b/arch/avr32/include/asm/cache.h
@@ -1,8 +1,10 @@
 #ifndef __ASM_AVR32_CACHE_H
 #define __ASM_AVR32_CACHE_H
 
+#include <linux/const.h>
+
 #define L1_CACHE_SHIFT 5
-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
 
 /*
  * Memory returned by kmalloc() may be used for DMA, so we must make
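This cache.h pattern repeats across the blackfin, cris, frv, hexagon and ia64 hunks below: L1_CACHE_BYTES becomes _AC(1,UL) << L1_CACHE_SHIFT, so the constant stays usable from assembly yet is unsigned long in C, which keeps size arithmetic and the size_overflow instrumentation honest. _AC() is the standard helper:

	/* include/uapi/linux/const.h (mainline) */
	#ifdef __ASSEMBLY__
	#define _AC(X,Y)	X
	#else
	#define __AC(X,Y)	(X##Y)
	#define _AC(X,Y)	__AC(X,Y)
	#endif
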
diff --git a/arch/avr32/include/asm/elf.h b/arch/avr32/include/asm/elf.h
index d232888..87c8df1 100644
--- a/arch/avr32/include/asm/elf.h
+++ b/arch/avr32/include/asm/elf.h
@@ -84,8 +84,14 @@ typedef struct user_fpu_struct elf_fpregset_t;
    the loader.  We need to make sure that it is out of the way of the program
    that it will "exec", and that there is sufficient room for the brk.  */
 
-#define ELF_ET_DYN_BASE		(2 * TASK_SIZE / 3)
+#define ELF_ET_DYN_BASE		(TASK_SIZE / 3 * 2)
 
+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE	0x00001000UL
+
+#define PAX_DELTA_MMAP_LEN	15
+#define PAX_DELTA_STACK_LEN	15
+#endif
 
 /* This yields a mask that user programs can use to figure out what
    instruction set this CPU supports.  This could be done in user space,
diff --git a/arch/avr32/include/asm/kmap_types.h b/arch/avr32/include/asm/kmap_types.h
index 479330b..53717a8 100644
--- a/arch/avr32/include/asm/kmap_types.h
+++ b/arch/avr32/include/asm/kmap_types.h
@@ -2,9 +2,9 @@
 #define __ASM_AVR32_KMAP_TYPES_H
 
 #ifdef CONFIG_DEBUG_HIGHMEM
-# define KM_TYPE_NR 29
+# define KM_TYPE_NR 30
 #else
-# define KM_TYPE_NR 14
+# define KM_TYPE_NR 15
 #endif
 
 #endif /* __ASM_AVR32_KMAP_TYPES_H */
diff --git a/arch/avr32/mm/fault.c b/arch/avr32/mm/fault.c
index b2f2d2d..d1c85cb 100644
--- a/arch/avr32/mm/fault.c
+++ b/arch/avr32/mm/fault.c
@@ -41,6 +41,23 @@ static inline int notify_page_fault(struct pt_regs *regs, int trap)
 
 int exception_trace = 1;
 
+#ifdef CONFIG_PAX_PAGEEXEC
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
+{
+	unsigned long i;
+
+	printk(KERN_ERR "PAX: bytes at PC: ");
+	for (i = 0; i < 20; i++) {
+		unsigned char c;
+		if (get_user(c, (unsigned char *)pc+i))
+			printk(KERN_CONT "???????? ");
+		else
+			printk(KERN_CONT "%02x ", c);
+	}
+	printk("\n");
+}
+#endif
+
 /*
  * This routine handles page faults. It determines the address and the
  * problem, and then passes it off to one of the appropriate routines.
@@ -174,6 +191,16 @@ bad_area:
 	up_read(&mm->mmap_sem);
 
 	if (user_mode(regs)) {
+
+#ifdef CONFIG_PAX_PAGEEXEC
+		if (mm->pax_flags & MF_PAX_PAGEEXEC) {
+			if (ecr == ECR_PROTECTION_X || ecr == ECR_TLB_MISS_X) {
+				pax_report_fault(regs, (void *)regs->pc, (void *)regs->sp);
+				do_group_exit(SIGKILL);
+			}
+		}
+#endif
+
 		if (exception_trace && printk_ratelimit())
 			printk("%s%s[%d]: segfault at %08lx pc %08lx "
 			       "sp %08lx ecr %lu\n",
diff --git a/arch/blackfin/include/asm/cache.h b/arch/blackfin/include/asm/cache.h
index 568885a..f8008df 100644
--- a/arch/blackfin/include/asm/cache.h
+++ b/arch/blackfin/include/asm/cache.h
@@ -7,6 +7,7 @@
 #ifndef __ARCH_BLACKFIN_CACHE_H
 #define __ARCH_BLACKFIN_CACHE_H
 
+#include <linux/const.h>
 #include <linux/linkage.h>	/* for asmlinkage */
 
 /*
@@ -14,7 +15,7 @@
  * Blackfin loads 32 bytes for cache
  */
 #define L1_CACHE_SHIFT	5
-#define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES	(_AC(1,UL) << L1_CACHE_SHIFT)
 #define SMP_CACHE_BYTES	L1_CACHE_BYTES
 
 #define ARCH_DMA_MINALIGN	L1_CACHE_BYTES
diff --git a/arch/cris/include/arch-v10/arch/cache.h b/arch/cris/include/arch-v10/arch/cache.h
index aea2718..3639a60 100644
--- a/arch/cris/include/arch-v10/arch/cache.h
+++ b/arch/cris/include/arch-v10/arch/cache.h
@@ -1,8 +1,9 @@
 #ifndef _ASM_ARCH_CACHE_H
 #define _ASM_ARCH_CACHE_H
 
+#include <linux/const.h>
 /* Etrax 100LX have 32-byte cache-lines. */
-#define L1_CACHE_BYTES 32
 #define L1_CACHE_SHIFT 5
+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
 
 #endif /* _ASM_ARCH_CACHE_H */
diff --git a/arch/cris/include/arch-v32/arch/cache.h b/arch/cris/include/arch-v32/arch/cache.h
index 7caf25d..ee65ac5 100644
--- a/arch/cris/include/arch-v32/arch/cache.h
+++ b/arch/cris/include/arch-v32/arch/cache.h
@@ -1,11 +1,12 @@
 #ifndef _ASM_CRIS_ARCH_CACHE_H
 #define _ASM_CRIS_ARCH_CACHE_H
 
+#include <linux/const.h>
 #include <arch/hwregs/dma.h>
 
 /* A cache-line is 32 bytes. */
-#define L1_CACHE_BYTES 32
 #define L1_CACHE_SHIFT 5
+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
 
 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
 
diff --git a/arch/frv/include/asm/atomic.h b/arch/frv/include/asm/atomic.h
index b86329d..6709906 100644
--- a/arch/frv/include/asm/atomic.h
+++ b/arch/frv/include/asm/atomic.h
@@ -186,6 +186,16 @@ static inline void atomic64_dec(atomic64_t *v)
 #define atomic64_cmpxchg(v, old, new)	(__cmpxchg_64(old, new, &(v)->counter))
 #define atomic64_xchg(v, new)		(__xchg_64(new, &(v)->counter))
 
+#define atomic64_read_unchecked(v)		atomic64_read(v)
+#define atomic64_set_unchecked(v, i)		atomic64_set((v), (i))
+#define atomic64_add_unchecked(a, v)		atomic64_add((a), (v))
+#define atomic64_add_return_unchecked(a, v)	atomic64_add_return((a), (v))
+#define atomic64_sub_unchecked(a, v)		atomic64_sub((a), (v))
+#define atomic64_inc_unchecked(v)		atomic64_inc(v)
+#define atomic64_inc_return_unchecked(v)	atomic64_inc_return(v)
+#define atomic64_dec_unchecked(v)		atomic64_dec(v)
+#define atomic64_cmpxchg_unchecked(v, o, n)	atomic64_cmpxchg((v), (o), (n))
+
 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 {
 	int c, old;
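frv (like ia64 below) gets no PAX_REFCOUNT instrumentation, so its *_unchecked aliases simply collapse onto the plain atomics; the API split matters on instrumented arches, where the checked forms trap on signed overflow and the unchecked forms keep wraparound semantics for counters that may legitimately wrap. A hedged sketch of the checked form, modeled on the x86 side of this patch:

	static inline void atomic_add(int i, atomic_t *v)
	{
		asm volatile(LOCK_PREFIX "addl %1,%0\n"
	#ifdef CONFIG_PAX_REFCOUNT
			     "jno 0f\n"			/* no signed overflow: done */
			     LOCK_PREFIX "subl %1,%0\n"	/* undo the increment */
			     "int $4\n0:\n"		/* raise #OF, report and kill */
			     _ASM_EXTABLE(0b, 0b)
	#endif
			     : "+m" (v->counter)
			     : "ir" (i));
	}
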
diff --git a/arch/frv/include/asm/cache.h b/arch/frv/include/asm/cache.h
index 2797163..c2a401d 100644
--- a/arch/frv/include/asm/cache.h
+++ b/arch/frv/include/asm/cache.h
@@ -12,10 +12,11 @@
 #ifndef __ASM_CACHE_H
 #define __ASM_CACHE_H
 
+#include <linux/const.h>
 
 /* bytes per L1 cache line */
 #define L1_CACHE_SHIFT		(CONFIG_FRV_L1_CACHE_SHIFT)
-#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES		(_AC(1,UL) << L1_CACHE_SHIFT)
 
 #define __cacheline_aligned	__attribute__((aligned(L1_CACHE_BYTES)))
 #define ____cacheline_aligned	__attribute__((aligned(L1_CACHE_BYTES)))
diff --git a/arch/frv/include/asm/kmap_types.h b/arch/frv/include/asm/kmap_types.h
index 43901f2..0d8b865 100644
--- a/arch/frv/include/asm/kmap_types.h
+++ b/arch/frv/include/asm/kmap_types.h
@@ -2,6 +2,6 @@
 #ifndef _ASM_KMAP_TYPES_H
 #define _ASM_KMAP_TYPES_H
 
-#define KM_TYPE_NR 17
+#define KM_TYPE_NR 18
 
 #endif
diff --git a/arch/frv/mm/elf-fdpic.c b/arch/frv/mm/elf-fdpic.c
index 836f147..4cf23f5 100644
--- a/arch/frv/mm/elf-fdpic.c
+++ b/arch/frv/mm/elf-fdpic.c
@@ -61,6 +61,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 {
 	struct vm_area_struct *vma;
 	struct vm_unmapped_area_info info;
+	unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
 
 	if (len > TASK_SIZE)
 		return -ENOMEM;
@@ -73,8 +74,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 	if (addr) {
 		addr = PAGE_ALIGN(addr);
 		vma = find_vma(current->mm, addr);
-		if (TASK_SIZE - len >= addr &&
-		    (!vma || addr + len <= vma->vm_start))
+		if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
 			goto success;
 	}
 
@@ -85,6 +85,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
 	info.high_limit = (current->mm->start_stack - 0x00200000);
 	info.align_mask = 0;
 	info.align_offset = 0;
+	info.threadstack_offset = offset;
 	addr = vm_unmapped_area(&info);
 	if (!(addr & ~PAGE_MASK))
 		goto success;
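gr_rand_threadstack_offset() feeds the new threadstack_offset field so that anonymous thread-stack mappings get an extra random gap from their neighbours. A hedged approximation only; the config symbol and entropy width are assumptions:

	static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm,
							       const struct file *filp,
							       unsigned long flags)
	{
	#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
		if (!filp && (flags & MAP_STACK) && (mm->pax_flags & MF_PAX_RANDMMAP))
			return (pax_get_random_long() & 0x1f) << PAGE_SHIFT;
	#endif
		return 0;
	}
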
diff --git a/arch/hexagon/include/asm/cache.h b/arch/hexagon/include/asm/cache.h
index f4ca594..adc72fd6 100644
--- a/arch/hexagon/include/asm/cache.h
+++ b/arch/hexagon/include/asm/cache.h
@@ -21,9 +21,11 @@
 #ifndef __ASM_CACHE_H
 #define __ASM_CACHE_H
 
+#include <linux/const.h>
+
 /* Bytes per L1 cache line */
-#define L1_CACHE_SHIFT		(5)
-#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
+#define L1_CACHE_SHIFT		5
+#define L1_CACHE_BYTES		(_AC(1,UL) << L1_CACHE_SHIFT)
 
 #define __cacheline_aligned	__aligned(L1_CACHE_BYTES)
 #define ____cacheline_aligned	__aligned(L1_CACHE_BYTES)
diff --git a/arch/ia64/include/asm/atomic.h b/arch/ia64/include/asm/atomic.h
index 6e6fe18..a6ae668 100644
--- a/arch/ia64/include/asm/atomic.h
+++ b/arch/ia64/include/asm/atomic.h
@@ -208,6 +208,16 @@ atomic64_add_negative (__s64 i, atomic64_t *v)
 #define atomic64_inc(v)			atomic64_add(1, (v))
 #define atomic64_dec(v)			atomic64_sub(1, (v))
 
+#define atomic64_read_unchecked(v)		atomic64_read(v)
+#define atomic64_set_unchecked(v, i)		atomic64_set((v), (i))
+#define atomic64_add_unchecked(a, v)		atomic64_add((a), (v))
+#define atomic64_add_return_unchecked(a, v)	atomic64_add_return((a), (v))
+#define atomic64_sub_unchecked(a, v)		atomic64_sub((a), (v))
+#define atomic64_inc_unchecked(v)		atomic64_inc(v)
+#define atomic64_inc_return_unchecked(v)	atomic64_inc_return(v)
+#define atomic64_dec_unchecked(v)		atomic64_dec(v)
+#define atomic64_cmpxchg_unchecked(v, o, n)	atomic64_cmpxchg((v), (o), (n))
+
 /* Atomic operations are already serializing */
 #define smp_mb__before_atomic_dec()	barrier()
 #define smp_mb__after_atomic_dec()	barrier()
diff --git a/arch/ia64/include/asm/cache.h b/arch/ia64/include/asm/cache.h
index 988254a..e1ee885 100644
--- a/arch/ia64/include/asm/cache.h
+++ b/arch/ia64/include/asm/cache.h
@@ -1,6 +1,7 @@
 #ifndef _ASM_IA64_CACHE_H
 #define _ASM_IA64_CACHE_H
 
+#include <linux/const.h>
 
 /*
  * Copyright (C) 1998-2000 Hewlett-Packard Co
@@ -9,7 +10,7 @@
 
 /* Bytes per L1 (data) cache line.  */
 #define L1_CACHE_SHIFT		CONFIG_IA64_L1_CACHE_SHIFT
-#define L1_CACHE_BYTES		(1 << L1_CACHE_SHIFT)
+#define L1_CACHE_BYTES		(_AC(1,UL) << L1_CACHE_SHIFT)
 
 #ifdef CONFIG_SMP
 # define SMP_CACHE_SHIFT	L1_CACHE_SHIFT
diff --git a/arch/ia64/include/asm/elf.h b/arch/ia64/include/asm/elf.h
index 5a83c5c..4d7f553 100644
--- a/arch/ia64/include/asm/elf.h
+++ b/arch/ia64/include/asm/elf.h
@@ -42,6 +42,13 @@
 */
 #define ELF_ET_DYN_BASE		(TASK_UNMAPPED_BASE + 0x800000000UL)
 
+#ifdef CONFIG_PAX_ASLR
+#define PAX_ELF_ET_DYN_BASE	(current->personality == PER_LINUX32 ? 0x08048000UL : 0x4000000000000000UL)
+
+#define PAX_DELTA_MMAP_LEN	(current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
+#define PAX_DELTA_STACK_LEN	(current->personality == PER_LINUX32 ? 16 : 3*PAGE_SHIFT - 13)
+#endif
+
 #define PT_IA_64_UNWIND		0x70000001
 
 /* IA-64 relocations: */
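As in the avr32 hunk earlier, PAX_ELF_ET_DYN_BASE and the two *_LEN constants are the per-arch ASLR knobs: the base replaces ELF_ET_DYN_BASE for PIE binaries, and the lengths say how many bits of entropy go into the mmap and stack deltas. The deltas are consumed in the ELF loader roughly as follows (hedged sketch of the fs/binfmt_elf.c side of this patch):

	#ifdef CONFIG_PAX_ASLR
		if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
			current->mm->delta_mmap  = (pax_get_random_long() &
				((1UL << PAX_DELTA_MMAP_LEN)  - 1)) << PAGE_SHIFT;
			current->mm->delta_stack = (pax_get_random_long() &
				((1UL << PAX_DELTA_STACK_LEN) - 1)) << PAGE_SHIFT;
		}
	#endif
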
diff --git a/arch/ia64/include/asm/pgalloc.h b/arch/ia64/include/asm/pgalloc.h
index 96a8d92..617a1cf 100644
--- a/arch/ia64/include/asm/pgalloc.h
+++ b/arch/ia64/include/asm/pgalloc.h
@@ -39,6 +39,12 @@ pgd_populate(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
 	pgd_val(*pgd_entry) = __pa(pud);
 }
 
+static inline void
+pgd_populate_kernel(struct mm_struct *mm, pgd_t * pgd_entry, pud_t * pud)
+{
+	pgd_populate(mm, pgd_entry, pud);
+}
+
 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
 	return quicklist_alloc(0, GFP_KERNEL, NULL);
@@ -57,6 +63,12 @@ pud_populate(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
 	pud_val(*pud_entry) = __pa(pmd);
 }
 
+static inline void
+pud_populate_kernel(struct mm_struct *mm, pud_t * pud_entry, pmd_t * pmd)
+{
+	pud_populate(mm, pud_entry, pmd);
+}
+
 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
 {
 	return quicklist_alloc(0, GFP_KERNEL, NULL);
diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
index 815810c..d60bd4c 100644
--- a/arch/ia64/include/asm/pgtable.h
+++ b/arch/ia64/include/asm/pgtable.h
@@ -12,7 +12,7 @@
 *	David Mosberger-Tang <davidm@hpl.hp.com>
 */
 
-
+#include <linux/const.h>
 #include <asm/mman.h>
 #include <asm/page.h>
 #include <asm/processor.h>
@@ -142,6 +142,17 @@
 #define PAGE_READONLY	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
 #define PAGE_COPY	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
 #define PAGE_COPY_EXEC	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RX)
+
+#ifdef CONFIG_PAX_PAGEEXEC
+# define PAGE_SHARED_NOEXEC	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_RW)
+# define PAGE_READONLY_NOEXEC	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+# define PAGE_COPY_NOEXEC	__pgprot(__ACCESS_BITS | _PAGE_PL_3 | _PAGE_AR_R)
+#else
+# define PAGE_SHARED_NOEXEC	PAGE_SHARED
+# define PAGE_READONLY_NOEXEC	PAGE_READONLY
+# define PAGE_COPY_NOEXEC	PAGE_COPY
+#endif
+
 #define PAGE_GATE	__pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_X_RX)
 #define PAGE_KERNEL	__pgprot(__DIRTY_BITS  | _PAGE_PL_0 | _PAGE_AR_RWX)
 #define PAGE_KERNELRX	__pgprot(__ACCESS_BITS | _PAGE_PL_0 | _PAGE_AR_RX)
diff --git a/arch/ia64/include/asm/spinlock.h b/arch/ia64/include/asm/spinlock.h
index 54ff557..70c88b7 100644
--- a/arch/ia64/include/asm/spinlock.h
+++ b/arch/ia64/include/asm/spinlock.h
@@ -71,7 +71,7 @@ static __always_inline void __ticket_spin_unlock(arch_spinlock_t *lock)
 	unsigned short	*p = (unsigned short *)&lock->lock + 1, tmp;
 
 	asm volatile ("ld2.bias %0=[%1]" : "=r"(tmp) : "r"(p));
-	ACCESS_ONCE(*p) = (tmp + 2) & ~1;
+	ACCESS_ONCE_RW(*p) = (tmp + 2) & ~1;
 }
 
 static __always_inline void __ticket_spin_unlock_wait(arch_spinlock_t *lock)
diff --git a/arch/ia64/include/asm/uaccess.h b/arch/ia64/include/asm/uaccess.h
index 449c8c0..18965fb 100644
--- a/arch/ia64/include/asm/uaccess.h
+++ b/arch/ia64/include/asm/uaccess.h
@@ -240,12 +240,24 @@ extern unsigned long __must_check __copy_user (void __user *to, const void __use
 static inline unsigned long
 __copy_to_user (void __user *to, const void *from, unsigned long count)
 {
+	if (count > INT_MAX)
+		return count;
+
+	if (!__builtin_constant_p(count))
+		check_object_size(from, count, true);
+
 	return __copy_user(to, (__force void __user *) from, count);
 }
 
 static inline unsigned long
 __copy_from_user (void *to, const void __user *from, unsigned long count)
 {
+	if (count > INT_MAX)
+		return count;
+
+	if (!__builtin_constant_p(count))
+		check_object_size(to, count, false);
+
 	return __copy_user((__force void __user *) to, from, count);
 }
 
@@ -255,10 +267,13 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
 ({										\
 	void __user *__cu_to = (to);						\
 	const void *__cu_from = (from);						\
-	long __cu_len = (n);							\
+	unsigned long __cu_len = (n);						\
 										\
-	if (__access_ok(__cu_to, __cu_len, get_fs()))				\
+	if (__cu_len <= INT_MAX && __access_ok(__cu_to, __cu_len, get_fs())) {	\
+		if (!__builtin_constant_p(n))					\
+			check_object_size(__cu_from, __cu_len, true);		\
 		__cu_len = __copy_user(__cu_to, (__force void __user *) __cu_from, __cu_len);	\
+	}									\
 	__cu_len;								\
 })
 
@@ -266,11 +281,14 @@ __copy_from_user (void *to, const void __user *from, unsigned long count)
 ({										\
 	void *__cu_to = (to);							\
 	const void __user *__cu_from = (from);					\
-	long __cu_len = (n);							\
+	unsigned long __cu_len = (n);						\
 										\
 	__chk_user_ptr(__cu_from);						\
-	if (__access_ok(__cu_from, __cu_len, get_fs()))				\
+	if (__cu_len <= INT_MAX  && __access_ok(__cu_from, __cu_len, get_fs())) {	\
+		if (!__builtin_constant_p(n))					\
+			check_object_size(__cu_to, __cu_len, false);		\
 		__cu_len = __copy_user((__force void __user *) __cu_to, __cu_from, __cu_len);	\
+	}									\
 	__cu_len;								\
 })
 
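The uaccess changes are PAX_USERCOPY plus an INT_MAX clamp that turns the previously signed length into an explicit bound. check_object_size() is the hardened-usercopy hook: for non-constant sizes it verifies the kernel buffer sits entirely within one slab object (or the current stack frame) and rejects copies that would read or write past it. Assumed interface, mirrored from the hunks above:

	void check_object_size(const void *ptr, unsigned long n, bool to_user);

	if (!__builtin_constant_p(count))
		check_object_size(from, count, true);	/* copying out to userland */
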
diff --git a/arch/ia64/kernel/err_inject.c b/arch/ia64/kernel/err_inject.c
index 2d67317..07d8bfa 100644
--- a/arch/ia64/kernel/err_inject.c
+++ b/arch/ia64/kernel/err_inject.c
@@ -256,7 +256,7 @@ static int __cpuinit err_inject_cpu_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata err_inject_cpu_notifier =
+static struct notifier_block err_inject_cpu_notifier =
 {
 	.notifier_call = err_inject_cpu_callback,
 };
diff --git a/arch/ia64/kernel/mca.c b/arch/ia64/kernel/mca.c
index d7396db..b33e873 100644
--- a/arch/ia64/kernel/mca.c
+++ b/arch/ia64/kernel/mca.c
@@ -1922,7 +1922,7 @@ static int __cpuinit mca_cpu_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block mca_cpu_notifier __cpuinitdata = {
+static struct notifier_block mca_cpu_notifier = {
 	.notifier_call = mca_cpu_callback
 };
 
diff --git a/arch/ia64/kernel/module.c b/arch/ia64/kernel/module.c
index 24603be..948052d 100644
--- a/arch/ia64/kernel/module.c
+++ b/arch/ia64/kernel/module.c
@@ -307,8 +307,7 @@ plt_target (struct plt_entry *plt)
 void
 module_free (struct module *mod, void *module_region)
 {
-	if (mod && mod->arch.init_unw_table &&
-	    module_region == mod->module_init) {
+	if (mod && mod->arch.init_unw_table && module_region == mod->module_init_rx) {
 		unw_remove_unwind_table(mod->arch.init_unw_table);
 		mod->arch.init_unw_table = NULL;
 	}
@@ -494,15 +493,39 @@ module_frob_arch_sections (Elf_Ehdr *ehdr, Elf_Shdr *sechdrs, char *secstrings,
 }
 
 static inline int
+in_init_rx (const struct module *mod, uint64_t addr)
+{
+	return addr - (uint64_t) mod->module_init_rx < mod->init_size_rx;
+}
+
+static inline int
+in_init_rw (const struct module *mod, uint64_t addr)
+{
+	return addr - (uint64_t) mod->module_init_rw < mod->init_size_rw;
+}
+
+static inline int
 in_init (const struct module *mod, uint64_t addr)
 {
-	return addr - (uint64_t) mod->module_init < mod->init_size;
+	return in_init_rx(mod, addr) || in_init_rw(mod, addr);
+}
+
+static inline int
+in_core_rx (const struct module *mod, uint64_t addr)
+{
+	return addr - (uint64_t) mod->module_core_rx < mod->core_size_rx;
+}
+
+static inline int
+in_core_rw (const struct module *mod, uint64_t addr)
+{
+	return addr - (uint64_t) mod->module_core_rw < mod->core_size_rw;
 }
 
 static inline int
 in_core (const struct module *mod, uint64_t addr)
 {
-	return addr - (uint64_t) mod->module_core < mod->core_size;
+	return in_core_rx(mod, addr) || in_core_rw(mod, addr);
 }
 
 static inline int
@@ -685,7 +708,14 @@ do_reloc (struct module *mod, uint8_t r_type, Elf64_Sym *sym, uint64_t addend,
 		break;
 
 	      case RV_BDREL:
-		val -= (uint64_t) (in_init(mod, val) ? mod->module_init : mod->module_core);
+		if (in_init_rx(mod, val))
+			val -= (uint64_t) mod->module_init_rx;
+		else if (in_init_rw(mod, val))
+			val -= (uint64_t) mod->module_init_rw;
+		else if (in_core_rx(mod, val))
+			val -= (uint64_t) mod->module_core_rx;
+		else if (in_core_rw(mod, val))
+			val -= (uint64_t) mod->module_core_rw;
 		break;
 
 	      case RV_LTV:
@@ -820,15 +850,15 @@ apply_relocate_add (Elf64_Shdr *sechdrs, const char *strtab, unsigned int symind
 		 * addresses have been selected...
 		 */
 		uint64_t gp;
-		if (mod->core_size > MAX_LTOFF)
+		if (mod->core_size_rx + mod->core_size_rw > MAX_LTOFF)
 			/*
 			 * This takes advantage of fact that SHF_ARCH_SMALL gets allocated
 			 * at the end of the module.
 			 */
-			gp = mod->core_size - MAX_LTOFF / 2;
+			gp = mod->core_size_rx + mod->core_size_rw - MAX_LTOFF / 2;
 		else
-			gp = mod->core_size / 2;
-		gp = (uint64_t) mod->module_core + ((gp + 7) & -8);
+			gp = (mod->core_size_rx + mod->core_size_rw) / 2;
+		gp = (uint64_t) mod->module_core_rx + ((gp + 7) & -8);
 		mod->arch.gp = gp;
 		DEBUGP("%s: placing gp at 0x%lx\n", __func__, gp);
 	}
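module.c is adapted to the PaX module layout: each module's init and core areas are split into an executable read-only half and a non-executable writable half, so every address classification, and the ia64 gp placement, must cover two regions. The struct module fields assumed here (added elsewhere in this patch):

	struct module {
		/* ... */
		void *module_init_rx, *module_init_rw;	/* text+rodata / data */
		void *module_core_rx, *module_core_rw;
		unsigned int init_size_rx, init_size_rw;
		unsigned int core_size_rx, core_size_rw;
	};
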
diff --git a/arch/ia64/kernel/palinfo.c b/arch/ia64/kernel/palinfo.c
index 2b3c2d7..a318d84 100644
--- a/arch/ia64/kernel/palinfo.c
+++ b/arch/ia64/kernel/palinfo.c
@@ -980,7 +980,7 @@ static int __cpuinit palinfo_cpu_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __refdata palinfo_cpu_notifier =
+static struct notifier_block palinfo_cpu_notifier =
 {
 	.notifier_call = palinfo_cpu_callback,
 	.priority = 0,
diff --git a/arch/ia64/kernel/salinfo.c b/arch/ia64/kernel/salinfo.c
index 4bc580a..7767f24 100644
--- a/arch/ia64/kernel/salinfo.c
+++ b/arch/ia64/kernel/salinfo.c
@@ -609,7 +609,7 @@ salinfo_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu
 	return NOTIFY_OK;
 }
 
-static struct notifier_block salinfo_cpu_notifier __cpuinitdata =
+static struct notifier_block salinfo_cpu_notifier =
 {
 	.notifier_call = salinfo_cpu_callback,
 	.priority = 0,
diff --git a/arch/ia64/kernel/sys_ia64.c b/arch/ia64/kernel/sys_ia64.c
index 41e33f8..65180b2 100644
--- a/arch/ia64/kernel/sys_ia64.c
+++ b/arch/ia64/kernel/sys_ia64.c
@@ -28,6 +28,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
 	unsigned long align_mask = 0;
 	struct mm_struct *mm = current->mm;
 	struct vm_unmapped_area_info info;
+	unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
 
 	if (len > RGN_MAP_LIMIT)
 		return -ENOMEM;
@@ -43,6 +44,13 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
 	if (REGION_NUMBER(addr) == RGN_HPAGE)
 		addr = 0;
 #endif
+
+#ifdef CONFIG_PAX_RANDMMAP
+	if (mm->pax_flags & MF_PAX_RANDMMAP)
+		addr = mm->free_area_cache;
+	else
+#endif
+
 	if (!addr)
 		addr = TASK_UNMAPPED_BASE;
 
@@ -61,6 +69,7 @@ arch_get_unmapped_area (struct file *filp, unsigned long addr, unsigned long len
 	info.high_limit = TASK_SIZE;
 	info.align_mask = align_mask;
 	info.align_offset = 0;
+	info.threadstack_offset = offset;
 	return vm_unmapped_area(&info);
 }
 
diff --git a/arch/ia64/kernel/topology.c b/arch/ia64/kernel/topology.c
index dc00b2c..cce53c2 100644
--- a/arch/ia64/kernel/topology.c
+++ b/arch/ia64/kernel/topology.c
@@ -445,7 +445,7 @@ static int __cpuinit cache_cpu_callback(struct notifier_block *nfb,
 	return NOTIFY_OK;
 }
 
-static struct notifier_block __cpuinitdata cache_cpu_notifier =
+static struct notifier_block cache_cpu_notifier =
 {
 	.notifier_call = cache_cpu_callback
 };
diff --git a/arch/ia64/kernel/vmlinux.lds.S b/arch/ia64/kernel/vmlinux.lds.S
index 0ccb28f..8992469 100644
--- a/arch/ia64/kernel/vmlinux.lds.S
+++ b/arch/ia64/kernel/vmlinux.lds.S
@@ -198,7 +198,7 @@ SECTIONS {
 	/* Per-cpu data: */
 	. = ALIGN(PERCPU_PAGE_SIZE);
 	PERCPU_VADDR(SMP_CACHE_BYTES, PERCPU_ADDR, :percpu)
-	__phys_per_cpu_start = __per_cpu_load;
+	__phys_per_cpu_start = per_cpu_load;
 	/*
 	 * ensure percpu data fits
 	 * into percpu page size
diff --git a/arch/ia64/mm/fault.c b/arch/ia64/mm/fault.c
index 6cf0341..d352594 100644
--- a/arch/ia64/mm/fault.c
+++ b/arch/ia64/mm/fault.c
@@ -72,6 +72,23 @@ mapped_kernel_page_is_present (unsigned long address)
 	return pte_present(pte);
 }
 
+#ifdef CONFIG_PAX_PAGEEXEC
+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
+{
+	unsigned long i;
+
+	printk(KERN_ERR "PAX: bytes at PC: ");
+	for (i = 0; i < 8; i++) {
+		unsigned int c;
+		if (get_user(c, (unsigned int *)pc+i))
+			printk(KERN_CONT "???????? ");
+		else
+			printk(KERN_CONT "%08x ", c);
+	}
+	printk("\n");
+}
+#endif
+
 #	define VM_READ_BIT	0
 #	define VM_WRITE_BIT	1
 #	define VM_EXEC_BIT	2
@@ -149,8 +166,21 @@ retry:
 	if (((isr >> IA64_ISR_R_BIT) & 1UL) && (!(vma->vm_flags & (VM_READ | VM_WRITE))))
 		goto bad_area;
 
-	if ((vma->vm_flags & mask) != mask)
+	if ((vma->vm_flags & mask) != mask) {
+
+#ifdef CONFIG_PAX_PAGEEXEC
+		if (!(vma->vm_flags & VM_EXEC) && (mask & VM_EXEC)) {
+			if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || address != regs->cr_iip)
+				goto bad_area;
+
+			up_read(&mm->mmap_sem);
+			pax_report_fault(regs, (void *)regs->cr_iip, (void *)regs->r12);
+			do_group_exit(SIGKILL);
+		}
+#endif
+
 		goto bad_area;
+	}
 
 	/*
 	 * If for any reason at all we couldn't handle the fault, make
diff --git a/arch/ia64/mm/hugetlbpage.c b/arch/ia64/mm/hugetlbpage.c
index 76069c1..c2aa816 100644
--- a/arch/ia64/mm/hugetlbpage.c
+++ b/arch/ia64/mm/hugetlbpage.c
@@ -149,6 +149,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
 		unsigned long pgoff, unsigned long flags)
 {
 	struct vm_unmapped_area_info info;
+	unsigned long offset = gr_rand_threadstack_offset(current->mm, file, flags);
 
 	if (len > RGN_MAP_LIMIT)
 		return -ENOMEM;
@@ -172,6 +173,7 @@ unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, u
 	info.high_limit = HPAGE_REGION_BASE + RGN_MAP_LIMIT;
 	info.align_mask = PAGE_MASK & (HPAGE_SIZE - 1);
 	info.align_offset = 0;
+	info.threadstack_offset = offset;
 	return vm_unmapped_area(&info);
 }
 
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index d1fe4b4..2628f37 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -120,6 +120,19 @@ ia64_init_addr_space (void)
 		vma->vm_start = current->thread.rbs_bot & PAGE_MASK;
 		vma->vm_end = vma->vm_start + PAGE_SIZE;
 		vma->vm_flags = VM_DATA_DEFAULT_FLAGS|VM_GROWSUP|VM_ACCOUNT;
+
+#ifdef CONFIG_PAX_PAGEEXEC
+		if (current->mm->pax_flags & MF_PAX_PAGEEXEC) {
+			vma->vm_flags &= ~VM_EXEC;
+
+#ifdef CONFIG_PAX_MPROTECT
+			if (current->mm->pax_flags & MF_PAX_MPROTECT)
+				vma->vm_flags &= ~VM_MAYEXEC;
+#endif
+
+		}
+#endif
+
 		vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
 		down_write(&current->mm->mmap_sem);
 		if (insert_vm_struct(current->mm, vma)) {
5149diff --git a/arch/m32r/include/asm/cache.h b/arch/m32r/include/asm/cache.h
5150index 40b3ee9..8c2c112 100644
5151--- a/arch/m32r/include/asm/cache.h
5152+++ b/arch/m32r/include/asm/cache.h
5153@@ -1,8 +1,10 @@
5154 #ifndef _ASM_M32R_CACHE_H
5155 #define _ASM_M32R_CACHE_H
5156
5157+#include <linux/const.h>
5158+
5159 /* L1 cache line size */
5160 #define L1_CACHE_SHIFT 4
5161-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5162+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5163
5164 #endif /* _ASM_M32R_CACHE_H */
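The recurring L1_CACHE_BYTES rewrite in this patch swaps the plain integer literal for _AC(1,UL), making the constant an unsigned long in C while staying usable from assembly, where a UL suffix would be a syntax error. The macro, from include/uapi/linux/const.h:

    #ifdef __ASSEMBLY__
    #define _AC(X,Y)   X           /* assembler: drop the suffix */
    #else
    #define __AC(X,Y)  (X##Y)      /* paste it: _AC(1,UL) -> (1UL) */
    #define _AC(X,Y)   __AC(X,Y)
    #endif

An unsigned L1_CACHE_BYTES keeps alignment arithmetic such as (len + L1_CACHE_BYTES - 1) & ~(L1_CACHE_BYTES - 1) out of signed-int territory.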
5165diff --git a/arch/m32r/lib/usercopy.c b/arch/m32r/lib/usercopy.c
5166index 82abd15..d95ae5d 100644
5167--- a/arch/m32r/lib/usercopy.c
5168+++ b/arch/m32r/lib/usercopy.c
5169@@ -14,6 +14,9 @@
5170 unsigned long
5171 __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5172 {
5173+ if ((long)n < 0)
5174+ return n;
5175+
5176 prefetch(from);
5177 if (access_ok(VERIFY_WRITE, to, n))
5178 __copy_user(to,from,n);
5179@@ -23,6 +26,9 @@ __generic_copy_to_user(void __user *to, const void *from, unsigned long n)
5180 unsigned long
5181 __generic_copy_from_user(void *to, const void __user *from, unsigned long n)
5182 {
5183+ if ((long)n < 0)
5184+ return n;
5185+
5186 prefetchw(to);
5187 if (access_ok(VERIFY_READ, from, n))
5188 __copy_user_zeroing(to,from,n);
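The (long)n < 0 guards added above reject lengths with the top bit set before access_ok() is even consulted: a signed length that went negative upstream becomes an enormous unsigned long, and the usual "return bytes-not-copied" convention would then report a huge n rather than stop a wild copy. A standalone illustration of the conversion:

    #include <stdio.h>

    int main(void)
    {
        int nbytes = -1;           /* e.g. an unchecked error code */
        unsigned long n = nbytes;  /* implicit conversion at the call site */

        /* On 64-bit this prints n = 18446744073709551615, caught = 1 */
        printf("n = %lu, caught = %d\n", n, (long)n < 0);
        return 0;
    }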
5189diff --git a/arch/m68k/include/asm/cache.h b/arch/m68k/include/asm/cache.h
5190index 0395c51..5f26031 100644
5191--- a/arch/m68k/include/asm/cache.h
5192+++ b/arch/m68k/include/asm/cache.h
5193@@ -4,9 +4,11 @@
5194 #ifndef __ARCH_M68K_CACHE_H
5195 #define __ARCH_M68K_CACHE_H
5196
5197+#include <linux/const.h>
5198+
5199 /* bytes per L1 cache line */
5200 #define L1_CACHE_SHIFT 4
5201-#define L1_CACHE_BYTES (1<< L1_CACHE_SHIFT)
5202+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5203
5204 #define ARCH_DMA_MINALIGN L1_CACHE_BYTES
5205
5206diff --git a/arch/metag/mm/hugetlbpage.c b/arch/metag/mm/hugetlbpage.c
5207index 3c52fa6..11b2ad8 100644
5208--- a/arch/metag/mm/hugetlbpage.c
5209+++ b/arch/metag/mm/hugetlbpage.c
5210@@ -200,6 +200,7 @@ hugetlb_get_unmapped_area_new_pmd(unsigned long len)
5211 info.high_limit = TASK_SIZE;
5212 info.align_mask = PAGE_MASK & HUGEPT_MASK;
5213 info.align_offset = 0;
5214+ info.threadstack_offset = 0;
5215 return vm_unmapped_area(&info);
5216 }
5217
5218diff --git a/arch/microblaze/include/asm/cache.h b/arch/microblaze/include/asm/cache.h
5219index 4efe96a..60e8699 100644
5220--- a/arch/microblaze/include/asm/cache.h
5221+++ b/arch/microblaze/include/asm/cache.h
5222@@ -13,11 +13,12 @@
5223 #ifndef _ASM_MICROBLAZE_CACHE_H
5224 #define _ASM_MICROBLAZE_CACHE_H
5225
5226+#include <linux/const.h>
5227 #include <asm/registers.h>
5228
5229 #define L1_CACHE_SHIFT 5
5230 /* word-granular cache in microblaze */
5231-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5232+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5233
5234 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5235
5236diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
5237index 08b6079..eb272cf 100644
5238--- a/arch/mips/include/asm/atomic.h
5239+++ b/arch/mips/include/asm/atomic.h
5240@@ -21,6 +21,10 @@
5241 #include <asm/cmpxchg.h>
5242 #include <asm/war.h>
5243
5244+#ifdef CONFIG_GENERIC_ATOMIC64
5245+#include <asm-generic/atomic64.h>
5246+#endif
5247+
5248 #define ATOMIC_INIT(i) { (i) }
5249
5250 /*
5251@@ -759,6 +763,16 @@ static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
5252 */
5253 #define atomic64_add_negative(i, v) (atomic64_add_return(i, (v)) < 0)
5254
5255+#define atomic64_read_unchecked(v) atomic64_read(v)
5256+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5257+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5258+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5259+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5260+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5261+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5262+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5263+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5264+
5265 #endif /* CONFIG_64BIT */
5266
5267 /*
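Under PaX's REFCOUNT hardening, the ordinary atomic operations trap on overflow, and callers that deliberately rely on wraparound (statistics counters, sequence cookies) must use the *_unchecked variants; MIPS carries no such instrumentation, so the block above simply aliases each _unchecked operation to its plain counterpart. A sketch of the intended call-site split, assuming the patch-wide atomic64_unchecked_t typedef:

    static atomic64_t refs = ATOMIC64_INIT(1);               /* overflow = bug */
    static atomic64_unchecked_t rx_bytes = ATOMIC64_INIT(0); /* wrap is fine   */

    static void sketch_account(long len)
    {
        atomic64_inc(&refs);                    /* checked where supported */
        atomic64_add_unchecked(len, &rx_bytes); /* never instrumented      */
    }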
5268diff --git a/arch/mips/include/asm/cache.h b/arch/mips/include/asm/cache.h
5269index b4db69f..8f3b093 100644
5270--- a/arch/mips/include/asm/cache.h
5271+++ b/arch/mips/include/asm/cache.h
5272@@ -9,10 +9,11 @@
5273 #ifndef _ASM_CACHE_H
5274 #define _ASM_CACHE_H
5275
5276+#include <linux/const.h>
5277 #include <kmalloc.h>
5278
5279 #define L1_CACHE_SHIFT CONFIG_MIPS_L1_CACHE_SHIFT
5280-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
5281+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5282
5283 #define SMP_CACHE_SHIFT L1_CACHE_SHIFT
5284 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5285diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
5286index cf3ae24..238d22f 100644
5287--- a/arch/mips/include/asm/elf.h
5288+++ b/arch/mips/include/asm/elf.h
5289@@ -372,13 +372,16 @@ extern const char *__elf_platform;
5290 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
5291 #endif
5292
5293+#ifdef CONFIG_PAX_ASLR
5294+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
5295+
5296+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5297+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5298+#endif
5299+
5300 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
5301 struct linux_binprm;
5302 extern int arch_setup_additional_pages(struct linux_binprm *bprm,
5303 int uses_interp);
5304
5305-struct mm_struct;
5306-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
5307-#define arch_randomize_brk arch_randomize_brk
5308-
5309 #endif /* _ASM_ELF_H */
5310diff --git a/arch/mips/include/asm/exec.h b/arch/mips/include/asm/exec.h
5311index c1f6afa..38cc6e9 100644
5312--- a/arch/mips/include/asm/exec.h
5313+++ b/arch/mips/include/asm/exec.h
5314@@ -12,6 +12,6 @@
5315 #ifndef _ASM_EXEC_H
5316 #define _ASM_EXEC_H
5317
5318-extern unsigned long arch_align_stack(unsigned long sp);
5319+#define arch_align_stack(x) ((x) & ~0xfUL)
5320
5321 #endif /* _ASM_EXEC_H */
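arch_align_stack() loses its randomizing implementation (deleted from arch/mips/kernel/process.c below) and becomes a pure 16-byte align-down; under this patch, stack placement entropy is meant to come from PaX's own RANDMMAP/RANDUSTACK machinery instead. What the mask does, concretely:

    #include <assert.h>

    int main(void)
    {
        unsigned long sp = 0x7fff1234UL;

        assert((sp & ~0xfUL) == 0x7fff1230UL);  /* clear the low 4 bits */
        return 0;
    }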
5322diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
5323index f59552f..3abe9b9 100644
5324--- a/arch/mips/include/asm/page.h
5325+++ b/arch/mips/include/asm/page.h
5326@@ -95,7 +95,7 @@ extern void copy_user_highpage(struct page *to, struct page *from,
5327 #ifdef CONFIG_CPU_MIPS32
5328 typedef struct { unsigned long pte_low, pte_high; } pte_t;
5329 #define pte_val(x) ((x).pte_low | ((unsigned long long)(x).pte_high << 32))
5330- #define __pte(x) ({ pte_t __pte = {(x), ((unsigned long long)(x)) >> 32}; __pte; })
5331+ #define __pte(x) ({ pte_t __pte = {(x), (x) >> 32}; __pte; })
5332 #else
5333 typedef struct { unsigned long long pte; } pte_t;
5334 #define pte_val(x) ((x).pte)
5335diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
5336index 881d18b..cea38bc 100644
5337--- a/arch/mips/include/asm/pgalloc.h
5338+++ b/arch/mips/include/asm/pgalloc.h
5339@@ -37,6 +37,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
5340 {
5341 set_pud(pud, __pud((unsigned long)pmd));
5342 }
5343+
5344+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
5345+{
5346+ pud_populate(mm, pud, pmd);
5347+}
5348 #endif
5349
5350 /*
5351diff --git a/arch/mips/include/asm/thread_info.h b/arch/mips/include/asm/thread_info.h
5352index 895320e..bf63e10 100644
5353--- a/arch/mips/include/asm/thread_info.h
5354+++ b/arch/mips/include/asm/thread_info.h
5355@@ -115,6 +115,8 @@ static inline struct thread_info *current_thread_info(void)
5356 #define TIF_32BIT_ADDR 23 /* 32-bit address space (o32/n32) */
5357 #define TIF_FPUBOUND 24 /* thread bound to FPU-full CPU set */
5358 #define TIF_LOAD_WATCH 25 /* If set, load watch registers */
5359+/* li takes a 32bit immediate */
5360+#define TIF_GRSEC_SETXID 29 /* update credentials on syscall entry/exit */
5361 #define TIF_SYSCALL_TRACE 31 /* syscall trace active */
5362
5363 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
5364@@ -130,15 +132,18 @@ static inline struct thread_info *current_thread_info(void)
5365 #define _TIF_32BIT_ADDR (1<<TIF_32BIT_ADDR)
5366 #define _TIF_FPUBOUND (1<<TIF_FPUBOUND)
5367 #define _TIF_LOAD_WATCH (1<<TIF_LOAD_WATCH)
5368+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
5369+
5370+#define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
5371
5372 /* work to do in syscall_trace_leave() */
5373-#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT)
5374+#define _TIF_WORK_SYSCALL_EXIT (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_GRSEC_SETXID)
5375
5376 /* work to do on interrupt/exception return */
5377 #define _TIF_WORK_MASK \
5378 (_TIF_SIGPENDING | _TIF_NEED_RESCHED | _TIF_NOTIFY_RESUME)
5379 /* work to do on any return to u-space */
5380-#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT)
5381+#define _TIF_ALLWORK_MASK (_TIF_WORK_MASK | _TIF_WORK_SYSCALL_EXIT | _TIF_GRSEC_SETXID)
5382
5383 #endif /* __KERNEL__ */
5384
5385diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
5386index 1188e00..41cf144 100644
5387--- a/arch/mips/kernel/binfmt_elfn32.c
5388+++ b/arch/mips/kernel/binfmt_elfn32.c
5389@@ -50,6 +50,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
5390 #undef ELF_ET_DYN_BASE
5391 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
5392
5393+#ifdef CONFIG_PAX_ASLR
5394+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
5395+
5396+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5397+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5398+#endif
5399+
5400 #include <asm/processor.h>
5401 #include <linux/module.h>
5402 #include <linux/elfcore.h>
5403diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
5404index 202e581..689ca79 100644
5405--- a/arch/mips/kernel/binfmt_elfo32.c
5406+++ b/arch/mips/kernel/binfmt_elfo32.c
5407@@ -56,6 +56,13 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
5408 #undef ELF_ET_DYN_BASE
5409 #define ELF_ET_DYN_BASE (TASK32_SIZE / 3 * 2)
5410
5411+#ifdef CONFIG_PAX_ASLR
5412+#define PAX_ELF_ET_DYN_BASE (TASK_IS_32BIT_ADDR ? 0x00400000UL : 0x00400000UL)
5413+
5414+#define PAX_DELTA_MMAP_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5415+#define PAX_DELTA_STACK_LEN (TASK_IS_32BIT_ADDR ? 27-PAGE_SHIFT : 36-PAGE_SHIFT)
5416+#endif
5417+
5418 #include <asm/processor.h>
5419
5420 /*
5421diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
5422index c6a041d..b3e7318 100644
5423--- a/arch/mips/kernel/process.c
5424+++ b/arch/mips/kernel/process.c
5425@@ -563,15 +563,3 @@ unsigned long get_wchan(struct task_struct *task)
5426 out:
5427 return pc;
5428 }
5429-
5430-/*
5431- * Don't forget that the stack pointer must be aligned on a 8 bytes
5432- * boundary for 32-bits ABI and 16 bytes for 64-bits ABI.
5433- */
5434-unsigned long arch_align_stack(unsigned long sp)
5435-{
5436- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
5437- sp -= get_random_int() & ~PAGE_MASK;
5438-
5439- return sp & ALMASK;
5440-}
5441diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
5442index 9c6299c..2fb4c22 100644
5443--- a/arch/mips/kernel/ptrace.c
5444+++ b/arch/mips/kernel/ptrace.c
5445@@ -528,6 +528,10 @@ static inline int audit_arch(void)
5446 return arch;
5447 }
5448
5449+#ifdef CONFIG_GRKERNSEC_SETXID
5450+extern void gr_delayed_cred_worker(void);
5451+#endif
5452+
5453 /*
5454 * Notification of system call entry/exit
5455 * - triggered by current->work.syscall_trace
5456@@ -537,6 +541,11 @@ asmlinkage void syscall_trace_enter(struct pt_regs *regs)
5457 /* do the secure computing check first */
5458 secure_computing_strict(regs->regs[2]);
5459
5460+#ifdef CONFIG_GRKERNSEC_SETXID
5461+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
5462+ gr_delayed_cred_worker();
5463+#endif
5464+
5465 if (!(current->ptrace & PT_PTRACED))
5466 goto out;
5467
5468diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
5469index 9b36424..e7f4154 100644
5470--- a/arch/mips/kernel/scall32-o32.S
5471+++ b/arch/mips/kernel/scall32-o32.S
5472@@ -52,7 +52,7 @@ NESTED(handle_sys, PT_SIZE, sp)
5473
5474 stack_done:
5475 lw t0, TI_FLAGS($28) # syscall tracing enabled?
5476- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5477+ li t1, _TIF_SYSCALL_WORK
5478 and t0, t1
5479 bnez t0, syscall_trace_entry # -> yes
5480
5481diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
5482index 97a5909..59622f8 100644
5483--- a/arch/mips/kernel/scall64-64.S
5484+++ b/arch/mips/kernel/scall64-64.S
5485@@ -54,7 +54,7 @@ NESTED(handle_sys64, PT_SIZE, sp)
5486
5487 sd a3, PT_R26(sp) # save a3 for syscall restarting
5488
5489- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5490+ li t1, _TIF_SYSCALL_WORK
5491 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
5492 and t0, t1, t0
5493 bnez t0, syscall_trace_entry
5494diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
5495index edcb659..fb2ab09 100644
5496--- a/arch/mips/kernel/scall64-n32.S
5497+++ b/arch/mips/kernel/scall64-n32.S
5498@@ -47,7 +47,7 @@ NESTED(handle_sysn32, PT_SIZE, sp)
5499
5500 sd a3, PT_R26(sp) # save a3 for syscall restarting
5501
5502- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5503+ li t1, _TIF_SYSCALL_WORK
5504 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
5505 and t0, t1, t0
5506 bnez t0, n32_syscall_trace_entry
5507diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
5508index 74f485d..47d2c38 100644
5509--- a/arch/mips/kernel/scall64-o32.S
5510+++ b/arch/mips/kernel/scall64-o32.S
5511@@ -81,7 +81,7 @@ NESTED(handle_sys, PT_SIZE, sp)
5512 PTR 4b, bad_stack
5513 .previous
5514
5515- li t1, _TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT
5516+ li t1, _TIF_SYSCALL_WORK
5517 LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
5518 and t0, t1, t0
5519 bnez t0, trace_a_syscall
5520diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
5521index 0fead53..a2c0fb5 100644
5522--- a/arch/mips/mm/fault.c
5523+++ b/arch/mips/mm/fault.c
5524@@ -27,6 +27,23 @@
5525 #include <asm/highmem.h> /* For VMALLOC_END */
5526 #include <linux/kdebug.h>
5527
5528+#ifdef CONFIG_PAX_PAGEEXEC
5529+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
5530+{
5531+ unsigned long i;
5532+
5533+ printk(KERN_ERR "PAX: bytes at PC: ");
5534+ for (i = 0; i < 5; i++) {
5535+ unsigned int c;
5536+ if (get_user(c, (unsigned int *)pc+i))
5537+ printk(KERN_CONT "???????? ");
5538+ else
5539+ printk(KERN_CONT "%08x ", c);
5540+ }
5541+ printk("\n");
5542+}
5543+#endif
5544+
5545 /*
5546 * This routine handles page faults. It determines the address,
5547 * and the problem, and then passes it off to one of the appropriate
5548diff --git a/arch/mips/mm/mmap.c b/arch/mips/mm/mmap.c
5549index 7e5fe27..9656513 100644
5550--- a/arch/mips/mm/mmap.c
5551+++ b/arch/mips/mm/mmap.c
5552@@ -59,6 +59,7 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
5553 struct vm_area_struct *vma;
5554 unsigned long addr = addr0;
5555 int do_color_align;
5556+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
5557 struct vm_unmapped_area_info info;
5558
5559 if (unlikely(len > TASK_SIZE))
5560@@ -84,6 +85,11 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
5561 do_color_align = 1;
5562
5563 /* requesting a specific address */
5564+
5565+#ifdef CONFIG_PAX_RANDMMAP
5566+ if (!(current->mm->pax_flags & MF_PAX_RANDMMAP))
5567+#endif
5568+
5569 if (addr) {
5570 if (do_color_align)
5571 addr = COLOUR_ALIGN(addr, pgoff);
5572@@ -91,14 +97,14 @@ static unsigned long arch_get_unmapped_area_common(struct file *filp,
5573 addr = PAGE_ALIGN(addr);
5574
5575 vma = find_vma(mm, addr);
5576- if (TASK_SIZE - len >= addr &&
5577- (!vma || addr + len <= vma->vm_start))
5578+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
5579 return addr;
5580 }
5581
5582 info.length = len;
5583 info.align_mask = do_color_align ? (PAGE_MASK & shm_align_mask) : 0;
5584 info.align_offset = pgoff << PAGE_SHIFT;
5585+ info.threadstack_offset = offset;
5586
5587 if (dir == DOWN) {
5588 info.flags = VM_UNMAPPED_AREA_TOPDOWN;
5589@@ -146,6 +152,10 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5590 {
5591 unsigned long random_factor = 0UL;
5592
5593+#ifdef CONFIG_PAX_RANDMMAP
5594+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
5595+#endif
5596+
5597 if (current->flags & PF_RANDOMIZE) {
5598 random_factor = get_random_int();
5599 random_factor = random_factor << PAGE_SHIFT;
5600@@ -157,42 +167,27 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
5601
5602 if (mmap_is_legacy()) {
5603 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
5604+
5605+#ifdef CONFIG_PAX_RANDMMAP
5606+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5607+ mm->mmap_base += mm->delta_mmap;
5608+#endif
5609+
5610 mm->get_unmapped_area = arch_get_unmapped_area;
5611 mm->unmap_area = arch_unmap_area;
5612 } else {
5613 mm->mmap_base = mmap_base(random_factor);
5614+
5615+#ifdef CONFIG_PAX_RANDMMAP
5616+ if (mm->pax_flags & MF_PAX_RANDMMAP)
5617+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
5618+#endif
5619+
5620 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
5621 mm->unmap_area = arch_unmap_area_topdown;
5622 }
5623 }
5624
5625-static inline unsigned long brk_rnd(void)
5626-{
5627- unsigned long rnd = get_random_int();
5628-
5629- rnd = rnd << PAGE_SHIFT;
5630- /* 8MB for 32bit, 256MB for 64bit */
5631- if (TASK_IS_32BIT_ADDR)
5632- rnd = rnd & 0x7ffffful;
5633- else
5634- rnd = rnd & 0xffffffful;
5635-
5636- return rnd;
5637-}
5638-
5639-unsigned long arch_randomize_brk(struct mm_struct *mm)
5640-{
5641- unsigned long base = mm->brk;
5642- unsigned long ret;
5643-
5644- ret = PAGE_ALIGN(base + brk_rnd());
5645-
5646- if (ret < mm->brk)
5647- return mm->brk;
5648-
5649- return ret;
5650-}
5651-
5652 int __virt_addr_valid(const volatile void *kaddr)
5653 {
5654 return pfn_valid(PFN_DOWN(virt_to_phys(kaddr)));
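The RANDMMAP branches above shift mmap_base by mm->delta_mmap (plus delta_stack for the top-down layout). Those deltas are populated once per exec from each architecture's PAX_DELTA_MMAP_LEN/PAX_DELTA_STACK_LEN (defined in the elf.h hunks of this patch). A sketch, assuming the standard PaX setup in fs/binfmt_elf.c:

    /* Sketch of the exec-time seeding: page-granular entropy of
     * PAX_DELTA_*_LEN bits, so MIPS' 27-PAGE_SHIFT yields 27 bits
     * of address randomization in total. */
    static void sketch_randmmap_seed(struct mm_struct *mm)
    {
        mm->delta_mmap  = (pax_get_random_long() &
                           ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
        mm->delta_stack = (pax_get_random_long() &
                           ((1UL << PAX_DELTA_STACK_LEN) - 1)) << PAGE_SHIFT;
    }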
5655diff --git a/arch/mn10300/proc-mn103e010/include/proc/cache.h b/arch/mn10300/proc-mn103e010/include/proc/cache.h
5656index 967d144..db12197 100644
5657--- a/arch/mn10300/proc-mn103e010/include/proc/cache.h
5658+++ b/arch/mn10300/proc-mn103e010/include/proc/cache.h
5659@@ -11,12 +11,14 @@
5660 #ifndef _ASM_PROC_CACHE_H
5661 #define _ASM_PROC_CACHE_H
5662
5663+#include <linux/const.h>
5664+
5665 /* L1 cache */
5666
5667 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
5668 #define L1_CACHE_NENTRIES 256 /* number of entries in each way */
5669-#define L1_CACHE_BYTES 16 /* bytes per entry */
5670 #define L1_CACHE_SHIFT 4 /* shift for bytes per entry */
5671+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
5672 #define L1_CACHE_WAYDISP 0x1000 /* displacement of one way from the next */
5673
5674 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
5675diff --git a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
5676index bcb5df2..84fabd2 100644
5677--- a/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
5678+++ b/arch/mn10300/proc-mn2ws0050/include/proc/cache.h
5679@@ -16,13 +16,15 @@
5680 #ifndef _ASM_PROC_CACHE_H
5681 #define _ASM_PROC_CACHE_H
5682
5683+#include <linux/const.h>
5684+
5685 /*
5686 * L1 cache
5687 */
5688 #define L1_CACHE_NWAYS 4 /* number of ways in caches */
5689 #define L1_CACHE_NENTRIES 128 /* number of entries in each way */
5690-#define L1_CACHE_BYTES 32 /* bytes per entry */
5691 #define L1_CACHE_SHIFT 5 /* shift for bytes per entry */
5692+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT) /* bytes per entry */
5693 #define L1_CACHE_WAYDISP 0x1000 /* distance from one way to the next */
5694
5695 #define L1_CACHE_TAG_VALID 0x00000001 /* cache tag valid bit */
5696diff --git a/arch/openrisc/include/asm/cache.h b/arch/openrisc/include/asm/cache.h
5697index 4ce7a01..449202a 100644
5698--- a/arch/openrisc/include/asm/cache.h
5699+++ b/arch/openrisc/include/asm/cache.h
5700@@ -19,11 +19,13 @@
5701 #ifndef __ASM_OPENRISC_CACHE_H
5702 #define __ASM_OPENRISC_CACHE_H
5703
5704+#include <linux/const.h>
5705+
5706 /* FIXME: How can we replace these with values from the CPU...
5707 * they shouldn't be hard-coded!
5708 */
5709
5710-#define L1_CACHE_BYTES 16
5711 #define L1_CACHE_SHIFT 4
5712+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5713
5714 #endif /* __ASM_OPENRISC_CACHE_H */
5715diff --git a/arch/parisc/include/asm/atomic.h b/arch/parisc/include/asm/atomic.h
5716index 472886c..00e7df9 100644
5717--- a/arch/parisc/include/asm/atomic.h
5718+++ b/arch/parisc/include/asm/atomic.h
5719@@ -252,6 +252,16 @@ static inline long atomic64_dec_if_positive(atomic64_t *v)
5720 return dec;
5721 }
5722
5723+#define atomic64_read_unchecked(v) atomic64_read(v)
5724+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
5725+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
5726+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
5727+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
5728+#define atomic64_inc_unchecked(v) atomic64_inc(v)
5729+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
5730+#define atomic64_dec_unchecked(v) atomic64_dec(v)
5731+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
5732+
5733 #endif /* !CONFIG_64BIT */
5734
5735
5736diff --git a/arch/parisc/include/asm/cache.h b/arch/parisc/include/asm/cache.h
5737index 47f11c7..3420df2 100644
5738--- a/arch/parisc/include/asm/cache.h
5739+++ b/arch/parisc/include/asm/cache.h
5740@@ -5,6 +5,7 @@
5741 #ifndef __ARCH_PARISC_CACHE_H
5742 #define __ARCH_PARISC_CACHE_H
5743
5744+#include <linux/const.h>
5745
5746 /*
5747 * PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
5748@@ -15,13 +16,13 @@
5749 * just ruin performance.
5750 */
5751 #ifdef CONFIG_PA20
5752-#define L1_CACHE_BYTES 64
5753 #define L1_CACHE_SHIFT 6
5754 #else
5755-#define L1_CACHE_BYTES 32
5756 #define L1_CACHE_SHIFT 5
5757 #endif
5758
5759+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
5760+
5761 #ifndef __ASSEMBLY__
5762
5763 #define SMP_CACHE_BYTES L1_CACHE_BYTES
5764diff --git a/arch/parisc/include/asm/elf.h b/arch/parisc/include/asm/elf.h
5765index ad2b503..bdf1651 100644
5766--- a/arch/parisc/include/asm/elf.h
5767+++ b/arch/parisc/include/asm/elf.h
5768@@ -342,6 +342,13 @@ struct pt_regs; /* forward declaration... */
5769
5770 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE + 0x01000000)
5771
5772+#ifdef CONFIG_PAX_ASLR
5773+#define PAX_ELF_ET_DYN_BASE 0x10000UL
5774+
5775+#define PAX_DELTA_MMAP_LEN 16
5776+#define PAX_DELTA_STACK_LEN 16
5777+#endif
5778+
5779 /* This yields a mask that user programs can use to figure out what
5780 instruction set this CPU supports. This could be done in user space,
5781 but it's not easy, and we've already done it here. */
5782diff --git a/arch/parisc/include/asm/pgalloc.h b/arch/parisc/include/asm/pgalloc.h
5783index fc987a1..6e068ef 100644
5784--- a/arch/parisc/include/asm/pgalloc.h
5785+++ b/arch/parisc/include/asm/pgalloc.h
5786@@ -61,6 +61,11 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
5787 (__u32)(__pa((unsigned long)pmd) >> PxD_VALUE_SHIFT));
5788 }
5789
5790+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
5791+{
5792+ pgd_populate(mm, pgd, pmd);
5793+}
5794+
5795 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
5796 {
5797 pmd_t *pmd = (pmd_t *)__get_free_pages(GFP_KERNEL|__GFP_REPEAT,
5798@@ -93,6 +98,7 @@ static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
5799 #define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
5800 #define pmd_free(mm, x) do { } while (0)
5801 #define pgd_populate(mm, pmd, pte) BUG()
5802+#define pgd_populate_kernel(mm, pmd, pte) BUG()
5803
5804 #endif
5805
5806diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
5807index 1e40d7f..a3eb445 100644
5808--- a/arch/parisc/include/asm/pgtable.h
5809+++ b/arch/parisc/include/asm/pgtable.h
5810@@ -223,6 +223,17 @@ extern void purge_tlb_entries(struct mm_struct *, unsigned long);
5811 #define PAGE_EXECREAD __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_EXEC |_PAGE_ACCESSED)
5812 #define PAGE_COPY PAGE_EXECREAD
5813 #define PAGE_RWX __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_EXEC |_PAGE_ACCESSED)
5814+
5815+#ifdef CONFIG_PAX_PAGEEXEC
5816+# define PAGE_SHARED_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_WRITE | _PAGE_ACCESSED)
5817+# define PAGE_COPY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
5818+# define PAGE_READONLY_NOEXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_READ | _PAGE_ACCESSED)
5819+#else
5820+# define PAGE_SHARED_NOEXEC PAGE_SHARED
5821+# define PAGE_COPY_NOEXEC PAGE_COPY
5822+# define PAGE_READONLY_NOEXEC PAGE_READONLY
5823+#endif
5824+
5825 #define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
5826 #define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL_EXEC)
5827 #define PAGE_KERNEL_RWX __pgprot(_PAGE_KERNEL_RWX)
5828diff --git a/arch/parisc/include/asm/uaccess.h b/arch/parisc/include/asm/uaccess.h
5829index e0a8235..ce2f1e1 100644
5830--- a/arch/parisc/include/asm/uaccess.h
5831+++ b/arch/parisc/include/asm/uaccess.h
5832@@ -245,10 +245,10 @@ static inline unsigned long __must_check copy_from_user(void *to,
5833 const void __user *from,
5834 unsigned long n)
5835 {
5836- int sz = __compiletime_object_size(to);
5837+ size_t sz = __compiletime_object_size(to);
5838 int ret = -EFAULT;
5839
5840- if (likely(sz == -1 || !__builtin_constant_p(n) || sz >= n))
5841+ if (likely(sz == (size_t)-1 || !__builtin_constant_p(n) || sz >= n))
5842 ret = __copy_from_user(to, from, n);
5843 else
5844 copy_from_user_overflow();
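__compiletime_object_size() is a wrapper around __builtin_object_size(ptr, 0), which yields (size_t)-1 when the compiler cannot see the object's bounds; the hunk above stops squeezing that size_t into an int, where the sentinel only survives by truncation and objects past INT_MAX are misrepresented. The builtin's two behaviours, standalone (compile with -O2):

    #include <stdio.h>

    static char buf[64];
    static void * volatile hidden = buf;  /* volatile load defeats tracking */

    int main(void)
    {
        printf("%zu\n", __builtin_object_size(buf, 0));     /* 64          */
        printf("%zu\n", __builtin_object_size(hidden, 0));  /* (size_t)-1  */
        return 0;
    }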
5845diff --git a/arch/parisc/kernel/module.c b/arch/parisc/kernel/module.c
5846index 2a625fb..9908930 100644
5847--- a/arch/parisc/kernel/module.c
5848+++ b/arch/parisc/kernel/module.c
5849@@ -98,16 +98,38 @@
5850
5851 /* three functions to determine where in the module core
5852 * or init pieces the location is */
5853+static inline int in_init_rx(struct module *me, void *loc)
5854+{
5855+ return (loc >= me->module_init_rx &&
5856+ loc < (me->module_init_rx + me->init_size_rx));
5857+}
5858+
5859+static inline int in_init_rw(struct module *me, void *loc)
5860+{
5861+ return (loc >= me->module_init_rw &&
5862+ loc < (me->module_init_rw + me->init_size_rw));
5863+}
5864+
5865 static inline int in_init(struct module *me, void *loc)
5866 {
5867- return (loc >= me->module_init &&
5868- loc <= (me->module_init + me->init_size));
5869+ return in_init_rx(me, loc) || in_init_rw(me, loc);
5870+}
5871+
5872+static inline int in_core_rx(struct module *me, void *loc)
5873+{
5874+ return (loc >= me->module_core_rx &&
5875+ loc < (me->module_core_rx + me->core_size_rx));
5876+}
5877+
5878+static inline int in_core_rw(struct module *me, void *loc)
5879+{
5880+ return (loc >= me->module_core_rw &&
5881+ loc < (me->module_core_rw + me->core_size_rw));
5882 }
5883
5884 static inline int in_core(struct module *me, void *loc)
5885 {
5886- return (loc >= me->module_core &&
5887- loc <= (me->module_core + me->core_size));
5888+ return in_core_rx(me, loc) || in_core_rw(me, loc);
5889 }
5890
5891 static inline int in_local(struct module *me, void *loc)
5892@@ -371,13 +393,13 @@ int module_frob_arch_sections(CONST Elf_Ehdr *hdr,
5893 }
5894
5895 /* align things a bit */
5896- me->core_size = ALIGN(me->core_size, 16);
5897- me->arch.got_offset = me->core_size;
5898- me->core_size += gots * sizeof(struct got_entry);
5899+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
5900+ me->arch.got_offset = me->core_size_rw;
5901+ me->core_size_rw += gots * sizeof(struct got_entry);
5902
5903- me->core_size = ALIGN(me->core_size, 16);
5904- me->arch.fdesc_offset = me->core_size;
5905- me->core_size += fdescs * sizeof(Elf_Fdesc);
5906+ me->core_size_rw = ALIGN(me->core_size_rw, 16);
5907+ me->arch.fdesc_offset = me->core_size_rw;
5908+ me->core_size_rw += fdescs * sizeof(Elf_Fdesc);
5909
5910 me->arch.got_max = gots;
5911 me->arch.fdesc_max = fdescs;
5912@@ -395,7 +417,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
5913
5914 BUG_ON(value == 0);
5915
5916- got = me->module_core + me->arch.got_offset;
5917+ got = me->module_core_rw + me->arch.got_offset;
5918 for (i = 0; got[i].addr; i++)
5919 if (got[i].addr == value)
5920 goto out;
5921@@ -413,7 +435,7 @@ static Elf64_Word get_got(struct module *me, unsigned long value, long addend)
5922 #ifdef CONFIG_64BIT
5923 static Elf_Addr get_fdesc(struct module *me, unsigned long value)
5924 {
5925- Elf_Fdesc *fdesc = me->module_core + me->arch.fdesc_offset;
5926+ Elf_Fdesc *fdesc = me->module_core_rw + me->arch.fdesc_offset;
5927
5928 if (!value) {
5929 printk(KERN_ERR "%s: zero OPD requested!\n", me->name);
5930@@ -431,7 +453,7 @@ static Elf_Addr get_fdesc(struct module *me, unsigned long value)
5931
5932 /* Create new one */
5933 fdesc->addr = value;
5934- fdesc->gp = (Elf_Addr)me->module_core + me->arch.got_offset;
5935+ fdesc->gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
5936 return (Elf_Addr)fdesc;
5937 }
5938 #endif /* CONFIG_64BIT */
5939@@ -843,7 +865,7 @@ register_unwind_table(struct module *me,
5940
5941 table = (unsigned char *)sechdrs[me->arch.unwind_section].sh_addr;
5942 end = table + sechdrs[me->arch.unwind_section].sh_size;
5943- gp = (Elf_Addr)me->module_core + me->arch.got_offset;
5944+ gp = (Elf_Addr)me->module_core_rw + me->arch.got_offset;
5945
5946 DEBUGP("register_unwind_table(), sect = %d at 0x%p - 0x%p (gp=0x%lx)\n",
5947 me->arch.unwind_section, table, end, gp);
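Besides tracking the RX/RW split, the new in_init_*/in_core_* helpers quietly fix an off-by-one: the old tests used <=, accepting the one-past-the-end address of a region that is really the half-open interval [start, start + size). The corrected membership test, in isolation:

    #include <stdbool.h>
    #include <stdint.h>

    /* [base, base + size): the end address itself is outside the region */
    static bool in_region(const void *loc, const void *base, unsigned long size)
    {
        return (uintptr_t)loc >= (uintptr_t)base &&
               (uintptr_t)loc <  (uintptr_t)base + size;
    }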
5948diff --git a/arch/parisc/kernel/sys_parisc.c b/arch/parisc/kernel/sys_parisc.c
5949index 5dfd248..64914ac 100644
5950--- a/arch/parisc/kernel/sys_parisc.c
5951+++ b/arch/parisc/kernel/sys_parisc.c
5952@@ -33,9 +33,11 @@
5953 #include <linux/utsname.h>
5954 #include <linux/personality.h>
5955
5956-static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
5957+static unsigned long get_unshared_area(struct file *filp, unsigned long addr, unsigned long len,
5958+ unsigned long flags)
5959 {
5960 struct vm_unmapped_area_info info;
5961+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
5962
5963 info.flags = 0;
5964 info.length = len;
5965@@ -43,6 +45,7 @@ static unsigned long get_unshared_area(unsigned long addr, unsigned long len)
5966 info.high_limit = TASK_SIZE;
5967 info.align_mask = 0;
5968 info.align_offset = 0;
5969+ info.threadstack_offset = offset;
5970 return vm_unmapped_area(&info);
5971 }
5972
5973@@ -61,10 +64,11 @@ static int get_offset(struct address_space *mapping)
5974 return (unsigned long) mapping >> 8;
5975 }
5976
5977-static unsigned long get_shared_area(struct address_space *mapping,
5978- unsigned long addr, unsigned long len, unsigned long pgoff)
5979+static unsigned long get_shared_area(struct file *filp, struct address_space *mapping,
5980+ unsigned long addr, unsigned long len, unsigned long pgoff, unsigned long flags)
5981 {
5982 struct vm_unmapped_area_info info;
5983+ unsigned long offset = gr_rand_threadstack_offset(current->mm, filp, flags);
5984
5985 info.flags = 0;
5986 info.length = len;
5987@@ -72,6 +76,7 @@ static unsigned long get_shared_area(struct address_space *mapping,
5988 info.high_limit = TASK_SIZE;
5989 info.align_mask = PAGE_MASK & (SHMLBA - 1);
5990 info.align_offset = (get_offset(mapping) + pgoff) << PAGE_SHIFT;
5991+ info.threadstack_offset = offset;
5992 return vm_unmapped_area(&info);
5993 }
5994
5995@@ -86,15 +91,22 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
5996 return -EINVAL;
5997 return addr;
5998 }
5999- if (!addr)
6000+ if (!addr) {
6001 addr = TASK_UNMAPPED_BASE;
6002
6003+#ifdef CONFIG_PAX_RANDMMAP
6004+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
6005+ addr += current->mm->delta_mmap;
6006+#endif
6007+
6008+ }
6009+
6010 if (filp) {
6011- addr = get_shared_area(filp->f_mapping, addr, len, pgoff);
6012+ addr = get_shared_area(filp, filp->f_mapping, addr, len, pgoff, flags);
6013 } else if(flags & MAP_SHARED) {
6014- addr = get_shared_area(NULL, addr, len, pgoff);
6015+ addr = get_shared_area(filp, NULL, addr, len, pgoff, flags);
6016 } else {
6017- addr = get_unshared_area(addr, len);
6018+ addr = get_unshared_area(filp, addr, len, flags);
6019 }
6020 return addr;
6021 }
6022diff --git a/arch/parisc/kernel/traps.c b/arch/parisc/kernel/traps.c
6023index 04e47c6..7a8faf6 100644
6024--- a/arch/parisc/kernel/traps.c
6025+++ b/arch/parisc/kernel/traps.c
6026@@ -727,9 +727,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
6027
6028 down_read(&current->mm->mmap_sem);
6029 vma = find_vma(current->mm,regs->iaoq[0]);
6030- if (vma && (regs->iaoq[0] >= vma->vm_start)
6031- && (vma->vm_flags & VM_EXEC)) {
6032-
6033+ if (vma && (regs->iaoq[0] >= vma->vm_start)) {
6034 fault_address = regs->iaoq[0];
6035 fault_space = regs->iasq[0];
6036
6037diff --git a/arch/parisc/mm/fault.c b/arch/parisc/mm/fault.c
6038index f247a34..dc0f219 100644
6039--- a/arch/parisc/mm/fault.c
6040+++ b/arch/parisc/mm/fault.c
6041@@ -15,6 +15,7 @@
6042 #include <linux/sched.h>
6043 #include <linux/interrupt.h>
6044 #include <linux/module.h>
6045+#include <linux/unistd.h>
6046
6047 #include <asm/uaccess.h>
6048 #include <asm/traps.h>
6049@@ -52,7 +53,7 @@ DEFINE_PER_CPU(struct exception_data, exception_data);
6050 static unsigned long
6051 parisc_acctyp(unsigned long code, unsigned int inst)
6052 {
6053- if (code == 6 || code == 16)
6054+ if (code == 6 || code == 7 || code == 16)
6055 return VM_EXEC;
6056
6057 switch (inst & 0xf0000000) {
6058@@ -138,6 +139,116 @@ parisc_acctyp(unsigned long code, unsigned int inst)
6059 }
6060 #endif
6061
6062+#ifdef CONFIG_PAX_PAGEEXEC
6063+/*
6064+ * PaX: decide what to do with offenders (instruction_pointer(regs) = fault address)
6065+ *
6066+ * returns 1 when task should be killed
6067+ * 2 when rt_sigreturn trampoline was detected
6068+ * 3 when unpatched PLT trampoline was detected
6069+ */
6070+static int pax_handle_fetch_fault(struct pt_regs *regs)
6071+{
6072+
6073+#ifdef CONFIG_PAX_EMUPLT
6074+ int err;
6075+
6076+ do { /* PaX: unpatched PLT emulation */
6077+ unsigned int bl, depwi;
6078+
6079+ err = get_user(bl, (unsigned int *)instruction_pointer(regs));
6080+ err |= get_user(depwi, (unsigned int *)(instruction_pointer(regs)+4));
6081+
6082+ if (err)
6083+ break;
6084+
6085+ if (bl == 0xEA9F1FDDU && depwi == 0xD6801C1EU) {
6086+ unsigned int ldw, bv, ldw2, addr = instruction_pointer(regs)-12;
6087+
6088+ err = get_user(ldw, (unsigned int *)addr);
6089+ err |= get_user(bv, (unsigned int *)(addr+4));
6090+ err |= get_user(ldw2, (unsigned int *)(addr+8));
6091+
6092+ if (err)
6093+ break;
6094+
6095+ if (ldw == 0x0E801096U &&
6096+ bv == 0xEAC0C000U &&
6097+ ldw2 == 0x0E881095U)
6098+ {
6099+ unsigned int resolver, map;
6100+
6101+ err = get_user(resolver, (unsigned int *)(instruction_pointer(regs)+8));
6102+ err |= get_user(map, (unsigned int *)(instruction_pointer(regs)+12));
6103+ if (err)
6104+ break;
6105+
6106+ regs->gr[20] = instruction_pointer(regs)+8;
6107+ regs->gr[21] = map;
6108+ regs->gr[22] = resolver;
6109+ regs->iaoq[0] = resolver | 3UL;
6110+ regs->iaoq[1] = regs->iaoq[0] + 4;
6111+ return 3;
6112+ }
6113+ }
6114+ } while (0);
6115+#endif
6116+
6117+#ifdef CONFIG_PAX_EMUTRAMP
6118+
6119+#ifndef CONFIG_PAX_EMUSIGRT
6120+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
6121+ return 1;
6122+#endif
6123+
6124+ do { /* PaX: rt_sigreturn emulation */
6125+ unsigned int ldi1, ldi2, bel, nop;
6126+
6127+ err = get_user(ldi1, (unsigned int *)instruction_pointer(regs));
6128+ err |= get_user(ldi2, (unsigned int *)(instruction_pointer(regs)+4));
6129+ err |= get_user(bel, (unsigned int *)(instruction_pointer(regs)+8));
6130+ err |= get_user(nop, (unsigned int *)(instruction_pointer(regs)+12));
6131+
6132+ if (err)
6133+ break;
6134+
6135+ if ((ldi1 == 0x34190000U || ldi1 == 0x34190002U) &&
6136+ ldi2 == 0x3414015AU &&
6137+ bel == 0xE4008200U &&
6138+ nop == 0x08000240U)
6139+ {
6140+ regs->gr[25] = (ldi1 & 2) >> 1;
6141+ regs->gr[20] = __NR_rt_sigreturn;
6142+ regs->gr[31] = regs->iaoq[1] + 16;
6143+ regs->sr[0] = regs->iasq[1];
6144+ regs->iaoq[0] = 0x100UL;
6145+ regs->iaoq[1] = regs->iaoq[0] + 4;
6146+ regs->iasq[0] = regs->sr[2];
6147+ regs->iasq[1] = regs->sr[2];
6148+ return 2;
6149+ }
6150+ } while (0);
6151+#endif
6152+
6153+ return 1;
6154+}
6155+
6156+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
6157+{
6158+ unsigned long i;
6159+
6160+ printk(KERN_ERR "PAX: bytes at PC: ");
6161+ for (i = 0; i < 5; i++) {
6162+ unsigned int c;
6163+ if (get_user(c, (unsigned int *)pc+i))
6164+ printk(KERN_CONT "???????? ");
6165+ else
6166+ printk(KERN_CONT "%08x ", c);
6167+ }
6168+ printk("\n");
6169+}
6170+#endif
6171+
6172 int fixup_exception(struct pt_regs *regs)
6173 {
6174 const struct exception_table_entry *fix;
6175@@ -194,8 +305,33 @@ good_area:
6176
6177 acc_type = parisc_acctyp(code,regs->iir);
6178
6179- if ((vma->vm_flags & acc_type) != acc_type)
6180+ if ((vma->vm_flags & acc_type) != acc_type) {
6181+
6182+#ifdef CONFIG_PAX_PAGEEXEC
6183+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && (acc_type & VM_EXEC) &&
6184+ (address & ~3UL) == instruction_pointer(regs))
6185+ {
6186+ up_read(&mm->mmap_sem);
6187+ switch (pax_handle_fetch_fault(regs)) {
6188+
6189+#ifdef CONFIG_PAX_EMUPLT
6190+ case 3:
6191+ return;
6192+#endif
6193+
6194+#ifdef CONFIG_PAX_EMUTRAMP
6195+ case 2:
6196+ return;
6197+#endif
6198+
6199+ }
6200+ pax_report_fault(regs, (void *)instruction_pointer(regs), (void *)regs->gr[30]);
6201+ do_group_exit(SIGKILL);
6202+ }
6203+#endif
6204+
6205 goto bad_area;
6206+ }
6207
6208 /*
6209 * If for any reason at all we couldn't handle the fault, make
6210diff --git a/arch/powerpc/include/asm/atomic.h b/arch/powerpc/include/asm/atomic.h
6211index e3b1d41..8e81edf 100644
6212--- a/arch/powerpc/include/asm/atomic.h
6213+++ b/arch/powerpc/include/asm/atomic.h
6214@@ -523,6 +523,16 @@ static __inline__ long atomic64_inc_not_zero(atomic64_t *v)
6215 return t1;
6216 }
6217
6218+#define atomic64_read_unchecked(v) atomic64_read(v)
6219+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
6220+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
6221+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
6222+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
6223+#define atomic64_inc_unchecked(v) atomic64_inc(v)
6224+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
6225+#define atomic64_dec_unchecked(v) atomic64_dec(v)
6226+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
6227+
6228 #endif /* __powerpc64__ */
6229
6230 #endif /* __KERNEL__ */
6231diff --git a/arch/powerpc/include/asm/cache.h b/arch/powerpc/include/asm/cache.h
6232index 9e495c9..b6878e5 100644
6233--- a/arch/powerpc/include/asm/cache.h
6234+++ b/arch/powerpc/include/asm/cache.h
6235@@ -3,6 +3,7 @@
6236
6237 #ifdef __KERNEL__
6238
6239+#include <linux/const.h>
6240
6241 /* bytes per L1 cache line */
6242 #if defined(CONFIG_8xx) || defined(CONFIG_403GCX)
6243@@ -22,7 +23,7 @@
6244 #define L1_CACHE_SHIFT 7
6245 #endif
6246
6247-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
6248+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
6249
6250 #define SMP_CACHE_BYTES L1_CACHE_BYTES
6251
6252diff --git a/arch/powerpc/include/asm/elf.h b/arch/powerpc/include/asm/elf.h
6253index cc0655a..13eac2e 100644
6254--- a/arch/powerpc/include/asm/elf.h
6255+++ b/arch/powerpc/include/asm/elf.h
6256@@ -28,8 +28,19 @@
6257 the loader. We need to make sure that it is out of the way of the program
6258 that it will "exec", and that there is sufficient room for the brk. */
6259
6260-extern unsigned long randomize_et_dyn(unsigned long base);
6261-#define ELF_ET_DYN_BASE (randomize_et_dyn(0x20000000))
6262+#define ELF_ET_DYN_BASE (0x20000000)
6263+
6264+#ifdef CONFIG_PAX_ASLR
6265+#define PAX_ELF_ET_DYN_BASE (0x10000000UL)
6266+
6267+#ifdef __powerpc64__
6268+#define PAX_DELTA_MMAP_LEN (is_32bit_task() ? 16 : 28)
6269+#define PAX_DELTA_STACK_LEN (is_32bit_task() ? 16 : 28)
6270+#else
6271+#define PAX_DELTA_MMAP_LEN 15
6272+#define PAX_DELTA_STACK_LEN 15
6273+#endif
6274+#endif
6275
6276 /*
6277 * Our registers are always unsigned longs, whether we're a 32 bit
6278@@ -123,10 +134,6 @@ extern int arch_setup_additional_pages(struct linux_binprm *bprm,
6279 (0x7ff >> (PAGE_SHIFT - 12)) : \
6280 (0x3ffff >> (PAGE_SHIFT - 12)))
6281
6282-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
6283-#define arch_randomize_brk arch_randomize_brk
6284-
6285-
6286 #ifdef CONFIG_SPU_BASE
6287 /* Notes used in ET_CORE. Note name is "SPU/<fd>/<filename>". */
6288 #define NT_SPU 1
6289diff --git a/arch/powerpc/include/asm/exec.h b/arch/powerpc/include/asm/exec.h
6290index 8196e9c..d83a9f3 100644
6291--- a/arch/powerpc/include/asm/exec.h
6292+++ b/arch/powerpc/include/asm/exec.h
6293@@ -4,6 +4,6 @@
6294 #ifndef _ASM_POWERPC_EXEC_H
6295 #define _ASM_POWERPC_EXEC_H
6296
6297-extern unsigned long arch_align_stack(unsigned long sp);
6298+#define arch_align_stack(x) ((x) & ~0xfUL)
6299
6300 #endif /* _ASM_POWERPC_EXEC_H */
6301diff --git a/arch/powerpc/include/asm/kmap_types.h b/arch/powerpc/include/asm/kmap_types.h
6302index 5acabbd..7ea14fa 100644
6303--- a/arch/powerpc/include/asm/kmap_types.h
6304+++ b/arch/powerpc/include/asm/kmap_types.h
6305@@ -10,7 +10,7 @@
6306 * 2 of the License, or (at your option) any later version.
6307 */
6308
6309-#define KM_TYPE_NR 16
6310+#define KM_TYPE_NR 17
6311
6312 #endif /* __KERNEL__ */
6313 #endif /* _ASM_POWERPC_KMAP_TYPES_H */
6314diff --git a/arch/powerpc/include/asm/mman.h b/arch/powerpc/include/asm/mman.h
6315index 8565c25..2865190 100644
6316--- a/arch/powerpc/include/asm/mman.h
6317+++ b/arch/powerpc/include/asm/mman.h
6318@@ -24,7 +24,7 @@ static inline unsigned long arch_calc_vm_prot_bits(unsigned long prot)
6319 }
6320 #define arch_calc_vm_prot_bits(prot) arch_calc_vm_prot_bits(prot)
6321
6322-static inline pgprot_t arch_vm_get_page_prot(unsigned long vm_flags)
6323+static inline pgprot_t arch_vm_get_page_prot(vm_flags_t vm_flags)
6324 {
6325 return (vm_flags & VM_SAO) ? __pgprot(_PAGE_SAO) : __pgprot(0);
6326 }
6327diff --git a/arch/powerpc/include/asm/page.h b/arch/powerpc/include/asm/page.h
6328index 988c812..63c7d70 100644
6329--- a/arch/powerpc/include/asm/page.h
6330+++ b/arch/powerpc/include/asm/page.h
6331@@ -220,8 +220,9 @@ extern long long virt_phys_offset;
6332 * and needs to be executable. This means the whole heap ends
6333 * up being executable.
6334 */
6335-#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
6336- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6337+#define VM_DATA_DEFAULT_FLAGS32 \
6338+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
6339+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6340
6341 #define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
6342 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6343@@ -249,6 +250,9 @@ extern long long virt_phys_offset;
6344 #define is_kernel_addr(x) ((x) >= PAGE_OFFSET)
6345 #endif
6346
6347+#define ktla_ktva(addr) (addr)
6348+#define ktva_ktla(addr) (addr)
6349+
6350 #ifndef CONFIG_PPC_BOOK3S_64
6351 /*
6352 * Use the top bit of the higher-level page table entries to indicate whether
6353diff --git a/arch/powerpc/include/asm/page_64.h b/arch/powerpc/include/asm/page_64.h
6354index 88693ce..ac6f9ab 100644
6355--- a/arch/powerpc/include/asm/page_64.h
6356+++ b/arch/powerpc/include/asm/page_64.h
6357@@ -153,15 +153,18 @@ do { \
6358 * stack by default, so in the absence of a PT_GNU_STACK program header
6359 * we turn execute permission off.
6360 */
6361-#define VM_STACK_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
6362- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6363+#define VM_STACK_DEFAULT_FLAGS32 \
6364+ (((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
6365+ VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6366
6367 #define VM_STACK_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
6368 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
6369
6370+#ifndef CONFIG_PAX_PAGEEXEC
6371 #define VM_STACK_DEFAULT_FLAGS \
6372 (is_32bit_task() ? \
6373 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
6374+#endif
6375
6376 #include <asm-generic/getorder.h>
6377
6378diff --git a/arch/powerpc/include/asm/pgalloc-64.h b/arch/powerpc/include/asm/pgalloc-64.h
6379index b66ae72..4a378cd 100644
6380--- a/arch/powerpc/include/asm/pgalloc-64.h
6381+++ b/arch/powerpc/include/asm/pgalloc-64.h
6382@@ -53,6 +53,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
6383 #ifndef CONFIG_PPC_64K_PAGES
6384
6385 #define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
6386+#define pgd_populate_kernel(MM, PGD, PUD) pgd_populate((MM), (PGD), (PUD))
6387
6388 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
6389 {
6390@@ -70,6 +71,11 @@ static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6391 pud_set(pud, (unsigned long)pmd);
6392 }
6393
6394+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
6395+{
6396+ pud_populate(mm, pud, pmd);
6397+}
6398+
6399 #define pmd_populate(mm, pmd, pte_page) \
6400 pmd_populate_kernel(mm, pmd, page_address(pte_page))
6401 #define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
6402@@ -171,6 +177,7 @@ extern void __tlb_remove_table(void *_table);
6403 #endif
6404
6405 #define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
6406+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
6407
6408 static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
6409 pte_t *pte)
6410diff --git a/arch/powerpc/include/asm/pgtable.h b/arch/powerpc/include/asm/pgtable.h
6411index 7aeb955..19f748e 100644
6412--- a/arch/powerpc/include/asm/pgtable.h
6413+++ b/arch/powerpc/include/asm/pgtable.h
6414@@ -2,6 +2,7 @@
6415 #define _ASM_POWERPC_PGTABLE_H
6416 #ifdef __KERNEL__
6417
6418+#include <linux/const.h>
6419 #ifndef __ASSEMBLY__
6420 #include <asm/processor.h> /* For TASK_SIZE */
6421 #include <asm/mmu.h>
6422diff --git a/arch/powerpc/include/asm/pte-hash32.h b/arch/powerpc/include/asm/pte-hash32.h
6423index 4aad413..85d86bf 100644
6424--- a/arch/powerpc/include/asm/pte-hash32.h
6425+++ b/arch/powerpc/include/asm/pte-hash32.h
6426@@ -21,6 +21,7 @@
6427 #define _PAGE_FILE 0x004 /* when !present: nonlinear file mapping */
6428 #define _PAGE_USER 0x004 /* usermode access allowed */
6429 #define _PAGE_GUARDED 0x008 /* G: prohibit speculative access */
6430+#define _PAGE_EXEC _PAGE_GUARDED
6431 #define _PAGE_COHERENT 0x010 /* M: enforce memory coherence (SMP systems) */
6432 #define _PAGE_NO_CACHE 0x020 /* I: cache inhibit */
6433 #define _PAGE_WRITETHRU 0x040 /* W: cache write-through */
6434diff --git a/arch/powerpc/include/asm/reg.h b/arch/powerpc/include/asm/reg.h
6435index 362142b..8b22c1b 100644
6436--- a/arch/powerpc/include/asm/reg.h
6437+++ b/arch/powerpc/include/asm/reg.h
6438@@ -234,6 +234,7 @@
6439 #define SPRN_DBCR 0x136 /* e300 Data Breakpoint Control Reg */
6440 #define SPRN_DSISR 0x012 /* Data Storage Interrupt Status Register */
6441 #define DSISR_NOHPTE 0x40000000 /* no translation found */
6442+#define DSISR_GUARDED 0x10000000 /* fetch from guarded storage */
6443 #define DSISR_PROTFAULT 0x08000000 /* protection fault */
6444 #define DSISR_ISSTORE 0x02000000 /* access was a store */
6445 #define DSISR_DABRMATCH 0x00400000 /* hit data breakpoint */
6446diff --git a/arch/powerpc/include/asm/smp.h b/arch/powerpc/include/asm/smp.h
6447index ffbaabe..eabe843 100644
6448--- a/arch/powerpc/include/asm/smp.h
6449+++ b/arch/powerpc/include/asm/smp.h
6450@@ -50,7 +50,7 @@ struct smp_ops_t {
6451 int (*cpu_disable)(void);
6452 void (*cpu_die)(unsigned int nr);
6453 int (*cpu_bootable)(unsigned int nr);
6454-};
6455+} __no_const;
6456
6457 extern void smp_send_debugger_break(void);
6458 extern void start_secondary_resume(void);
6459diff --git a/arch/powerpc/include/asm/thread_info.h b/arch/powerpc/include/asm/thread_info.h
6460index ba7b197..d292e26 100644
6461--- a/arch/powerpc/include/asm/thread_info.h
6462+++ b/arch/powerpc/include/asm/thread_info.h
6463@@ -93,7 +93,6 @@ static inline struct thread_info *current_thread_info(void)
6464 #define TIF_POLLING_NRFLAG 3 /* true if poll_idle() is polling
6465 TIF_NEED_RESCHED */
6466 #define TIF_32BIT 4 /* 32 bit binary */
6467-#define TIF_PERFMON_WORK 5 /* work for pfm_handle_work() */
6468 #define TIF_PERFMON_CTXSW 6 /* perfmon needs ctxsw calls */
6469 #define TIF_SYSCALL_AUDIT 7 /* syscall auditing active */
6470 #define TIF_SINGLESTEP 8 /* singlestepping active */
6471@@ -107,6 +106,9 @@ static inline struct thread_info *current_thread_info(void)
6472 #define TIF_EMULATE_STACK_STORE 16 /* Is an instruction emulation
6473 for stack store? */
6474 #define TIF_MEMDIE 17 /* is terminating due to OOM killer */
6475+#define TIF_PERFMON_WORK 18 /* work for pfm_handle_work() */
6476+/* mask must be expressible within 16 bits to satisfy 'andi' instruction reqs */
6477+#define TIF_GRSEC_SETXID 5 /* update credentials on syscall entry/exit */
6478
6479 /* as above, but as bit values */
6480 #define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
6481@@ -126,9 +128,10 @@ static inline struct thread_info *current_thread_info(void)
6482 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
6483 #define _TIF_EMULATE_STACK_STORE (1<<TIF_EMULATE_STACK_STORE)
6484 #define _TIF_NOHZ (1<<TIF_NOHZ)
6485+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
6486 #define _TIF_SYSCALL_T_OR_A (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
6487 _TIF_SECCOMP | _TIF_SYSCALL_TRACEPOINT | \
6488- _TIF_NOHZ)
6489+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
6490
6491 #define _TIF_USER_WORK_MASK (_TIF_SIGPENDING | _TIF_NEED_RESCHED | \
6492 _TIF_NOTIFY_RESUME | _TIF_UPROBE)
6493diff --git a/arch/powerpc/include/asm/uaccess.h b/arch/powerpc/include/asm/uaccess.h
6494index 4db4959..aba5c41 100644
6495--- a/arch/powerpc/include/asm/uaccess.h
6496+++ b/arch/powerpc/include/asm/uaccess.h
6497@@ -318,52 +318,6 @@ do { \
6498 extern unsigned long __copy_tofrom_user(void __user *to,
6499 const void __user *from, unsigned long size);
6500
6501-#ifndef __powerpc64__
6502-
6503-static inline unsigned long copy_from_user(void *to,
6504- const void __user *from, unsigned long n)
6505-{
6506- unsigned long over;
6507-
6508- if (access_ok(VERIFY_READ, from, n))
6509- return __copy_tofrom_user((__force void __user *)to, from, n);
6510- if ((unsigned long)from < TASK_SIZE) {
6511- over = (unsigned long)from + n - TASK_SIZE;
6512- return __copy_tofrom_user((__force void __user *)to, from,
6513- n - over) + over;
6514- }
6515- return n;
6516-}
6517-
6518-static inline unsigned long copy_to_user(void __user *to,
6519- const void *from, unsigned long n)
6520-{
6521- unsigned long over;
6522-
6523- if (access_ok(VERIFY_WRITE, to, n))
6524- return __copy_tofrom_user(to, (__force void __user *)from, n);
6525- if ((unsigned long)to < TASK_SIZE) {
6526- over = (unsigned long)to + n - TASK_SIZE;
6527- return __copy_tofrom_user(to, (__force void __user *)from,
6528- n - over) + over;
6529- }
6530- return n;
6531-}
6532-
6533-#else /* __powerpc64__ */
6534-
6535-#define __copy_in_user(to, from, size) \
6536- __copy_tofrom_user((to), (from), (size))
6537-
6538-extern unsigned long copy_from_user(void *to, const void __user *from,
6539- unsigned long n);
6540-extern unsigned long copy_to_user(void __user *to, const void *from,
6541- unsigned long n);
6542-extern unsigned long copy_in_user(void __user *to, const void __user *from,
6543- unsigned long n);
6544-
6545-#endif /* __powerpc64__ */
6546-
6547 static inline unsigned long __copy_from_user_inatomic(void *to,
6548 const void __user *from, unsigned long n)
6549 {
6550@@ -387,6 +341,10 @@ static inline unsigned long __copy_from_user_inatomic(void *to,
6551 if (ret == 0)
6552 return 0;
6553 }
6554+
6555+ if (!__builtin_constant_p(n))
6556+ check_object_size(to, n, false);
6557+
6558 return __copy_tofrom_user((__force void __user *)to, from, n);
6559 }
6560
6561@@ -413,6 +371,10 @@ static inline unsigned long __copy_to_user_inatomic(void __user *to,
6562 if (ret == 0)
6563 return 0;
6564 }
6565+
6566+ if (!__builtin_constant_p(n))
6567+ check_object_size(from, n, true);
6568+
6569 return __copy_tofrom_user(to, (__force const void __user *)from, n);
6570 }
6571
6572@@ -430,6 +392,92 @@ static inline unsigned long __copy_to_user(void __user *to,
6573 return __copy_to_user_inatomic(to, from, size);
6574 }
6575
6576+#ifndef __powerpc64__
6577+
6578+static inline unsigned long __must_check copy_from_user(void *to,
6579+ const void __user *from, unsigned long n)
6580+{
6581+ unsigned long over;
6582+
6583+ if ((long)n < 0)
6584+ return n;
6585+
6586+ if (access_ok(VERIFY_READ, from, n)) {
6587+ if (!__builtin_constant_p(n))
6588+ check_object_size(to, n, false);
6589+ return __copy_tofrom_user((__force void __user *)to, from, n);
6590+ }
6591+ if ((unsigned long)from < TASK_SIZE) {
6592+ over = (unsigned long)from + n - TASK_SIZE;
6593+ if (!__builtin_constant_p(n - over))
6594+ check_object_size(to, n - over, false);
6595+ return __copy_tofrom_user((__force void __user *)to, from,
6596+ n - over) + over;
6597+ }
6598+ return n;
6599+}
6600+
6601+static inline unsigned long __must_check copy_to_user(void __user *to,
6602+ const void *from, unsigned long n)
6603+{
6604+ unsigned long over;
6605+
6606+ if ((long)n < 0)
6607+ return n;
6608+
6609+ if (access_ok(VERIFY_WRITE, to, n)) {
6610+ if (!__builtin_constant_p(n))
6611+ check_object_size(from, n, true);
6612+ return __copy_tofrom_user(to, (__force void __user *)from, n);
6613+ }
6614+ if ((unsigned long)to < TASK_SIZE) {
6615+ over = (unsigned long)to + n - TASK_SIZE;
6616+ if (!__builtin_constant_p(n))
6617+ check_object_size(from, n - over, true);
6618+ return __copy_tofrom_user(to, (__force void __user *)from,
6619+ n - over) + over;
6620+ }
6621+ return n;
6622+}
6623+
6624+#else /* __powerpc64__ */
6625+
6626+#define __copy_in_user(to, from, size) \
6627+ __copy_tofrom_user((to), (from), (size))
6628+
6629+static inline unsigned long __must_check copy_from_user(void *to, const void __user *from, unsigned long n)
6630+{
6631+ if ((long)n < 0 || n > INT_MAX)
6632+ return n;
6633+
6634+ if (!__builtin_constant_p(n))
6635+ check_object_size(to, n, false);
6636+
6637+ if (likely(access_ok(VERIFY_READ, from, n)))
6638+ n = __copy_from_user(to, from, n);
6639+ else
6640+ memset(to, 0, n);
6641+ return n;
6642+}
6643+
6644+static inline unsigned long __must_check copy_to_user(void __user *to, const void *from, unsigned long n)
6645+{
6646+ if ((long)n < 0 || n > INT_MAX)
6647+ return n;
6648+
6649+ if (likely(access_ok(VERIFY_WRITE, to, n))) {
6650+ if (!__builtin_constant_p(n))
6651+ check_object_size(from, n, true);
6652+ n = __copy_to_user(to, from, n);
6653+ }
6654+ return n;
6655+}
6656+
6657+extern unsigned long copy_in_user(void __user *to, const void __user *from,
6658+ unsigned long n);
6659+
6660+#endif /* __powerpc64__ */
6661+
6662 extern unsigned long __clear_user(void __user *addr, unsigned long size);
6663
6664 static inline unsigned long clear_user(void __user *addr, unsigned long size)
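The powerpc copy_from_user()/copy_to_user() definitions are moved below the __copy_*_inatomic helpers here and gain two hardening checks: a sign test on the length, so a negative-when-signed size is rejected before it can wrap the access_ok() arithmetic, and a check_object_size() call for sizes that are not compile-time constants, which lets the hardened-usercopy logic verify that the kernel buffer really spans the requested range. A minimal userspace sketch of that control flow follows; access_ok_stub() and check_object_size_stub() are stand-ins for the kernel helpers, not their real signatures.

#include <stdio.h>
#include <string.h>
#include <limits.h>

static int access_ok_stub(const void *p, unsigned long n)
{
	return p != NULL;	/* the kernel checks the user address range */
}

static void check_object_size_stub(const void *p, unsigned long n, int to_user)
{
	/* In the kernel this verifies [p, p+n) stays inside a single
	 * slab object or stack frame and reports a violation otherwise. */
}

static unsigned long copy_from_user_model(void *to, const void *from,
					   unsigned long n)
{
	if ((long)n < 0 || n > INT_MAX)		/* huge/negative size: refuse */
		return n;
	if (!__builtin_constant_p(n))		/* runtime size: bounds-check */
		check_object_size_stub(to, n, 0);
	if (access_ok_stub(from, n)) {
		memcpy(to, from, n);		/* models __copy_tofrom_user() */
		return 0;			/* 0 bytes left uncopied */
	}
	memset(to, 0, n);			/* never leak stale bytes */
	return n;
}

int main(void)
{
	char src[8] = "abcdefg", dst[8];

	printf("uncopied: %lu\n", copy_from_user_model(dst, src, sizeof(dst)));
	printf("rejected: %lu\n", copy_from_user_model(dst, src, -1UL));
	return 0;
}

The 32-bit powerpc variant above additionally clamps a copy that straddles TASK_SIZE to n - over bytes; the sketch omits that arch detail.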
6665diff --git a/arch/powerpc/kernel/exceptions-64e.S b/arch/powerpc/kernel/exceptions-64e.S
6666index 645170a..6cf0271 100644
6667--- a/arch/powerpc/kernel/exceptions-64e.S
6668+++ b/arch/powerpc/kernel/exceptions-64e.S
6669@@ -757,6 +757,7 @@ storage_fault_common:
6670 std r14,_DAR(r1)
6671 std r15,_DSISR(r1)
6672 addi r3,r1,STACK_FRAME_OVERHEAD
6673+ bl .save_nvgprs
6674 mr r4,r14
6675 mr r5,r15
6676 ld r14,PACA_EXGEN+EX_R14(r13)
6677@@ -765,8 +766,7 @@ storage_fault_common:
6678 cmpdi r3,0
6679 bne- 1f
6680 b .ret_from_except_lite
6681-1: bl .save_nvgprs
6682- mr r5,r3
6683+1: mr r5,r3
6684 addi r3,r1,STACK_FRAME_OVERHEAD
6685 ld r4,_DAR(r1)
6686 bl .bad_page_fault
6687diff --git a/arch/powerpc/kernel/exceptions-64s.S b/arch/powerpc/kernel/exceptions-64s.S
6688index 4e00d22..b26abcc 100644
6689--- a/arch/powerpc/kernel/exceptions-64s.S
6690+++ b/arch/powerpc/kernel/exceptions-64s.S
6691@@ -1356,10 +1356,10 @@ handle_page_fault:
6692 11: ld r4,_DAR(r1)
6693 ld r5,_DSISR(r1)
6694 addi r3,r1,STACK_FRAME_OVERHEAD
6695+ bl .save_nvgprs
6696 bl .do_page_fault
6697 cmpdi r3,0
6698 beq+ 12f
6699- bl .save_nvgprs
6700 mr r5,r3
6701 addi r3,r1,STACK_FRAME_OVERHEAD
6702 lwz r4,_DAR(r1)
6703diff --git a/arch/powerpc/kernel/module_32.c b/arch/powerpc/kernel/module_32.c
6704index 2e3200c..72095ce 100644
6705--- a/arch/powerpc/kernel/module_32.c
6706+++ b/arch/powerpc/kernel/module_32.c
6707@@ -162,7 +162,7 @@ int module_frob_arch_sections(Elf32_Ehdr *hdr,
6708 me->arch.core_plt_section = i;
6709 }
6710 if (!me->arch.core_plt_section || !me->arch.init_plt_section) {
6711- printk("Module doesn't contain .plt or .init.plt sections.\n");
6712+ printk("Module %s doesn't contain .plt or .init.plt sections.\n", me->name);
6713 return -ENOEXEC;
6714 }
6715
6716@@ -192,11 +192,16 @@ static uint32_t do_plt_call(void *location,
6717
6718 DEBUGP("Doing plt for call to 0x%x at 0x%x\n", val, (unsigned int)location);
6719 /* Init, or core PLT? */
6720- if (location >= mod->module_core
6721- && location < mod->module_core + mod->core_size)
6722+ if ((location >= mod->module_core_rx && location < mod->module_core_rx + mod->core_size_rx) ||
6723+ (location >= mod->module_core_rw && location < mod->module_core_rw + mod->core_size_rw))
6724 entry = (void *)sechdrs[mod->arch.core_plt_section].sh_addr;
6725- else
6726+ else if ((location >= mod->module_init_rx && location < mod->module_init_rx + mod->init_size_rx) ||
6727+ (location >= mod->module_init_rw && location < mod->module_init_rw + mod->init_size_rw))
6728 entry = (void *)sechdrs[mod->arch.init_plt_section].sh_addr;
6729+ else {
6730+ printk(KERN_ERR "%s: invalid R_PPC_REL24 entry found\n", mod->name);
6731+ return ~0UL;
6732+ }
6733
6734 /* Find this entry, or if that fails, the next avail. entry */
6735 while (entry->jump[0]) {
6736diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
6737index 076d124..6cb2cbf 100644
6738--- a/arch/powerpc/kernel/process.c
6739+++ b/arch/powerpc/kernel/process.c
6740@@ -874,8 +874,8 @@ void show_regs(struct pt_regs * regs)
6741 * Lookup NIP late so we have the best chance of getting the
6742 * above info out without failing
6743 */
6744- printk("NIP ["REG"] %pS\n", regs->nip, (void *)regs->nip);
6745- printk("LR ["REG"] %pS\n", regs->link, (void *)regs->link);
6746+ printk("NIP ["REG"] %pA\n", regs->nip, (void *)regs->nip);
6747+ printk("LR ["REG"] %pA\n", regs->link, (void *)regs->link);
6748 #endif
6749 #ifdef CONFIG_PPC_TRANSACTIONAL_MEM
6750 printk("PACATMSCRATCH [%llx]\n", get_paca()->tm_scratch);
6751@@ -1335,10 +1335,10 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
6752 newsp = stack[0];
6753 ip = stack[STACK_FRAME_LR_SAVE];
6754 if (!firstframe || ip != lr) {
6755- printk("["REG"] ["REG"] %pS", sp, ip, (void *)ip);
6756+ printk("["REG"] ["REG"] %pA", sp, ip, (void *)ip);
6757 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
6758 if ((ip == rth || ip == mrth) && curr_frame >= 0) {
6759- printk(" (%pS)",
6760+ printk(" (%pA)",
6761 (void *)current->ret_stack[curr_frame].ret);
6762 curr_frame--;
6763 }
6764@@ -1358,7 +1358,7 @@ void show_stack(struct task_struct *tsk, unsigned long *stack)
6765 struct pt_regs *regs = (struct pt_regs *)
6766 (sp + STACK_FRAME_OVERHEAD);
6767 lr = regs->link;
6768- printk("--- Exception: %lx at %pS\n LR = %pS\n",
6769+ printk("--- Exception: %lx at %pA\n LR = %pA\n",
6770 regs->trap, (void *)regs->nip, (void *)lr);
6771 firstframe = 1;
6772 }
6773@@ -1394,58 +1394,3 @@ void notrace __ppc64_runlatch_off(void)
6774 mtspr(SPRN_CTRLT, ctrl);
6775 }
6776 #endif /* CONFIG_PPC64 */
6777-
6778-unsigned long arch_align_stack(unsigned long sp)
6779-{
6780- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
6781- sp -= get_random_int() & ~PAGE_MASK;
6782- return sp & ~0xf;
6783-}
6784-
6785-static inline unsigned long brk_rnd(void)
6786-{
6787- unsigned long rnd = 0;
6788-
6789- /* 8MB for 32bit, 1GB for 64bit */
6790- if (is_32bit_task())
6791- rnd = (long)(get_random_int() % (1<<(23-PAGE_SHIFT)));
6792- else
6793- rnd = (long)(get_random_int() % (1<<(30-PAGE_SHIFT)));
6794-
6795- return rnd << PAGE_SHIFT;
6796-}
6797-
6798-unsigned long arch_randomize_brk(struct mm_struct *mm)
6799-{
6800- unsigned long base = mm->brk;
6801- unsigned long ret;
6802-
6803-#ifdef CONFIG_PPC_STD_MMU_64
6804- /*
6805- * If we are using 1TB segments and we are allowed to randomise
6806- * the heap, we can put it above 1TB so it is backed by a 1TB
6807- * segment. Otherwise the heap will be in the bottom 1TB
6808- * which always uses 256MB segments and this may result in a
6809- * performance penalty.
6810- */
6811- if (!is_32bit_task() && (mmu_highuser_ssize == MMU_SEGSIZE_1T))
6812- base = max_t(unsigned long, mm->brk, 1UL << SID_SHIFT_1T);
6813-#endif
6814-
6815- ret = PAGE_ALIGN(base + brk_rnd());
6816-
6817- if (ret < mm->brk)
6818- return mm->brk;
6819-
6820- return ret;
6821-}
6822-
6823-unsigned long randomize_et_dyn(unsigned long base)
6824-{
6825- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
6826-
6827- if (ret < base)
6828- return base;
6829-
6830- return ret;
6831-}
6832diff --git a/arch/powerpc/kernel/ptrace.c b/arch/powerpc/kernel/ptrace.c
6833index 64f7bd5..8dd550f 100644
6834--- a/arch/powerpc/kernel/ptrace.c
6835+++ b/arch/powerpc/kernel/ptrace.c
6836@@ -1783,6 +1783,10 @@ long arch_ptrace(struct task_struct *child, long request,
6837 return ret;
6838 }
6839
6840+#ifdef CONFIG_GRKERNSEC_SETXID
6841+extern void gr_delayed_cred_worker(void);
6842+#endif
6843+
6844 /*
6845 * We must return the syscall number to actually look up in the table.
6846 * This can be -1L to skip running any syscall at all.
6847@@ -1795,6 +1799,11 @@ long do_syscall_trace_enter(struct pt_regs *regs)
6848
6849 secure_computing_strict(regs->gpr[0]);
6850
6851+#ifdef CONFIG_GRKERNSEC_SETXID
6852+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6853+ gr_delayed_cred_worker();
6854+#endif
6855+
6856 if (test_thread_flag(TIF_SYSCALL_TRACE) &&
6857 tracehook_report_syscall_entry(regs))
6858 /*
6859@@ -1829,6 +1838,11 @@ void do_syscall_trace_leave(struct pt_regs *regs)
6860 {
6861 int step;
6862
6863+#ifdef CONFIG_GRKERNSEC_SETXID
6864+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
6865+ gr_delayed_cred_worker();
6866+#endif
6867+
6868 audit_syscall_exit(regs);
6869
6870 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
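As on the other architectures touched by this patch, the powerpc syscall trace hooks gain a CONFIG_GRKERNSEC_SETXID branch: when another thread has flagged the task with TIF_GRSEC_SETXID, gr_delayed_cred_worker() applies the pending credential change at the syscall boundary, where the task's state is consistent. A rough defer-then-apply model follows; every name in it is illustrative, since the worker's body is not part of this hunk.

#include <stdio.h>
#include <stdatomic.h>

static atomic_int pending_cred_change;	/* models TIF_GRSEC_SETXID */
static int current_uid = 1000, wanted_uid;

static void delayed_cred_worker(void)	/* models gr_delayed_cred_worker() */
{
	current_uid = wanted_uid;	/* applied in the task's own context */
}

static void syscall_trace_enter(void)
{
	/* test_and_clear_thread_flag() analogue: consume the flag once */
	if (atomic_exchange(&pending_cred_change, 0))
		delayed_cred_worker();
}

int main(void)
{
	wanted_uid = 0;				/* another thread requests it */
	atomic_store(&pending_cred_change, 1);
	syscall_trace_enter();			/* change lands at the boundary */
	printf("uid now %d\n", current_uid);
	return 0;
}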
6871diff --git a/arch/powerpc/kernel/signal_32.c b/arch/powerpc/kernel/signal_32.c
6872index 0f83122..c0aca6a 100644
6873--- a/arch/powerpc/kernel/signal_32.c
6874+++ b/arch/powerpc/kernel/signal_32.c
6875@@ -987,7 +987,7 @@ int handle_rt_signal32(unsigned long sig, struct k_sigaction *ka,
6876 /* Save user registers on the stack */
6877 frame = &rt_sf->uc.uc_mcontext;
6878 addr = frame;
6879- if (vdso32_rt_sigtramp && current->mm->context.vdso_base) {
6880+ if (vdso32_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
6881 sigret = 0;
6882 tramp = current->mm->context.vdso_base + vdso32_rt_sigtramp;
6883 } else {
6884diff --git a/arch/powerpc/kernel/signal_64.c b/arch/powerpc/kernel/signal_64.c
6885index 887e99d..310bc11 100644
6886--- a/arch/powerpc/kernel/signal_64.c
6887+++ b/arch/powerpc/kernel/signal_64.c
6888@@ -751,7 +751,7 @@ int handle_rt_signal64(int signr, struct k_sigaction *ka, siginfo_t *info,
6889 #endif
6890
6891 /* Set up to return from userspace. */
6892- if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
6893+ if (vdso64_rt_sigtramp && current->mm->context.vdso_base != ~0UL) {
6894 regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
6895 } else {
6896 err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
6897diff --git a/arch/powerpc/kernel/sysfs.c b/arch/powerpc/kernel/sysfs.c
6898index e68a845..8b140e6 100644
6899--- a/arch/powerpc/kernel/sysfs.c
6900+++ b/arch/powerpc/kernel/sysfs.c
6901@@ -522,7 +522,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
6902 return NOTIFY_OK;
6903 }
6904
6905-static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
6906+static struct notifier_block sysfs_cpu_nb = {
6907 .notifier_call = sysfs_cpu_notify,
6908 };
6909
6910diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
6911index e4f205a..8bfffb8 100644
6912--- a/arch/powerpc/kernel/traps.c
6913+++ b/arch/powerpc/kernel/traps.c
6914@@ -143,6 +143,8 @@ static unsigned __kprobes long oops_begin(struct pt_regs *regs)
6915 return flags;
6916 }
6917
6918+extern void gr_handle_kernel_exploit(void);
6919+
6920 static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
6921 int signr)
6922 {
6923@@ -192,6 +194,9 @@ static void __kprobes oops_end(unsigned long flags, struct pt_regs *regs,
6924 panic("Fatal exception in interrupt");
6925 if (panic_on_oops)
6926 panic("Fatal exception");
6927+
6928+ gr_handle_kernel_exploit();
6929+
6930 do_exit(signr);
6931 }
6932
6933diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c
6934index d4f463a..8fb7431 100644
6935--- a/arch/powerpc/kernel/vdso.c
6936+++ b/arch/powerpc/kernel/vdso.c
6937@@ -34,6 +34,7 @@
6938 #include <asm/firmware.h>
6939 #include <asm/vdso.h>
6940 #include <asm/vdso_datapage.h>
6941+#include <asm/mman.h>
6942
6943 #include "setup.h"
6944
6945@@ -222,7 +223,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
6946 vdso_base = VDSO32_MBASE;
6947 #endif
6948
6949- current->mm->context.vdso_base = 0;
6950+ current->mm->context.vdso_base = ~0UL;
6951
6952 /* vDSO has a problem and was disabled, just don't "enable" it for the
6953 * process
6954@@ -242,7 +243,7 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
6955 vdso_base = get_unmapped_area(NULL, vdso_base,
6956 (vdso_pages << PAGE_SHIFT) +
6957 ((VDSO_ALIGNMENT - 1) & PAGE_MASK),
6958- 0, 0);
6959+ 0, MAP_PRIVATE | MAP_EXECUTABLE);
6960 if (IS_ERR_VALUE(vdso_base)) {
6961 rc = vdso_base;
6962 goto fail_mmapsem;
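Two related vdso.c changes: the "no vDSO" marker in context.vdso_base becomes ~0UL rather than 0, since under PaX address 0 can be a legitimate mapping target and a plain truthiness test would be ambiguous, and the vDSO area is requested with MAP_PRIVATE | MAP_EXECUTABLE so the mapping is accounted as executable. The signal_32.c/signal_64.c hunks earlier switch their tests to match. The sentinel change in miniature:

#include <stdio.h>

#define VDSO_NONE (~0UL)	/* 0 can be a valid address once the NULL
				 * page is mappable, so it cannot serve
				 * as the "no vDSO" marker */

static unsigned long vdso_base = VDSO_NONE;

int main(void)
{
	printf("have vdso: %d\n", vdso_base != VDSO_NONE);
	vdso_base = 0x100000UL;	/* as set by arch_setup_additional_pages() */
	printf("have vdso: %d\n", vdso_base != VDSO_NONE);
	return 0;
}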
6963diff --git a/arch/powerpc/lib/usercopy_64.c b/arch/powerpc/lib/usercopy_64.c
6964index 5eea6f3..5d10396 100644
6965--- a/arch/powerpc/lib/usercopy_64.c
6966+++ b/arch/powerpc/lib/usercopy_64.c
6967@@ -9,22 +9,6 @@
6968 #include <linux/module.h>
6969 #include <asm/uaccess.h>
6970
6971-unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
6972-{
6973- if (likely(access_ok(VERIFY_READ, from, n)))
6974- n = __copy_from_user(to, from, n);
6975- else
6976- memset(to, 0, n);
6977- return n;
6978-}
6979-
6980-unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
6981-{
6982- if (likely(access_ok(VERIFY_WRITE, to, n)))
6983- n = __copy_to_user(to, from, n);
6984- return n;
6985-}
6986-
6987 unsigned long copy_in_user(void __user *to, const void __user *from,
6988 unsigned long n)
6989 {
6990@@ -35,7 +19,5 @@ unsigned long copy_in_user(void __user *to, const void __user *from,
6991 return n;
6992 }
6993
6994-EXPORT_SYMBOL(copy_from_user);
6995-EXPORT_SYMBOL(copy_to_user);
6996 EXPORT_SYMBOL(copy_in_user);
6997
6998diff --git a/arch/powerpc/mm/fault.c b/arch/powerpc/mm/fault.c
6999index 8726779..a33c512 100644
7000--- a/arch/powerpc/mm/fault.c
7001+++ b/arch/powerpc/mm/fault.c
7002@@ -33,6 +33,10 @@
7003 #include <linux/magic.h>
7004 #include <linux/ratelimit.h>
7005 #include <linux/context_tracking.h>
7006+#include <linux/slab.h>
7007+#include <linux/pagemap.h>
7008+#include <linux/compiler.h>
7009+#include <linux/unistd.h>
7010
7011 #include <asm/firmware.h>
7012 #include <asm/page.h>
7013@@ -69,6 +73,33 @@ static inline int notify_page_fault(struct pt_regs *regs)
7014 }
7015 #endif
7016
7017+#ifdef CONFIG_PAX_PAGEEXEC
7018+/*
7019+ * PaX: decide what to do with offenders (regs->nip = fault address)
7020+ *
7021+ * returns 1 when task should be killed
7022+ */
7023+static int pax_handle_fetch_fault(struct pt_regs *regs)
7024+{
7025+ return 1;
7026+}
7027+
7028+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
7029+{
7030+ unsigned long i;
7031+
7032+ printk(KERN_ERR "PAX: bytes at PC: ");
7033+ for (i = 0; i < 5; i++) {
7034+ unsigned int c;
7035+ if (get_user(c, (unsigned int __user *)pc+i))
7036+ printk(KERN_CONT "???????? ");
7037+ else
7038+ printk(KERN_CONT "%08x ", c);
7039+ }
7040+ printk("\n");
7041+}
7042+#endif
7043+
7044 /*
7045 * Check whether the instruction at regs->nip is a store using
7046 * an update addressing form which will update r1.
7047@@ -216,7 +247,7 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
7048 * indicate errors in DSISR but can validly be set in SRR1.
7049 */
7050 if (trap == 0x400)
7051- error_code &= 0x48200000;
7052+ error_code &= 0x58200000;
7053 else
7054 is_write = error_code & DSISR_ISSTORE;
7055 #else
7056@@ -371,7 +402,7 @@ good_area:
7057 * "undefined". Of those that can be set, this is the only
7058 * one which seems bad.
7059 */
7060- if (error_code & 0x10000000)
7061+ if (error_code & DSISR_GUARDED)
7062 /* Guarded storage error. */
7063 goto bad_area;
7064 #endif /* CONFIG_8xx */
7065@@ -386,7 +417,7 @@ good_area:
7066 * processors use the same I/D cache coherency mechanism
7067 * as embedded.
7068 */
7069- if (error_code & DSISR_PROTFAULT)
7070+ if (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))
7071 goto bad_area;
7072 #endif /* CONFIG_PPC_STD_MMU */
7073
7074@@ -471,6 +502,23 @@ bad_area:
7075 bad_area_nosemaphore:
7076 /* User mode accesses cause a SIGSEGV */
7077 if (user_mode(regs)) {
7078+
7079+#ifdef CONFIG_PAX_PAGEEXEC
7080+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
7081+#ifdef CONFIG_PPC_STD_MMU
7082+ if (is_exec && (error_code & (DSISR_PROTFAULT | DSISR_GUARDED))) {
7083+#else
7084+ if (is_exec && regs->nip == address) {
7085+#endif
7086+ switch (pax_handle_fetch_fault(regs)) {
7087+ }
7088+
7089+ pax_report_fault(regs, (void *)regs->nip, (void *)regs->gpr[PT_R1]);
7090+ do_group_exit(SIGKILL);
7091+ }
7092+ }
7093+#endif
7094+
7095 _exception(SIGSEGV, regs, code, address);
7096 goto bail;
7097 }
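The fault.c hunk wires powerpc into PAGEEXEC: on an instruction-fetch fault against a non-executable mapping, pax_handle_fetch_fault() decides the outcome (always 1 here, i.e. kill the task) and pax_report_insns() logs the five 32-bit words at the faulting PC, printing "????????" for any word get_user() cannot read. A userspace model of the dump loop, with get_user() replaced by an always-succeeding stub:

#include <stdio.h>

static int get_user_stub(unsigned int *dst, const unsigned int *src)
{
	*dst = *src;	/* the kernel version returns nonzero on fault */
	return 0;
}

static void report_insns(const unsigned int *pc)
{
	printf("PAX: bytes at PC: ");
	for (unsigned long i = 0; i < 5; i++) {
		unsigned int c;

		if (get_user_stub(&c, pc + i))
			printf("???????? ");
		else
			printf("%08x ", c);
	}
	printf("\n");
}

int main(void)
{
	unsigned int insns[5] = { 0x60000000, 0x4e800020, 0, 0, 0 };

	report_insns(insns);	/* 0x60000000 is a powerpc nop */
	return 0;
}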
7098diff --git a/arch/powerpc/mm/mmap_64.c b/arch/powerpc/mm/mmap_64.c
7099index 67a42ed..cd463e0 100644
7100--- a/arch/powerpc/mm/mmap_64.c
7101+++ b/arch/powerpc/mm/mmap_64.c
7102@@ -57,6 +57,10 @@ static unsigned long mmap_rnd(void)
7103 {
7104 unsigned long rnd = 0;
7105
7106+#ifdef CONFIG_PAX_RANDMMAP
7107+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7108+#endif
7109+
7110 if (current->flags & PF_RANDOMIZE) {
7111 /* 8MB for 32bit, 1GB for 64bit */
7112 if (is_32bit_task())
7113@@ -91,10 +95,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7114 */
7115 if (mmap_is_legacy()) {
7116 mm->mmap_base = TASK_UNMAPPED_BASE;
7117+
7118+#ifdef CONFIG_PAX_RANDMMAP
7119+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7120+ mm->mmap_base += mm->delta_mmap;
7121+#endif
7122+
7123 mm->get_unmapped_area = arch_get_unmapped_area;
7124 mm->unmap_area = arch_unmap_area;
7125 } else {
7126 mm->mmap_base = mmap_base();
7127+
7128+#ifdef CONFIG_PAX_RANDMMAP
7129+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7130+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7131+#endif
7132+
7133 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7134 mm->unmap_area = arch_unmap_area_topdown;
7135 }
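arch_pick_mmap_layout() applies the PaX deltas on top of the kernel's usual layout choice: a legacy bottom-up layout shifts mmap_base up by delta_mmap, while a top-down layout shifts it down by delta_mmap + delta_stack, keeping the randomized region clear of the stack gap. The arithmetic with made-up numbers (delta_mmap and delta_stack are random page counts chosen at exec time):

#include <stdio.h>

#define PAGE_SHIFT 12UL

int main(void)
{
	unsigned long task_unmapped_base = 0x20000000UL;   /* bottom-up start */
	unsigned long topdown_base       = 0xc0000000UL;   /* top-down start */
	unsigned long delta_mmap  = 0x1a3UL << PAGE_SHIFT; /* illustrative */
	unsigned long delta_stack = 0x02fUL << PAGE_SHIFT; /* illustrative */

	printf("legacy base:   %#lx\n", task_unmapped_base + delta_mmap);
	printf("top-down base: %#lx\n",
	       topdown_base - (delta_mmap + delta_stack));
	return 0;
}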
7136diff --git a/arch/powerpc/mm/mmu_context_nohash.c b/arch/powerpc/mm/mmu_context_nohash.c
7137index e779642..e5bb889 100644
7138--- a/arch/powerpc/mm/mmu_context_nohash.c
7139+++ b/arch/powerpc/mm/mmu_context_nohash.c
7140@@ -363,7 +363,7 @@ static int __cpuinit mmu_context_cpu_notify(struct notifier_block *self,
7141 return NOTIFY_OK;
7142 }
7143
7144-static struct notifier_block __cpuinitdata mmu_context_cpu_nb = {
7145+static struct notifier_block mmu_context_cpu_nb = {
7146 .notifier_call = mmu_context_cpu_notify,
7147 };
7148
7149diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
7150index 2859a1f..74f9a6e 100644
7151--- a/arch/powerpc/mm/numa.c
7152+++ b/arch/powerpc/mm/numa.c
7153@@ -919,7 +919,7 @@ static void __init *careful_zallocation(int nid, unsigned long size,
7154 return ret;
7155 }
7156
7157-static struct notifier_block __cpuinitdata ppc64_numa_nb = {
7158+static struct notifier_block ppc64_numa_nb = {
7159 .notifier_call = cpu_numa_callback,
7160 .priority = 1 /* Must run before sched domains notifier. */
7161 };
7162diff --git a/arch/powerpc/mm/slice.c b/arch/powerpc/mm/slice.c
7163index 3e99c14..f00953c 100644
7164--- a/arch/powerpc/mm/slice.c
7165+++ b/arch/powerpc/mm/slice.c
7166@@ -103,7 +103,7 @@ static int slice_area_is_free(struct mm_struct *mm, unsigned long addr,
7167 if ((mm->task_size - len) < addr)
7168 return 0;
7169 vma = find_vma(mm, addr);
7170- return (!vma || (addr + len) <= vma->vm_start);
7171+ return check_heap_stack_gap(vma, addr, len, 0);
7172 }
7173
7174 static int slice_low_has_vma(struct mm_struct *mm, unsigned long slice)
7175@@ -277,6 +277,12 @@ static unsigned long slice_find_area_bottomup(struct mm_struct *mm,
7176 info.align_offset = 0;
7177
7178 addr = TASK_UNMAPPED_BASE;
7179+
7180+#ifdef CONFIG_PAX_RANDMMAP
7181+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7182+ addr += mm->delta_mmap;
7183+#endif
7184+
7185 while (addr < TASK_SIZE) {
7186 info.low_limit = addr;
7187 if (!slice_scan_available(addr, available, 1, &addr))
7188@@ -410,6 +416,11 @@ unsigned long slice_get_unmapped_area(unsigned long addr, unsigned long len,
7189 if (fixed && addr > (mm->task_size - len))
7190 return -EINVAL;
7191
7192+#ifdef CONFIG_PAX_RANDMMAP
7193+ if (!fixed && (mm->pax_flags & MF_PAX_RANDMMAP))
7194+ addr = 0;
7195+#endif
7196+
7197 /* If hint, make sure it matches our alignment restrictions */
7198 if (!fixed && addr) {
7199 addr = _ALIGN_UP(addr, 1ul << pshift);
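slice_area_is_free() used to accept any address with no overlapping VMA (!vma || addr + len <= vma->vm_start); routing it through check_heap_stack_gap() additionally enforces a guard gap below a following stack VMA and honors a per-mapping random offset (0 here). The helper itself comes from grsecurity's core mm changes, not from this hunk, so the following is only a simplified model of such a check:

#include <stdio.h>

struct vma { unsigned long vm_start, vm_end; int is_stack; };

#define HEAP_STACK_GAP (64UL * 1024)	/* illustrative gap size */

static int check_gap(const struct vma *next, unsigned long addr,
		     unsigned long len, unsigned long offset)
{
	if (!next)
		return 1;			/* nothing above us */
	if (addr + len + offset > next->vm_start)
		return 0;			/* would overlap */
	if (next->is_stack &&
	    next->vm_start - (addr + len) < HEAP_STACK_GAP)
		return 0;			/* too close to the stack */
	return 1;
}

int main(void)
{
	struct vma stack = { 0xbf000000UL, 0xbf800000UL, 1 };

	printf("%d\n", check_gap(&stack, 0xbeffc000UL, 0x1000, 0)); /* 0 */
	printf("%d\n", check_gap(&stack, 0xb0000000UL, 0x1000, 0)); /* 1 */
	return 0;
}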
7200diff --git a/arch/powerpc/platforms/cell/spufs/file.c b/arch/powerpc/platforms/cell/spufs/file.c
7201index 9098692..3d54cd1 100644
7202--- a/arch/powerpc/platforms/cell/spufs/file.c
7203+++ b/arch/powerpc/platforms/cell/spufs/file.c
7204@@ -280,9 +280,9 @@ spufs_mem_mmap_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
7205 return VM_FAULT_NOPAGE;
7206 }
7207
7208-static int spufs_mem_mmap_access(struct vm_area_struct *vma,
7209+static ssize_t spufs_mem_mmap_access(struct vm_area_struct *vma,
7210 unsigned long address,
7211- void *buf, int len, int write)
7212+ void *buf, size_t len, int write)
7213 {
7214 struct spu_context *ctx = vma->vm_file->private_data;
7215 unsigned long offset = address - vma->vm_start;
7216diff --git a/arch/powerpc/platforms/powermac/smp.c b/arch/powerpc/platforms/powermac/smp.c
7217index bdb738a..49c9f95 100644
7218--- a/arch/powerpc/platforms/powermac/smp.c
7219+++ b/arch/powerpc/platforms/powermac/smp.c
7220@@ -885,7 +885,7 @@ static int smp_core99_cpu_notify(struct notifier_block *self,
7221 return NOTIFY_OK;
7222 }
7223
7224-static struct notifier_block __cpuinitdata smp_core99_cpu_nb = {
7225+static struct notifier_block smp_core99_cpu_nb = {
7226 .notifier_call = smp_core99_cpu_notify,
7227 };
7228 #endif /* CONFIG_HOTPLUG_CPU */
7229diff --git a/arch/s390/include/asm/atomic.h b/arch/s390/include/asm/atomic.h
7230index c797832..ce575c8 100644
7231--- a/arch/s390/include/asm/atomic.h
7232+++ b/arch/s390/include/asm/atomic.h
7233@@ -326,6 +326,16 @@ static inline long long atomic64_dec_if_positive(atomic64_t *v)
7234 #define atomic64_dec_and_test(_v) (atomic64_sub_return(1, _v) == 0)
7235 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
7236
7237+#define atomic64_read_unchecked(v) atomic64_read(v)
7238+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
7239+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
7240+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
7241+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
7242+#define atomic64_inc_unchecked(v) atomic64_inc(v)
7243+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
7244+#define atomic64_dec_unchecked(v) atomic64_dec(v)
7245+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
7246+
7247 #define smp_mb__before_atomic_dec() smp_mb()
7248 #define smp_mb__after_atomic_dec() smp_mb()
7249 #define smp_mb__before_atomic_inc() smp_mb()
7250diff --git a/arch/s390/include/asm/cache.h b/arch/s390/include/asm/cache.h
7251index 4d7ccac..d03d0ad 100644
7252--- a/arch/s390/include/asm/cache.h
7253+++ b/arch/s390/include/asm/cache.h
7254@@ -9,8 +9,10 @@
7255 #ifndef __ARCH_S390_CACHE_H
7256 #define __ARCH_S390_CACHE_H
7257
7258-#define L1_CACHE_BYTES 256
7259+#include <linux/const.h>
7260+
7261 #define L1_CACHE_SHIFT 8
7262+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7263 #define NET_SKB_PAD 32
7264
7265 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
7266diff --git a/arch/s390/include/asm/elf.h b/arch/s390/include/asm/elf.h
7267index 78f4f87..598ce39 100644
7268--- a/arch/s390/include/asm/elf.h
7269+++ b/arch/s390/include/asm/elf.h
7270@@ -162,8 +162,14 @@ extern unsigned int vdso_enabled;
7271 the loader. We need to make sure that it is out of the way of the program
7272 that it will "exec", and that there is sufficient room for the brk. */
7273
7274-extern unsigned long randomize_et_dyn(unsigned long base);
7275-#define ELF_ET_DYN_BASE (randomize_et_dyn(STACK_TOP / 3 * 2))
7276+#define ELF_ET_DYN_BASE (STACK_TOP / 3 * 2)
7277+
7278+#ifdef CONFIG_PAX_ASLR
7279+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_31BIT) ? 0x10000UL : 0x80000000UL)
7280+
7281+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
7282+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_31BIT) ? 15 : 26)
7283+#endif
7284
7285 /* This yields a mask that user programs can use to figure out what
7286 instruction set this CPU supports. */
7287@@ -222,9 +228,6 @@ struct linux_binprm;
7288 #define ARCH_HAS_SETUP_ADDITIONAL_PAGES 1
7289 int arch_setup_additional_pages(struct linux_binprm *, int);
7290
7291-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
7292-#define arch_randomize_brk arch_randomize_brk
7293-
7294 void *fill_cpu_elf_notes(void *ptr, struct save_area *sa);
7295
7296 #endif
7297diff --git a/arch/s390/include/asm/exec.h b/arch/s390/include/asm/exec.h
7298index c4a93d6..4d2a9b4 100644
7299--- a/arch/s390/include/asm/exec.h
7300+++ b/arch/s390/include/asm/exec.h
7301@@ -7,6 +7,6 @@
7302 #ifndef __ASM_EXEC_H
7303 #define __ASM_EXEC_H
7304
7305-extern unsigned long arch_align_stack(unsigned long sp);
7306+#define arch_align_stack(x) ((x) & ~0xfUL)
7307
7308 #endif /* __ASM_EXEC_H */
7309diff --git a/arch/s390/include/asm/uaccess.h b/arch/s390/include/asm/uaccess.h
7310index 9c33ed4..e40cbef 100644
7311--- a/arch/s390/include/asm/uaccess.h
7312+++ b/arch/s390/include/asm/uaccess.h
7313@@ -252,6 +252,10 @@ static inline unsigned long __must_check
7314 copy_to_user(void __user *to, const void *from, unsigned long n)
7315 {
7316 might_fault();
7317+
7318+ if ((long)n < 0)
7319+ return n;
7320+
7321 return __copy_to_user(to, from, n);
7322 }
7323
7324@@ -275,6 +279,9 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
7325 static inline unsigned long __must_check
7326 __copy_from_user(void *to, const void __user *from, unsigned long n)
7327 {
7328+ if ((long)n < 0)
7329+ return n;
7330+
7331 if (__builtin_constant_p(n) && (n <= 256))
7332 return uaccess.copy_from_user_small(n, from, to);
7333 else
7334@@ -306,10 +313,14 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
7335 static inline unsigned long __must_check
7336 copy_from_user(void *to, const void __user *from, unsigned long n)
7337 {
7338- unsigned int sz = __compiletime_object_size(to);
7339+ size_t sz = __compiletime_object_size(to);
7340
7341 might_fault();
7342- if (unlikely(sz != -1 && sz < n)) {
7343+
7344+ if ((long)n < 0)
7345+ return n;
7346+
7347+ if (unlikely(sz != (size_t)-1 && sz < n)) {
7348 copy_from_user_overflow();
7349 return n;
7350 }
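The s390 copy_from_user() keeps its compile-time overflow check but widens sz from unsigned int to size_t, so the (size_t)-1 "size unknown" sentinel returned by __compiletime_object_size() compares correctly on 64-bit. That macro wraps GCC's __builtin_object_size(); a standalone illustration (the builtin only resolves sizes when optimization is enabled, hence the third branch):

#include <stdio.h>
#include <string.h>

#define copy_checked(to, from, n) do {					\
	size_t sz_ = __builtin_object_size(to, 0);			\
	if (sz_ == (size_t)-1)						\
		puts("size unknown (build with -O2 to resolve it)");	\
	else if (sz_ < (n))						\
		printf("overflow caught: buf %zu < copy %zu\n",		\
		       sz_, (size_t)(n));				\
	else								\
		memcpy(to, from, n);					\
} while (0)

int main(void)
{
	char buf[8];

	copy_checked(buf, "0123456", 8);		/* fits exactly */
	copy_checked(buf, "0123456789abcdef", 16);	/* rejected at -O2 */
	return 0;
}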
7351diff --git a/arch/s390/kernel/module.c b/arch/s390/kernel/module.c
7352index 7845e15..59c4353 100644
7353--- a/arch/s390/kernel/module.c
7354+++ b/arch/s390/kernel/module.c
7355@@ -169,11 +169,11 @@ int module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
7356
7357 /* Increase core size by size of got & plt and set start
7358 offsets for got and plt. */
7359- me->core_size = ALIGN(me->core_size, 4);
7360- me->arch.got_offset = me->core_size;
7361- me->core_size += me->arch.got_size;
7362- me->arch.plt_offset = me->core_size;
7363- me->core_size += me->arch.plt_size;
7364+ me->core_size_rw = ALIGN(me->core_size_rw, 4);
7365+ me->arch.got_offset = me->core_size_rw;
7366+ me->core_size_rw += me->arch.got_size;
7367+ me->arch.plt_offset = me->core_size_rx;
7368+ me->core_size_rx += me->arch.plt_size;
7369 return 0;
7370 }
7371
7372@@ -289,7 +289,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7373 if (info->got_initialized == 0) {
7374 Elf_Addr *gotent;
7375
7376- gotent = me->module_core + me->arch.got_offset +
7377+ gotent = me->module_core_rw + me->arch.got_offset +
7378 info->got_offset;
7379 *gotent = val;
7380 info->got_initialized = 1;
7381@@ -312,7 +312,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7382 rc = apply_rela_bits(loc, val, 0, 64, 0);
7383 else if (r_type == R_390_GOTENT ||
7384 r_type == R_390_GOTPLTENT) {
7385- val += (Elf_Addr) me->module_core - loc;
7386+ val += (Elf_Addr) me->module_core_rw - loc;
7387 rc = apply_rela_bits(loc, val, 1, 32, 1);
7388 }
7389 break;
7390@@ -325,7 +325,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7391 case R_390_PLTOFF64: /* 16 bit offset from GOT to PLT. */
7392 if (info->plt_initialized == 0) {
7393 unsigned int *ip;
7394- ip = me->module_core + me->arch.plt_offset +
7395+ ip = me->module_core_rx + me->arch.plt_offset +
7396 info->plt_offset;
7397 #ifndef CONFIG_64BIT
7398 ip[0] = 0x0d105810; /* basr 1,0; l 1,6(1); br 1 */
7399@@ -350,7 +350,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7400 val - loc + 0xffffUL < 0x1ffffeUL) ||
7401 (r_type == R_390_PLT32DBL &&
7402 val - loc + 0xffffffffULL < 0x1fffffffeULL)))
7403- val = (Elf_Addr) me->module_core +
7404+ val = (Elf_Addr) me->module_core_rx +
7405 me->arch.plt_offset +
7406 info->plt_offset;
7407 val += rela->r_addend - loc;
7408@@ -372,7 +372,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7409 case R_390_GOTOFF32: /* 32 bit offset to GOT. */
7410 case R_390_GOTOFF64: /* 64 bit offset to GOT. */
7411 val = val + rela->r_addend -
7412- ((Elf_Addr) me->module_core + me->arch.got_offset);
7413+ ((Elf_Addr) me->module_core_rw + me->arch.got_offset);
7414 if (r_type == R_390_GOTOFF16)
7415 rc = apply_rela_bits(loc, val, 0, 16, 0);
7416 else if (r_type == R_390_GOTOFF32)
7417@@ -382,7 +382,7 @@ static int apply_rela(Elf_Rela *rela, Elf_Addr base, Elf_Sym *symtab,
7418 break;
7419 case R_390_GOTPC: /* 32 bit PC relative offset to GOT. */
7420 case R_390_GOTPCDBL: /* 32 bit PC rel. off. to GOT shifted by 1. */
7421- val = (Elf_Addr) me->module_core + me->arch.got_offset +
7422+ val = (Elf_Addr) me->module_core_rw + me->arch.got_offset +
7423 rela->r_addend - loc;
7424 if (r_type == R_390_GOTPC)
7425 rc = apply_rela_bits(loc, val, 1, 32, 0);
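Under the RW/RX module split, a module's core is two regions: module_core_rx for code, which includes the PLT (executable trampolines), and module_core_rw for data, which includes the GOT (writable pointers). The module_frob_arch_sections() hunk accordingly accounts the GOT against core_size_rw and the PLT against core_size_rx, and every relocation that computes a GOT or PLT address is rebased onto the matching region. A struct-level sketch of the bookkeeping (field names mirror the patch; the rest is illustrative):

#include <stdio.h>

struct module_layout {
	char *module_core_rx;		/* code: executable, not writable */
	unsigned long core_size_rx;
	char *module_core_rw;		/* data: writable, never executable */
	unsigned long core_size_rw;
	unsigned long got_offset;	/* GOT lives in the RW region */
	unsigned long plt_offset;	/* PLT lives in the RX region */
};

int main(void)
{
	struct module_layout m = { 0 };
	unsigned long got_size = 64, plt_size = 128;

	m.core_size_rw = (m.core_size_rw + 3) & ~3UL;	/* ALIGN(, 4) */
	m.got_offset = m.core_size_rw;
	m.core_size_rw += got_size;
	m.plt_offset = m.core_size_rx;
	m.core_size_rx += plt_size;

	printf("got at rw+%lu, plt at rx+%lu\n", m.got_offset, m.plt_offset);
	return 0;
}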
7426diff --git a/arch/s390/kernel/process.c b/arch/s390/kernel/process.c
7427index 2bc3edd..ab9d598 100644
7428--- a/arch/s390/kernel/process.c
7429+++ b/arch/s390/kernel/process.c
7430@@ -236,39 +236,3 @@ unsigned long get_wchan(struct task_struct *p)
7431 }
7432 return 0;
7433 }
7434-
7435-unsigned long arch_align_stack(unsigned long sp)
7436-{
7437- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
7438- sp -= get_random_int() & ~PAGE_MASK;
7439- return sp & ~0xf;
7440-}
7441-
7442-static inline unsigned long brk_rnd(void)
7443-{
7444- /* 8MB for 32bit, 1GB for 64bit */
7445- if (is_32bit_task())
7446- return (get_random_int() & 0x7ffUL) << PAGE_SHIFT;
7447- else
7448- return (get_random_int() & 0x3ffffUL) << PAGE_SHIFT;
7449-}
7450-
7451-unsigned long arch_randomize_brk(struct mm_struct *mm)
7452-{
7453- unsigned long ret = PAGE_ALIGN(mm->brk + brk_rnd());
7454-
7455- if (ret < mm->brk)
7456- return mm->brk;
7457- return ret;
7458-}
7459-
7460-unsigned long randomize_et_dyn(unsigned long base)
7461-{
7462- unsigned long ret = PAGE_ALIGN(base + brk_rnd());
7463-
7464- if (!(current->flags & PF_RANDOMIZE))
7465- return base;
7466- if (ret < base)
7467- return base;
7468- return ret;
7469-}
7470diff --git a/arch/s390/mm/mmap.c b/arch/s390/mm/mmap.c
7471index 06bafec..2bca531 100644
7472--- a/arch/s390/mm/mmap.c
7473+++ b/arch/s390/mm/mmap.c
7474@@ -90,10 +90,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7475 */
7476 if (mmap_is_legacy()) {
7477 mm->mmap_base = TASK_UNMAPPED_BASE;
7478+
7479+#ifdef CONFIG_PAX_RANDMMAP
7480+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7481+ mm->mmap_base += mm->delta_mmap;
7482+#endif
7483+
7484 mm->get_unmapped_area = arch_get_unmapped_area;
7485 mm->unmap_area = arch_unmap_area;
7486 } else {
7487 mm->mmap_base = mmap_base();
7488+
7489+#ifdef CONFIG_PAX_RANDMMAP
7490+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7491+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7492+#endif
7493+
7494 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
7495 mm->unmap_area = arch_unmap_area_topdown;
7496 }
7497@@ -175,10 +187,22 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
7498 */
7499 if (mmap_is_legacy()) {
7500 mm->mmap_base = TASK_UNMAPPED_BASE;
7501+
7502+#ifdef CONFIG_PAX_RANDMMAP
7503+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7504+ mm->mmap_base += mm->delta_mmap;
7505+#endif
7506+
7507 mm->get_unmapped_area = s390_get_unmapped_area;
7508 mm->unmap_area = arch_unmap_area;
7509 } else {
7510 mm->mmap_base = mmap_base();
7511+
7512+#ifdef CONFIG_PAX_RANDMMAP
7513+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7514+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
7515+#endif
7516+
7517 mm->get_unmapped_area = s390_get_unmapped_area_topdown;
7518 mm->unmap_area = arch_unmap_area_topdown;
7519 }
7520diff --git a/arch/score/include/asm/cache.h b/arch/score/include/asm/cache.h
7521index ae3d59f..f65f075 100644
7522--- a/arch/score/include/asm/cache.h
7523+++ b/arch/score/include/asm/cache.h
7524@@ -1,7 +1,9 @@
7525 #ifndef _ASM_SCORE_CACHE_H
7526 #define _ASM_SCORE_CACHE_H
7527
7528+#include <linux/const.h>
7529+
7530 #define L1_CACHE_SHIFT 4
7531-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7532+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7533
7534 #endif /* _ASM_SCORE_CACHE_H */
7535diff --git a/arch/score/include/asm/exec.h b/arch/score/include/asm/exec.h
7536index f9f3cd5..58ff438 100644
7537--- a/arch/score/include/asm/exec.h
7538+++ b/arch/score/include/asm/exec.h
7539@@ -1,6 +1,6 @@
7540 #ifndef _ASM_SCORE_EXEC_H
7541 #define _ASM_SCORE_EXEC_H
7542
7543-extern unsigned long arch_align_stack(unsigned long sp);
7544+#define arch_align_stack(x) (x)
7545
7546 #endif /* _ASM_SCORE_EXEC_H */
7547diff --git a/arch/score/kernel/process.c b/arch/score/kernel/process.c
7548index f4c6d02..e9355c3 100644
7549--- a/arch/score/kernel/process.c
7550+++ b/arch/score/kernel/process.c
7551@@ -116,8 +116,3 @@ unsigned long get_wchan(struct task_struct *task)
7552
7553 return task_pt_regs(task)->cp0_epc;
7554 }
7555-
7556-unsigned long arch_align_stack(unsigned long sp)
7557-{
7558- return sp;
7559-}
7560diff --git a/arch/sh/include/asm/cache.h b/arch/sh/include/asm/cache.h
7561index ef9e555..331bd29 100644
7562--- a/arch/sh/include/asm/cache.h
7563+++ b/arch/sh/include/asm/cache.h
7564@@ -9,10 +9,11 @@
7565 #define __ASM_SH_CACHE_H
7566 #ifdef __KERNEL__
7567
7568+#include <linux/const.h>
7569 #include <linux/init.h>
7570 #include <cpu/cache.h>
7571
7572-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
7573+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7574
7575 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
7576
7577diff --git a/arch/sh/kernel/cpu/sh4a/smp-shx3.c b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
7578index 03f2b55..b0270327 100644
7579--- a/arch/sh/kernel/cpu/sh4a/smp-shx3.c
7580+++ b/arch/sh/kernel/cpu/sh4a/smp-shx3.c
7581@@ -143,7 +143,7 @@ shx3_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
7582 return NOTIFY_OK;
7583 }
7584
7585-static struct notifier_block __cpuinitdata shx3_cpu_notifier = {
7586+static struct notifier_block shx3_cpu_notifier = {
7587 .notifier_call = shx3_cpu_callback,
7588 };
7589
7590diff --git a/arch/sh/mm/mmap.c b/arch/sh/mm/mmap.c
7591index 6777177..cb5e44f 100644
7592--- a/arch/sh/mm/mmap.c
7593+++ b/arch/sh/mm/mmap.c
7594@@ -36,6 +36,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7595 struct mm_struct *mm = current->mm;
7596 struct vm_area_struct *vma;
7597 int do_colour_align;
7598+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
7599 struct vm_unmapped_area_info info;
7600
7601 if (flags & MAP_FIXED) {
7602@@ -55,6 +56,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7603 if (filp || (flags & MAP_SHARED))
7604 do_colour_align = 1;
7605
7606+#ifdef CONFIG_PAX_RANDMMAP
7607+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7608+#endif
7609+
7610 if (addr) {
7611 if (do_colour_align)
7612 addr = COLOUR_ALIGN(addr, pgoff);
7613@@ -62,14 +67,13 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
7614 addr = PAGE_ALIGN(addr);
7615
7616 vma = find_vma(mm, addr);
7617- if (TASK_SIZE - len >= addr &&
7618- (!vma || addr + len <= vma->vm_start))
7619+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
7620 return addr;
7621 }
7622
7623 info.flags = 0;
7624 info.length = len;
7625- info.low_limit = TASK_UNMAPPED_BASE;
7626+ info.low_limit = mm->mmap_base;
7627 info.high_limit = TASK_SIZE;
7628 info.align_mask = do_colour_align ? (PAGE_MASK & shm_align_mask) : 0;
7629 info.align_offset = pgoff << PAGE_SHIFT;
7630@@ -85,6 +89,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7631 struct mm_struct *mm = current->mm;
7632 unsigned long addr = addr0;
7633 int do_colour_align;
7634+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
7635 struct vm_unmapped_area_info info;
7636
7637 if (flags & MAP_FIXED) {
7638@@ -104,6 +109,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7639 if (filp || (flags & MAP_SHARED))
7640 do_colour_align = 1;
7641
7642+#ifdef CONFIG_PAX_RANDMMAP
7643+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
7644+#endif
7645+
7646 /* requesting a specific address */
7647 if (addr) {
7648 if (do_colour_align)
7649@@ -112,8 +121,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7650 addr = PAGE_ALIGN(addr);
7651
7652 vma = find_vma(mm, addr);
7653- if (TASK_SIZE - len >= addr &&
7654- (!vma || addr + len <= vma->vm_start))
7655+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
7656 return addr;
7657 }
7658
7659@@ -135,6 +143,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
7660 VM_BUG_ON(addr != -ENOMEM);
7661 info.flags = 0;
7662 info.low_limit = TASK_UNMAPPED_BASE;
7663+
7664+#ifdef CONFIG_PAX_RANDMMAP
7665+ if (mm->pax_flags & MF_PAX_RANDMMAP)
7666+ info.low_limit += mm->delta_mmap;
7667+#endif
7668+
7669 info.high_limit = TASK_SIZE;
7670 addr = vm_unmapped_area(&info);
7671 }
7672diff --git a/arch/sparc/include/asm/atomic_64.h b/arch/sparc/include/asm/atomic_64.h
7673index be56a24..443328f 100644
7674--- a/arch/sparc/include/asm/atomic_64.h
7675+++ b/arch/sparc/include/asm/atomic_64.h
7676@@ -14,18 +14,40 @@
7677 #define ATOMIC64_INIT(i) { (i) }
7678
7679 #define atomic_read(v) (*(volatile int *)&(v)->counter)
7680+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
7681+{
7682+ return v->counter;
7683+}
7684 #define atomic64_read(v) (*(volatile long *)&(v)->counter)
7685+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
7686+{
7687+ return v->counter;
7688+}
7689
7690 #define atomic_set(v, i) (((v)->counter) = i)
7691+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
7692+{
7693+ v->counter = i;
7694+}
7695 #define atomic64_set(v, i) (((v)->counter) = i)
7696+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
7697+{
7698+ v->counter = i;
7699+}
7700
7701 extern void atomic_add(int, atomic_t *);
7702+extern void atomic_add_unchecked(int, atomic_unchecked_t *);
7703 extern void atomic64_add(long, atomic64_t *);
7704+extern void atomic64_add_unchecked(long, atomic64_unchecked_t *);
7705 extern void atomic_sub(int, atomic_t *);
7706+extern void atomic_sub_unchecked(int, atomic_unchecked_t *);
7707 extern void atomic64_sub(long, atomic64_t *);
7708+extern void atomic64_sub_unchecked(long, atomic64_unchecked_t *);
7709
7710 extern int atomic_add_ret(int, atomic_t *);
7711+extern int atomic_add_ret_unchecked(int, atomic_unchecked_t *);
7712 extern long atomic64_add_ret(long, atomic64_t *);
7713+extern long atomic64_add_ret_unchecked(long, atomic64_unchecked_t *);
7714 extern int atomic_sub_ret(int, atomic_t *);
7715 extern long atomic64_sub_ret(long, atomic64_t *);
7716
7717@@ -33,13 +55,29 @@ extern long atomic64_sub_ret(long, atomic64_t *);
7718 #define atomic64_dec_return(v) atomic64_sub_ret(1, v)
7719
7720 #define atomic_inc_return(v) atomic_add_ret(1, v)
7721+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
7722+{
7723+ return atomic_add_ret_unchecked(1, v);
7724+}
7725 #define atomic64_inc_return(v) atomic64_add_ret(1, v)
7726+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
7727+{
7728+ return atomic64_add_ret_unchecked(1, v);
7729+}
7730
7731 #define atomic_sub_return(i, v) atomic_sub_ret(i, v)
7732 #define atomic64_sub_return(i, v) atomic64_sub_ret(i, v)
7733
7734 #define atomic_add_return(i, v) atomic_add_ret(i, v)
7735+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
7736+{
7737+ return atomic_add_ret_unchecked(i, v);
7738+}
7739 #define atomic64_add_return(i, v) atomic64_add_ret(i, v)
7740+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
7741+{
7742+ return atomic64_add_ret_unchecked(i, v);
7743+}
7744
7745 /*
7746 * atomic_inc_and_test - increment and test
7747@@ -50,6 +88,10 @@ extern long atomic64_sub_ret(long, atomic64_t *);
7748 * other cases.
7749 */
7750 #define atomic_inc_and_test(v) (atomic_inc_return(v) == 0)
7751+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
7752+{
7753+ return atomic_inc_return_unchecked(v) == 0;
7754+}
7755 #define atomic64_inc_and_test(v) (atomic64_inc_return(v) == 0)
7756
7757 #define atomic_sub_and_test(i, v) (atomic_sub_ret(i, v) == 0)
7758@@ -59,25 +101,60 @@ extern long atomic64_sub_ret(long, atomic64_t *);
7759 #define atomic64_dec_and_test(v) (atomic64_sub_ret(1, v) == 0)
7760
7761 #define atomic_inc(v) atomic_add(1, v)
7762+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
7763+{
7764+ atomic_add_unchecked(1, v);
7765+}
7766 #define atomic64_inc(v) atomic64_add(1, v)
7767+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
7768+{
7769+ atomic64_add_unchecked(1, v);
7770+}
7771
7772 #define atomic_dec(v) atomic_sub(1, v)
7773+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
7774+{
7775+ atomic_sub_unchecked(1, v);
7776+}
7777 #define atomic64_dec(v) atomic64_sub(1, v)
7778+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
7779+{
7780+ atomic64_sub_unchecked(1, v);
7781+}
7782
7783 #define atomic_add_negative(i, v) (atomic_add_ret(i, v) < 0)
7784 #define atomic64_add_negative(i, v) (atomic64_add_ret(i, v) < 0)
7785
7786 #define atomic_cmpxchg(v, o, n) (cmpxchg(&((v)->counter), (o), (n)))
7787+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
7788+{
7789+ return cmpxchg(&v->counter, old, new);
7790+}
7791 #define atomic_xchg(v, new) (xchg(&((v)->counter), new))
7792+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
7793+{
7794+ return xchg(&v->counter, new);
7795+}
7796
7797 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7798 {
7799- int c, old;
7800+ int c, old, new;
7801 c = atomic_read(v);
7802 for (;;) {
7803- if (unlikely(c == (u)))
7804+ if (unlikely(c == u))
7805 break;
7806- old = atomic_cmpxchg((v), c, c + (a));
7807+
7808+ asm volatile("addcc %2, %0, %0\n"
7809+
7810+#ifdef CONFIG_PAX_REFCOUNT
7811+ "tvs %%icc, 6\n"
7812+#endif
7813+
7814+ : "=r" (new)
7815+ : "0" (c), "ir" (a)
7816+ : "cc");
7817+
7818+ old = atomic_cmpxchg(v, c, new);
7819 if (likely(old == c))
7820 break;
7821 c = old;
7822@@ -88,20 +165,35 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
7823 #define atomic64_cmpxchg(v, o, n) \
7824 ((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
7825 #define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
7826+static inline long atomic64_xchg_unchecked(atomic64_unchecked_t *v, long new)
7827+{
7828+ return xchg(&v->counter, new);
7829+}
7830
7831 static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
7832 {
7833- long c, old;
7834+ long c, old, new;
7835 c = atomic64_read(v);
7836 for (;;) {
7837- if (unlikely(c == (u)))
7838+ if (unlikely(c == u))
7839 break;
7840- old = atomic64_cmpxchg((v), c, c + (a));
7841+
7842+ asm volatile("addcc %2, %0, %0\n"
7843+
7844+#ifdef CONFIG_PAX_REFCOUNT
7845+ "tvs %%xcc, 6\n"
7846+#endif
7847+
7848+ : "=r" (new)
7849+ : "0" (c), "ir" (a)
7850+ : "cc");
7851+
7852+ old = atomic64_cmpxchg(v, c, new);
7853 if (likely(old == c))
7854 break;
7855 c = old;
7856 }
7857- return c != (u);
7858+ return c != u;
7859 }
7860
7861 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
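Besides adding *_unchecked variants for counters that are allowed to wrap, the sparc64 atomics replace the plain add in __atomic_add_unless()/atomic64_add_unless() with addcc plus "tvs %icc, 6" under CONFIG_PAX_REFCOUNT: add-and-set-condition-codes followed by trap-on-overflow, so an overflowing reference count raises trap 6 instead of silently wrapping toward a use-after-free. The same idea expressed portably with GCC's checked-arithmetic builtin (an analogue, not what the patch emits):

#include <stdio.h>
#include <stdlib.h>
#include <limits.h>

static int refcount_add(int a, int *counter)
{
	int new_val;

	/* addcc+tvs analogue: trap instead of wrapping on overflow */
	if (__builtin_add_overflow(*counter, a, &new_val)) {
		fprintf(stderr, "refcount overflow detected\n");
		abort();	/* the kernel raises a trap / kills the task */
	}
	*counter = new_val;
	return new_val;
}

int main(void)
{
	int c = INT_MAX - 1;

	printf("%d\n", refcount_add(1, &c));	/* ok: reaches INT_MAX */
	refcount_add(1, &c);			/* overflows: aborts */
	return 0;
}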
7862diff --git a/arch/sparc/include/asm/cache.h b/arch/sparc/include/asm/cache.h
7863index 5bb6991..5c2132e 100644
7864--- a/arch/sparc/include/asm/cache.h
7865+++ b/arch/sparc/include/asm/cache.h
7866@@ -7,10 +7,12 @@
7867 #ifndef _SPARC_CACHE_H
7868 #define _SPARC_CACHE_H
7869
7870+#include <linux/const.h>
7871+
7872 #define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
7873
7874 #define L1_CACHE_SHIFT 5
7875-#define L1_CACHE_BYTES 32
7876+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
7877
7878 #ifdef CONFIG_SPARC32
7879 #define SMP_CACHE_BYTES_SHIFT 5
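The cache.h churn across s390, score, sh and sparc replaces hard-coded L1_CACHE_BYTES values with (_AC(1,UL) << L1_CACHE_SHIFT). _AC() from <linux/const.h> appends the UL suffix only when not compiling assembly, so the same header yields a proper unsigned long constant in C while remaining valid in .S files, where a UL suffix would be a syntax error. A reduced reproduction of the macro:

#include <stdio.h>

/* Cut-down copy of the <linux/const.h> machinery. */
#ifdef __ASSEMBLY__
#define _AC(X, Y)	X
#else
#define __AC(X, Y)	(X##Y)
#define _AC(X, Y)	__AC(X, Y)
#endif

#define L1_CACHE_SHIFT	5
#define L1_CACHE_BYTES	(_AC(1,UL) << L1_CACHE_SHIFT)

int main(void)
{
	/* In C the constant is unsigned long, so later arithmetic on it
	 * is done at full width on 64-bit builds. */
	printf("%lu bytes (constant is %zu bytes wide)\n",
	       L1_CACHE_BYTES, sizeof(L1_CACHE_BYTES));
	return 0;
}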
7880diff --git a/arch/sparc/include/asm/elf_32.h b/arch/sparc/include/asm/elf_32.h
7881index a24e41f..47677ff 100644
7882--- a/arch/sparc/include/asm/elf_32.h
7883+++ b/arch/sparc/include/asm/elf_32.h
7884@@ -114,6 +114,13 @@ typedef struct {
7885
7886 #define ELF_ET_DYN_BASE (TASK_UNMAPPED_BASE)
7887
7888+#ifdef CONFIG_PAX_ASLR
7889+#define PAX_ELF_ET_DYN_BASE 0x10000UL
7890+
7891+#define PAX_DELTA_MMAP_LEN 16
7892+#define PAX_DELTA_STACK_LEN 16
7893+#endif
7894+
7895 /* This yields a mask that user programs can use to figure out what
7896 instruction set this cpu supports. This can NOT be done in userspace
7897 on Sparc. */
7898diff --git a/arch/sparc/include/asm/elf_64.h b/arch/sparc/include/asm/elf_64.h
7899index 370ca1e..d4f4a98 100644
7900--- a/arch/sparc/include/asm/elf_64.h
7901+++ b/arch/sparc/include/asm/elf_64.h
7902@@ -189,6 +189,13 @@ typedef struct {
7903 #define ELF_ET_DYN_BASE 0x0000010000000000UL
7904 #define COMPAT_ELF_ET_DYN_BASE 0x0000000070000000UL
7905
7906+#ifdef CONFIG_PAX_ASLR
7907+#define PAX_ELF_ET_DYN_BASE (test_thread_flag(TIF_32BIT) ? 0x10000UL : 0x100000UL)
7908+
7909+#define PAX_DELTA_MMAP_LEN (test_thread_flag(TIF_32BIT) ? 14 : 28)
7910+#define PAX_DELTA_STACK_LEN (test_thread_flag(TIF_32BIT) ? 15 : 29)
7911+#endif
7912+
7913 extern unsigned long sparc64_elf_hwcap;
7914 #define ELF_HWCAP sparc64_elf_hwcap
7915
7916diff --git a/arch/sparc/include/asm/pgalloc_32.h b/arch/sparc/include/asm/pgalloc_32.h
7917index 9b1c36d..209298b 100644
7918--- a/arch/sparc/include/asm/pgalloc_32.h
7919+++ b/arch/sparc/include/asm/pgalloc_32.h
7920@@ -33,6 +33,7 @@ static inline void pgd_set(pgd_t * pgdp, pmd_t * pmdp)
7921 }
7922
7923 #define pgd_populate(MM, PGD, PMD) pgd_set(PGD, PMD)
7924+#define pgd_populate_kernel(MM, PGD, PMD) pgd_populate((MM), (PGD), (PMD))
7925
7926 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
7927 unsigned long address)
7928diff --git a/arch/sparc/include/asm/pgalloc_64.h b/arch/sparc/include/asm/pgalloc_64.h
7929index bcfe063..b333142 100644
7930--- a/arch/sparc/include/asm/pgalloc_64.h
7931+++ b/arch/sparc/include/asm/pgalloc_64.h
7932@@ -26,6 +26,7 @@ static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
7933 }
7934
7935 #define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)
7936+#define pud_populate_kernel(MM, PUD, PMD) pud_populate((MM), (PUD), (PMD))
7937
7938 static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
7939 {
7940diff --git a/arch/sparc/include/asm/pgtable_32.h b/arch/sparc/include/asm/pgtable_32.h
7941index 6fc1348..390c50a 100644
7942--- a/arch/sparc/include/asm/pgtable_32.h
7943+++ b/arch/sparc/include/asm/pgtable_32.h
7944@@ -50,6 +50,9 @@ extern unsigned long calc_highpages(void);
7945 #define PAGE_SHARED SRMMU_PAGE_SHARED
7946 #define PAGE_COPY SRMMU_PAGE_COPY
7947 #define PAGE_READONLY SRMMU_PAGE_RDONLY
7948+#define PAGE_SHARED_NOEXEC SRMMU_PAGE_SHARED_NOEXEC
7949+#define PAGE_COPY_NOEXEC SRMMU_PAGE_COPY_NOEXEC
7950+#define PAGE_READONLY_NOEXEC SRMMU_PAGE_RDONLY_NOEXEC
7951 #define PAGE_KERNEL SRMMU_PAGE_KERNEL
7952
7953 /* Top-level page directory - dummy used by init-mm.
7954@@ -62,18 +65,18 @@ extern unsigned long ptr_in_current_pgd;
7955
7956 /* xwr */
7957 #define __P000 PAGE_NONE
7958-#define __P001 PAGE_READONLY
7959-#define __P010 PAGE_COPY
7960-#define __P011 PAGE_COPY
7961+#define __P001 PAGE_READONLY_NOEXEC
7962+#define __P010 PAGE_COPY_NOEXEC
7963+#define __P011 PAGE_COPY_NOEXEC
7964 #define __P100 PAGE_READONLY
7965 #define __P101 PAGE_READONLY
7966 #define __P110 PAGE_COPY
7967 #define __P111 PAGE_COPY
7968
7969 #define __S000 PAGE_NONE
7970-#define __S001 PAGE_READONLY
7971-#define __S010 PAGE_SHARED
7972-#define __S011 PAGE_SHARED
7973+#define __S001 PAGE_READONLY_NOEXEC
7974+#define __S010 PAGE_SHARED_NOEXEC
7975+#define __S011 PAGE_SHARED_NOEXEC
7976 #define __S100 PAGE_READONLY
7977 #define __S101 PAGE_READONLY
7978 #define __S110 PAGE_SHARED
7979diff --git a/arch/sparc/include/asm/pgtsrmmu.h b/arch/sparc/include/asm/pgtsrmmu.h
7980index 79da178..c2eede8 100644
7981--- a/arch/sparc/include/asm/pgtsrmmu.h
7982+++ b/arch/sparc/include/asm/pgtsrmmu.h
7983@@ -115,6 +115,11 @@
7984 SRMMU_EXEC | SRMMU_REF)
7985 #define SRMMU_PAGE_RDONLY __pgprot(SRMMU_VALID | SRMMU_CACHE | \
7986 SRMMU_EXEC | SRMMU_REF)
7987+
7988+#define SRMMU_PAGE_SHARED_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_WRITE | SRMMU_REF)
7989+#define SRMMU_PAGE_COPY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
7990+#define SRMMU_PAGE_RDONLY_NOEXEC __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_REF)
7991+
7992 #define SRMMU_PAGE_KERNEL __pgprot(SRMMU_VALID | SRMMU_CACHE | SRMMU_PRIV | \
7993 SRMMU_DIRTY | SRMMU_REF)
7994
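sparc32 gains non-executable page protections: the SRMMU_PAGE_{SHARED,COPY,RDONLY}_NOEXEC pgprots drop the SRMMU_EXEC bit, and the __Pxxx/__Sxxx protection tables (indexed by the x/w/r bits of the mmap prot, as the "xwr" comment notes) are rewired so that mappings created without PROT_EXEC actually lose execute permission instead of inheriting it from the old read-implies-exec rows. The table indexing, reduced to labels:

#include <stdio.h>

/* Index bits: 4 = x, 2 = w, 1 = r, matching the __Pxxx naming. */
static const char *private_prot[8] = {
	"NONE",			/* ---			*/
	"READONLY_NOEXEC",	/* --r (was READONLY)	*/
	"COPY_NOEXEC",		/* -w- (was COPY)	*/
	"COPY_NOEXEC",		/* -wr (was COPY)	*/
	"READONLY",		/* x--			*/
	"READONLY",		/* x-r			*/
	"COPY",			/* xw-			*/
	"COPY",			/* xwr			*/
};

int main(void)
{
	printf("PROT_READ           -> PAGE_%s\n", private_prot[1]);
	printf("PROT_READ|PROT_EXEC -> PAGE_%s\n", private_prot[5]);
	return 0;
}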
7995diff --git a/arch/sparc/include/asm/spinlock_64.h b/arch/sparc/include/asm/spinlock_64.h
7996index 9689176..63c18ea 100644
7997--- a/arch/sparc/include/asm/spinlock_64.h
7998+++ b/arch/sparc/include/asm/spinlock_64.h
7999@@ -92,14 +92,19 @@ static inline void arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long fla
8000
8001 /* Multi-reader locks, these are much saner than the 32-bit Sparc ones... */
8002
8003-static void inline arch_read_lock(arch_rwlock_t *lock)
8004+static inline void arch_read_lock(arch_rwlock_t *lock)
8005 {
8006 unsigned long tmp1, tmp2;
8007
8008 __asm__ __volatile__ (
8009 "1: ldsw [%2], %0\n"
8010 " brlz,pn %0, 2f\n"
8011-"4: add %0, 1, %1\n"
8012+"4: addcc %0, 1, %1\n"
8013+
8014+#ifdef CONFIG_PAX_REFCOUNT
8015+" tvs %%icc, 6\n"
8016+#endif
8017+
8018 " cas [%2], %0, %1\n"
8019 " cmp %0, %1\n"
8020 " bne,pn %%icc, 1b\n"
8021@@ -112,10 +117,10 @@ static void inline arch_read_lock(arch_rwlock_t *lock)
8022 " .previous"
8023 : "=&r" (tmp1), "=&r" (tmp2)
8024 : "r" (lock)
8025- : "memory");
8026+ : "memory", "cc");
8027 }
8028
8029-static int inline arch_read_trylock(arch_rwlock_t *lock)
8030+static inline int arch_read_trylock(arch_rwlock_t *lock)
8031 {
8032 int tmp1, tmp2;
8033
8034@@ -123,7 +128,12 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
8035 "1: ldsw [%2], %0\n"
8036 " brlz,a,pn %0, 2f\n"
8037 " mov 0, %0\n"
8038-" add %0, 1, %1\n"
8039+" addcc %0, 1, %1\n"
8040+
8041+#ifdef CONFIG_PAX_REFCOUNT
8042+" tvs %%icc, 6\n"
8043+#endif
8044+
8045 " cas [%2], %0, %1\n"
8046 " cmp %0, %1\n"
8047 " bne,pn %%icc, 1b\n"
8048@@ -136,13 +146,18 @@ static int inline arch_read_trylock(arch_rwlock_t *lock)
8049 return tmp1;
8050 }
8051
8052-static void inline arch_read_unlock(arch_rwlock_t *lock)
8053+static inline void arch_read_unlock(arch_rwlock_t *lock)
8054 {
8055 unsigned long tmp1, tmp2;
8056
8057 __asm__ __volatile__(
8058 "1: lduw [%2], %0\n"
8059-" sub %0, 1, %1\n"
8060+" subcc %0, 1, %1\n"
8061+
8062+#ifdef CONFIG_PAX_REFCOUNT
8063+" tvs %%icc, 6\n"
8064+#endif
8065+
8066 " cas [%2], %0, %1\n"
8067 " cmp %0, %1\n"
8068 " bne,pn %%xcc, 1b\n"
8069@@ -152,7 +167,7 @@ static void inline arch_read_unlock(arch_rwlock_t *lock)
8070 : "memory");
8071 }
8072
8073-static void inline arch_write_lock(arch_rwlock_t *lock)
8074+static inline void arch_write_lock(arch_rwlock_t *lock)
8075 {
8076 unsigned long mask, tmp1, tmp2;
8077
8078@@ -177,7 +192,7 @@ static void inline arch_write_lock(arch_rwlock_t *lock)
8079 : "memory");
8080 }
8081
8082-static void inline arch_write_unlock(arch_rwlock_t *lock)
8083+static inline void arch_write_unlock(arch_rwlock_t *lock)
8084 {
8085 __asm__ __volatile__(
8086 " stw %%g0, [%0]"
8087@@ -186,7 +201,7 @@ static void inline arch_write_unlock(arch_rwlock_t *lock)
8088 : "memory");
8089 }
8090
8091-static int inline arch_write_trylock(arch_rwlock_t *lock)
8092+static inline int arch_write_trylock(arch_rwlock_t *lock)
8093 {
8094 unsigned long mask, tmp1, tmp2, result;
8095
8096diff --git a/arch/sparc/include/asm/thread_info_32.h b/arch/sparc/include/asm/thread_info_32.h
8097index dd38075..e7cac83 100644
8098--- a/arch/sparc/include/asm/thread_info_32.h
8099+++ b/arch/sparc/include/asm/thread_info_32.h
8100@@ -49,6 +49,8 @@ struct thread_info {
8101 unsigned long w_saved;
8102
8103 struct restart_block restart_block;
8104+
8105+ unsigned long lowest_stack;
8106 };
8107
8108 /*
8109diff --git a/arch/sparc/include/asm/thread_info_64.h b/arch/sparc/include/asm/thread_info_64.h
8110index d5e5042..9bfee76 100644
8111--- a/arch/sparc/include/asm/thread_info_64.h
8112+++ b/arch/sparc/include/asm/thread_info_64.h
8113@@ -63,6 +63,8 @@ struct thread_info {
8114 struct pt_regs *kern_una_regs;
8115 unsigned int kern_una_insn;
8116
8117+ unsigned long lowest_stack;
8118+
8119 unsigned long fpregs[0] __attribute__ ((aligned(64)));
8120 };
8121
8122@@ -192,10 +194,11 @@ register struct thread_info *current_thread_info_reg asm("g6");
8123 #define TIF_UNALIGNED 5 /* allowed to do unaligned accesses */
8124 /* flag bit 6 is available */
8125 #define TIF_32BIT 7 /* 32-bit binary */
8126-/* flag bit 8 is available */
8127+#define TIF_GRSEC_SETXID 8 /* update credentials on syscall entry/exit */
8128 #define TIF_SECCOMP 9 /* secure computing */
8129 #define TIF_SYSCALL_AUDIT 10 /* syscall auditing active */
8130 #define TIF_SYSCALL_TRACEPOINT 11 /* syscall tracepoint instrumentation */
8131+
8132 /* NOTE: Thread flags >= 12 should be ones we have no interest
8133 * in using in assembly, else we can't use the mask as
8134 * an immediate value in instructions such as andcc.
8135@@ -214,12 +217,18 @@ register struct thread_info *current_thread_info_reg asm("g6");
8136 #define _TIF_SYSCALL_AUDIT (1<<TIF_SYSCALL_AUDIT)
8137 #define _TIF_SYSCALL_TRACEPOINT (1<<TIF_SYSCALL_TRACEPOINT)
8138 #define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
8139+#define _TIF_GRSEC_SETXID (1<<TIF_GRSEC_SETXID)
8140
8141 #define _TIF_USER_WORK_MASK ((0xff << TI_FLAG_WSAVED_SHIFT) | \
8142 _TIF_DO_NOTIFY_RESUME_MASK | \
8143 _TIF_NEED_RESCHED)
8144 #define _TIF_DO_NOTIFY_RESUME_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING)
8145
8146+#define _TIF_WORK_SYSCALL \
8147+ (_TIF_SYSCALL_TRACE | _TIF_SECCOMP | _TIF_SYSCALL_AUDIT | \
8148+ _TIF_SYSCALL_TRACEPOINT | _TIF_GRSEC_SETXID)
8149+
8150+
8151 /*
8152 * Thread-synchronous status.
8153 *
8154diff --git a/arch/sparc/include/asm/uaccess.h b/arch/sparc/include/asm/uaccess.h
8155index 0167d26..767bb0c 100644
8156--- a/arch/sparc/include/asm/uaccess.h
8157+++ b/arch/sparc/include/asm/uaccess.h
8158@@ -1,5 +1,6 @@
8159 #ifndef ___ASM_SPARC_UACCESS_H
8160 #define ___ASM_SPARC_UACCESS_H
8161+
8162 #if defined(__sparc__) && defined(__arch64__)
8163 #include <asm/uaccess_64.h>
8164 #else
8165diff --git a/arch/sparc/include/asm/uaccess_32.h b/arch/sparc/include/asm/uaccess_32.h
8166index 53a28dd..50c38c3 100644
8167--- a/arch/sparc/include/asm/uaccess_32.h
8168+++ b/arch/sparc/include/asm/uaccess_32.h
8169@@ -250,27 +250,46 @@ extern unsigned long __copy_user(void __user *to, const void __user *from, unsig
8170
8171 static inline unsigned long copy_to_user(void __user *to, const void *from, unsigned long n)
8172 {
8173- if (n && __access_ok((unsigned long) to, n))
8174+ if ((long)n < 0)
8175+ return n;
8176+
8177+ if (n && __access_ok((unsigned long) to, n)) {
8178+ if (!__builtin_constant_p(n))
8179+ check_object_size(from, n, true);
8180 return __copy_user(to, (__force void __user *) from, n);
8181- else
8182+ } else
8183 return n;
8184 }
8185
8186 static inline unsigned long __copy_to_user(void __user *to, const void *from, unsigned long n)
8187 {
8188+ if ((long)n < 0)
8189+ return n;
8190+
8191+ if (!__builtin_constant_p(n))
8192+ check_object_size(from, n, true);
8193+
8194 return __copy_user(to, (__force void __user *) from, n);
8195 }
8196
8197 static inline unsigned long copy_from_user(void *to, const void __user *from, unsigned long n)
8198 {
8199- if (n && __access_ok((unsigned long) from, n))
8200+ if ((long)n < 0)
8201+ return n;
8202+
8203+ if (n && __access_ok((unsigned long) from, n)) {
8204+ if (!__builtin_constant_p(n))
8205+ check_object_size(to, n, false);
8206 return __copy_user((__force void __user *) to, from, n);
8207- else
8208+ } else
8209 return n;
8210 }
8211
8212 static inline unsigned long __copy_from_user(void *to, const void __user *from, unsigned long n)
8213 {
8214+ if ((long)n < 0)
8215+ return n;
8216+
8217 return __copy_user((__force void __user *) to, from, n);
8218 }
8219
8220diff --git a/arch/sparc/include/asm/uaccess_64.h b/arch/sparc/include/asm/uaccess_64.h
8221index e562d3c..191f176 100644
8222--- a/arch/sparc/include/asm/uaccess_64.h
8223+++ b/arch/sparc/include/asm/uaccess_64.h
8224@@ -10,6 +10,7 @@
8225 #include <linux/compiler.h>
8226 #include <linux/string.h>
8227 #include <linux/thread_info.h>
8228+#include <linux/kernel.h>
8229 #include <asm/asi.h>
8230 #include <asm/spitfire.h>
8231 #include <asm-generic/uaccess-unaligned.h>
8232@@ -214,8 +215,15 @@ extern unsigned long copy_from_user_fixup(void *to, const void __user *from,
8233 static inline unsigned long __must_check
8234 copy_from_user(void *to, const void __user *from, unsigned long size)
8235 {
8236- unsigned long ret = ___copy_from_user(to, from, size);
8237+ unsigned long ret;
8238
8239+ if ((long)size < 0 || size > INT_MAX)
8240+ return size;
8241+
8242+ if (!__builtin_constant_p(size))
8243+ check_object_size(to, size, false);
8244+
8245+ ret = ___copy_from_user(to, from, size);
8246 if (unlikely(ret))
8247 ret = copy_from_user_fixup(to, from, size);
8248
8249@@ -231,8 +239,15 @@ extern unsigned long copy_to_user_fixup(void __user *to, const void *from,
8250 static inline unsigned long __must_check
8251 copy_to_user(void __user *to, const void *from, unsigned long size)
8252 {
8253- unsigned long ret = ___copy_to_user(to, from, size);
8254+ unsigned long ret;
8255
8256+ if ((long)size < 0 || size > INT_MAX)
8257+ return size;
8258+
8259+ if (!__builtin_constant_p(size))
8260+ check_object_size(from, size, true);
8261+
8262+ ret = ___copy_to_user(to, from, size);
8263 if (unlikely(ret))
8264 ret = copy_to_user_fixup(to, from, size);
8265 return ret;
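Both uaccess headers now share one hardened shape: a byte count whose sign bit is set is rejected outright (the 64-bit variant additionally caps at INT_MAX), and non-constant sizes are routed through check_object_size() so the kernel-side buffer's bounds can be verified before the raw copy runs; constant sizes are presumably validated at compile time, which is why __builtin_constant_p() gates the call. A userspace sketch of that control flow, with validate_object() standing in for check_object_size() and memcpy() for __copy_user():

#include <limits.h>
#include <stdio.h>
#include <string.h>

/* Stand-ins for the kernel helpers used in the hunks above. */
static int access_ok_range(unsigned long addr, unsigned long n)
{
	(void)addr; (void)n;
	return 1;
}

static void validate_object(const void *ptr, unsigned long n, int to_user)
{
	(void)ptr; (void)n; (void)to_user; /* would check slab/stack bounds */
}

/* Returns the number of bytes NOT copied, like copy_to_user(). */
static unsigned long hardened_copy_to_user(void *to, const void *from,
					   unsigned long n)
{
	/* A "negative" length means a caller computed a bogus size;
	 * fail closed by reporting the whole copy as uncopied. */
	if ((long)n < 0 || n > INT_MAX)
		return n;

	if (!__builtin_constant_p(n))
		validate_object(from, n, 1);

	if (!(n && access_ok_range((unsigned long)to, n)))
		return n;

	memcpy(to, from, n);
	return 0;
}

int main(void)
{
	char src[8] = "abcdefg", dst[8];
	printf("bytes left: %lu\n", hardened_copy_to_user(dst, src, sizeof(src)));
	return 0;
}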
8266diff --git a/arch/sparc/kernel/Makefile b/arch/sparc/kernel/Makefile
8267index d432fb2..6056af1 100644
8268--- a/arch/sparc/kernel/Makefile
8269+++ b/arch/sparc/kernel/Makefile
8270@@ -3,7 +3,7 @@
8271 #
8272
8273 asflags-y := -ansi
8274-ccflags-y := -Werror
8275+#ccflags-y := -Werror
8276
8277 extra-y := head_$(BITS).o
8278
8279diff --git a/arch/sparc/kernel/ds.c b/arch/sparc/kernel/ds.c
8280index 5ef48da..11d460f 100644
8281--- a/arch/sparc/kernel/ds.c
8282+++ b/arch/sparc/kernel/ds.c
8283@@ -783,6 +783,16 @@ void ldom_set_var(const char *var, const char *value)
8284 char *base, *p;
8285 int msg_len, loops;
8286
8287+ if (strlen(var) + strlen(value) + 2 >
8288+ sizeof(pkt) - sizeof(pkt.header)) {
8289+ printk(KERN_ERR PFX
8290+ "contents length: %zu, which more than max: %lu,"
8291+ "so could not set (%s) variable to (%s).\n",
8292+ strlen(var) + strlen(value) + 2,
8293+ sizeof(pkt) - sizeof(pkt.header), var, value);
8294+ return;
8295+ }
8296+
8297 memset(&pkt, 0, sizeof(pkt));
8298 pkt.header.data.tag.type = DS_DATA;
8299 pkt.header.data.handle = cp->handle;
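The guard added to ldom_set_var() above makes the size math explicit: the payload needs strlen(var) + strlen(value) + 2 bytes (a NUL terminator after each string) and must fit in sizeof(pkt) - sizeof(pkt.header). A standalone check in the same shape, with an illustrative packet layout:

#include <stdio.h>
#include <string.h>

struct pkt_sketch {
	char header[32];  /* illustrative header size */
	char payload[64]; /* illustrative payload size */
};

static int ldom_var_fits(const char *var, const char *value)
{
	struct pkt_sketch pkt;
	size_t need = strlen(var) + strlen(value) + 2; /* two NUL terminators */
	size_t room = sizeof(pkt) - sizeof(pkt.header);

	if (need > room) {
		fprintf(stderr, "contents length %zu exceeds max %zu\n",
			need, room);
		return 0;
	}
	return 1;
}

int main(void)
{
	printf("fits: %d\n", ldom_var_fits("boot-device", "disk"));
	return 0;
}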
8300diff --git a/arch/sparc/kernel/process_32.c b/arch/sparc/kernel/process_32.c
8301index fdd819d..5af08c8 100644
8302--- a/arch/sparc/kernel/process_32.c
8303+++ b/arch/sparc/kernel/process_32.c
8304@@ -116,14 +116,14 @@ void show_regs(struct pt_regs *r)
8305
8306 printk("PSR: %08lx PC: %08lx NPC: %08lx Y: %08lx %s\n",
8307 r->psr, r->pc, r->npc, r->y, print_tainted());
8308- printk("PC: <%pS>\n", (void *) r->pc);
8309+ printk("PC: <%pA>\n", (void *) r->pc);
8310 printk("%%G: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
8311 r->u_regs[0], r->u_regs[1], r->u_regs[2], r->u_regs[3],
8312 r->u_regs[4], r->u_regs[5], r->u_regs[6], r->u_regs[7]);
8313 printk("%%O: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
8314 r->u_regs[8], r->u_regs[9], r->u_regs[10], r->u_regs[11],
8315 r->u_regs[12], r->u_regs[13], r->u_regs[14], r->u_regs[15]);
8316- printk("RPC: <%pS>\n", (void *) r->u_regs[15]);
8317+ printk("RPC: <%pA>\n", (void *) r->u_regs[15]);
8318
8319 printk("%%L: %08lx %08lx %08lx %08lx %08lx %08lx %08lx %08lx\n",
8320 rw->locals[0], rw->locals[1], rw->locals[2], rw->locals[3],
8321@@ -160,7 +160,7 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
8322 rw = (struct reg_window32 *) fp;
8323 pc = rw->ins[7];
8324 printk("[%08lx : ", pc);
8325- printk("%pS ] ", (void *) pc);
8326+ printk("%pA ] ", (void *) pc);
8327 fp = rw->ins[6];
8328 } while (++count < 16);
8329 printk("\n");
8330diff --git a/arch/sparc/kernel/process_64.c b/arch/sparc/kernel/process_64.c
8331index baebab2..9cd13b1 100644
8332--- a/arch/sparc/kernel/process_64.c
8333+++ b/arch/sparc/kernel/process_64.c
8334@@ -158,7 +158,7 @@ static void show_regwindow(struct pt_regs *regs)
8335 printk("i4: %016lx i5: %016lx i6: %016lx i7: %016lx\n",
8336 rwk->ins[4], rwk->ins[5], rwk->ins[6], rwk->ins[7]);
8337 if (regs->tstate & TSTATE_PRIV)
8338- printk("I7: <%pS>\n", (void *) rwk->ins[7]);
8339+ printk("I7: <%pA>\n", (void *) rwk->ins[7]);
8340 }
8341
8342 void show_regs(struct pt_regs *regs)
8343@@ -167,7 +167,7 @@ void show_regs(struct pt_regs *regs)
8344
8345 printk("TSTATE: %016lx TPC: %016lx TNPC: %016lx Y: %08x %s\n", regs->tstate,
8346 regs->tpc, regs->tnpc, regs->y, print_tainted());
8347- printk("TPC: <%pS>\n", (void *) regs->tpc);
8348+ printk("TPC: <%pA>\n", (void *) regs->tpc);
8349 printk("g0: %016lx g1: %016lx g2: %016lx g3: %016lx\n",
8350 regs->u_regs[0], regs->u_regs[1], regs->u_regs[2],
8351 regs->u_regs[3]);
8352@@ -180,7 +180,7 @@ void show_regs(struct pt_regs *regs)
8353 printk("o4: %016lx o5: %016lx sp: %016lx ret_pc: %016lx\n",
8354 regs->u_regs[12], regs->u_regs[13], regs->u_regs[14],
8355 regs->u_regs[15]);
8356- printk("RPC: <%pS>\n", (void *) regs->u_regs[15]);
8357+ printk("RPC: <%pA>\n", (void *) regs->u_regs[15]);
8358 show_regwindow(regs);
8359 show_stack(current, (unsigned long *) regs->u_regs[UREG_FP]);
8360 }
8361@@ -269,7 +269,7 @@ void arch_trigger_all_cpu_backtrace(void)
8362 ((tp && tp->task) ? tp->task->pid : -1));
8363
8364 if (gp->tstate & TSTATE_PRIV) {
8365- printk(" TPC[%pS] O7[%pS] I7[%pS] RPC[%pS]\n",
8366+ printk(" TPC[%pA] O7[%pA] I7[%pA] RPC[%pA]\n",
8367 (void *) gp->tpc,
8368 (void *) gp->o7,
8369 (void *) gp->i7,
8370diff --git a/arch/sparc/kernel/prom_common.c b/arch/sparc/kernel/prom_common.c
8371index 79cc0d1..ec62734 100644
8372--- a/arch/sparc/kernel/prom_common.c
8373+++ b/arch/sparc/kernel/prom_common.c
8374@@ -144,7 +144,7 @@ static int __init prom_common_nextprop(phandle node, char *prev, char *buf)
8375
8376 unsigned int prom_early_allocated __initdata;
8377
8378-static struct of_pdt_ops prom_sparc_ops __initdata = {
8379+static struct of_pdt_ops prom_sparc_ops __initconst = {
8380 .nextprop = prom_common_nextprop,
8381 .getproplen = prom_getproplen,
8382 .getproperty = prom_getproperty,
8383diff --git a/arch/sparc/kernel/ptrace_64.c b/arch/sparc/kernel/ptrace_64.c
8384index 7ff45e4..a58f271 100644
8385--- a/arch/sparc/kernel/ptrace_64.c
8386+++ b/arch/sparc/kernel/ptrace_64.c
8387@@ -1057,6 +1057,10 @@ long arch_ptrace(struct task_struct *child, long request,
8388 return ret;
8389 }
8390
8391+#ifdef CONFIG_GRKERNSEC_SETXID
8392+extern void gr_delayed_cred_worker(void);
8393+#endif
8394+
8395 asmlinkage int syscall_trace_enter(struct pt_regs *regs)
8396 {
8397 int ret = 0;
8398@@ -1064,6 +1068,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
8399 /* do the secure computing check first */
8400 secure_computing_strict(regs->u_regs[UREG_G1]);
8401
8402+#ifdef CONFIG_GRKERNSEC_SETXID
8403+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
8404+ gr_delayed_cred_worker();
8405+#endif
8406+
8407 if (test_thread_flag(TIF_SYSCALL_TRACE))
8408 ret = tracehook_report_syscall_entry(regs);
8409
8410@@ -1084,6 +1093,11 @@ asmlinkage int syscall_trace_enter(struct pt_regs *regs)
8411
8412 asmlinkage void syscall_trace_leave(struct pt_regs *regs)
8413 {
8414+#ifdef CONFIG_GRKERNSEC_SETXID
8415+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
8416+ gr_delayed_cred_worker();
8417+#endif
8418+
8419 audit_syscall_exit(regs);
8420
8421 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
8422diff --git a/arch/sparc/kernel/sys_sparc_32.c b/arch/sparc/kernel/sys_sparc_32.c
8423index 3a8d184..49498a8 100644
8424--- a/arch/sparc/kernel/sys_sparc_32.c
8425+++ b/arch/sparc/kernel/sys_sparc_32.c
8426@@ -52,7 +52,7 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8427 if (len > TASK_SIZE - PAGE_SIZE)
8428 return -ENOMEM;
8429 if (!addr)
8430- addr = TASK_UNMAPPED_BASE;
8431+ addr = current->mm->mmap_base;
8432
8433 info.flags = 0;
8434 info.length = len;
8435diff --git a/arch/sparc/kernel/sys_sparc_64.c b/arch/sparc/kernel/sys_sparc_64.c
8436index 2daaaa6..4fb84dc 100644
8437--- a/arch/sparc/kernel/sys_sparc_64.c
8438+++ b/arch/sparc/kernel/sys_sparc_64.c
8439@@ -90,13 +90,14 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8440 struct vm_area_struct * vma;
8441 unsigned long task_size = TASK_SIZE;
8442 int do_color_align;
8443+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8444 struct vm_unmapped_area_info info;
8445
8446 if (flags & MAP_FIXED) {
8447 /* We do not accept a shared mapping if it would violate
8448 * cache aliasing constraints.
8449 */
8450- if ((flags & MAP_SHARED) &&
8451+ if ((filp || (flags & MAP_SHARED)) &&
8452 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
8453 return -EINVAL;
8454 return addr;
8455@@ -111,6 +112,10 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8456 if (filp || (flags & MAP_SHARED))
8457 do_color_align = 1;
8458
8459+#ifdef CONFIG_PAX_RANDMMAP
8460+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8461+#endif
8462+
8463 if (addr) {
8464 if (do_color_align)
8465 addr = COLOR_ALIGN(addr, pgoff);
8466@@ -118,22 +123,28 @@ unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr, unsi
8467 addr = PAGE_ALIGN(addr);
8468
8469 vma = find_vma(mm, addr);
8470- if (task_size - len >= addr &&
8471- (!vma || addr + len <= vma->vm_start))
8472+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8473 return addr;
8474 }
8475
8476 info.flags = 0;
8477 info.length = len;
8478- info.low_limit = TASK_UNMAPPED_BASE;
8479+ info.low_limit = mm->mmap_base;
8480 info.high_limit = min(task_size, VA_EXCLUDE_START);
8481 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
8482 info.align_offset = pgoff << PAGE_SHIFT;
8483+ info.threadstack_offset = offset;
8484 addr = vm_unmapped_area(&info);
8485
8486 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
8487 VM_BUG_ON(addr != -ENOMEM);
8488 info.low_limit = VA_EXCLUDE_END;
8489+
8490+#ifdef CONFIG_PAX_RANDMMAP
8491+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8492+ info.low_limit += mm->delta_mmap;
8493+#endif
8494+
8495 info.high_limit = task_size;
8496 addr = vm_unmapped_area(&info);
8497 }
8498@@ -151,6 +162,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8499 unsigned long task_size = STACK_TOP32;
8500 unsigned long addr = addr0;
8501 int do_color_align;
8502+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
8503 struct vm_unmapped_area_info info;
8504
8505 /* This should only ever run for 32-bit processes. */
8506@@ -160,7 +172,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8507 /* We do not accept a shared mapping if it would violate
8508 * cache aliasing constraints.
8509 */
8510- if ((flags & MAP_SHARED) &&
8511+ if ((filp || (flags & MAP_SHARED)) &&
8512 ((addr - (pgoff << PAGE_SHIFT)) & (SHMLBA - 1)))
8513 return -EINVAL;
8514 return addr;
8515@@ -173,6 +185,10 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8516 if (filp || (flags & MAP_SHARED))
8517 do_color_align = 1;
8518
8519+#ifdef CONFIG_PAX_RANDMMAP
8520+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8521+#endif
8522+
8523 /* requesting a specific address */
8524 if (addr) {
8525 if (do_color_align)
8526@@ -181,8 +197,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8527 addr = PAGE_ALIGN(addr);
8528
8529 vma = find_vma(mm, addr);
8530- if (task_size - len >= addr &&
8531- (!vma || addr + len <= vma->vm_start))
8532+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
8533 return addr;
8534 }
8535
8536@@ -192,6 +207,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8537 info.high_limit = mm->mmap_base;
8538 info.align_mask = do_color_align ? (PAGE_MASK & (SHMLBA - 1)) : 0;
8539 info.align_offset = pgoff << PAGE_SHIFT;
8540+ info.threadstack_offset = offset;
8541 addr = vm_unmapped_area(&info);
8542
8543 /*
8544@@ -204,6 +220,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
8545 VM_BUG_ON(addr != -ENOMEM);
8546 info.flags = 0;
8547 info.low_limit = TASK_UNMAPPED_BASE;
8548+
8549+#ifdef CONFIG_PAX_RANDMMAP
8550+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8551+ info.low_limit += mm->delta_mmap;
8552+#endif
8553+
8554 info.high_limit = STACK_TOP32;
8555 addr = vm_unmapped_area(&info);
8556 }
8557@@ -260,10 +282,14 @@ unsigned long get_fb_unmapped_area(struct file *filp, unsigned long orig_addr, u
8558 EXPORT_SYMBOL(get_fb_unmapped_area);
8559
8560 /* Essentially the same as PowerPC. */
8561-static unsigned long mmap_rnd(void)
8562+static unsigned long mmap_rnd(struct mm_struct *mm)
8563 {
8564 unsigned long rnd = 0UL;
8565
8566+#ifdef CONFIG_PAX_RANDMMAP
8567+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
8568+#endif
8569+
8570 if (current->flags & PF_RANDOMIZE) {
8571 unsigned long val = get_random_int();
8572 if (test_thread_flag(TIF_32BIT))
8573@@ -276,7 +302,7 @@ static unsigned long mmap_rnd(void)
8574
8575 void arch_pick_mmap_layout(struct mm_struct *mm)
8576 {
8577- unsigned long random_factor = mmap_rnd();
8578+ unsigned long random_factor = mmap_rnd(mm);
8579 unsigned long gap;
8580
8581 /*
8582@@ -289,6 +315,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8583 gap == RLIM_INFINITY ||
8584 sysctl_legacy_va_layout) {
8585 mm->mmap_base = TASK_UNMAPPED_BASE + random_factor;
8586+
8587+#ifdef CONFIG_PAX_RANDMMAP
8588+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8589+ mm->mmap_base += mm->delta_mmap;
8590+#endif
8591+
8592 mm->get_unmapped_area = arch_get_unmapped_area;
8593 mm->unmap_area = arch_unmap_area;
8594 } else {
8595@@ -301,6 +333,12 @@ void arch_pick_mmap_layout(struct mm_struct *mm)
8596 gap = (task_size / 6 * 5);
8597
8598 mm->mmap_base = PAGE_ALIGN(task_size - gap - random_factor);
8599+
8600+#ifdef CONFIG_PAX_RANDMMAP
8601+ if (mm->pax_flags & MF_PAX_RANDMMAP)
8602+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
8603+#endif
8604+
8605 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
8606 mm->unmap_area = arch_unmap_area_topdown;
8607 }
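Every PAX_RANDMMAP hunk above applies one rule: wherever a fixed search floor (TASK_UNMAPPED_BASE, VA_EXCLUDE_END) or the computed mmap base is used, shift it by the per-mm random delta so the VMA search starts at a randomized address — and suppress the generic mmap_rnd() contribution when PaX supplies its own randomization. A compressed sketch of the bottom-up case; the field names follow the hunks, but the flag and delta values are illustrative:

#include <stdio.h>

#define MF_PAX_RANDMMAP    0x8UL                  /* illustrative flag value */
#define TASK_UNMAPPED_BASE 0x0000010000000000UL   /* illustrative */

struct mm_sketch {
	unsigned long pax_flags;
	unsigned long delta_mmap; /* randomized once per exec */
};

static unsigned long pick_low_limit(const struct mm_sketch *mm)
{
	unsigned long low = TASK_UNMAPPED_BASE;

	/* Same shape as the info.low_limit adjustments above. */
	if (mm->pax_flags & MF_PAX_RANDMMAP)
		low += mm->delta_mmap;
	return low;
}

int main(void)
{
	struct mm_sketch mm = {
		.pax_flags = MF_PAX_RANDMMAP,
		.delta_mmap = 0x12345000UL,
	};
	printf("search starts at %#lx\n", pick_low_limit(&mm));
	return 0;
}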
8608diff --git a/arch/sparc/kernel/syscalls.S b/arch/sparc/kernel/syscalls.S
8609index 22a1098..6255eb9 100644
8610--- a/arch/sparc/kernel/syscalls.S
8611+++ b/arch/sparc/kernel/syscalls.S
8612@@ -52,7 +52,7 @@ sys32_rt_sigreturn:
8613 #endif
8614 .align 32
8615 1: ldx [%g6 + TI_FLAGS], %l5
8616- andcc %l5, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
8617+ andcc %l5, _TIF_WORK_SYSCALL, %g0
8618 be,pt %icc, rtrap
8619 nop
8620 call syscall_trace_leave
8621@@ -184,7 +184,7 @@ linux_sparc_syscall32:
8622
8623 srl %i5, 0, %o5 ! IEU1
8624 srl %i2, 0, %o2 ! IEU0 Group
8625- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
8626+ andcc %l0, _TIF_WORK_SYSCALL, %g0
8627 bne,pn %icc, linux_syscall_trace32 ! CTI
8628 mov %i0, %l5 ! IEU1
8629 call %l7 ! CTI Group brk forced
8630@@ -207,7 +207,7 @@ linux_sparc_syscall:
8631
8632 mov %i3, %o3 ! IEU1
8633 mov %i4, %o4 ! IEU0 Group
8634- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
8635+ andcc %l0, _TIF_WORK_SYSCALL, %g0
8636 bne,pn %icc, linux_syscall_trace ! CTI Group
8637 mov %i0, %l5 ! IEU0
8638 2: call %l7 ! CTI Group brk forced
8639@@ -223,7 +223,7 @@ ret_sys_call:
8640
8641 cmp %o0, -ERESTART_RESTARTBLOCK
8642 bgeu,pn %xcc, 1f
8643- andcc %l0, (_TIF_SYSCALL_TRACE|_TIF_SECCOMP|_TIF_SYSCALL_AUDIT|_TIF_SYSCALL_TRACEPOINT), %g0
8644+ andcc %l0, _TIF_WORK_SYSCALL, %g0
8645 ldx [%sp + PTREGS_OFF + PT_V9_TNPC], %l1 ! pc = npc
8646
8647 2:
8648diff --git a/arch/sparc/kernel/sysfs.c b/arch/sparc/kernel/sysfs.c
8649index 654e8aa..45f431b 100644
8650--- a/arch/sparc/kernel/sysfs.c
8651+++ b/arch/sparc/kernel/sysfs.c
8652@@ -266,7 +266,7 @@ static int __cpuinit sysfs_cpu_notify(struct notifier_block *self,
8653 return NOTIFY_OK;
8654 }
8655
8656-static struct notifier_block __cpuinitdata sysfs_cpu_nb = {
8657+static struct notifier_block sysfs_cpu_nb = {
8658 .notifier_call = sysfs_cpu_notify,
8659 };
8660
8661diff --git a/arch/sparc/kernel/traps_32.c b/arch/sparc/kernel/traps_32.c
8662index 6629829..036032d 100644
8663--- a/arch/sparc/kernel/traps_32.c
8664+++ b/arch/sparc/kernel/traps_32.c
8665@@ -44,6 +44,8 @@ static void instruction_dump(unsigned long *pc)
8666 #define __SAVE __asm__ __volatile__("save %sp, -0x40, %sp\n\t")
8667 #define __RESTORE __asm__ __volatile__("restore %g0, %g0, %g0\n\t")
8668
8669+extern void gr_handle_kernel_exploit(void);
8670+
8671 void die_if_kernel(char *str, struct pt_regs *regs)
8672 {
8673 static int die_counter;
8674@@ -76,15 +78,17 @@ void die_if_kernel(char *str, struct pt_regs *regs)
8675 count++ < 30 &&
8676 (((unsigned long) rw) >= PAGE_OFFSET) &&
8677 !(((unsigned long) rw) & 0x7)) {
8678- printk("Caller[%08lx]: %pS\n", rw->ins[7],
8679+ printk("Caller[%08lx]: %pA\n", rw->ins[7],
8680 (void *) rw->ins[7]);
8681 rw = (struct reg_window32 *)rw->ins[6];
8682 }
8683 }
8684 printk("Instruction DUMP:");
8685 instruction_dump ((unsigned long *) regs->pc);
8686- if(regs->psr & PSR_PS)
8687+ if(regs->psr & PSR_PS) {
8688+ gr_handle_kernel_exploit();
8689 do_exit(SIGKILL);
8690+ }
8691 do_exit(SIGSEGV);
8692 }
8693
8694diff --git a/arch/sparc/kernel/traps_64.c b/arch/sparc/kernel/traps_64.c
8695index b3f833a..ac74b2d 100644
8696--- a/arch/sparc/kernel/traps_64.c
8697+++ b/arch/sparc/kernel/traps_64.c
8698@@ -76,7 +76,7 @@ static void dump_tl1_traplog(struct tl1_traplog *p)
8699 i + 1,
8700 p->trapstack[i].tstate, p->trapstack[i].tpc,
8701 p->trapstack[i].tnpc, p->trapstack[i].tt);
8702- printk("TRAPLOG: TPC<%pS>\n", (void *) p->trapstack[i].tpc);
8703+ printk("TRAPLOG: TPC<%pA>\n", (void *) p->trapstack[i].tpc);
8704 }
8705 }
8706
8707@@ -96,6 +96,12 @@ void bad_trap(struct pt_regs *regs, long lvl)
8708
8709 lvl -= 0x100;
8710 if (regs->tstate & TSTATE_PRIV) {
8711+
8712+#ifdef CONFIG_PAX_REFCOUNT
8713+ if (lvl == 6)
8714+ pax_report_refcount_overflow(regs);
8715+#endif
8716+
8717 sprintf(buffer, "Kernel bad sw trap %lx", lvl);
8718 die_if_kernel(buffer, regs);
8719 }
8720@@ -114,11 +120,16 @@ void bad_trap(struct pt_regs *regs, long lvl)
8721 void bad_trap_tl1(struct pt_regs *regs, long lvl)
8722 {
8723 char buffer[32];
8724-
8725+
8726 if (notify_die(DIE_TRAP_TL1, "bad trap tl1", regs,
8727 0, lvl, SIGTRAP) == NOTIFY_STOP)
8728 return;
8729
8730+#ifdef CONFIG_PAX_REFCOUNT
8731+ if (lvl == 6)
8732+ pax_report_refcount_overflow(regs);
8733+#endif
8734+
8735 dump_tl1_traplog((struct tl1_traplog *)(regs + 1));
8736
8737 sprintf (buffer, "Bad trap %lx at tl>0", lvl);
8738@@ -1142,7 +1153,7 @@ static void cheetah_log_errors(struct pt_regs *regs, struct cheetah_err_info *in
8739 regs->tpc, regs->tnpc, regs->u_regs[UREG_I7], regs->tstate);
8740 printk("%s" "ERROR(%d): ",
8741 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id());
8742- printk("TPC<%pS>\n", (void *) regs->tpc);
8743+ printk("TPC<%pA>\n", (void *) regs->tpc);
8744 printk("%s" "ERROR(%d): M_SYND(%lx), E_SYND(%lx)%s%s\n",
8745 (recoverable ? KERN_WARNING : KERN_CRIT), smp_processor_id(),
8746 (afsr & CHAFSR_M_SYNDROME) >> CHAFSR_M_SYNDROME_SHIFT,
8747@@ -1749,7 +1760,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
8748 smp_processor_id(),
8749 (type & 0x1) ? 'I' : 'D',
8750 regs->tpc);
8751- printk(KERN_EMERG "TPC<%pS>\n", (void *) regs->tpc);
8752+ printk(KERN_EMERG "TPC<%pA>\n", (void *) regs->tpc);
8753 panic("Irrecoverable Cheetah+ parity error.");
8754 }
8755
8756@@ -1757,7 +1768,7 @@ void cheetah_plus_parity_error(int type, struct pt_regs *regs)
8757 smp_processor_id(),
8758 (type & 0x1) ? 'I' : 'D',
8759 regs->tpc);
8760- printk(KERN_WARNING "TPC<%pS>\n", (void *) regs->tpc);
8761+ printk(KERN_WARNING "TPC<%pA>\n", (void *) regs->tpc);
8762 }
8763
8764 struct sun4v_error_entry {
8765@@ -2104,9 +2115,9 @@ void sun4v_itlb_error_report(struct pt_regs *regs, int tl)
8766
8767 printk(KERN_EMERG "SUN4V-ITLB: Error at TPC[%lx], tl %d\n",
8768 regs->tpc, tl);
8769- printk(KERN_EMERG "SUN4V-ITLB: TPC<%pS>\n", (void *) regs->tpc);
8770+ printk(KERN_EMERG "SUN4V-ITLB: TPC<%pA>\n", (void *) regs->tpc);
8771 printk(KERN_EMERG "SUN4V-ITLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
8772- printk(KERN_EMERG "SUN4V-ITLB: O7<%pS>\n",
8773+ printk(KERN_EMERG "SUN4V-ITLB: O7<%pA>\n",
8774 (void *) regs->u_regs[UREG_I7]);
8775 printk(KERN_EMERG "SUN4V-ITLB: vaddr[%lx] ctx[%lx] "
8776 "pte[%lx] error[%lx]\n",
8777@@ -2128,9 +2139,9 @@ void sun4v_dtlb_error_report(struct pt_regs *regs, int tl)
8778
8779 printk(KERN_EMERG "SUN4V-DTLB: Error at TPC[%lx], tl %d\n",
8780 regs->tpc, tl);
8781- printk(KERN_EMERG "SUN4V-DTLB: TPC<%pS>\n", (void *) regs->tpc);
8782+ printk(KERN_EMERG "SUN4V-DTLB: TPC<%pA>\n", (void *) regs->tpc);
8783 printk(KERN_EMERG "SUN4V-DTLB: O7[%lx]\n", regs->u_regs[UREG_I7]);
8784- printk(KERN_EMERG "SUN4V-DTLB: O7<%pS>\n",
8785+ printk(KERN_EMERG "SUN4V-DTLB: O7<%pA>\n",
8786 (void *) regs->u_regs[UREG_I7]);
8787 printk(KERN_EMERG "SUN4V-DTLB: vaddr[%lx] ctx[%lx] "
8788 "pte[%lx] error[%lx]\n",
8789@@ -2336,13 +2347,13 @@ void show_stack(struct task_struct *tsk, unsigned long *_ksp)
8790 fp = (unsigned long)sf->fp + STACK_BIAS;
8791 }
8792
8793- printk(" [%016lx] %pS\n", pc, (void *) pc);
8794+ printk(" [%016lx] %pA\n", pc, (void *) pc);
8795 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
8796 if ((pc + 8UL) == (unsigned long) &return_to_handler) {
8797 int index = tsk->curr_ret_stack;
8798 if (tsk->ret_stack && index >= graph) {
8799 pc = tsk->ret_stack[index - graph].ret;
8800- printk(" [%016lx] %pS\n", pc, (void *) pc);
8801+ printk(" [%016lx] %pA\n", pc, (void *) pc);
8802 graph++;
8803 }
8804 }
8805@@ -2360,6 +2371,8 @@ static inline struct reg_window *kernel_stack_up(struct reg_window *rw)
8806 return (struct reg_window *) (fp + STACK_BIAS);
8807 }
8808
8809+extern void gr_handle_kernel_exploit(void);
8810+
8811 void die_if_kernel(char *str, struct pt_regs *regs)
8812 {
8813 static int die_counter;
8814@@ -2388,7 +2401,7 @@ void die_if_kernel(char *str, struct pt_regs *regs)
8815 while (rw &&
8816 count++ < 30 &&
8817 kstack_valid(tp, (unsigned long) rw)) {
8818- printk("Caller[%016lx]: %pS\n", rw->ins[7],
8819+ printk("Caller[%016lx]: %pA\n", rw->ins[7],
8820 (void *) rw->ins[7]);
8821
8822 rw = kernel_stack_up(rw);
8823@@ -2401,8 +2414,10 @@ void die_if_kernel(char *str, struct pt_regs *regs)
8824 }
8825 user_instruction_dump ((unsigned int __user *) regs->tpc);
8826 }
8827- if (regs->tstate & TSTATE_PRIV)
8828+ if (regs->tstate & TSTATE_PRIV) {
8829+ gr_handle_kernel_exploit();
8830 do_exit(SIGKILL);
8831+ }
8832 do_exit(SIGSEGV);
8833 }
8834 EXPORT_SYMBOL(die_if_kernel);
8835diff --git a/arch/sparc/kernel/unaligned_64.c b/arch/sparc/kernel/unaligned_64.c
8836index 8201c25e..072a2a7 100644
8837--- a/arch/sparc/kernel/unaligned_64.c
8838+++ b/arch/sparc/kernel/unaligned_64.c
8839@@ -286,7 +286,7 @@ static void log_unaligned(struct pt_regs *regs)
8840 static DEFINE_RATELIMIT_STATE(ratelimit, 5 * HZ, 5);
8841
8842 if (__ratelimit(&ratelimit)) {
8843- printk("Kernel unaligned access at TPC[%lx] %pS\n",
8844+ printk("Kernel unaligned access at TPC[%lx] %pA\n",
8845 regs->tpc, (void *) regs->tpc);
8846 }
8847 }
8848diff --git a/arch/sparc/lib/Makefile b/arch/sparc/lib/Makefile
8849index dbe119b..089c7c1 100644
8850--- a/arch/sparc/lib/Makefile
8851+++ b/arch/sparc/lib/Makefile
8852@@ -2,7 +2,7 @@
8853 #
8854
8855 asflags-y := -ansi -DST_DIV0=0x02
8856-ccflags-y := -Werror
8857+#ccflags-y := -Werror
8858
8859 lib-$(CONFIG_SPARC32) += ashrdi3.o
8860 lib-$(CONFIG_SPARC32) += memcpy.o memset.o
8861diff --git a/arch/sparc/lib/atomic_64.S b/arch/sparc/lib/atomic_64.S
8862index 85c233d..68500e0 100644
8863--- a/arch/sparc/lib/atomic_64.S
8864+++ b/arch/sparc/lib/atomic_64.S
8865@@ -17,7 +17,12 @@
8866 ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
8867 BACKOFF_SETUP(%o2)
8868 1: lduw [%o1], %g1
8869- add %g1, %o0, %g7
8870+ addcc %g1, %o0, %g7
8871+
8872+#ifdef CONFIG_PAX_REFCOUNT
8873+ tvs %icc, 6
8874+#endif
8875+
8876 cas [%o1], %g1, %g7
8877 cmp %g1, %g7
8878 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
8879@@ -27,10 +32,28 @@ ENTRY(atomic_add) /* %o0 = increment, %o1 = atomic_ptr */
8880 2: BACKOFF_SPIN(%o2, %o3, 1b)
8881 ENDPROC(atomic_add)
8882
8883+ENTRY(atomic_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
8884+ BACKOFF_SETUP(%o2)
8885+1: lduw [%o1], %g1
8886+ add %g1, %o0, %g7
8887+ cas [%o1], %g1, %g7
8888+ cmp %g1, %g7
8889+ bne,pn %icc, 2f
8890+ nop
8891+ retl
8892+ nop
8893+2: BACKOFF_SPIN(%o2, %o3, 1b)
8894+ENDPROC(atomic_add_unchecked)
8895+
8896 ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
8897 BACKOFF_SETUP(%o2)
8898 1: lduw [%o1], %g1
8899- sub %g1, %o0, %g7
8900+ subcc %g1, %o0, %g7
8901+
8902+#ifdef CONFIG_PAX_REFCOUNT
8903+ tvs %icc, 6
8904+#endif
8905+
8906 cas [%o1], %g1, %g7
8907 cmp %g1, %g7
8908 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
8909@@ -40,10 +63,28 @@ ENTRY(atomic_sub) /* %o0 = decrement, %o1 = atomic_ptr */
8910 2: BACKOFF_SPIN(%o2, %o3, 1b)
8911 ENDPROC(atomic_sub)
8912
8913+ENTRY(atomic_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
8914+ BACKOFF_SETUP(%o2)
8915+1: lduw [%o1], %g1
8916+ sub %g1, %o0, %g7
8917+ cas [%o1], %g1, %g7
8918+ cmp %g1, %g7
8919+ bne,pn %icc, 2f
8920+ nop
8921+ retl
8922+ nop
8923+2: BACKOFF_SPIN(%o2, %o3, 1b)
8924+ENDPROC(atomic_sub_unchecked)
8925+
8926 ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
8927 BACKOFF_SETUP(%o2)
8928 1: lduw [%o1], %g1
8929- add %g1, %o0, %g7
8930+ addcc %g1, %o0, %g7
8931+
8932+#ifdef CONFIG_PAX_REFCOUNT
8933+ tvs %icc, 6
8934+#endif
8935+
8936 cas [%o1], %g1, %g7
8937 cmp %g1, %g7
8938 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
8939@@ -53,10 +94,29 @@ ENTRY(atomic_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
8940 2: BACKOFF_SPIN(%o2, %o3, 1b)
8941 ENDPROC(atomic_add_ret)
8942
8943+ENTRY(atomic_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
8944+ BACKOFF_SETUP(%o2)
8945+1: lduw [%o1], %g1
8946+ addcc %g1, %o0, %g7
8947+ cas [%o1], %g1, %g7
8948+ cmp %g1, %g7
8949+ bne,pn %icc, 2f
8950+ add %g7, %o0, %g7
8951+ sra %g7, 0, %o0
8952+ retl
8953+ nop
8954+2: BACKOFF_SPIN(%o2, %o3, 1b)
8955+ENDPROC(atomic_add_ret_unchecked)
8956+
8957 ENTRY(atomic_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
8958 BACKOFF_SETUP(%o2)
8959 1: lduw [%o1], %g1
8960- sub %g1, %o0, %g7
8961+ subcc %g1, %o0, %g7
8962+
8963+#ifdef CONFIG_PAX_REFCOUNT
8964+ tvs %icc, 6
8965+#endif
8966+
8967 cas [%o1], %g1, %g7
8968 cmp %g1, %g7
8969 bne,pn %icc, BACKOFF_LABEL(2f, 1b)
8970@@ -69,7 +129,12 @@ ENDPROC(atomic_sub_ret)
8971 ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
8972 BACKOFF_SETUP(%o2)
8973 1: ldx [%o1], %g1
8974- add %g1, %o0, %g7
8975+ addcc %g1, %o0, %g7
8976+
8977+#ifdef CONFIG_PAX_REFCOUNT
8978+ tvs %xcc, 6
8979+#endif
8980+
8981 casx [%o1], %g1, %g7
8982 cmp %g1, %g7
8983 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
8984@@ -79,10 +144,28 @@ ENTRY(atomic64_add) /* %o0 = increment, %o1 = atomic_ptr */
8985 2: BACKOFF_SPIN(%o2, %o3, 1b)
8986 ENDPROC(atomic64_add)
8987
8988+ENTRY(atomic64_add_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
8989+ BACKOFF_SETUP(%o2)
8990+1: ldx [%o1], %g1
8991+ addcc %g1, %o0, %g7
8992+ casx [%o1], %g1, %g7
8993+ cmp %g1, %g7
8994+ bne,pn %xcc, 2f
8995+ nop
8996+ retl
8997+ nop
8998+2: BACKOFF_SPIN(%o2, %o3, 1b)
8999+ENDPROC(atomic64_add_unchecked)
9000+
9001 ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
9002 BACKOFF_SETUP(%o2)
9003 1: ldx [%o1], %g1
9004- sub %g1, %o0, %g7
9005+ subcc %g1, %o0, %g7
9006+
9007+#ifdef CONFIG_PAX_REFCOUNT
9008+ tvs %xcc, 6
9009+#endif
9010+
9011 casx [%o1], %g1, %g7
9012 cmp %g1, %g7
9013 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
9014@@ -92,10 +175,28 @@ ENTRY(atomic64_sub) /* %o0 = decrement, %o1 = atomic_ptr */
9015 2: BACKOFF_SPIN(%o2, %o3, 1b)
9016 ENDPROC(atomic64_sub)
9017
9018+ENTRY(atomic64_sub_unchecked) /* %o0 = decrement, %o1 = atomic_ptr */
9019+ BACKOFF_SETUP(%o2)
9020+1: ldx [%o1], %g1
9021+ subcc %g1, %o0, %g7
9022+ casx [%o1], %g1, %g7
9023+ cmp %g1, %g7
9024+ bne,pn %xcc, 2f
9025+ nop
9026+ retl
9027+ nop
9028+2: BACKOFF_SPIN(%o2, %o3, 1b)
9029+ENDPROC(atomic64_sub_unchecked)
9030+
9031 ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
9032 BACKOFF_SETUP(%o2)
9033 1: ldx [%o1], %g1
9034- add %g1, %o0, %g7
9035+ addcc %g1, %o0, %g7
9036+
9037+#ifdef CONFIG_PAX_REFCOUNT
9038+ tvs %xcc, 6
9039+#endif
9040+
9041 casx [%o1], %g1, %g7
9042 cmp %g1, %g7
9043 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
9044@@ -105,10 +206,29 @@ ENTRY(atomic64_add_ret) /* %o0 = increment, %o1 = atomic_ptr */
9045 2: BACKOFF_SPIN(%o2, %o3, 1b)
9046 ENDPROC(atomic64_add_ret)
9047
9048+ENTRY(atomic64_add_ret_unchecked) /* %o0 = increment, %o1 = atomic_ptr */
9049+ BACKOFF_SETUP(%o2)
9050+1: ldx [%o1], %g1
9051+ addcc %g1, %o0, %g7
9052+ casx [%o1], %g1, %g7
9053+ cmp %g1, %g7
9054+ bne,pn %xcc, 2f
9055+ add %g7, %o0, %g7
9056+ mov %g7, %o0
9057+ retl
9058+ nop
9059+2: BACKOFF_SPIN(%o2, %o3, 1b)
9060+ENDPROC(atomic64_add_ret_unchecked)
9061+
9062 ENTRY(atomic64_sub_ret) /* %o0 = decrement, %o1 = atomic_ptr */
9063 BACKOFF_SETUP(%o2)
9064 1: ldx [%o1], %g1
9065- sub %g1, %o0, %g7
9066+ subcc %g1, %o0, %g7
9067+
9068+#ifdef CONFIG_PAX_REFCOUNT
9069+ tvs %xcc, 6
9070+#endif
9071+
9072 casx [%o1], %g1, %g7
9073 cmp %g1, %g7
9074 bne,pn %xcc, BACKOFF_LABEL(2f, 1b)
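The addcc/tvs pairs above turn every checked atomic into an overflow trap: addcc sets the integer condition codes and tvs ("trap on overflow set") raises software trap 6, which the bad_trap()/bad_trap_tl1() hunks earlier route to pax_report_refcount_overflow(), while the new *_unchecked entry points preserve plain wrapping for counters that may legitimately overflow. Ignoring the cas retry loop, the checked/unchecked split behaves roughly like this C analogue, with the GCC overflow builtin standing in for addcc + tvs:

#include <stdio.h>
#include <stdlib.h>

/* Analogue of the "tvs %icc, 6" trap: PaX reports a refcount
 * overflow and kills the offender. */
static void refcount_overflow_trap(void)
{
	fprintf(stderr, "refcount overflow detected\n");
	abort();
}

static int atomic_add_checked(int *v, int inc)
{
	int result;

	/* addcc computes the sum and sets the overflow flag; tvs traps
	 * when it is set. __builtin_add_overflow models the same
	 * signed-overflow test. */
	if (__builtin_add_overflow(*v, inc, &result))
		refcount_overflow_trap();
	*v = result;
	return result;
}

static int atomic_add_unchecked(int *v, int inc)
{
	/* plain add: wraps silently, as the _unchecked entry points do */
	*v = (int)((unsigned int)*v + (unsigned int)inc);
	return *v;
}

int main(void)
{
	int counter = 0;
	atomic_add_checked(&counter, 1);
	atomic_add_unchecked(&counter, 1);
	printf("counter=%d\n", counter);
	return 0;
}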
9075diff --git a/arch/sparc/lib/ksyms.c b/arch/sparc/lib/ksyms.c
9076index 0c4e35e..745d3e4 100644
9077--- a/arch/sparc/lib/ksyms.c
9078+++ b/arch/sparc/lib/ksyms.c
9079@@ -109,12 +109,18 @@ EXPORT_SYMBOL(__downgrade_write);
9080
9081 /* Atomic counter implementation. */
9082 EXPORT_SYMBOL(atomic_add);
9083+EXPORT_SYMBOL(atomic_add_unchecked);
9084 EXPORT_SYMBOL(atomic_add_ret);
9085+EXPORT_SYMBOL(atomic_add_ret_unchecked);
9086 EXPORT_SYMBOL(atomic_sub);
9087+EXPORT_SYMBOL(atomic_sub_unchecked);
9088 EXPORT_SYMBOL(atomic_sub_ret);
9089 EXPORT_SYMBOL(atomic64_add);
9090+EXPORT_SYMBOL(atomic64_add_unchecked);
9091 EXPORT_SYMBOL(atomic64_add_ret);
9092+EXPORT_SYMBOL(atomic64_add_ret_unchecked);
9093 EXPORT_SYMBOL(atomic64_sub);
9094+EXPORT_SYMBOL(atomic64_sub_unchecked);
9095 EXPORT_SYMBOL(atomic64_sub_ret);
9096 EXPORT_SYMBOL(atomic64_dec_if_positive);
9097
9098diff --git a/arch/sparc/mm/Makefile b/arch/sparc/mm/Makefile
9099index 30c3ecc..736f015 100644
9100--- a/arch/sparc/mm/Makefile
9101+++ b/arch/sparc/mm/Makefile
9102@@ -2,7 +2,7 @@
9103 #
9104
9105 asflags-y := -ansi
9106-ccflags-y := -Werror
9107+#ccflags-y := -Werror
9108
9109 obj-$(CONFIG_SPARC64) += ultra.o tlb.o tsb.o gup.o
9110 obj-y += fault_$(BITS).o
9111diff --git a/arch/sparc/mm/fault_32.c b/arch/sparc/mm/fault_32.c
9112index e98bfda..ea8d221 100644
9113--- a/arch/sparc/mm/fault_32.c
9114+++ b/arch/sparc/mm/fault_32.c
9115@@ -21,6 +21,9 @@
9116 #include <linux/perf_event.h>
9117 #include <linux/interrupt.h>
9118 #include <linux/kdebug.h>
9119+#include <linux/slab.h>
9120+#include <linux/pagemap.h>
9121+#include <linux/compiler.h>
9122
9123 #include <asm/page.h>
9124 #include <asm/pgtable.h>
9125@@ -159,6 +162,277 @@ static unsigned long compute_si_addr(struct pt_regs *regs, int text_fault)
9126 return safe_compute_effective_address(regs, insn);
9127 }
9128
9129+#ifdef CONFIG_PAX_PAGEEXEC
9130+#ifdef CONFIG_PAX_DLRESOLVE
9131+static void pax_emuplt_close(struct vm_area_struct *vma)
9132+{
9133+ vma->vm_mm->call_dl_resolve = 0UL;
9134+}
9135+
9136+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9137+{
9138+ unsigned int *kaddr;
9139+
9140+ vmf->page = alloc_page(GFP_HIGHUSER);
9141+ if (!vmf->page)
9142+ return VM_FAULT_OOM;
9143+
9144+ kaddr = kmap(vmf->page);
9145+ memset(kaddr, 0, PAGE_SIZE);
9146+ kaddr[0] = 0x9DE3BFA8U; /* save */
9147+ flush_dcache_page(vmf->page);
9148+ kunmap(vmf->page);
9149+ return VM_FAULT_MAJOR;
9150+}
9151+
9152+static const struct vm_operations_struct pax_vm_ops = {
9153+ .close = pax_emuplt_close,
9154+ .fault = pax_emuplt_fault
9155+};
9156+
9157+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
9158+{
9159+ int ret;
9160+
9161+ INIT_LIST_HEAD(&vma->anon_vma_chain);
9162+ vma->vm_mm = current->mm;
9163+ vma->vm_start = addr;
9164+ vma->vm_end = addr + PAGE_SIZE;
9165+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
9166+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
9167+ vma->vm_ops = &pax_vm_ops;
9168+
9169+ ret = insert_vm_struct(current->mm, vma);
9170+ if (ret)
9171+ return ret;
9172+
9173+ ++current->mm->total_vm;
9174+ return 0;
9175+}
9176+#endif
9177+
9178+/*
9179+ * PaX: decide what to do with offenders (regs->pc = fault address)
9180+ *
9181+ * returns 1 when task should be killed
9182+ * 2 when patched PLT trampoline was detected
9183+ * 3 when unpatched PLT trampoline was detected
9184+ */
9185+static int pax_handle_fetch_fault(struct pt_regs *regs)
9186+{
9187+
9188+#ifdef CONFIG_PAX_EMUPLT
9189+ int err;
9190+
9191+ do { /* PaX: patched PLT emulation #1 */
9192+ unsigned int sethi1, sethi2, jmpl;
9193+
9194+ err = get_user(sethi1, (unsigned int *)regs->pc);
9195+ err |= get_user(sethi2, (unsigned int *)(regs->pc+4));
9196+ err |= get_user(jmpl, (unsigned int *)(regs->pc+8));
9197+
9198+ if (err)
9199+ break;
9200+
9201+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
9202+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
9203+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
9204+ {
9205+ unsigned int addr;
9206+
9207+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
9208+ addr = regs->u_regs[UREG_G1];
9209+ addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
9210+ regs->pc = addr;
9211+ regs->npc = addr+4;
9212+ return 2;
9213+ }
9214+ } while (0);
9215+
9216+ do { /* PaX: patched PLT emulation #2 */
9217+ unsigned int ba;
9218+
9219+ err = get_user(ba, (unsigned int *)regs->pc);
9220+
9221+ if (err)
9222+ break;
9223+
9224+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
9225+ unsigned int addr;
9226+
9227+ if ((ba & 0xFFC00000U) == 0x30800000U)
9228+ addr = regs->pc + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
9229+ else
9230+ addr = regs->pc + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
9231+ regs->pc = addr;
9232+ regs->npc = addr+4;
9233+ return 2;
9234+ }
9235+ } while (0);
9236+
9237+ do { /* PaX: patched PLT emulation #3 */
9238+ unsigned int sethi, bajmpl, nop;
9239+
9240+ err = get_user(sethi, (unsigned int *)regs->pc);
9241+ err |= get_user(bajmpl, (unsigned int *)(regs->pc+4));
9242+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
9243+
9244+ if (err)
9245+ break;
9246+
9247+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9248+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
9249+ nop == 0x01000000U)
9250+ {
9251+ unsigned int addr;
9252+
9253+ addr = (sethi & 0x003FFFFFU) << 10;
9254+ regs->u_regs[UREG_G1] = addr;
9255+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
9256+ addr += (((bajmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
9257+ else
9258+ addr = regs->pc + ((((bajmpl | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
9259+ regs->pc = addr;
9260+ regs->npc = addr+4;
9261+ return 2;
9262+ }
9263+ } while (0);
9264+
9265+ do { /* PaX: unpatched PLT emulation step 1 */
9266+ unsigned int sethi, ba, nop;
9267+
9268+ err = get_user(sethi, (unsigned int *)regs->pc);
9269+ err |= get_user(ba, (unsigned int *)(regs->pc+4));
9270+ err |= get_user(nop, (unsigned int *)(regs->pc+8));
9271+
9272+ if (err)
9273+ break;
9274+
9275+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9276+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
9277+ nop == 0x01000000U)
9278+ {
9279+ unsigned int addr, save, call;
9280+
9281+ if ((ba & 0xFFC00000U) == 0x30800000U)
9282+ addr = regs->pc + 4 + ((((ba | 0xFFC00000U) ^ 0x00200000U) + 0x00200000U) << 2);
9283+ else
9284+ addr = regs->pc + 4 + ((((ba | 0xFFF80000U) ^ 0x00040000U) + 0x00040000U) << 2);
9285+
9286+ err = get_user(save, (unsigned int *)addr);
9287+ err |= get_user(call, (unsigned int *)(addr+4));
9288+ err |= get_user(nop, (unsigned int *)(addr+8));
9289+ if (err)
9290+ break;
9291+
9292+#ifdef CONFIG_PAX_DLRESOLVE
9293+ if (save == 0x9DE3BFA8U &&
9294+ (call & 0xC0000000U) == 0x40000000U &&
9295+ nop == 0x01000000U)
9296+ {
9297+ struct vm_area_struct *vma;
9298+ unsigned long call_dl_resolve;
9299+
9300+ down_read(&current->mm->mmap_sem);
9301+ call_dl_resolve = current->mm->call_dl_resolve;
9302+ up_read(&current->mm->mmap_sem);
9303+ if (likely(call_dl_resolve))
9304+ goto emulate;
9305+
9306+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
9307+
9308+ down_write(&current->mm->mmap_sem);
9309+ if (current->mm->call_dl_resolve) {
9310+ call_dl_resolve = current->mm->call_dl_resolve;
9311+ up_write(&current->mm->mmap_sem);
9312+ if (vma)
9313+ kmem_cache_free(vm_area_cachep, vma);
9314+ goto emulate;
9315+ }
9316+
9317+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
9318+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
9319+ up_write(&current->mm->mmap_sem);
9320+ if (vma)
9321+ kmem_cache_free(vm_area_cachep, vma);
9322+ return 1;
9323+ }
9324+
9325+ if (pax_insert_vma(vma, call_dl_resolve)) {
9326+ up_write(&current->mm->mmap_sem);
9327+ kmem_cache_free(vm_area_cachep, vma);
9328+ return 1;
9329+ }
9330+
9331+ current->mm->call_dl_resolve = call_dl_resolve;
9332+ up_write(&current->mm->mmap_sem);
9333+
9334+emulate:
9335+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9336+ regs->pc = call_dl_resolve;
9337+ regs->npc = addr+4;
9338+ return 3;
9339+ }
9340+#endif
9341+
9342+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
9343+ if ((save & 0xFFC00000U) == 0x05000000U &&
9344+ (call & 0xFFFFE000U) == 0x85C0A000U &&
9345+ nop == 0x01000000U)
9346+ {
9347+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9348+ regs->u_regs[UREG_G2] = addr + 4;
9349+ addr = (save & 0x003FFFFFU) << 10;
9350+ addr += (((call | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
9351+ regs->pc = addr;
9352+ regs->npc = addr+4;
9353+ return 3;
9354+ }
9355+ }
9356+ } while (0);
9357+
9358+ do { /* PaX: unpatched PLT emulation step 2 */
9359+ unsigned int save, call, nop;
9360+
9361+ err = get_user(save, (unsigned int *)(regs->pc-4));
9362+ err |= get_user(call, (unsigned int *)regs->pc);
9363+ err |= get_user(nop, (unsigned int *)(regs->pc+4));
9364+ if (err)
9365+ break;
9366+
9367+ if (save == 0x9DE3BFA8U &&
9368+ (call & 0xC0000000U) == 0x40000000U &&
9369+ nop == 0x01000000U)
9370+ {
9371+ unsigned int dl_resolve = regs->pc + ((((call | 0xC0000000U) ^ 0x20000000U) + 0x20000000U) << 2);
9372+
9373+ regs->u_regs[UREG_RETPC] = regs->pc;
9374+ regs->pc = dl_resolve;
9375+ regs->npc = dl_resolve+4;
9376+ return 3;
9377+ }
9378+ } while (0);
9379+#endif
9380+
9381+ return 1;
9382+}
9383+
9384+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9385+{
9386+ unsigned long i;
9387+
9388+ printk(KERN_ERR "PAX: bytes at PC: ");
9389+ for (i = 0; i < 8; i++) {
9390+ unsigned int c;
9391+ if (get_user(c, (unsigned int *)pc+i))
9392+ printk(KERN_CONT "???????? ");
9393+ else
9394+ printk(KERN_CONT "%08x ", c);
9395+ }
9396+ printk("\n");
9397+}
9398+#endif
9399+
9400 static noinline void do_fault_siginfo(int code, int sig, struct pt_regs *regs,
9401 int text_fault)
9402 {
9403@@ -230,6 +504,24 @@ good_area:
9404 if (!(vma->vm_flags & VM_WRITE))
9405 goto bad_area;
9406 } else {
9407+
9408+#ifdef CONFIG_PAX_PAGEEXEC
9409+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && text_fault && !(vma->vm_flags & VM_EXEC)) {
9410+ up_read(&mm->mmap_sem);
9411+ switch (pax_handle_fetch_fault(regs)) {
9412+
9413+#ifdef CONFIG_PAX_EMUPLT
9414+ case 2:
9415+ case 3:
9416+ return;
9417+#endif
9418+
9419+ }
9420+ pax_report_fault(regs, (void *)regs->pc, (void *)regs->u_regs[UREG_FP]);
9421+ do_group_exit(SIGKILL);
9422+ }
9423+#endif
9424+
9425 /* Allow reads even for write-only mappings */
9426 if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
9427 goto bad_area;
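All of the emulation cases above work the same way: fetch the faulting instruction words with get_user(), match them against mask/value pairs that identify a PLT sequence (0x03000000 under mask 0xFFC00000 is "sethi %hi(...), %g1"; 0x81C06000 under mask 0xFFFFE000 is "jmpl %g1 + simm13"), then rebuild the branch target from the immediates; the 64-bit version in the next file repeats the scheme with sign extension to 64 bits. A standalone decoder for the first pattern, with the masks and immediate math copied from the hunk and the encoded example instructions chosen for illustration:

#include <stdint.h>
#include <stdio.h>

/* Rebuild the target of a "sethi %hi(addr), %g1; jmpl %g1+off" pair,
 * using the same masks and immediate math as pax_handle_fetch_fault(). */
static int decode_sethi_jmpl(uint32_t sethi, uint32_t jmpl, uint32_t *target)
{
	if ((sethi & 0xFFC00000U) != 0x03000000U || /* sethi ..., %g1 */
	    (jmpl  & 0xFFFFE000U) != 0x81C06000U)   /* jmpl %g1+simm13 */
		return 0;

	uint32_t addr = (sethi & 0x003FFFFFU) << 10; /* %hi(): top 22 bits */
	/* Sign-extend the 13-bit displacement exactly as the hunk does:
	 * or-in the high bits, then flip and re-add the sign bit. */
	addr += (((jmpl | 0xFFFFE000U) ^ 0x00001000U) + 0x00001000U);
	*target = addr;
	return 1;
}

int main(void)
{
	uint32_t target;
	/* sethi %hi(0x50000000), %g1 ; jmpl %g1 + 0x10, %g0 */
	if (decode_sethi_jmpl(0x03140000U, 0x81C06010U, &target))
		printf("PLT target: %#x\n", target); /* 0x50000010 */
	return 0;
}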
9428diff --git a/arch/sparc/mm/fault_64.c b/arch/sparc/mm/fault_64.c
9429index 5062ff3..e0b75f3 100644
9430--- a/arch/sparc/mm/fault_64.c
9431+++ b/arch/sparc/mm/fault_64.c
9432@@ -21,6 +21,9 @@
9433 #include <linux/kprobes.h>
9434 #include <linux/kdebug.h>
9435 #include <linux/percpu.h>
9436+#include <linux/slab.h>
9437+#include <linux/pagemap.h>
9438+#include <linux/compiler.h>
9439
9440 #include <asm/page.h>
9441 #include <asm/pgtable.h>
9442@@ -74,7 +77,7 @@ static void __kprobes bad_kernel_pc(struct pt_regs *regs, unsigned long vaddr)
9443 printk(KERN_CRIT "OOPS: Bogus kernel PC [%016lx] in fault handler\n",
9444 regs->tpc);
9445 printk(KERN_CRIT "OOPS: RPC [%016lx]\n", regs->u_regs[15]);
9446- printk("OOPS: RPC <%pS>\n", (void *) regs->u_regs[15]);
9447+ printk("OOPS: RPC <%pA>\n", (void *) regs->u_regs[15]);
9448 printk(KERN_CRIT "OOPS: Fault was to vaddr[%lx]\n", vaddr);
9449 dump_stack();
9450 unhandled_fault(regs->tpc, current, regs);
9451@@ -270,6 +273,466 @@ static void noinline __kprobes bogus_32bit_fault_address(struct pt_regs *regs,
9452 show_regs(regs);
9453 }
9454
9455+#ifdef CONFIG_PAX_PAGEEXEC
9456+#ifdef CONFIG_PAX_DLRESOLVE
9457+static void pax_emuplt_close(struct vm_area_struct *vma)
9458+{
9459+ vma->vm_mm->call_dl_resolve = 0UL;
9460+}
9461+
9462+static int pax_emuplt_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
9463+{
9464+ unsigned int *kaddr;
9465+
9466+ vmf->page = alloc_page(GFP_HIGHUSER);
9467+ if (!vmf->page)
9468+ return VM_FAULT_OOM;
9469+
9470+ kaddr = kmap(vmf->page);
9471+ memset(kaddr, 0, PAGE_SIZE);
9472+ kaddr[0] = 0x9DE3BFA8U; /* save */
9473+ flush_dcache_page(vmf->page);
9474+ kunmap(vmf->page);
9475+ return VM_FAULT_MAJOR;
9476+}
9477+
9478+static const struct vm_operations_struct pax_vm_ops = {
9479+ .close = pax_emuplt_close,
9480+ .fault = pax_emuplt_fault
9481+};
9482+
9483+static int pax_insert_vma(struct vm_area_struct *vma, unsigned long addr)
9484+{
9485+ int ret;
9486+
9487+ INIT_LIST_HEAD(&vma->anon_vma_chain);
9488+ vma->vm_mm = current->mm;
9489+ vma->vm_start = addr;
9490+ vma->vm_end = addr + PAGE_SIZE;
9491+ vma->vm_flags = VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYEXEC;
9492+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
9493+ vma->vm_ops = &pax_vm_ops;
9494+
9495+ ret = insert_vm_struct(current->mm, vma);
9496+ if (ret)
9497+ return ret;
9498+
9499+ ++current->mm->total_vm;
9500+ return 0;
9501+}
9502+#endif
9503+
9504+/*
9505+ * PaX: decide what to do with offenders (regs->tpc = fault address)
9506+ *
9507+ * returns 1 when task should be killed
9508+ * 2 when patched PLT trampoline was detected
9509+ * 3 when unpatched PLT trampoline was detected
9510+ */
9511+static int pax_handle_fetch_fault(struct pt_regs *regs)
9512+{
9513+
9514+#ifdef CONFIG_PAX_EMUPLT
9515+ int err;
9516+
9517+ do { /* PaX: patched PLT emulation #1 */
9518+ unsigned int sethi1, sethi2, jmpl;
9519+
9520+ err = get_user(sethi1, (unsigned int *)regs->tpc);
9521+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+4));
9522+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+8));
9523+
9524+ if (err)
9525+ break;
9526+
9527+ if ((sethi1 & 0xFFC00000U) == 0x03000000U &&
9528+ (sethi2 & 0xFFC00000U) == 0x03000000U &&
9529+ (jmpl & 0xFFFFE000U) == 0x81C06000U)
9530+ {
9531+ unsigned long addr;
9532+
9533+ regs->u_regs[UREG_G1] = (sethi2 & 0x003FFFFFU) << 10;
9534+ addr = regs->u_regs[UREG_G1];
9535+ addr += (((jmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
9536+
9537+ if (test_thread_flag(TIF_32BIT))
9538+ addr &= 0xFFFFFFFFUL;
9539+
9540+ regs->tpc = addr;
9541+ regs->tnpc = addr+4;
9542+ return 2;
9543+ }
9544+ } while (0);
9545+
9546+ do { /* PaX: patched PLT emulation #2 */
9547+ unsigned int ba;
9548+
9549+ err = get_user(ba, (unsigned int *)regs->tpc);
9550+
9551+ if (err)
9552+ break;
9553+
9554+ if ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30480000U) {
9555+ unsigned long addr;
9556+
9557+ if ((ba & 0xFFC00000U) == 0x30800000U)
9558+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
9559+ else
9560+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
9561+
9562+ if (test_thread_flag(TIF_32BIT))
9563+ addr &= 0xFFFFFFFFUL;
9564+
9565+ regs->tpc = addr;
9566+ regs->tnpc = addr+4;
9567+ return 2;
9568+ }
9569+ } while (0);
9570+
9571+ do { /* PaX: patched PLT emulation #3 */
9572+ unsigned int sethi, bajmpl, nop;
9573+
9574+ err = get_user(sethi, (unsigned int *)regs->tpc);
9575+ err |= get_user(bajmpl, (unsigned int *)(regs->tpc+4));
9576+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
9577+
9578+ if (err)
9579+ break;
9580+
9581+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9582+ ((bajmpl & 0xFFFFE000U) == 0x81C06000U || (bajmpl & 0xFFF80000U) == 0x30480000U) &&
9583+ nop == 0x01000000U)
9584+ {
9585+ unsigned long addr;
9586+
9587+ addr = (sethi & 0x003FFFFFU) << 10;
9588+ regs->u_regs[UREG_G1] = addr;
9589+ if ((bajmpl & 0xFFFFE000U) == 0x81C06000U)
9590+ addr += (((bajmpl | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
9591+ else
9592+ addr = regs->tpc + ((((bajmpl | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
9593+
9594+ if (test_thread_flag(TIF_32BIT))
9595+ addr &= 0xFFFFFFFFUL;
9596+
9597+ regs->tpc = addr;
9598+ regs->tnpc = addr+4;
9599+ return 2;
9600+ }
9601+ } while (0);
9602+
9603+ do { /* PaX: patched PLT emulation #4 */
9604+ unsigned int sethi, mov1, call, mov2;
9605+
9606+ err = get_user(sethi, (unsigned int *)regs->tpc);
9607+ err |= get_user(mov1, (unsigned int *)(regs->tpc+4));
9608+ err |= get_user(call, (unsigned int *)(regs->tpc+8));
9609+ err |= get_user(mov2, (unsigned int *)(regs->tpc+12));
9610+
9611+ if (err)
9612+ break;
9613+
9614+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9615+ mov1 == 0x8210000FU &&
9616+ (call & 0xC0000000U) == 0x40000000U &&
9617+ mov2 == 0x9E100001U)
9618+ {
9619+ unsigned long addr;
9620+
9621+ regs->u_regs[UREG_G1] = regs->u_regs[UREG_RETPC];
9622+ addr = regs->tpc + 4 + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
9623+
9624+ if (test_thread_flag(TIF_32BIT))
9625+ addr &= 0xFFFFFFFFUL;
9626+
9627+ regs->tpc = addr;
9628+ regs->tnpc = addr+4;
9629+ return 2;
9630+ }
9631+ } while (0);
9632+
9633+ do { /* PaX: patched PLT emulation #5 */
9634+ unsigned int sethi, sethi1, sethi2, or1, or2, sllx, jmpl, nop;
9635+
9636+ err = get_user(sethi, (unsigned int *)regs->tpc);
9637+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
9638+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
9639+ err |= get_user(or1, (unsigned int *)(regs->tpc+12));
9640+ err |= get_user(or2, (unsigned int *)(regs->tpc+16));
9641+ err |= get_user(sllx, (unsigned int *)(regs->tpc+20));
9642+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+24));
9643+ err |= get_user(nop, (unsigned int *)(regs->tpc+28));
9644+
9645+ if (err)
9646+ break;
9647+
9648+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9649+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
9650+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
9651+ (or1 & 0xFFFFE000U) == 0x82106000U &&
9652+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
9653+ sllx == 0x83287020U &&
9654+ jmpl == 0x81C04005U &&
9655+ nop == 0x01000000U)
9656+ {
9657+ unsigned long addr;
9658+
9659+ regs->u_regs[UREG_G1] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
9660+ regs->u_regs[UREG_G1] <<= 32;
9661+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
9662+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
9663+ regs->tpc = addr;
9664+ regs->tnpc = addr+4;
9665+ return 2;
9666+ }
9667+ } while (0);
9668+
9669+ do { /* PaX: patched PLT emulation #6 */
9670+ unsigned int sethi, sethi1, sethi2, sllx, or, jmpl, nop;
9671+
9672+ err = get_user(sethi, (unsigned int *)regs->tpc);
9673+ err |= get_user(sethi1, (unsigned int *)(regs->tpc+4));
9674+ err |= get_user(sethi2, (unsigned int *)(regs->tpc+8));
9675+ err |= get_user(sllx, (unsigned int *)(regs->tpc+12));
9676+ err |= get_user(or, (unsigned int *)(regs->tpc+16));
9677+ err |= get_user(jmpl, (unsigned int *)(regs->tpc+20));
9678+ err |= get_user(nop, (unsigned int *)(regs->tpc+24));
9679+
9680+ if (err)
9681+ break;
9682+
9683+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9684+ (sethi1 & 0xFFC00000U) == 0x03000000U &&
9685+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
9686+ sllx == 0x83287020U &&
9687+ (or & 0xFFFFE000U) == 0x8A116000U &&
9688+ jmpl == 0x81C04005U &&
9689+ nop == 0x01000000U)
9690+ {
9691+ unsigned long addr;
9692+
9693+ regs->u_regs[UREG_G1] = (sethi1 & 0x003FFFFFU) << 10;
9694+ regs->u_regs[UREG_G1] <<= 32;
9695+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or & 0x3FFU);
9696+ addr = regs->u_regs[UREG_G1] + regs->u_regs[UREG_G5];
9697+ regs->tpc = addr;
9698+ regs->tnpc = addr+4;
9699+ return 2;
9700+ }
9701+ } while (0);
9702+
9703+ do { /* PaX: unpatched PLT emulation step 1 */
9704+ unsigned int sethi, ba, nop;
9705+
9706+ err = get_user(sethi, (unsigned int *)regs->tpc);
9707+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
9708+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
9709+
9710+ if (err)
9711+ break;
9712+
9713+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9714+ ((ba & 0xFFC00000U) == 0x30800000U || (ba & 0xFFF80000U) == 0x30680000U) &&
9715+ nop == 0x01000000U)
9716+ {
9717+ unsigned long addr;
9718+ unsigned int save, call;
9719+ unsigned int sethi1, sethi2, or1, or2, sllx, add, jmpl;
9720+
9721+ if ((ba & 0xFFC00000U) == 0x30800000U)
9722+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFC00000UL) ^ 0x00200000UL) + 0x00200000UL) << 2);
9723+ else
9724+ addr = regs->tpc + 4 + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
9725+
9726+ if (test_thread_flag(TIF_32BIT))
9727+ addr &= 0xFFFFFFFFUL;
9728+
9729+ err = get_user(save, (unsigned int *)addr);
9730+ err |= get_user(call, (unsigned int *)(addr+4));
9731+ err |= get_user(nop, (unsigned int *)(addr+8));
9732+ if (err)
9733+ break;
9734+
9735+#ifdef CONFIG_PAX_DLRESOLVE
9736+ if (save == 0x9DE3BFA8U &&
9737+ (call & 0xC0000000U) == 0x40000000U &&
9738+ nop == 0x01000000U)
9739+ {
9740+ struct vm_area_struct *vma;
9741+ unsigned long call_dl_resolve;
9742+
9743+ down_read(&current->mm->mmap_sem);
9744+ call_dl_resolve = current->mm->call_dl_resolve;
9745+ up_read(&current->mm->mmap_sem);
9746+ if (likely(call_dl_resolve))
9747+ goto emulate;
9748+
9749+ vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
9750+
9751+ down_write(&current->mm->mmap_sem);
9752+ if (current->mm->call_dl_resolve) {
9753+ call_dl_resolve = current->mm->call_dl_resolve;
9754+ up_write(&current->mm->mmap_sem);
9755+ if (vma)
9756+ kmem_cache_free(vm_area_cachep, vma);
9757+ goto emulate;
9758+ }
9759+
9760+ call_dl_resolve = get_unmapped_area(NULL, 0UL, PAGE_SIZE, 0UL, MAP_PRIVATE);
9761+ if (!vma || (call_dl_resolve & ~PAGE_MASK)) {
9762+ up_write(&current->mm->mmap_sem);
9763+ if (vma)
9764+ kmem_cache_free(vm_area_cachep, vma);
9765+ return 1;
9766+ }
9767+
9768+ if (pax_insert_vma(vma, call_dl_resolve)) {
9769+ up_write(&current->mm->mmap_sem);
9770+ kmem_cache_free(vm_area_cachep, vma);
9771+ return 1;
9772+ }
9773+
9774+ current->mm->call_dl_resolve = call_dl_resolve;
9775+ up_write(&current->mm->mmap_sem);
9776+
9777+emulate:
9778+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9779+ regs->tpc = call_dl_resolve;
9780+ regs->tnpc = addr+4;
9781+ return 3;
9782+ }
9783+#endif
9784+
9785+ /* PaX: glibc 2.4+ generates sethi/jmpl instead of save/call */
9786+ if ((save & 0xFFC00000U) == 0x05000000U &&
9787+ (call & 0xFFFFE000U) == 0x85C0A000U &&
9788+ nop == 0x01000000U)
9789+ {
9790+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9791+ regs->u_regs[UREG_G2] = addr + 4;
9792+ addr = (save & 0x003FFFFFU) << 10;
9793+ addr += (((call | 0xFFFFFFFFFFFFE000UL) ^ 0x00001000UL) + 0x00001000UL);
9794+
9795+ if (test_thread_flag(TIF_32BIT))
9796+ addr &= 0xFFFFFFFFUL;
9797+
9798+ regs->tpc = addr;
9799+ regs->tnpc = addr+4;
9800+ return 3;
9801+ }
9802+
9803+ /* PaX: 64-bit PLT stub */
9804+ err = get_user(sethi1, (unsigned int *)addr);
9805+ err |= get_user(sethi2, (unsigned int *)(addr+4));
9806+ err |= get_user(or1, (unsigned int *)(addr+8));
9807+ err |= get_user(or2, (unsigned int *)(addr+12));
9808+ err |= get_user(sllx, (unsigned int *)(addr+16));
9809+ err |= get_user(add, (unsigned int *)(addr+20));
9810+ err |= get_user(jmpl, (unsigned int *)(addr+24));
9811+ err |= get_user(nop, (unsigned int *)(addr+28));
9812+ if (err)
9813+ break;
9814+
9815+ if ((sethi1 & 0xFFC00000U) == 0x09000000U &&
9816+ (sethi2 & 0xFFC00000U) == 0x0B000000U &&
9817+ (or1 & 0xFFFFE000U) == 0x88112000U &&
9818+ (or2 & 0xFFFFE000U) == 0x8A116000U &&
9819+ sllx == 0x89293020U &&
9820+ add == 0x8A010005U &&
9821+ jmpl == 0x89C14000U &&
9822+ nop == 0x01000000U)
9823+ {
9824+ regs->u_regs[UREG_G1] = (sethi & 0x003FFFFFU) << 10;
9825+ regs->u_regs[UREG_G4] = ((sethi1 & 0x003FFFFFU) << 10) | (or1 & 0x000003FFU);
9826+ regs->u_regs[UREG_G4] <<= 32;
9827+ regs->u_regs[UREG_G5] = ((sethi2 & 0x003FFFFFU) << 10) | (or2 & 0x000003FFU);
9828+ regs->u_regs[UREG_G5] += regs->u_regs[UREG_G4];
9829+ regs->u_regs[UREG_G4] = addr + 24;
9830+ addr = regs->u_regs[UREG_G5];
9831+ regs->tpc = addr;
9832+ regs->tnpc = addr+4;
9833+ return 3;
9834+ }
9835+ }
9836+ } while (0);
9837+
9838+#ifdef CONFIG_PAX_DLRESOLVE
9839+ do { /* PaX: unpatched PLT emulation step 2 */
9840+ unsigned int save, call, nop;
9841+
9842+ err = get_user(save, (unsigned int *)(regs->tpc-4));
9843+ err |= get_user(call, (unsigned int *)regs->tpc);
9844+ err |= get_user(nop, (unsigned int *)(regs->tpc+4));
9845+ if (err)
9846+ break;
9847+
9848+ if (save == 0x9DE3BFA8U &&
9849+ (call & 0xC0000000U) == 0x40000000U &&
9850+ nop == 0x01000000U)
9851+ {
9852+ unsigned long dl_resolve = regs->tpc + ((((call | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL) + 0x20000000UL) << 2);
9853+
9854+ if (test_thread_flag(TIF_32BIT))
9855+ dl_resolve &= 0xFFFFFFFFUL;
9856+
9857+ regs->u_regs[UREG_RETPC] = regs->tpc;
9858+ regs->tpc = dl_resolve;
9859+ regs->tnpc = dl_resolve+4;
9860+ return 3;
9861+ }
9862+ } while (0);
9863+#endif
9864+
9865+ do { /* PaX: patched PLT emulation #7, must be AFTER the unpatched PLT emulation */
9866+ unsigned int sethi, ba, nop;
9867+
9868+ err = get_user(sethi, (unsigned int *)regs->tpc);
9869+ err |= get_user(ba, (unsigned int *)(regs->tpc+4));
9870+ err |= get_user(nop, (unsigned int *)(regs->tpc+8));
9871+
9872+ if (err)
9873+ break;
9874+
9875+ if ((sethi & 0xFFC00000U) == 0x03000000U &&
9876+ (ba & 0xFFF00000U) == 0x30600000U &&
9877+ nop == 0x01000000U)
9878+ {
9879+ unsigned long addr;
9880+
9881+ addr = (sethi & 0x003FFFFFU) << 10;
9882+ regs->u_regs[UREG_G1] = addr;
9883+ addr = regs->tpc + ((((ba | 0xFFFFFFFFFFF80000UL) ^ 0x00040000UL) + 0x00040000UL) << 2);
9884+
9885+ if (test_thread_flag(TIF_32BIT))
9886+ addr &= 0xFFFFFFFFUL;
9887+
9888+ regs->tpc = addr;
9889+ regs->tnpc = addr+4;
9890+ return 2;
9891+ }
9892+ } while (0);
9893+
9894+#endif
9895+
9896+ return 1;
9897+}
9898+
9899+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
9900+{
9901+ unsigned long i;
9902+
9903+ printk(KERN_ERR "PAX: bytes at PC: ");
9904+ for (i = 0; i < 8; i++) {
9905+ unsigned int c;
9906+ if (get_user(c, (unsigned int *)pc+i))
9907+ printk(KERN_CONT "???????? ");
9908+ else
9909+ printk(KERN_CONT "%08x ", c);
9910+ }
9911+ printk("\n");
9912+}
9913+#endif
9914+
9915 asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
9916 {
9917 struct mm_struct *mm = current->mm;
9918@@ -341,6 +804,29 @@ retry:
9919 if (!vma)
9920 goto bad_area;
9921
9922+#ifdef CONFIG_PAX_PAGEEXEC
9923+ /* PaX: detect ITLB misses on non-exec pages */
9924+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && vma->vm_start <= address &&
9925+ !(vma->vm_flags & VM_EXEC) && (fault_code & FAULT_CODE_ITLB))
9926+ {
9927+ if (address != regs->tpc)
9928+ goto good_area;
9929+
9930+ up_read(&mm->mmap_sem);
9931+ switch (pax_handle_fetch_fault(regs)) {
9932+
9933+#ifdef CONFIG_PAX_EMUPLT
9934+ case 2:
9935+ case 3:
9936+ return;
9937+#endif
9938+
9939+ }
9940+ pax_report_fault(regs, (void *)regs->tpc, (void *)(regs->u_regs[UREG_FP] + STACK_BIAS));
9941+ do_group_exit(SIGKILL);
9942+ }
9943+#endif
9944+
9945 /* Pure DTLB misses do not tell us whether the fault causing
9946 * load/store/atomic was a write or not, it only says that there
9947 * was no match. So in such a case we (carefully) read the
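
Note on the displacement decoding in the PLT emulation above: the repeated ((x | ~MASK) ^ SIGN) + SIGN expressions are a branchless sign extension of the instruction's immediate field. A minimal userspace sketch of the 30-bit CALL case, with the same constants as above (sign_extend30() is an illustrative name, not a kernel function):

  #include <stdio.h>

  static unsigned long sign_extend30(unsigned long insn)
  {
          unsigned long d = insn & 0x3FFFFFFFUL;   /* keep disp30 */

          return ((d | 0xFFFFFFFFC0000000UL) ^ 0x20000000UL)
                 + 0x20000000UL;
  }

  int main(void)
  {
          /* bit 29 set: negative displacement, high bits stay 1 */
          printf("%lx\n", sign_extend30(0x20000001UL) << 2);
          /* bit 29 clear: positive displacement */
          printf("%lx\n", sign_extend30(0x00000001UL) << 2);
          return 0;
  }

The OR sets bits 63..30 unconditionally; for a positive displacement the final addition carries out of bit 29 and clears them again, while for a negative one it merely restores the flipped sign bit.
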
9948diff --git a/arch/sparc/mm/hugetlbpage.c b/arch/sparc/mm/hugetlbpage.c
9949index d2b5944..bd813f2 100644
9950--- a/arch/sparc/mm/hugetlbpage.c
9951+++ b/arch/sparc/mm/hugetlbpage.c
9952@@ -38,7 +38,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
9953
9954 info.flags = 0;
9955 info.length = len;
9956- info.low_limit = TASK_UNMAPPED_BASE;
9957+ info.low_limit = mm->mmap_base;
9958 info.high_limit = min(task_size, VA_EXCLUDE_START);
9959 info.align_mask = PAGE_MASK & ~HPAGE_MASK;
9960 info.align_offset = 0;
9961@@ -47,6 +47,12 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *filp,
9962 if ((addr & ~PAGE_MASK) && task_size > VA_EXCLUDE_END) {
9963 VM_BUG_ON(addr != -ENOMEM);
9964 info.low_limit = VA_EXCLUDE_END;
9965+
9966+#ifdef CONFIG_PAX_RANDMMAP
9967+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9968+ info.low_limit += mm->delta_mmap;
9969+#endif
9970+
9971 info.high_limit = task_size;
9972 addr = vm_unmapped_area(&info);
9973 }
9974@@ -85,6 +91,12 @@ hugetlb_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
9975 VM_BUG_ON(addr != -ENOMEM);
9976 info.flags = 0;
9977 info.low_limit = TASK_UNMAPPED_BASE;
9978+
9979+#ifdef CONFIG_PAX_RANDMMAP
9980+ if (mm->pax_flags & MF_PAX_RANDMMAP)
9981+ info.low_limit += mm->delta_mmap;
9982+#endif
9983+
9984 info.high_limit = STACK_TOP32;
9985 addr = vm_unmapped_area(&info);
9986 }
9987@@ -99,6 +111,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
9988 struct mm_struct *mm = current->mm;
9989 struct vm_area_struct *vma;
9990 unsigned long task_size = TASK_SIZE;
9991+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
9992
9993 if (test_thread_flag(TIF_32BIT))
9994 task_size = STACK_TOP32;
9995@@ -114,11 +127,14 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
9996 return addr;
9997 }
9998
9999+#ifdef CONFIG_PAX_RANDMMAP
10000+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
10001+#endif
10002+
10003 if (addr) {
10004 addr = ALIGN(addr, HPAGE_SIZE);
10005 vma = find_vma(mm, addr);
10006- if (task_size - len >= addr &&
10007- (!vma || addr + len <= vma->vm_start))
10008+ if (task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
10009 return addr;
10010 }
10011 if (mm->get_unmapped_area == arch_get_unmapped_area)
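
The hunks above thread a randomized guard gap into the unmapped-area search: gr_rand_threadstack_offset() supplies a per-call offset and check_heap_stack_gap() replaces the open-coded '!vma || addr + len <= vma->vm_start' test. Both helpers are grsecurity-internal; the sketch below only models the shape of the predicate (gap_ok() and the reduced struct vma are illustrative, not the kernel's definitions):

  #include <stdbool.h>
  #include <stdio.h>

  struct vma { unsigned long vm_start; };

  static bool gap_ok(const struct vma *next, unsigned long addr,
                     unsigned long len, unsigned long gap)
  {
          if (!next)              /* nothing mapped above the range */
                  return true;
          return addr + len + gap <= next->vm_start;
  }

  int main(void)
  {
          struct vma next = { 0x40000000UL };

          printf("%d\n", gap_ok(&next, 0x3FFFE000UL, 0x1000UL, 0));
          printf("%d\n", gap_ok(&next, 0x3FFFE000UL, 0x1000UL, 0x2000UL));
          return 0;
  }
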
10012diff --git a/arch/tile/include/asm/atomic_64.h b/arch/tile/include/asm/atomic_64.h
10013index f4500c6..889656c 100644
10014--- a/arch/tile/include/asm/atomic_64.h
10015+++ b/arch/tile/include/asm/atomic_64.h
10016@@ -143,6 +143,16 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
10017
10018 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
10019
10020+#define atomic64_read_unchecked(v) atomic64_read(v)
10021+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
10022+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
10023+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
10024+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
10025+#define atomic64_inc_unchecked(v) atomic64_inc(v)
10026+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
10027+#define atomic64_dec_unchecked(v) atomic64_dec(v)
10028+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
10029+
10030 /* Atomic dec and inc don't implement barrier, so provide them if needed. */
10031 #define smp_mb__before_atomic_dec() smp_mb()
10032 #define smp_mb__after_atomic_dec() smp_mb()
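
tile carries no PAX_REFCOUNT overflow instrumentation, so the *_unchecked variants above simply alias the plain ops; on instrumented arches the checked forms detect overflow while _unchecked is reserved for counters that may wrap by design (statistics, sequence numbers). A userspace analogue of that split, using __builtin_add_overflow (GCC/clang) rather than the kernel's arch-specific atomics (inc_checked()/inc_unchecked() are illustrative names):

  #include <stdio.h>
  #include <stdint.h>

  static int64_t inc_checked(int64_t v)
  {
          int64_t r;

          if (__builtin_add_overflow(v, 1, &r)) {
                  fprintf(stderr, "refcount overflow detected\n");
                  return v;       /* saturate instead of wrapping */
          }
          return r;
  }

  static int64_t inc_unchecked(int64_t v)
  {
          /* wraps to INT64_MIN on two's-complement targets */
          return (int64_t)((uint64_t)v + 1);
  }

  int main(void)
  {
          printf("%lld\n", (long long)inc_checked(INT64_MAX));
          printf("%lld\n", (long long)inc_unchecked(INT64_MAX));
          return 0;
  }
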
10033diff --git a/arch/tile/include/asm/cache.h b/arch/tile/include/asm/cache.h
10034index a9a5299..0fce79e 100644
10035--- a/arch/tile/include/asm/cache.h
10036+++ b/arch/tile/include/asm/cache.h
10037@@ -15,11 +15,12 @@
10038 #ifndef _ASM_TILE_CACHE_H
10039 #define _ASM_TILE_CACHE_H
10040
10041+#include <linux/const.h>
10042 #include <arch/chip.h>
10043
10044 /* bytes per L1 data cache line */
10045 #define L1_CACHE_SHIFT CHIP_L1D_LOG_LINE_SIZE()
10046-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10047+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10048
10049 /* bytes per L2 cache line */
10050 #define L2_CACHE_SHIFT CHIP_L2_LOG_LINE_SIZE()
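
Switching the shift operand from 1 to _AC(1,UL) makes L1_CACHE_BYTES an unsigned long in C while leaving it a bare constant in assembly, so expressions built from it never pass through signed int arithmetic. The idiom reduced to a standalone demo (the _AC definition mirrors include/uapi/linux/const.h):

  #include <stdio.h>

  #ifdef __ASSEMBLY__
  #define _AC(X, Y)       X
  #else
  #define __AC(X, Y)      (X##Y)
  #define _AC(X, Y)       __AC(X, Y)
  #endif

  #define L1_CACHE_SHIFT  6
  #define L1_CACHE_BYTES  (_AC(1,UL) << L1_CACHE_SHIFT)

  int main(void)
  {
          /* sizeof(unsigned long), not sizeof(int) */
          printf("%zu\n", sizeof(L1_CACHE_BYTES));
          return 0;
  }
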
10051diff --git a/arch/tile/include/asm/uaccess.h b/arch/tile/include/asm/uaccess.h
10052index 8a082bc..7a6bf87 100644
10053--- a/arch/tile/include/asm/uaccess.h
10054+++ b/arch/tile/include/asm/uaccess.h
10055@@ -408,9 +408,9 @@ static inline unsigned long __must_check copy_from_user(void *to,
10056 const void __user *from,
10057 unsigned long n)
10058 {
10059- int sz = __compiletime_object_size(to);
10060+ size_t sz = __compiletime_object_size(to);
10061
10062- if (likely(sz == -1 || sz >= n))
10063+ if (likely(sz == (size_t)-1 || sz >= n))
10064 n = _copy_from_user(to, from, n);
10065 else
10066 copy_from_user_overflow();
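
__compiletime_object_size() reports "unknown" as -1; keeping it in a plain int means every comparison against an unsigned length goes through implicit conversion, and spelling the sentinel as (size_t)-1 in a size_t makes the intent explicit. A small demo of the conversion behaviour the old form relied on:

  #include <stdio.h>
  #include <stddef.h>

  int main(void)
  {
          int sz = -1;                    /* "size unknown" sentinel */
          size_t n = 16;

          /* -1 converts to SIZE_MAX in the comparison, so this is
           * true for every n; the old code worked only by accident
           * of the usual arithmetic conversions */
          printf("%d\n", sz >= n);

          size_t szu = (size_t)-1;
          printf("%d\n", szu == (size_t)-1 || szu >= n);
          return 0;
  }
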
10067diff --git a/arch/tile/mm/hugetlbpage.c b/arch/tile/mm/hugetlbpage.c
10068index 650ccff..45fe2d6 100644
10069--- a/arch/tile/mm/hugetlbpage.c
10070+++ b/arch/tile/mm/hugetlbpage.c
10071@@ -239,6 +239,7 @@ static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
10072 info.high_limit = TASK_SIZE;
10073 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
10074 info.align_offset = 0;
10075+ info.threadstack_offset = 0;
10076 return vm_unmapped_area(&info);
10077 }
10078
10079@@ -256,6 +257,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
10080 info.high_limit = current->mm->mmap_base;
10081 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
10082 info.align_offset = 0;
10083+ info.threadstack_offset = 0;
10084 addr = vm_unmapped_area(&info);
10085
10086 /*
10087diff --git a/arch/um/Makefile b/arch/um/Makefile
10088index 133f7de..1d6f2f1 100644
10089--- a/arch/um/Makefile
10090+++ b/arch/um/Makefile
10091@@ -62,6 +62,10 @@ USER_CFLAGS = $(patsubst $(KERNEL_DEFINES),,$(patsubst -D__KERNEL__,,\
10092 $(patsubst -I%,,$(KBUILD_CFLAGS)))) $(ARCH_INCLUDE) $(MODE_INCLUDE) \
10093 $(filter -I%,$(CFLAGS)) -D_FILE_OFFSET_BITS=64 -idirafter include
10094
10095+ifdef CONSTIFY_PLUGIN
10096+USER_CFLAGS += -fplugin-arg-constify_plugin-no-constify
10097+endif
10098+
10099 #This will adjust *FLAGS accordingly to the platform.
10100 include $(srctree)/$(ARCH_DIR)/Makefile-os-$(OS)
10101
10102diff --git a/arch/um/defconfig b/arch/um/defconfig
10103index 08107a7..ab22afe 100644
10104--- a/arch/um/defconfig
10105+++ b/arch/um/defconfig
10106@@ -51,7 +51,6 @@ CONFIG_X86_CMPXCHG=y
10107 CONFIG_X86_L1_CACHE_SHIFT=5
10108 CONFIG_X86_XADD=y
10109 CONFIG_X86_PPRO_FENCE=y
10110-CONFIG_X86_WP_WORKS_OK=y
10111 CONFIG_X86_INVLPG=y
10112 CONFIG_X86_BSWAP=y
10113 CONFIG_X86_POPAD_OK=y
10114diff --git a/arch/um/include/asm/cache.h b/arch/um/include/asm/cache.h
10115index 19e1bdd..3665b77 100644
10116--- a/arch/um/include/asm/cache.h
10117+++ b/arch/um/include/asm/cache.h
10118@@ -1,6 +1,7 @@
10119 #ifndef __UM_CACHE_H
10120 #define __UM_CACHE_H
10121
10122+#include <linux/const.h>
10123
10124 #if defined(CONFIG_UML_X86) && !defined(CONFIG_64BIT)
10125 # define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
10126@@ -12,6 +13,6 @@
10127 # define L1_CACHE_SHIFT 5
10128 #endif
10129
10130-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10131+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10132
10133 #endif
10134diff --git a/arch/um/include/asm/kmap_types.h b/arch/um/include/asm/kmap_types.h
10135index 2e0a6b1..a64d0f5 100644
10136--- a/arch/um/include/asm/kmap_types.h
10137+++ b/arch/um/include/asm/kmap_types.h
10138@@ -8,6 +8,6 @@
10139
10140 /* No more #include "asm/arch/kmap_types.h" ! */
10141
10142-#define KM_TYPE_NR 14
10143+#define KM_TYPE_NR 15
10144
10145 #endif
10146diff --git a/arch/um/include/asm/page.h b/arch/um/include/asm/page.h
10147index 5ff53d9..5850cdf 100644
10148--- a/arch/um/include/asm/page.h
10149+++ b/arch/um/include/asm/page.h
10150@@ -14,6 +14,9 @@
10151 #define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
10152 #define PAGE_MASK (~(PAGE_SIZE-1))
10153
10154+#define ktla_ktva(addr) (addr)
10155+#define ktva_ktla(addr) (addr)
10156+
10157 #ifndef __ASSEMBLY__
10158
10159 struct page;
10160diff --git a/arch/um/include/asm/pgtable-3level.h b/arch/um/include/asm/pgtable-3level.h
10161index 0032f92..cd151e0 100644
10162--- a/arch/um/include/asm/pgtable-3level.h
10163+++ b/arch/um/include/asm/pgtable-3level.h
10164@@ -58,6 +58,7 @@
10165 #define pud_present(x) (pud_val(x) & _PAGE_PRESENT)
10166 #define pud_populate(mm, pud, pmd) \
10167 set_pud(pud, __pud(_PAGE_TABLE + __pa(pmd)))
10168+#define pud_populate_kernel(mm, pud, pmd) pud_populate((mm), (pud), (pmd))
10169
10170 #ifdef CONFIG_64BIT
10171 #define set_pud(pudptr, pudval) set_64bit((u64 *) (pudptr), pud_val(pudval))
10172diff --git a/arch/um/kernel/process.c b/arch/um/kernel/process.c
10173index bbcef52..6a2a483 100644
10174--- a/arch/um/kernel/process.c
10175+++ b/arch/um/kernel/process.c
10176@@ -367,22 +367,6 @@ int singlestepping(void * t)
10177 return 2;
10178 }
10179
10180-/*
10181- * Only x86 and x86_64 have an arch_align_stack().
10182- * All other arches have "#define arch_align_stack(x) (x)"
10183- * in their asm/system.h
10184- * As this is included in UML from asm-um/system-generic.h,
10185- * we can use it to behave as the subarch does.
10186- */
10187-#ifndef arch_align_stack
10188-unsigned long arch_align_stack(unsigned long sp)
10189-{
10190- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
10191- sp -= get_random_int() % 8192;
10192- return sp & ~0xf;
10193-}
10194-#endif
10195-
10196 unsigned long get_wchan(struct task_struct *p)
10197 {
10198 unsigned long stack_page, sp, ip;
10199diff --git a/arch/unicore32/include/asm/cache.h b/arch/unicore32/include/asm/cache.h
10200index ad8f795..2c7eec6 100644
10201--- a/arch/unicore32/include/asm/cache.h
10202+++ b/arch/unicore32/include/asm/cache.h
10203@@ -12,8 +12,10 @@
10204 #ifndef __UNICORE_CACHE_H__
10205 #define __UNICORE_CACHE_H__
10206
10207-#define L1_CACHE_SHIFT (5)
10208-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
10209+#include <linux/const.h>
10210+
10211+#define L1_CACHE_SHIFT 5
10212+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
10213
10214 /*
10215 * Memory returned by kmalloc() may be used for DMA, so we must make
10216diff --git a/arch/x86/Kconfig b/arch/x86/Kconfig
10217index fe120da..24177f7 100644
10218--- a/arch/x86/Kconfig
10219+++ b/arch/x86/Kconfig
10220@@ -239,7 +239,7 @@ config X86_HT
10221
10222 config X86_32_LAZY_GS
10223 def_bool y
10224- depends on X86_32 && !CC_STACKPROTECTOR
10225+ depends on X86_32 && !CC_STACKPROTECTOR && !PAX_MEMORY_UDEREF
10226
10227 config ARCH_HWEIGHT_CFLAGS
10228 string
10229@@ -1073,6 +1073,7 @@ config MICROCODE_EARLY
10230
10231 config X86_MSR
10232 tristate "/dev/cpu/*/msr - Model-specific register support"
10233+ depends on !GRKERNSEC_KMEM
10234 ---help---
10235 This device gives privileged processes access to the x86
10236 Model-Specific Registers (MSRs). It is a character device with
10237@@ -1096,7 +1097,7 @@ choice
10238
10239 config NOHIGHMEM
10240 bool "off"
10241- depends on !X86_NUMAQ
10242+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10243 ---help---
10244 Linux can use up to 64 Gigabytes of physical memory on x86 systems.
10245 However, the address space of 32-bit x86 processors is only 4
10246@@ -1133,7 +1134,7 @@ config NOHIGHMEM
10247
10248 config HIGHMEM4G
10249 bool "4GB"
10250- depends on !X86_NUMAQ
10251+ depends on !X86_NUMAQ && !(PAX_PAGEEXEC && PAX_ENABLE_PAE)
10252 ---help---
10253 Select this if you have a 32-bit processor and between 1 and 4
10254 gigabytes of physical RAM.
10255@@ -1186,7 +1187,7 @@ config PAGE_OFFSET
10256 hex
10257 default 0xB0000000 if VMSPLIT_3G_OPT
10258 default 0x80000000 if VMSPLIT_2G
10259- default 0x78000000 if VMSPLIT_2G_OPT
10260+ default 0x70000000 if VMSPLIT_2G_OPT
10261 default 0x40000000 if VMSPLIT_1G
10262 default 0xC0000000
10263 depends on X86_32
10264@@ -1584,6 +1585,7 @@ config SECCOMP
10265
10266 config CC_STACKPROTECTOR
10267 bool "Enable -fstack-protector buffer overflow detection"
10268+ depends on X86_64 || !PAX_MEMORY_UDEREF
10269 ---help---
10270 This option turns on the -fstack-protector GCC feature. This
10271 feature puts, at the beginning of functions, a canary value on
10272@@ -1703,6 +1705,8 @@ config X86_NEED_RELOCS
10273 config PHYSICAL_ALIGN
10274 hex "Alignment value to which kernel should be aligned" if X86_32
10275 default "0x1000000"
10276+ range 0x200000 0x1000000 if PAX_KERNEXEC && X86_PAE
10277+ range 0x400000 0x1000000 if PAX_KERNEXEC && !X86_PAE
10278 range 0x2000 0x1000000
10279 ---help---
10280 This value puts the alignment restrictions on physical address
10281@@ -1778,9 +1782,10 @@ config DEBUG_HOTPLUG_CPU0
10282 If unsure, say N.
10283
10284 config COMPAT_VDSO
10285- def_bool y
10286+ def_bool n
10287 prompt "Compat VDSO support"
10288 depends on X86_32 || IA32_EMULATION
10289+ depends on !PAX_PAGEEXEC && !PAX_SEGMEXEC && !PAX_KERNEXEC && !PAX_MEMORY_UDEREF
10290 ---help---
10291 Map the 32-bit VDSO to the predictable old-style address too.
10292
10293diff --git a/arch/x86/Kconfig.cpu b/arch/x86/Kconfig.cpu
10294index c026cca..14657ae 100644
10295--- a/arch/x86/Kconfig.cpu
10296+++ b/arch/x86/Kconfig.cpu
10297@@ -319,7 +319,7 @@ config X86_PPRO_FENCE
10298
10299 config X86_F00F_BUG
10300 def_bool y
10301- depends on M586MMX || M586TSC || M586 || M486
10302+ depends on (M586MMX || M586TSC || M586 || M486) && !PAX_KERNEXEC
10303
10304 config X86_INVD_BUG
10305 def_bool y
10306@@ -327,7 +327,7 @@ config X86_INVD_BUG
10307
10308 config X86_ALIGNMENT_16
10309 def_bool y
10310- depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || MELAN || MK6 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10311+ depends on MWINCHIP3D || MWINCHIPC6 || MCYRIXIII || X86_ELAN || MK8 || MK7 || MK6 || MCORE2 || MPENTIUM4 || MPENTIUMIII || MPENTIUMII || M686 || M586MMX || M586TSC || M586 || M486 || MVIAC3_2 || MGEODEGX1
10312
10313 config X86_INTEL_USERCOPY
10314 def_bool y
10315@@ -373,7 +373,7 @@ config X86_CMPXCHG64
10316 # generates cmov.
10317 config X86_CMOV
10318 def_bool y
10319- depends on (MK8 || MK7 || MCORE2 || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10320+ depends on (MK8 || MK7 || MCORE2 || MPSC || MPENTIUM4 || MPENTIUMM || MPENTIUMIII || MPENTIUMII || M686 || MVIAC3_2 || MVIAC7 || MCRUSOE || MEFFICEON || X86_64 || MATOM || MGEODE_LX)
10321
10322 config X86_MINIMUM_CPU_FAMILY
10323 int
10324diff --git a/arch/x86/Kconfig.debug b/arch/x86/Kconfig.debug
10325index c198b7e..63eea60 100644
10326--- a/arch/x86/Kconfig.debug
10327+++ b/arch/x86/Kconfig.debug
10328@@ -84,7 +84,7 @@ config X86_PTDUMP
10329 config DEBUG_RODATA
10330 bool "Write protect kernel read-only data structures"
10331 default y
10332- depends on DEBUG_KERNEL
10333+ depends on DEBUG_KERNEL && BROKEN
10334 ---help---
10335 Mark the kernel read-only data as write-protected in the pagetables,
10336 in order to catch accidental (and incorrect) writes to such const
10337@@ -102,7 +102,7 @@ config DEBUG_RODATA_TEST
10338
10339 config DEBUG_SET_MODULE_RONX
10340 bool "Set loadable kernel module data as NX and text as RO"
10341- depends on MODULES
10342+ depends on MODULES && BROKEN
10343 ---help---
10344 This option helps catch unintended modifications to loadable
10345 kernel module's text and read-only data. It also prevents execution
10346diff --git a/arch/x86/Makefile b/arch/x86/Makefile
10347index 5c47726..8c4fa67 100644
10348--- a/arch/x86/Makefile
10349+++ b/arch/x86/Makefile
10350@@ -54,6 +54,7 @@ else
10351 UTS_MACHINE := x86_64
10352 CHECKFLAGS += -D__x86_64__ -m64
10353
10354+ biarch := $(call cc-option,-m64)
10355 KBUILD_AFLAGS += -m64
10356 KBUILD_CFLAGS += -m64
10357
10358@@ -234,3 +235,12 @@ define archhelp
10359 echo ' FDARGS="..." arguments for the booted kernel'
10360 echo ' FDINITRD=file initrd for the booted kernel'
10361 endef
10362+
10363+define OLD_LD
10364+
10365+*** ${VERSION}.${PATCHLEVEL} PaX kernels no longer build correctly with old versions of binutils.
10366+*** Please upgrade your binutils to 2.18 or newer
10367+endef
10368+
10369+archprepare:
10370+ $(if $(LDFLAGS_BUILD_ID),,$(error $(OLD_LD)))
10371diff --git a/arch/x86/boot/Makefile b/arch/x86/boot/Makefile
10372index 379814b..add62ce 100644
10373--- a/arch/x86/boot/Makefile
10374+++ b/arch/x86/boot/Makefile
10375@@ -65,6 +65,9 @@ KBUILD_CFLAGS := $(USERINCLUDE) -g -Os -D_SETUP -D__KERNEL__ \
10376 $(call cc-option, -fno-stack-protector) \
10377 $(call cc-option, -mpreferred-stack-boundary=2)
10378 KBUILD_CFLAGS += $(call cc-option, -m32)
10379+ifdef CONSTIFY_PLUGIN
10380+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
10381+endif
10382 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10383 GCOV_PROFILE := n
10384
10385diff --git a/arch/x86/boot/bitops.h b/arch/x86/boot/bitops.h
10386index 878e4b9..20537ab 100644
10387--- a/arch/x86/boot/bitops.h
10388+++ b/arch/x86/boot/bitops.h
10389@@ -26,7 +26,7 @@ static inline int variable_test_bit(int nr, const void *addr)
10390 u8 v;
10391 const u32 *p = (const u32 *)addr;
10392
10393- asm("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
10394+ asm volatile("btl %2,%1; setc %0" : "=qm" (v) : "m" (*p), "Ir" (nr));
10395 return v;
10396 }
10397
10398@@ -37,7 +37,7 @@ static inline int variable_test_bit(int nr, const void *addr)
10399
10400 static inline void set_bit(int nr, void *addr)
10401 {
10402- asm("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
10403+ asm volatile("btsl %1,%0" : "+m" (*(u32 *)addr) : "Ir" (nr));
10404 }
10405
10406 #endif /* BOOT_BITOPS_H */
10407diff --git a/arch/x86/boot/boot.h b/arch/x86/boot/boot.h
10408index 5b75319..331a4ca 100644
10409--- a/arch/x86/boot/boot.h
10410+++ b/arch/x86/boot/boot.h
10411@@ -85,7 +85,7 @@ static inline void io_delay(void)
10412 static inline u16 ds(void)
10413 {
10414 u16 seg;
10415- asm("movw %%ds,%0" : "=rm" (seg));
10416+ asm volatile("movw %%ds,%0" : "=rm" (seg));
10417 return seg;
10418 }
10419
10420@@ -181,7 +181,7 @@ static inline void wrgs32(u32 v, addr_t addr)
10421 static inline int memcmp(const void *s1, const void *s2, size_t len)
10422 {
10423 u8 diff;
10424- asm("repe; cmpsb; setnz %0"
10425+ asm volatile("repe; cmpsb; setnz %0"
10426 : "=qm" (diff), "+D" (s1), "+S" (s2), "+c" (len));
10427 return diff;
10428 }
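
The asm volatile conversions in the boot code (here, in bitops.h above, and in cpucheck.c below) exist because an asm statement with only output constraints is assumed pure: at -O2 the compiler may CSE two executions into one or delete the statement outright if the result looks unused. A userspace sketch of the difference, assuming GCC/clang inline asm on x86-64 (rdtsc_once() is an illustrative name):

  #include <stdio.h>

  static unsigned long rdtsc_once(void)
  {
          unsigned int lo, hi;

          /* without "volatile" this output-only asm is assumed pure
           * and the second call below may be folded into the first */
          asm volatile("rdtsc" : "=a" (lo), "=d" (hi));
          return ((unsigned long)hi << 32) | lo;
  }

  int main(void)
  {
          unsigned long a = rdtsc_once();
          unsigned long b = rdtsc_once();

          printf("%lu\n", b - a);         /* nonzero: both executed */
          return 0;
  }
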
10429diff --git a/arch/x86/boot/compressed/Makefile b/arch/x86/boot/compressed/Makefile
10430index 5ef205c..342191d 100644
10431--- a/arch/x86/boot/compressed/Makefile
10432+++ b/arch/x86/boot/compressed/Makefile
10433@@ -14,6 +14,9 @@ cflags-$(CONFIG_X86_64) := -mcmodel=small
10434 KBUILD_CFLAGS += $(cflags-y)
10435 KBUILD_CFLAGS += $(call cc-option,-ffreestanding)
10436 KBUILD_CFLAGS += $(call cc-option,-fno-stack-protector)
10437+ifdef CONSTIFY_PLUGIN
10438+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
10439+endif
10440
10441 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
10442 GCOV_PROFILE := n
10443diff --git a/arch/x86/boot/compressed/eboot.c b/arch/x86/boot/compressed/eboot.c
10444index d606463..b887794 100644
10445--- a/arch/x86/boot/compressed/eboot.c
10446+++ b/arch/x86/boot/compressed/eboot.c
10447@@ -150,7 +150,6 @@ again:
10448 *addr = max_addr;
10449 }
10450
10451-free_pool:
10452 efi_call_phys1(sys_table->boottime->free_pool, map);
10453
10454 fail:
10455@@ -214,7 +213,6 @@ static efi_status_t low_alloc(unsigned long size, unsigned long align,
10456 if (i == map_size / desc_size)
10457 status = EFI_NOT_FOUND;
10458
10459-free_pool:
10460 efi_call_phys1(sys_table->boottime->free_pool, map);
10461 fail:
10462 return status;
10463diff --git a/arch/x86/boot/compressed/efi_stub_32.S b/arch/x86/boot/compressed/efi_stub_32.S
10464index a53440e..c3dbf1e 100644
10465--- a/arch/x86/boot/compressed/efi_stub_32.S
10466+++ b/arch/x86/boot/compressed/efi_stub_32.S
10467@@ -46,16 +46,13 @@ ENTRY(efi_call_phys)
10468 * parameter 2, ..., param n. To make things easy, we save the return
10469 * address of efi_call_phys in a global variable.
10470 */
10471- popl %ecx
10472- movl %ecx, saved_return_addr(%edx)
10473- /* get the function pointer into ECX*/
10474- popl %ecx
10475- movl %ecx, efi_rt_function_ptr(%edx)
10476+ popl saved_return_addr(%edx)
10477+ popl efi_rt_function_ptr(%edx)
10478
10479 /*
10480 * 3. Call the physical function.
10481 */
10482- call *%ecx
10483+ call *efi_rt_function_ptr(%edx)
10484
10485 /*
10486 * 4. Balance the stack. And because EAX contain the return value,
10487@@ -67,15 +64,12 @@ ENTRY(efi_call_phys)
10488 1: popl %edx
10489 subl $1b, %edx
10490
10491- movl efi_rt_function_ptr(%edx), %ecx
10492- pushl %ecx
10493+ pushl efi_rt_function_ptr(%edx)
10494
10495 /*
10496 * 10. Push the saved return address onto the stack and return.
10497 */
10498- movl saved_return_addr(%edx), %ecx
10499- pushl %ecx
10500- ret
10501+ jmpl *saved_return_addr(%edx)
10502 ENDPROC(efi_call_phys)
10503 .previous
10504
10505diff --git a/arch/x86/boot/compressed/head_32.S b/arch/x86/boot/compressed/head_32.S
10506index 1e3184f..0d11e2e 100644
10507--- a/arch/x86/boot/compressed/head_32.S
10508+++ b/arch/x86/boot/compressed/head_32.S
10509@@ -118,7 +118,7 @@ preferred_addr:
10510 notl %eax
10511 andl %eax, %ebx
10512 #else
10513- movl $LOAD_PHYSICAL_ADDR, %ebx
10514+ movl $____LOAD_PHYSICAL_ADDR, %ebx
10515 #endif
10516
10517 /* Target address to relocate to for decompression */
10518@@ -204,7 +204,7 @@ relocated:
10519 * and where it was actually loaded.
10520 */
10521 movl %ebp, %ebx
10522- subl $LOAD_PHYSICAL_ADDR, %ebx
10523+ subl $____LOAD_PHYSICAL_ADDR, %ebx
10524 jz 2f /* Nothing to be done if loaded at compiled addr. */
10525 /*
10526 * Process relocations.
10527@@ -212,8 +212,7 @@ relocated:
10528
10529 1: subl $4, %edi
10530 movl (%edi), %ecx
10531- testl %ecx, %ecx
10532- jz 2f
10533+ jecxz 2f
10534 addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
10535 jmp 1b
10536 2:
10537diff --git a/arch/x86/boot/compressed/head_64.S b/arch/x86/boot/compressed/head_64.S
10538index 16f24e6..47491a3 100644
10539--- a/arch/x86/boot/compressed/head_64.S
10540+++ b/arch/x86/boot/compressed/head_64.S
10541@@ -97,7 +97,7 @@ ENTRY(startup_32)
10542 notl %eax
10543 andl %eax, %ebx
10544 #else
10545- movl $LOAD_PHYSICAL_ADDR, %ebx
10546+ movl $____LOAD_PHYSICAL_ADDR, %ebx
10547 #endif
10548
10549 /* Target address to relocate to for decompression */
10550@@ -272,7 +272,7 @@ preferred_addr:
10551 notq %rax
10552 andq %rax, %rbp
10553 #else
10554- movq $LOAD_PHYSICAL_ADDR, %rbp
10555+ movq $____LOAD_PHYSICAL_ADDR, %rbp
10556 #endif
10557
10558 /* Target address to relocate to for decompression */
10559@@ -363,8 +363,8 @@ gdt:
10560 .long gdt
10561 .word 0
10562 .quad 0x0000000000000000 /* NULL descriptor */
10563- .quad 0x00af9a000000ffff /* __KERNEL_CS */
10564- .quad 0x00cf92000000ffff /* __KERNEL_DS */
10565+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
10566+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
10567 .quad 0x0080890000000000 /* TS descriptor */
10568 .quad 0x0000000000000000 /* TS continued */
10569 gdt_end:
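
The __KERNEL_CS/__KERNEL_DS access bytes change from 9a/92 to 9b/93, i.e. the descriptors' accessed bit (bit 40) is pre-set. The CPU otherwise sets that bit itself on first segment load, which is a write into the GDT; pre-setting it keeps a KERNEXEC-style read-only GDT workable. A decoding sketch of the four quadwords above:

  #include <stdio.h>
  #include <stdint.h>

  static void show(uint64_t d, const char *name)
  {
          unsigned int type = (d >> 40) & 0xF;    /* descriptor type */

          printf("%s: type=%x accessed=%u\n", name, type, type & 1);
  }

  int main(void)
  {
          show(0x00af9a000000ffffULL, "old __KERNEL_CS");
          show(0x00af9b000000ffffULL, "new __KERNEL_CS");
          show(0x00cf92000000ffffULL, "old __KERNEL_DS");
          show(0x00cf93000000ffffULL, "new __KERNEL_DS");
          return 0;
  }
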
10570diff --git a/arch/x86/boot/compressed/misc.c b/arch/x86/boot/compressed/misc.c
10571index 7cb56c6..d382d84 100644
10572--- a/arch/x86/boot/compressed/misc.c
10573+++ b/arch/x86/boot/compressed/misc.c
10574@@ -303,7 +303,7 @@ static void parse_elf(void *output)
10575 case PT_LOAD:
10576 #ifdef CONFIG_RELOCATABLE
10577 dest = output;
10578- dest += (phdr->p_paddr - LOAD_PHYSICAL_ADDR);
10579+ dest += (phdr->p_paddr - ____LOAD_PHYSICAL_ADDR);
10580 #else
10581 dest = (void *)(phdr->p_paddr);
10582 #endif
10583@@ -354,7 +354,7 @@ asmlinkage void decompress_kernel(void *rmode, memptr heap,
10584 error("Destination address too large");
10585 #endif
10586 #ifndef CONFIG_RELOCATABLE
10587- if ((unsigned long)output != LOAD_PHYSICAL_ADDR)
10588+ if ((unsigned long)output != ____LOAD_PHYSICAL_ADDR)
10589 error("Wrong destination address");
10590 #endif
10591
10592diff --git a/arch/x86/boot/cpucheck.c b/arch/x86/boot/cpucheck.c
10593index 4d3ff03..e4972ff 100644
10594--- a/arch/x86/boot/cpucheck.c
10595+++ b/arch/x86/boot/cpucheck.c
10596@@ -74,7 +74,7 @@ static int has_fpu(void)
10597 u16 fcw = -1, fsw = -1;
10598 u32 cr0;
10599
10600- asm("movl %%cr0,%0" : "=r" (cr0));
10601+ asm volatile("movl %%cr0,%0" : "=r" (cr0));
10602 if (cr0 & (X86_CR0_EM|X86_CR0_TS)) {
10603 cr0 &= ~(X86_CR0_EM|X86_CR0_TS);
10604 asm volatile("movl %0,%%cr0" : : "r" (cr0));
10605@@ -90,7 +90,7 @@ static int has_eflag(u32 mask)
10606 {
10607 u32 f0, f1;
10608
10609- asm("pushfl ; "
10610+ asm volatile("pushfl ; "
10611 "pushfl ; "
10612 "popl %0 ; "
10613 "movl %0,%1 ; "
10614@@ -115,7 +115,7 @@ static void get_flags(void)
10615 set_bit(X86_FEATURE_FPU, cpu.flags);
10616
10617 if (has_eflag(X86_EFLAGS_ID)) {
10618- asm("cpuid"
10619+ asm volatile("cpuid"
10620 : "=a" (max_intel_level),
10621 "=b" (cpu_vendor[0]),
10622 "=d" (cpu_vendor[1]),
10623@@ -124,7 +124,7 @@ static void get_flags(void)
10624
10625 if (max_intel_level >= 0x00000001 &&
10626 max_intel_level <= 0x0000ffff) {
10627- asm("cpuid"
10628+ asm volatile("cpuid"
10629 : "=a" (tfms),
10630 "=c" (cpu.flags[4]),
10631 "=d" (cpu.flags[0])
10632@@ -136,7 +136,7 @@ static void get_flags(void)
10633 cpu.model += ((tfms >> 16) & 0xf) << 4;
10634 }
10635
10636- asm("cpuid"
10637+ asm volatile("cpuid"
10638 : "=a" (max_amd_level)
10639 : "a" (0x80000000)
10640 : "ebx", "ecx", "edx");
10641@@ -144,7 +144,7 @@ static void get_flags(void)
10642 if (max_amd_level >= 0x80000001 &&
10643 max_amd_level <= 0x8000ffff) {
10644 u32 eax = 0x80000001;
10645- asm("cpuid"
10646+ asm volatile("cpuid"
10647 : "+a" (eax),
10648 "=c" (cpu.flags[6]),
10649 "=d" (cpu.flags[1])
10650@@ -203,9 +203,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
10651 u32 ecx = MSR_K7_HWCR;
10652 u32 eax, edx;
10653
10654- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10655+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10656 eax &= ~(1 << 15);
10657- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10658+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10659
10660 get_flags(); /* Make sure it really did something */
10661 err = check_flags();
10662@@ -218,9 +218,9 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
10663 u32 ecx = MSR_VIA_FCR;
10664 u32 eax, edx;
10665
10666- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10667+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10668 eax |= (1<<1)|(1<<7);
10669- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10670+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10671
10672 set_bit(X86_FEATURE_CX8, cpu.flags);
10673 err = check_flags();
10674@@ -231,12 +231,12 @@ int check_cpu(int *cpu_level_ptr, int *req_level_ptr, u32 **err_flags_ptr)
10675 u32 eax, edx;
10676 u32 level = 1;
10677
10678- asm("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10679- asm("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
10680- asm("cpuid"
10681+ asm volatile("rdmsr" : "=a" (eax), "=d" (edx) : "c" (ecx));
10682+ asm volatile("wrmsr" : : "a" (~0), "d" (edx), "c" (ecx));
10683+ asm volatile("cpuid"
10684 : "+a" (level), "=d" (cpu.flags[0])
10685 : : "ecx", "ebx");
10686- asm("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10687+ asm volatile("wrmsr" : : "a" (eax), "d" (edx), "c" (ecx));
10688
10689 err = check_flags();
10690 }
10691diff --git a/arch/x86/boot/header.S b/arch/x86/boot/header.S
10692index 9ec06a1..2c25e79 100644
10693--- a/arch/x86/boot/header.S
10694+++ b/arch/x86/boot/header.S
10695@@ -409,10 +409,14 @@ setup_data: .quad 0 # 64-bit physical pointer to
10696 # single linked list of
10697 # struct setup_data
10698
10699-pref_address: .quad LOAD_PHYSICAL_ADDR # preferred load addr
10700+pref_address: .quad ____LOAD_PHYSICAL_ADDR # preferred load addr
10701
10702 #define ZO_INIT_SIZE (ZO__end - ZO_startup_32 + ZO_z_extract_offset)
10703+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
10704+#define VO_INIT_SIZE (VO__end - VO__text - __PAGE_OFFSET - ____LOAD_PHYSICAL_ADDR)
10705+#else
10706 #define VO_INIT_SIZE (VO__end - VO__text)
10707+#endif
10708 #if ZO_INIT_SIZE > VO_INIT_SIZE
10709 #define INIT_SIZE ZO_INIT_SIZE
10710 #else
10711diff --git a/arch/x86/boot/memory.c b/arch/x86/boot/memory.c
10712index db75d07..8e6d0af 100644
10713--- a/arch/x86/boot/memory.c
10714+++ b/arch/x86/boot/memory.c
10715@@ -19,7 +19,7 @@
10716
10717 static int detect_memory_e820(void)
10718 {
10719- int count = 0;
10720+ unsigned int count = 0;
10721 struct biosregs ireg, oreg;
10722 struct e820entry *desc = boot_params.e820_map;
10723 static struct e820entry buf; /* static so it is zeroed */
10724diff --git a/arch/x86/boot/video-vesa.c b/arch/x86/boot/video-vesa.c
10725index 11e8c6e..fdbb1ed 100644
10726--- a/arch/x86/boot/video-vesa.c
10727+++ b/arch/x86/boot/video-vesa.c
10728@@ -200,6 +200,7 @@ static void vesa_store_pm_info(void)
10729
10730 boot_params.screen_info.vesapm_seg = oreg.es;
10731 boot_params.screen_info.vesapm_off = oreg.di;
10732+ boot_params.screen_info.vesapm_size = oreg.cx;
10733 }
10734
10735 /*
10736diff --git a/arch/x86/boot/video.c b/arch/x86/boot/video.c
10737index 43eda28..5ab5fdb 100644
10738--- a/arch/x86/boot/video.c
10739+++ b/arch/x86/boot/video.c
10740@@ -96,7 +96,7 @@ static void store_mode_params(void)
10741 static unsigned int get_entry(void)
10742 {
10743 char entry_buf[4];
10744- int i, len = 0;
10745+ unsigned int i, len = 0;
10746 int key;
10747 unsigned int v;
10748
10749diff --git a/arch/x86/crypto/aes-x86_64-asm_64.S b/arch/x86/crypto/aes-x86_64-asm_64.S
10750index 9105655..5e37f27 100644
10751--- a/arch/x86/crypto/aes-x86_64-asm_64.S
10752+++ b/arch/x86/crypto/aes-x86_64-asm_64.S
10753@@ -8,6 +8,8 @@
10754 * including this sentence is retained in full.
10755 */
10756
10757+#include <asm/alternative-asm.h>
10758+
10759 .extern crypto_ft_tab
10760 .extern crypto_it_tab
10761 .extern crypto_fl_tab
10762@@ -70,6 +72,8 @@
10763 je B192; \
10764 leaq 32(r9),r9;
10765
10766+#define ret pax_force_retaddr 0, 1; ret
10767+
10768 #define epilogue(FUNC,r1,r2,r3,r4,r5,r6,r7,r8,r9) \
10769 movq r1,r2; \
10770 movq r3,r4; \
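
The '#define ret pax_force_retaddr 0, 1; ret' line above rewrites every existing ret in this preprocessed .S file without touching the call sites; the self-reference is safe because cpp never re-expands a macro name inside its own replacement list. The same trick in C, modelled on puts() (the audit wrapper is illustrative):

  #include <stdio.h>

  /* (puts) in parentheses is additionally shielded from function-like
   * macro expansion, mirroring how the "ret" inside the macro body
   * stays the real instruction */
  #define puts(s) (fprintf(stderr, "[audit] about to print\n"), (puts)(s))

  int main(void)
  {
          puts("hello");  /* unchanged call site, now instrumented */
          return 0;
  }
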
10771diff --git a/arch/x86/crypto/aesni-intel_asm.S b/arch/x86/crypto/aesni-intel_asm.S
10772index 477e9d7..3ab339f 100644
10773--- a/arch/x86/crypto/aesni-intel_asm.S
10774+++ b/arch/x86/crypto/aesni-intel_asm.S
10775@@ -31,6 +31,7 @@
10776
10777 #include <linux/linkage.h>
10778 #include <asm/inst.h>
10779+#include <asm/alternative-asm.h>
10780
10781 #ifdef __x86_64__
10782 .data
10783@@ -1441,6 +1442,7 @@ _return_T_done_decrypt:
10784 pop %r14
10785 pop %r13
10786 pop %r12
10787+ pax_force_retaddr 0, 1
10788 ret
10789 ENDPROC(aesni_gcm_dec)
10790
10791@@ -1705,6 +1707,7 @@ _return_T_done_encrypt:
10792 pop %r14
10793 pop %r13
10794 pop %r12
10795+ pax_force_retaddr 0, 1
10796 ret
10797 ENDPROC(aesni_gcm_enc)
10798
10799@@ -1722,6 +1725,7 @@ _key_expansion_256a:
10800 pxor %xmm1, %xmm0
10801 movaps %xmm0, (TKEYP)
10802 add $0x10, TKEYP
10803+ pax_force_retaddr_bts
10804 ret
10805 ENDPROC(_key_expansion_128)
10806 ENDPROC(_key_expansion_256a)
10807@@ -1748,6 +1752,7 @@ _key_expansion_192a:
10808 shufps $0b01001110, %xmm2, %xmm1
10809 movaps %xmm1, 0x10(TKEYP)
10810 add $0x20, TKEYP
10811+ pax_force_retaddr_bts
10812 ret
10813 ENDPROC(_key_expansion_192a)
10814
10815@@ -1768,6 +1773,7 @@ _key_expansion_192b:
10816
10817 movaps %xmm0, (TKEYP)
10818 add $0x10, TKEYP
10819+ pax_force_retaddr_bts
10820 ret
10821 ENDPROC(_key_expansion_192b)
10822
10823@@ -1781,6 +1787,7 @@ _key_expansion_256b:
10824 pxor %xmm1, %xmm2
10825 movaps %xmm2, (TKEYP)
10826 add $0x10, TKEYP
10827+ pax_force_retaddr_bts
10828 ret
10829 ENDPROC(_key_expansion_256b)
10830
10831@@ -1894,6 +1901,7 @@ ENTRY(aesni_set_key)
10832 #ifndef __x86_64__
10833 popl KEYP
10834 #endif
10835+ pax_force_retaddr 0, 1
10836 ret
10837 ENDPROC(aesni_set_key)
10838
10839@@ -1916,6 +1924,7 @@ ENTRY(aesni_enc)
10840 popl KLEN
10841 popl KEYP
10842 #endif
10843+ pax_force_retaddr 0, 1
10844 ret
10845 ENDPROC(aesni_enc)
10846
10847@@ -1974,6 +1983,7 @@ _aesni_enc1:
10848 AESENC KEY STATE
10849 movaps 0x70(TKEYP), KEY
10850 AESENCLAST KEY STATE
10851+ pax_force_retaddr_bts
10852 ret
10853 ENDPROC(_aesni_enc1)
10854
10855@@ -2083,6 +2093,7 @@ _aesni_enc4:
10856 AESENCLAST KEY STATE2
10857 AESENCLAST KEY STATE3
10858 AESENCLAST KEY STATE4
10859+ pax_force_retaddr_bts
10860 ret
10861 ENDPROC(_aesni_enc4)
10862
10863@@ -2106,6 +2117,7 @@ ENTRY(aesni_dec)
10864 popl KLEN
10865 popl KEYP
10866 #endif
10867+ pax_force_retaddr 0, 1
10868 ret
10869 ENDPROC(aesni_dec)
10870
10871@@ -2164,6 +2176,7 @@ _aesni_dec1:
10872 AESDEC KEY STATE
10873 movaps 0x70(TKEYP), KEY
10874 AESDECLAST KEY STATE
10875+ pax_force_retaddr_bts
10876 ret
10877 ENDPROC(_aesni_dec1)
10878
10879@@ -2273,6 +2286,7 @@ _aesni_dec4:
10880 AESDECLAST KEY STATE2
10881 AESDECLAST KEY STATE3
10882 AESDECLAST KEY STATE4
10883+ pax_force_retaddr_bts
10884 ret
10885 ENDPROC(_aesni_dec4)
10886
10887@@ -2331,6 +2345,7 @@ ENTRY(aesni_ecb_enc)
10888 popl KEYP
10889 popl LEN
10890 #endif
10891+ pax_force_retaddr 0, 1
10892 ret
10893 ENDPROC(aesni_ecb_enc)
10894
10895@@ -2390,6 +2405,7 @@ ENTRY(aesni_ecb_dec)
10896 popl KEYP
10897 popl LEN
10898 #endif
10899+ pax_force_retaddr 0, 1
10900 ret
10901 ENDPROC(aesni_ecb_dec)
10902
10903@@ -2432,6 +2448,7 @@ ENTRY(aesni_cbc_enc)
10904 popl LEN
10905 popl IVP
10906 #endif
10907+ pax_force_retaddr 0, 1
10908 ret
10909 ENDPROC(aesni_cbc_enc)
10910
10911@@ -2523,6 +2540,7 @@ ENTRY(aesni_cbc_dec)
10912 popl LEN
10913 popl IVP
10914 #endif
10915+ pax_force_retaddr 0, 1
10916 ret
10917 ENDPROC(aesni_cbc_dec)
10918
10919@@ -2550,6 +2568,7 @@ _aesni_inc_init:
10920 mov $1, TCTR_LOW
10921 MOVQ_R64_XMM TCTR_LOW INC
10922 MOVQ_R64_XMM CTR TCTR_LOW
10923+ pax_force_retaddr_bts
10924 ret
10925 ENDPROC(_aesni_inc_init)
10926
10927@@ -2579,6 +2598,7 @@ _aesni_inc:
10928 .Linc_low:
10929 movaps CTR, IV
10930 PSHUFB_XMM BSWAP_MASK IV
10931+ pax_force_retaddr_bts
10932 ret
10933 ENDPROC(_aesni_inc)
10934
10935@@ -2640,6 +2660,7 @@ ENTRY(aesni_ctr_enc)
10936 .Lctr_enc_ret:
10937 movups IV, (IVP)
10938 .Lctr_enc_just_ret:
10939+ pax_force_retaddr 0, 1
10940 ret
10941 ENDPROC(aesni_ctr_enc)
10942
10943@@ -2766,6 +2787,7 @@ ENTRY(aesni_xts_crypt8)
10944 pxor INC, STATE4
10945 movdqu STATE4, 0x70(OUTP)
10946
10947+ pax_force_retaddr 0, 1
10948 ret
10949 ENDPROC(aesni_xts_crypt8)
10950
10951diff --git a/arch/x86/crypto/blowfish-x86_64-asm_64.S b/arch/x86/crypto/blowfish-x86_64-asm_64.S
10952index 246c670..4d1ed00 100644
10953--- a/arch/x86/crypto/blowfish-x86_64-asm_64.S
10954+++ b/arch/x86/crypto/blowfish-x86_64-asm_64.S
10955@@ -21,6 +21,7 @@
10956 */
10957
10958 #include <linux/linkage.h>
10959+#include <asm/alternative-asm.h>
10960
10961 .file "blowfish-x86_64-asm.S"
10962 .text
10963@@ -149,9 +150,11 @@ ENTRY(__blowfish_enc_blk)
10964 jnz .L__enc_xor;
10965
10966 write_block();
10967+ pax_force_retaddr 0, 1
10968 ret;
10969 .L__enc_xor:
10970 xor_block();
10971+ pax_force_retaddr 0, 1
10972 ret;
10973 ENDPROC(__blowfish_enc_blk)
10974
10975@@ -183,6 +186,7 @@ ENTRY(blowfish_dec_blk)
10976
10977 movq %r11, %rbp;
10978
10979+ pax_force_retaddr 0, 1
10980 ret;
10981 ENDPROC(blowfish_dec_blk)
10982
10983@@ -334,6 +338,7 @@ ENTRY(__blowfish_enc_blk_4way)
10984
10985 popq %rbx;
10986 popq %rbp;
10987+ pax_force_retaddr 0, 1
10988 ret;
10989
10990 .L__enc_xor4:
10991@@ -341,6 +346,7 @@ ENTRY(__blowfish_enc_blk_4way)
10992
10993 popq %rbx;
10994 popq %rbp;
10995+ pax_force_retaddr 0, 1
10996 ret;
10997 ENDPROC(__blowfish_enc_blk_4way)
10998
10999@@ -375,5 +381,6 @@ ENTRY(blowfish_dec_blk_4way)
11000 popq %rbx;
11001 popq %rbp;
11002
11003+ pax_force_retaddr 0, 1
11004 ret;
11005 ENDPROC(blowfish_dec_blk_4way)
11006diff --git a/arch/x86/crypto/camellia-x86_64-asm_64.S b/arch/x86/crypto/camellia-x86_64-asm_64.S
11007index 310319c..ce174a4 100644
11008--- a/arch/x86/crypto/camellia-x86_64-asm_64.S
11009+++ b/arch/x86/crypto/camellia-x86_64-asm_64.S
11010@@ -21,6 +21,7 @@
11011 */
11012
11013 #include <linux/linkage.h>
11014+#include <asm/alternative-asm.h>
11015
11016 .file "camellia-x86_64-asm_64.S"
11017 .text
11018@@ -228,12 +229,14 @@ ENTRY(__camellia_enc_blk)
11019 enc_outunpack(mov, RT1);
11020
11021 movq RRBP, %rbp;
11022+ pax_force_retaddr 0, 1
11023 ret;
11024
11025 .L__enc_xor:
11026 enc_outunpack(xor, RT1);
11027
11028 movq RRBP, %rbp;
11029+ pax_force_retaddr 0, 1
11030 ret;
11031 ENDPROC(__camellia_enc_blk)
11032
11033@@ -272,6 +275,7 @@ ENTRY(camellia_dec_blk)
11034 dec_outunpack();
11035
11036 movq RRBP, %rbp;
11037+ pax_force_retaddr 0, 1
11038 ret;
11039 ENDPROC(camellia_dec_blk)
11040
11041@@ -463,6 +467,7 @@ ENTRY(__camellia_enc_blk_2way)
11042
11043 movq RRBP, %rbp;
11044 popq %rbx;
11045+ pax_force_retaddr 0, 1
11046 ret;
11047
11048 .L__enc2_xor:
11049@@ -470,6 +475,7 @@ ENTRY(__camellia_enc_blk_2way)
11050
11051 movq RRBP, %rbp;
11052 popq %rbx;
11053+ pax_force_retaddr 0, 1
11054 ret;
11055 ENDPROC(__camellia_enc_blk_2way)
11056
11057@@ -510,5 +516,6 @@ ENTRY(camellia_dec_blk_2way)
11058
11059 movq RRBP, %rbp;
11060 movq RXOR, %rbx;
11061+ pax_force_retaddr 0, 1
11062 ret;
11063 ENDPROC(camellia_dec_blk_2way)
11064diff --git a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
11065index c35fd5d..c1ee236 100644
11066--- a/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
11067+++ b/arch/x86/crypto/cast5-avx-x86_64-asm_64.S
11068@@ -24,6 +24,7 @@
11069 */
11070
11071 #include <linux/linkage.h>
11072+#include <asm/alternative-asm.h>
11073
11074 .file "cast5-avx-x86_64-asm_64.S"
11075
11076@@ -281,6 +282,7 @@ __cast5_enc_blk16:
11077 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
11078 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
11079
11080+ pax_force_retaddr 0, 1
11081 ret;
11082 ENDPROC(__cast5_enc_blk16)
11083
11084@@ -352,6 +354,7 @@ __cast5_dec_blk16:
11085 outunpack_blocks(RR3, RL3, RTMP, RX, RKM);
11086 outunpack_blocks(RR4, RL4, RTMP, RX, RKM);
11087
11088+ pax_force_retaddr 0, 1
11089 ret;
11090
11091 .L__skip_dec:
11092@@ -388,6 +391,7 @@ ENTRY(cast5_ecb_enc_16way)
11093 vmovdqu RR4, (6*4*4)(%r11);
11094 vmovdqu RL4, (7*4*4)(%r11);
11095
11096+ pax_force_retaddr
11097 ret;
11098 ENDPROC(cast5_ecb_enc_16way)
11099
11100@@ -420,6 +424,7 @@ ENTRY(cast5_ecb_dec_16way)
11101 vmovdqu RR4, (6*4*4)(%r11);
11102 vmovdqu RL4, (7*4*4)(%r11);
11103
11104+ pax_force_retaddr
11105 ret;
11106 ENDPROC(cast5_ecb_dec_16way)
11107
11108@@ -469,6 +474,7 @@ ENTRY(cast5_cbc_dec_16way)
11109
11110 popq %r12;
11111
11112+ pax_force_retaddr
11113 ret;
11114 ENDPROC(cast5_cbc_dec_16way)
11115
11116@@ -542,5 +548,6 @@ ENTRY(cast5_ctr_16way)
11117
11118 popq %r12;
11119
11120+ pax_force_retaddr
11121 ret;
11122 ENDPROC(cast5_ctr_16way)
11123diff --git a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
11124index e3531f8..18ded3a 100644
11125--- a/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
11126+++ b/arch/x86/crypto/cast6-avx-x86_64-asm_64.S
11127@@ -24,6 +24,7 @@
11128 */
11129
11130 #include <linux/linkage.h>
11131+#include <asm/alternative-asm.h>
11132 #include "glue_helper-asm-avx.S"
11133
11134 .file "cast6-avx-x86_64-asm_64.S"
11135@@ -295,6 +296,7 @@ __cast6_enc_blk8:
11136 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
11137 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
11138
11139+ pax_force_retaddr 0, 1
11140 ret;
11141 ENDPROC(__cast6_enc_blk8)
11142
11143@@ -340,6 +342,7 @@ __cast6_dec_blk8:
11144 outunpack_blocks(RA1, RB1, RC1, RD1, RTMP, RX, RKRF, RKM);
11145 outunpack_blocks(RA2, RB2, RC2, RD2, RTMP, RX, RKRF, RKM);
11146
11147+ pax_force_retaddr 0, 1
11148 ret;
11149 ENDPROC(__cast6_dec_blk8)
11150
11151@@ -358,6 +361,7 @@ ENTRY(cast6_ecb_enc_8way)
11152
11153 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11154
11155+ pax_force_retaddr
11156 ret;
11157 ENDPROC(cast6_ecb_enc_8way)
11158
11159@@ -376,6 +380,7 @@ ENTRY(cast6_ecb_dec_8way)
11160
11161 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11162
11163+ pax_force_retaddr
11164 ret;
11165 ENDPROC(cast6_ecb_dec_8way)
11166
11167@@ -399,6 +404,7 @@ ENTRY(cast6_cbc_dec_8way)
11168
11169 popq %r12;
11170
11171+ pax_force_retaddr
11172 ret;
11173 ENDPROC(cast6_cbc_dec_8way)
11174
11175@@ -424,6 +430,7 @@ ENTRY(cast6_ctr_8way)
11176
11177 popq %r12;
11178
11179+ pax_force_retaddr
11180 ret;
11181 ENDPROC(cast6_ctr_8way)
11182
11183@@ -446,6 +453,7 @@ ENTRY(cast6_xts_enc_8way)
11184 /* dst <= regs xor IVs(in dst) */
11185 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11186
11187+ pax_force_retaddr
11188 ret;
11189 ENDPROC(cast6_xts_enc_8way)
11190
11191@@ -468,5 +476,6 @@ ENTRY(cast6_xts_dec_8way)
11192 /* dst <= regs xor IVs(in dst) */
11193 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11194
11195+ pax_force_retaddr
11196 ret;
11197 ENDPROC(cast6_xts_dec_8way)
11198diff --git a/arch/x86/crypto/salsa20-x86_64-asm_64.S b/arch/x86/crypto/salsa20-x86_64-asm_64.S
11199index 9279e0b..9270820 100644
11200--- a/arch/x86/crypto/salsa20-x86_64-asm_64.S
11201+++ b/arch/x86/crypto/salsa20-x86_64-asm_64.S
11202@@ -1,4 +1,5 @@
11203 #include <linux/linkage.h>
11204+#include <asm/alternative-asm.h>
11205
11206 # enter salsa20_encrypt_bytes
11207 ENTRY(salsa20_encrypt_bytes)
11208@@ -789,6 +790,7 @@ ENTRY(salsa20_encrypt_bytes)
11209 add %r11,%rsp
11210 mov %rdi,%rax
11211 mov %rsi,%rdx
11212+ pax_force_retaddr 0, 1
11213 ret
11214 # bytesatleast65:
11215 ._bytesatleast65:
11216@@ -889,6 +891,7 @@ ENTRY(salsa20_keysetup)
11217 add %r11,%rsp
11218 mov %rdi,%rax
11219 mov %rsi,%rdx
11220+ pax_force_retaddr
11221 ret
11222 ENDPROC(salsa20_keysetup)
11223
11224@@ -914,5 +917,6 @@ ENTRY(salsa20_ivsetup)
11225 add %r11,%rsp
11226 mov %rdi,%rax
11227 mov %rsi,%rdx
11228+ pax_force_retaddr
11229 ret
11230 ENDPROC(salsa20_ivsetup)
11231diff --git a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
11232index 2f202f4..d9164d6 100644
11233--- a/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
11234+++ b/arch/x86/crypto/serpent-avx-x86_64-asm_64.S
11235@@ -24,6 +24,7 @@
11236 */
11237
11238 #include <linux/linkage.h>
11239+#include <asm/alternative-asm.h>
11240 #include "glue_helper-asm-avx.S"
11241
11242 .file "serpent-avx-x86_64-asm_64.S"
11243@@ -618,6 +619,7 @@ __serpent_enc_blk8_avx:
11244 write_blocks(RA1, RB1, RC1, RD1, RK0, RK1, RK2);
11245 write_blocks(RA2, RB2, RC2, RD2, RK0, RK1, RK2);
11246
11247+ pax_force_retaddr
11248 ret;
11249 ENDPROC(__serpent_enc_blk8_avx)
11250
11251@@ -672,6 +674,7 @@ __serpent_dec_blk8_avx:
11252 write_blocks(RC1, RD1, RB1, RE1, RK0, RK1, RK2);
11253 write_blocks(RC2, RD2, RB2, RE2, RK0, RK1, RK2);
11254
11255+ pax_force_retaddr
11256 ret;
11257 ENDPROC(__serpent_dec_blk8_avx)
11258
11259@@ -688,6 +691,7 @@ ENTRY(serpent_ecb_enc_8way_avx)
11260
11261 store_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11262
11263+ pax_force_retaddr
11264 ret;
11265 ENDPROC(serpent_ecb_enc_8way_avx)
11266
11267@@ -704,6 +708,7 @@ ENTRY(serpent_ecb_dec_8way_avx)
11268
11269 store_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
11270
11271+ pax_force_retaddr
11272 ret;
11273 ENDPROC(serpent_ecb_dec_8way_avx)
11274
11275@@ -720,6 +725,7 @@ ENTRY(serpent_cbc_dec_8way_avx)
11276
11277 store_cbc_8way(%rdx, %rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
11278
11279+ pax_force_retaddr
11280 ret;
11281 ENDPROC(serpent_cbc_dec_8way_avx)
11282
11283@@ -738,6 +744,7 @@ ENTRY(serpent_ctr_8way_avx)
11284
11285 store_ctr_8way(%rdx, %rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11286
11287+ pax_force_retaddr
11288 ret;
11289 ENDPROC(serpent_ctr_8way_avx)
11290
11291@@ -758,6 +765,7 @@ ENTRY(serpent_xts_enc_8way_avx)
11292 /* dst <= regs xor IVs(in dst) */
11293 store_xts_8way(%rsi, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11294
11295+ pax_force_retaddr
11296 ret;
11297 ENDPROC(serpent_xts_enc_8way_avx)
11298
11299@@ -778,5 +786,6 @@ ENTRY(serpent_xts_dec_8way_avx)
11300 /* dst <= regs xor IVs(in dst) */
11301 store_xts_8way(%rsi, RC1, RD1, RB1, RE1, RC2, RD2, RB2, RE2);
11302
11303+ pax_force_retaddr
11304 ret;
11305 ENDPROC(serpent_xts_dec_8way_avx)
11306diff --git a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
11307index acc066c..1559cc4 100644
11308--- a/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
11309+++ b/arch/x86/crypto/serpent-sse2-x86_64-asm_64.S
11310@@ -25,6 +25,7 @@
11311 */
11312
11313 #include <linux/linkage.h>
11314+#include <asm/alternative-asm.h>
11315
11316 .file "serpent-sse2-x86_64-asm_64.S"
11317 .text
11318@@ -690,12 +691,14 @@ ENTRY(__serpent_enc_blk_8way)
11319 write_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
11320 write_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
11321
11322+ pax_force_retaddr
11323 ret;
11324
11325 .L__enc_xor8:
11326 xor_blocks(%rsi, RA1, RB1, RC1, RD1, RK0, RK1, RK2);
11327 xor_blocks(%rax, RA2, RB2, RC2, RD2, RK0, RK1, RK2);
11328
11329+ pax_force_retaddr
11330 ret;
11331 ENDPROC(__serpent_enc_blk_8way)
11332
11333@@ -750,5 +753,6 @@ ENTRY(serpent_dec_blk_8way)
11334 write_blocks(%rsi, RC1, RD1, RB1, RE1, RK0, RK1, RK2);
11335 write_blocks(%rax, RC2, RD2, RB2, RE2, RK0, RK1, RK2);
11336
11337+ pax_force_retaddr
11338 ret;
11339 ENDPROC(serpent_dec_blk_8way)
11340diff --git a/arch/x86/crypto/sha1_ssse3_asm.S b/arch/x86/crypto/sha1_ssse3_asm.S
11341index a410950..3356d42 100644
11342--- a/arch/x86/crypto/sha1_ssse3_asm.S
11343+++ b/arch/x86/crypto/sha1_ssse3_asm.S
11344@@ -29,6 +29,7 @@
11345 */
11346
11347 #include <linux/linkage.h>
11348+#include <asm/alternative-asm.h>
11349
11350 #define CTX %rdi // arg1
11351 #define BUF %rsi // arg2
11352@@ -104,6 +105,7 @@
11353 pop %r12
11354 pop %rbp
11355 pop %rbx
11356+ pax_force_retaddr 0, 1
11357 ret
11358
11359 ENDPROC(\name)
11360diff --git a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
11361index 0505813..63b1d00 100644
11362--- a/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
11363+++ b/arch/x86/crypto/twofish-avx-x86_64-asm_64.S
11364@@ -24,6 +24,7 @@
11365 */
11366
11367 #include <linux/linkage.h>
11368+#include <asm/alternative-asm.h>
11369 #include "glue_helper-asm-avx.S"
11370
11371 .file "twofish-avx-x86_64-asm_64.S"
11372@@ -284,6 +285,7 @@ __twofish_enc_blk8:
11373 outunpack_blocks(RC1, RD1, RA1, RB1, RK1, RX0, RY0, RK2);
11374 outunpack_blocks(RC2, RD2, RA2, RB2, RK1, RX0, RY0, RK2);
11375
11376+ pax_force_retaddr 0, 1
11377 ret;
11378 ENDPROC(__twofish_enc_blk8)
11379
11380@@ -324,6 +326,7 @@ __twofish_dec_blk8:
11381 outunpack_blocks(RA1, RB1, RC1, RD1, RK1, RX0, RY0, RK2);
11382 outunpack_blocks(RA2, RB2, RC2, RD2, RK1, RX0, RY0, RK2);
11383
11384+ pax_force_retaddr 0, 1
11385 ret;
11386 ENDPROC(__twofish_dec_blk8)
11387
11388@@ -342,6 +345,7 @@ ENTRY(twofish_ecb_enc_8way)
11389
11390 store_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
11391
11392+ pax_force_retaddr 0, 1
11393 ret;
11394 ENDPROC(twofish_ecb_enc_8way)
11395
11396@@ -360,6 +364,7 @@ ENTRY(twofish_ecb_dec_8way)
11397
11398 store_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11399
11400+ pax_force_retaddr 0, 1
11401 ret;
11402 ENDPROC(twofish_ecb_dec_8way)
11403
11404@@ -383,6 +388,7 @@ ENTRY(twofish_cbc_dec_8way)
11405
11406 popq %r12;
11407
11408+ pax_force_retaddr 0, 1
11409 ret;
11410 ENDPROC(twofish_cbc_dec_8way)
11411
11412@@ -408,6 +414,7 @@ ENTRY(twofish_ctr_8way)
11413
11414 popq %r12;
11415
11416+ pax_force_retaddr 0, 1
11417 ret;
11418 ENDPROC(twofish_ctr_8way)
11419
11420@@ -430,6 +437,7 @@ ENTRY(twofish_xts_enc_8way)
11421 /* dst <= regs xor IVs(in dst) */
11422 store_xts_8way(%r11, RC1, RD1, RA1, RB1, RC2, RD2, RA2, RB2);
11423
11424+ pax_force_retaddr 0, 1
11425 ret;
11426 ENDPROC(twofish_xts_enc_8way)
11427
11428@@ -452,5 +460,6 @@ ENTRY(twofish_xts_dec_8way)
11429 /* dst <= regs xor IVs(in dst) */
11430 store_xts_8way(%r11, RA1, RB1, RC1, RD1, RA2, RB2, RC2, RD2);
11431
11432+ pax_force_retaddr 0, 1
11433 ret;
11434 ENDPROC(twofish_xts_dec_8way)
11435diff --git a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
11436index 1c3b7ce..b365c5e 100644
11437--- a/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
11438+++ b/arch/x86/crypto/twofish-x86_64-asm_64-3way.S
11439@@ -21,6 +21,7 @@
11440 */
11441
11442 #include <linux/linkage.h>
11443+#include <asm/alternative-asm.h>
11444
11445 .file "twofish-x86_64-asm-3way.S"
11446 .text
11447@@ -258,6 +259,7 @@ ENTRY(__twofish_enc_blk_3way)
11448 popq %r13;
11449 popq %r14;
11450 popq %r15;
11451+ pax_force_retaddr 0, 1
11452 ret;
11453
11454 .L__enc_xor3:
11455@@ -269,6 +271,7 @@ ENTRY(__twofish_enc_blk_3way)
11456 popq %r13;
11457 popq %r14;
11458 popq %r15;
11459+ pax_force_retaddr 0, 1
11460 ret;
11461 ENDPROC(__twofish_enc_blk_3way)
11462
11463@@ -308,5 +311,6 @@ ENTRY(twofish_dec_blk_3way)
11464 popq %r13;
11465 popq %r14;
11466 popq %r15;
11467+ pax_force_retaddr 0, 1
11468 ret;
11469 ENDPROC(twofish_dec_blk_3way)
11470diff --git a/arch/x86/crypto/twofish-x86_64-asm_64.S b/arch/x86/crypto/twofish-x86_64-asm_64.S
11471index a039d21..29e7615 100644
11472--- a/arch/x86/crypto/twofish-x86_64-asm_64.S
11473+++ b/arch/x86/crypto/twofish-x86_64-asm_64.S
11474@@ -22,6 +22,7 @@
11475
11476 #include <linux/linkage.h>
11477 #include <asm/asm-offsets.h>
11478+#include <asm/alternative-asm.h>
11479
11480 #define a_offset 0
11481 #define b_offset 4
11482@@ -265,6 +266,7 @@ ENTRY(twofish_enc_blk)
11483
11484 popq R1
11485 movq $1,%rax
11486+ pax_force_retaddr 0, 1
11487 ret
11488 ENDPROC(twofish_enc_blk)
11489
11490@@ -317,5 +319,6 @@ ENTRY(twofish_dec_blk)
11491
11492 popq R1
11493 movq $1,%rax
11494+ pax_force_retaddr 0, 1
11495 ret
11496 ENDPROC(twofish_dec_blk)
11497diff --git a/arch/x86/ia32/ia32_aout.c b/arch/x86/ia32/ia32_aout.c
11498index 52ff81c..98af645 100644
11499--- a/arch/x86/ia32/ia32_aout.c
11500+++ b/arch/x86/ia32/ia32_aout.c
11501@@ -159,6 +159,8 @@ static int aout_core_dump(long signr, struct pt_regs *regs, struct file *file,
11502 unsigned long dump_start, dump_size;
11503 struct user32 dump;
11504
11505+ memset(&dump, 0, sizeof(dump));
11506+
11507 fs = get_fs();
11508 set_fs(KERNEL_DS);
11509 has_dumped = 1;
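
The added memset() closes a classic infoleak: 'dump' is an on-stack struct that is only partially filled before being written to the core file, so its padding and any unfilled members would otherwise carry stale kernel stack bytes out to userspace. A userspace model of the same hazard (struct rec is illustrative; on LP64, 7 bytes of padding follow 'tag'):

  #include <stdio.h>
  #include <string.h>

  struct rec {
          char tag;
          long value;
  };

  int main(void)
  {
          struct rec r;

          memset(&r, 0, sizeof(r));       /* the fix: no stale bytes */
          r.tag = 'x';
          r.value = 42;
          fwrite(&r, sizeof(r), 1, stdout);
          return 0;
  }
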
11510diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
11511index cf1a471..3bc4cf8 100644
11512--- a/arch/x86/ia32/ia32_signal.c
11513+++ b/arch/x86/ia32/ia32_signal.c
11514@@ -340,7 +340,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
11515 sp -= frame_size;
11516 /* Align the stack pointer according to the i386 ABI,
11517 * i.e. so that on function entry ((sp + 4) & 15) == 0. */
11518- sp = ((sp + 4) & -16ul) - 4;
11519+ sp = ((sp - 12) & -16ul) - 4;
11520 return (void __user *) sp;
11521 }
11522
11523@@ -398,7 +398,7 @@ int ia32_setup_frame(int sig, struct ksignal *ksig,
11524 * These are actually not used anymore, but left because some
11525 * gdb versions depend on them as a marker.
11526 */
11527- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
11528+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
11529 } put_user_catch(err);
11530
11531 if (err)
11532@@ -440,7 +440,7 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
11533 0xb8,
11534 __NR_ia32_rt_sigreturn,
11535 0x80cd,
11536- 0,
11537+ 0
11538 };
11539
11540 frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);
11541@@ -463,16 +463,18 @@ int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
11542
11543 if (ksig->ka.sa.sa_flags & SA_RESTORER)
11544 restorer = ksig->ka.sa.sa_restorer;
11545+ else if (current->mm->context.vdso)
11546+ /* Return stub is in 32bit vsyscall page */
11547+ restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
11548 else
11549- restorer = VDSO32_SYMBOL(current->mm->context.vdso,
11550- rt_sigreturn);
11551+ restorer = &frame->retcode;
11552 put_user_ex(ptr_to_compat(restorer), &frame->pretcode);
11553
11554 /*
11555 * Not actually used anymore, but left because some gdb
11556 * versions need it.
11557 */
11558- put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
11559+ put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
11560 } put_user_catch(err);
11561
11562 err |= copy_siginfo_to_user32(&frame->info, &ksig->info);
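
Both the old and the new get_sigframe() expressions keep the i386 ABI entry invariant ((sp + 4) & 15) == 0; the new rounding additionally lands strictly below the caller-supplied sp, whereas the old form could return sp itself whenever sp + 4 was already 16-byte aligned. A quick check over all residues mod 16:

  #include <stdio.h>

  int main(void)
  {
          unsigned long sp;

          for (sp = 0x1000; sp < 0x1010; sp++) {
                  unsigned long oldsp = ((sp + 4) & -16UL) - 4;
                  unsigned long newsp = ((sp - 12) & -16UL) - 4;

                  printf("sp=%lx old=%lx(%lu) new=%lx(%lu)\n", sp,
                         oldsp, (oldsp + 4) & 15,   /* always 0 */
                         newsp, (newsp + 4) & 15);  /* always 0 */
          }
          return 0;
  }
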
11563diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
11564index 474dc1b..24aaa3e 100644
11565--- a/arch/x86/ia32/ia32entry.S
11566+++ b/arch/x86/ia32/ia32entry.S
11567@@ -15,8 +15,10 @@
11568 #include <asm/irqflags.h>
11569 #include <asm/asm.h>
11570 #include <asm/smap.h>
11571+#include <asm/pgtable.h>
11572 #include <linux/linkage.h>
11573 #include <linux/err.h>
11574+#include <asm/alternative-asm.h>
11575
11576 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
11577 #include <linux/elf-em.h>
11578@@ -96,6 +98,32 @@ ENTRY(native_irq_enable_sysexit)
11579 ENDPROC(native_irq_enable_sysexit)
11580 #endif
11581
11582+ .macro pax_enter_kernel_user
11583+ pax_set_fptr_mask
11584+#ifdef CONFIG_PAX_MEMORY_UDEREF
11585+ call pax_enter_kernel_user
11586+#endif
11587+ .endm
11588+
11589+ .macro pax_exit_kernel_user
11590+#ifdef CONFIG_PAX_MEMORY_UDEREF
11591+ call pax_exit_kernel_user
11592+#endif
11593+#ifdef CONFIG_PAX_RANDKSTACK
11594+ pushq %rax
11595+ pushq %r11
11596+ call pax_randomize_kstack
11597+ popq %r11
11598+ popq %rax
11599+#endif
11600+ .endm
11601+
11602+ .macro pax_erase_kstack
11603+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
11604+ call pax_erase_kstack
11605+#endif
11606+ .endm
11607+
11608 /*
11609 * 32bit SYSENTER instruction entry.
11610 *
11611@@ -122,12 +150,6 @@ ENTRY(ia32_sysenter_target)
11612 CFI_REGISTER rsp,rbp
11613 SWAPGS_UNSAFE_STACK
11614 movq PER_CPU_VAR(kernel_stack), %rsp
11615- addq $(KERNEL_STACK_OFFSET),%rsp
11616- /*
11617- * No need to follow this irqs on/off section: the syscall
11618- * disabled irqs, here we enable it straight after entry:
11619- */
11620- ENABLE_INTERRUPTS(CLBR_NONE)
11621 movl %ebp,%ebp /* zero extension */
11622 pushq_cfi $__USER32_DS
11623 /*CFI_REL_OFFSET ss,0*/
11624@@ -135,24 +157,44 @@ ENTRY(ia32_sysenter_target)
11625 CFI_REL_OFFSET rsp,0
11626 pushfq_cfi
11627 /*CFI_REL_OFFSET rflags,0*/
11628- movl TI_sysenter_return+THREAD_INFO(%rsp,3*8-KERNEL_STACK_OFFSET),%r10d
11629- CFI_REGISTER rip,r10
11630+ orl $X86_EFLAGS_IF,(%rsp)
11631+ GET_THREAD_INFO(%r11)
11632+ movl TI_sysenter_return(%r11), %r11d
11633+ CFI_REGISTER rip,r11
11634 pushq_cfi $__USER32_CS
11635 /*CFI_REL_OFFSET cs,0*/
11636 movl %eax, %eax
11637- pushq_cfi %r10
11638+ pushq_cfi %r11
11639 CFI_REL_OFFSET rip,0
11640 pushq_cfi %rax
11641 cld
11642 SAVE_ARGS 0,1,0
11643+ pax_enter_kernel_user
11644+
11645+#ifdef CONFIG_PAX_RANDKSTACK
11646+ pax_erase_kstack
11647+#endif
11648+
11649+ /*
11650+ * No need to follow this irqs on/off section: the syscall
11651+ * disabled irqs, here we enable it straight after entry:
11652+ */
11653+ ENABLE_INTERRUPTS(CLBR_NONE)
11654 /* no need to do an access_ok check here because rbp has been
11655 32bit zero extended */
11656+
11657+#ifdef CONFIG_PAX_MEMORY_UDEREF
11658+ mov pax_user_shadow_base,%r11
11659+ add %r11,%rbp
11660+#endif
11661+
11662 ASM_STAC
11663 1: movl (%rbp),%ebp
11664 _ASM_EXTABLE(1b,ia32_badarg)
11665 ASM_CLAC
11666- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11667- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11668+ GET_THREAD_INFO(%r11)
11669+ orl $TS_COMPAT,TI_status(%r11)
11670+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
11671 CFI_REMEMBER_STATE
11672 jnz sysenter_tracesys
11673 cmpq $(IA32_NR_syscalls-1),%rax
11674@@ -162,12 +204,15 @@ sysenter_do_call:
11675 sysenter_dispatch:
11676 call *ia32_sys_call_table(,%rax,8)
11677 movq %rax,RAX-ARGOFFSET(%rsp)
11678+ GET_THREAD_INFO(%r11)
11679 DISABLE_INTERRUPTS(CLBR_NONE)
11680 TRACE_IRQS_OFF
11681- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11682+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
11683 jnz sysexit_audit
11684 sysexit_from_sys_call:
11685- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11686+ pax_exit_kernel_user
11687+ pax_erase_kstack
11688+ andl $~TS_COMPAT,TI_status(%r11)
11689 /* clear IF, that popfq doesn't enable interrupts early */
11690 andl $~0x200,EFLAGS-R11(%rsp)
11691 movl RIP-R11(%rsp),%edx /* User %eip */
11692@@ -193,6 +238,9 @@ sysexit_from_sys_call:
11693 movl %eax,%esi /* 2nd arg: syscall number */
11694 movl $AUDIT_ARCH_I386,%edi /* 1st arg: audit arch */
11695 call __audit_syscall_entry
11696+
11697+ pax_erase_kstack
11698+
11699 movl RAX-ARGOFFSET(%rsp),%eax /* reload syscall number */
11700 cmpq $(IA32_NR_syscalls-1),%rax
11701 ja ia32_badsys
11702@@ -204,7 +252,7 @@ sysexit_from_sys_call:
11703 .endm
11704
11705 .macro auditsys_exit exit
11706- testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11707+ testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
11708 jnz ia32_ret_from_sys_call
11709 TRACE_IRQS_ON
11710 ENABLE_INTERRUPTS(CLBR_NONE)
11711@@ -215,11 +263,12 @@ sysexit_from_sys_call:
11712 1: setbe %al /* 1 if error, 0 if not */
11713 movzbl %al,%edi /* zero-extend that into %edi */
11714 call __audit_syscall_exit
11715+ GET_THREAD_INFO(%r11)
11716 movq RAX-ARGOFFSET(%rsp),%rax /* reload syscall return value */
11717 movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
11718 DISABLE_INTERRUPTS(CLBR_NONE)
11719 TRACE_IRQS_OFF
11720- testl %edi,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11721+ testl %edi,TI_flags(%r11)
11722 jz \exit
11723 CLEAR_RREGS -ARGOFFSET
11724 jmp int_with_check
11725@@ -237,7 +286,7 @@ sysexit_audit:
11726
11727 sysenter_tracesys:
11728 #ifdef CONFIG_AUDITSYSCALL
11729- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11730+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
11731 jz sysenter_auditsys
11732 #endif
11733 SAVE_REST
11734@@ -249,6 +298,9 @@ sysenter_tracesys:
11735 RESTORE_REST
11736 cmpq $(IA32_NR_syscalls-1),%rax
11737 ja int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
11738+
11739+ pax_erase_kstack
11740+
11741 jmp sysenter_do_call
11742 CFI_ENDPROC
11743 ENDPROC(ia32_sysenter_target)
11744@@ -276,19 +328,25 @@ ENDPROC(ia32_sysenter_target)
11745 ENTRY(ia32_cstar_target)
11746 CFI_STARTPROC32 simple
11747 CFI_SIGNAL_FRAME
11748- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
11749+ CFI_DEF_CFA rsp,0
11750 CFI_REGISTER rip,rcx
11751 /*CFI_REGISTER rflags,r11*/
11752 SWAPGS_UNSAFE_STACK
11753 movl %esp,%r8d
11754 CFI_REGISTER rsp,r8
11755 movq PER_CPU_VAR(kernel_stack),%rsp
11756+ SAVE_ARGS 8*6,0,0
11757+ pax_enter_kernel_user
11758+
11759+#ifdef CONFIG_PAX_RANDKSTACK
11760+ pax_erase_kstack
11761+#endif
11762+
11763 /*
11764 * No need to follow this irqs on/off section: the syscall
11765 * disabled irqs and here we enable it straight after entry:
11766 */
11767 ENABLE_INTERRUPTS(CLBR_NONE)
11768- SAVE_ARGS 8,0,0
11769 movl %eax,%eax /* zero extension */
11770 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
11771 movq %rcx,RIP-ARGOFFSET(%rsp)
11772@@ -304,12 +362,19 @@ ENTRY(ia32_cstar_target)
11773 /* no need to do an access_ok check here because r8 has been
11774 32bit zero extended */
11775 /* hardware stack frame is complete now */
11776+
11777+#ifdef CONFIG_PAX_MEMORY_UDEREF
11778+ mov pax_user_shadow_base,%r11
11779+ add %r11,%r8
11780+#endif
11781+
11782 ASM_STAC
11783 1: movl (%r8),%r9d
11784 _ASM_EXTABLE(1b,ia32_badarg)
11785 ASM_CLAC
11786- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11787- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11788+ GET_THREAD_INFO(%r11)
11789+ orl $TS_COMPAT,TI_status(%r11)
11790+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
11791 CFI_REMEMBER_STATE
11792 jnz cstar_tracesys
11793 cmpq $IA32_NR_syscalls-1,%rax
11794@@ -319,12 +384,15 @@ cstar_do_call:
11795 cstar_dispatch:
11796 call *ia32_sys_call_table(,%rax,8)
11797 movq %rax,RAX-ARGOFFSET(%rsp)
11798+ GET_THREAD_INFO(%r11)
11799 DISABLE_INTERRUPTS(CLBR_NONE)
11800 TRACE_IRQS_OFF
11801- testl $_TIF_ALLWORK_MASK,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11802+ testl $_TIF_ALLWORK_MASK,TI_flags(%r11)
11803 jnz sysretl_audit
11804 sysretl_from_sys_call:
11805- andl $~TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11806+ pax_exit_kernel_user
11807+ pax_erase_kstack
11808+ andl $~TS_COMPAT,TI_status(%r11)
11809 RESTORE_ARGS 0,-ARG_SKIP,0,0,0
11810 movl RIP-ARGOFFSET(%rsp),%ecx
11811 CFI_REGISTER rip,rcx
11812@@ -352,7 +420,7 @@ sysretl_audit:
11813
11814 cstar_tracesys:
11815 #ifdef CONFIG_AUDITSYSCALL
11816- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11817+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%r11)
11818 jz cstar_auditsys
11819 #endif
11820 xchgl %r9d,%ebp
11821@@ -366,6 +434,9 @@ cstar_tracesys:
11822 xchgl %ebp,%r9d
11823 cmpq $(IA32_NR_syscalls-1),%rax
11824 ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
11825+
11826+ pax_erase_kstack
11827+
11828 jmp cstar_do_call
11829 END(ia32_cstar_target)
11830
11831@@ -407,19 +478,26 @@ ENTRY(ia32_syscall)
11832 CFI_REL_OFFSET rip,RIP-RIP
11833 PARAVIRT_ADJUST_EXCEPTION_FRAME
11834 SWAPGS
11835- /*
11836- * No need to follow this irqs on/off section: the syscall
11837- * disabled irqs and here we enable it straight after entry:
11838- */
11839- ENABLE_INTERRUPTS(CLBR_NONE)
11840 movl %eax,%eax
11841 pushq_cfi %rax
11842 cld
11843 /* note the registers are not zero extended to the sf.
11844 this could be a problem. */
11845 SAVE_ARGS 0,1,0
11846- orl $TS_COMPAT,TI_status+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11847- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
11848+ pax_enter_kernel_user
11849+
11850+#ifdef CONFIG_PAX_RANDKSTACK
11851+ pax_erase_kstack
11852+#endif
11853+
11854+ /*
11855+ * No need to follow this irqs on/off section: the syscall
11856+ * disabled irqs and here we enable it straight after entry:
11857+ */
11858+ ENABLE_INTERRUPTS(CLBR_NONE)
11859+ GET_THREAD_INFO(%r11)
11860+ orl $TS_COMPAT,TI_status(%r11)
11861+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%r11)
11862 jnz ia32_tracesys
11863 cmpq $(IA32_NR_syscalls-1),%rax
11864 ja ia32_badsys
11865@@ -442,6 +520,9 @@ ia32_tracesys:
11866 RESTORE_REST
11867 cmpq $(IA32_NR_syscalls-1),%rax
11868 ja int_ret_from_sys_call /* ia32_tracesys has set RAX(%rsp) */
11869+
11870+ pax_erase_kstack
11871+
11872 jmp ia32_do_call
11873 END(ia32_syscall)
11874
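
The UDEREF hunks above re-base the user-supplied pointer (%rbp here, %r8 on the
cstar path) by pax_user_shadow_base before dereferencing it, so userland memory
is only reached through a disjoint kernel-side alias that faults if the shadow
mapping is absent. A minimal C sketch of that aliasing idea, with shadow_base
as a hypothetical stand-in for the real per-boot value:

static unsigned long shadow_base;	/* hypothetical; the real value is set up by the PaX runtime */

static inline const void *user_alias(const void *uptr)
{
	/* shift the user address into the shadow range before the access */
	return (const void *)((unsigned long)uptr + shadow_base);
}
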
11875diff --git a/arch/x86/ia32/sys_ia32.c b/arch/x86/ia32/sys_ia32.c
11876index 8e0ceec..af13504 100644
11877--- a/arch/x86/ia32/sys_ia32.c
11878+++ b/arch/x86/ia32/sys_ia32.c
11879@@ -69,8 +69,8 @@ asmlinkage long sys32_ftruncate64(unsigned int fd, unsigned long offset_low,
11880 */
11881 static int cp_stat64(struct stat64 __user *ubuf, struct kstat *stat)
11882 {
11883- typeof(ubuf->st_uid) uid = 0;
11884- typeof(ubuf->st_gid) gid = 0;
11885+ typeof(((struct stat64 *)0)->st_uid) uid = 0;
11886+ typeof(((struct stat64 *)0)->st_gid) gid = 0;
11887 SET_UID(uid, from_kuid_munged(current_user_ns(), stat->uid));
11888 SET_GID(gid, from_kgid_munged(current_user_ns(), stat->gid));
11889 if (!access_ok(VERIFY_WRITE, ubuf, sizeof(struct stat64)) ||
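
The cp_stat64() change swaps typeof(ubuf->st_uid) for
typeof(((struct stat64 *)0)->st_uid): both name the same type, but the second
form derives it from the struct definition alone instead of syntactically
dereferencing a __user pointer, which the checker plugins added by this patch
would otherwise flag. The idiom in isolation (GNU C, struct reduced for
illustration):

struct stat64 { unsigned int st_uid; };

void typeof_example(void)
{
	/* typeof() is an unevaluated context, so the null pointer is never
	   dereferenced -- only the member's type is taken */
	typeof(((struct stat64 *)0)->st_uid) uid = 0;
	(void)uid;
}
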
11890diff --git a/arch/x86/include/asm/alternative-asm.h b/arch/x86/include/asm/alternative-asm.h
11891index 372231c..a5aa1a1 100644
11892--- a/arch/x86/include/asm/alternative-asm.h
11893+++ b/arch/x86/include/asm/alternative-asm.h
11894@@ -18,6 +18,45 @@
11895 .endm
11896 #endif
11897
11898+#ifdef KERNEXEC_PLUGIN
11899+ .macro pax_force_retaddr_bts rip=0
11900+ btsq $63,\rip(%rsp)
11901+ .endm
11902+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
11903+ .macro pax_force_retaddr rip=0, reload=0
11904+ btsq $63,\rip(%rsp)
11905+ .endm
11906+ .macro pax_force_fptr ptr
11907+ btsq $63,\ptr
11908+ .endm
11909+ .macro pax_set_fptr_mask
11910+ .endm
11911+#endif
11912+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
11913+ .macro pax_force_retaddr rip=0, reload=0
11914+ .if \reload
11915+ pax_set_fptr_mask
11916+ .endif
11917+ orq %r10,\rip(%rsp)
11918+ .endm
11919+ .macro pax_force_fptr ptr
11920+ orq %r10,\ptr
11921+ .endm
11922+ .macro pax_set_fptr_mask
11923+ movabs $0x8000000000000000,%r10
11924+ .endm
11925+#endif
11926+#else
11927+ .macro pax_force_retaddr rip=0, reload=0
11928+ .endm
11929+ .macro pax_force_fptr ptr
11930+ .endm
11931+ .macro pax_force_retaddr_bts rip=0
11932+ .endm
11933+ .macro pax_set_fptr_mask
11934+ .endm
11935+#endif
11936+
11937 .macro altinstruction_entry orig alt feature orig_len alt_len
11938 .long \orig - .
11939 .long \alt - .
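
The pax_force_retaddr/pax_force_fptr macros above OR (or bts) bit 63 into saved
return addresses and function pointers. Kernel text already has bit 63 set, so
legitimate values pass through unchanged, while an attacker-planted userland
address (bit 63 clear) becomes non-canonical and faults instead of being
executed. The arithmetic, sketched in C:

#define KERNEXEC_MASK	0x8000000000000000UL	/* bit 63, the value loaded into %r10 */

static inline unsigned long force_kernel_addr(unsigned long addr)
{
	/* kernel address: no-op; user address: now non-canonical, so any
	   ret/jmp through it raises #GP rather than running user-mapped code */
	return addr | KERNEXEC_MASK;
}
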
11940diff --git a/arch/x86/include/asm/alternative.h b/arch/x86/include/asm/alternative.h
11941index 58ed6d9..f1cbe58 100644
11942--- a/arch/x86/include/asm/alternative.h
11943+++ b/arch/x86/include/asm/alternative.h
11944@@ -105,7 +105,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
11945 ".pushsection .discard,\"aw\",@progbits\n" \
11946 DISCARD_ENTRY(1) \
11947 ".popsection\n" \
11948- ".pushsection .altinstr_replacement, \"ax\"\n" \
11949+ ".pushsection .altinstr_replacement, \"a\"\n" \
11950 ALTINSTR_REPLACEMENT(newinstr, feature, 1) \
11951 ".popsection"
11952
11953@@ -119,7 +119,7 @@ static inline int alternatives_text_reserved(void *start, void *end)
11954 DISCARD_ENTRY(1) \
11955 DISCARD_ENTRY(2) \
11956 ".popsection\n" \
11957- ".pushsection .altinstr_replacement, \"ax\"\n" \
11958+ ".pushsection .altinstr_replacement, \"a\"\n" \
11959 ALTINSTR_REPLACEMENT(newinstr1, feature1, 1) \
11960 ALTINSTR_REPLACEMENT(newinstr2, feature2, 2) \
11961 ".popsection"
11962diff --git a/arch/x86/include/asm/apic.h b/arch/x86/include/asm/apic.h
11963index 3388034..050f0b9 100644
11964--- a/arch/x86/include/asm/apic.h
11965+++ b/arch/x86/include/asm/apic.h
11966@@ -44,7 +44,7 @@ static inline void generic_apic_probe(void)
11967
11968 #ifdef CONFIG_X86_LOCAL_APIC
11969
11970-extern unsigned int apic_verbosity;
11971+extern int apic_verbosity;
11972 extern int local_apic_timer_c2_ok;
11973
11974 extern int disable_apic;
11975diff --git a/arch/x86/include/asm/apm.h b/arch/x86/include/asm/apm.h
11976index 20370c6..a2eb9b0 100644
11977--- a/arch/x86/include/asm/apm.h
11978+++ b/arch/x86/include/asm/apm.h
11979@@ -34,7 +34,7 @@ static inline void apm_bios_call_asm(u32 func, u32 ebx_in, u32 ecx_in,
11980 __asm__ __volatile__(APM_DO_ZERO_SEGS
11981 "pushl %%edi\n\t"
11982 "pushl %%ebp\n\t"
11983- "lcall *%%cs:apm_bios_entry\n\t"
11984+ "lcall *%%ss:apm_bios_entry\n\t"
11985 "setc %%al\n\t"
11986 "popl %%ebp\n\t"
11987 "popl %%edi\n\t"
11988@@ -58,7 +58,7 @@ static inline u8 apm_bios_call_simple_asm(u32 func, u32 ebx_in,
11989 __asm__ __volatile__(APM_DO_ZERO_SEGS
11990 "pushl %%edi\n\t"
11991 "pushl %%ebp\n\t"
11992- "lcall *%%cs:apm_bios_entry\n\t"
11993+ "lcall *%%ss:apm_bios_entry\n\t"
11994 "setc %%bl\n\t"
11995 "popl %%ebp\n\t"
11996 "popl %%edi\n\t"
11997diff --git a/arch/x86/include/asm/atomic.h b/arch/x86/include/asm/atomic.h
11998index 722aa3b..3a0bb27 100644
11999--- a/arch/x86/include/asm/atomic.h
12000+++ b/arch/x86/include/asm/atomic.h
12001@@ -22,7 +22,18 @@
12002 */
12003 static inline int atomic_read(const atomic_t *v)
12004 {
12005- return (*(volatile int *)&(v)->counter);
12006+ return (*(volatile const int *)&(v)->counter);
12007+}
12008+
12009+/**
12010+ * atomic_read_unchecked - read atomic variable
12011+ * @v: pointer of type atomic_unchecked_t
12012+ *
12013+ * Atomically reads the value of @v.
12014+ */
12015+static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
12016+{
12017+ return (*(volatile const int *)&(v)->counter);
12018 }
12019
12020 /**
12021@@ -38,6 +49,18 @@ static inline void atomic_set(atomic_t *v, int i)
12022 }
12023
12024 /**
12025+ * atomic_set_unchecked - set atomic variable
12026+ * @v: pointer of type atomic_unchecked_t
12027+ * @i: required value
12028+ *
12029+ * Atomically sets the value of @v to @i.
12030+ */
12031+static inline void atomic_set_unchecked(atomic_unchecked_t *v, int i)
12032+{
12033+ v->counter = i;
12034+}
12035+
12036+/**
12037 * atomic_add - add integer to atomic variable
12038 * @i: integer value to add
12039 * @v: pointer of type atomic_t
12040@@ -46,7 +69,29 @@ static inline void atomic_set(atomic_t *v, int i)
12041 */
12042 static inline void atomic_add(int i, atomic_t *v)
12043 {
12044- asm volatile(LOCK_PREFIX "addl %1,%0"
12045+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
12046+
12047+#ifdef CONFIG_PAX_REFCOUNT
12048+ "jno 0f\n"
12049+ LOCK_PREFIX "subl %1,%0\n"
12050+ "int $4\n0:\n"
12051+ _ASM_EXTABLE(0b, 0b)
12052+#endif
12053+
12054+ : "+m" (v->counter)
12055+ : "ir" (i));
12056+}
12057+
12058+/**
12059+ * atomic_add_unchecked - add integer to atomic variable
12060+ * @i: integer value to add
12061+ * @v: pointer of type atomic_unchecked_t
12062+ *
12063+ * Atomically adds @i to @v.
12064+ */
12065+static inline void atomic_add_unchecked(int i, atomic_unchecked_t *v)
12066+{
12067+ asm volatile(LOCK_PREFIX "addl %1,%0\n"
12068 : "+m" (v->counter)
12069 : "ir" (i));
12070 }
12071@@ -60,7 +105,29 @@ static inline void atomic_add(int i, atomic_t *v)
12072 */
12073 static inline void atomic_sub(int i, atomic_t *v)
12074 {
12075- asm volatile(LOCK_PREFIX "subl %1,%0"
12076+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
12077+
12078+#ifdef CONFIG_PAX_REFCOUNT
12079+ "jno 0f\n"
12080+ LOCK_PREFIX "addl %1,%0\n"
12081+ "int $4\n0:\n"
12082+ _ASM_EXTABLE(0b, 0b)
12083+#endif
12084+
12085+ : "+m" (v->counter)
12086+ : "ir" (i));
12087+}
12088+
12089+/**
12090+ * atomic_sub_unchecked - subtract integer from atomic variable
12091+ * @i: integer value to subtract
12092+ * @v: pointer of type atomic_unchecked_t
12093+ *
12094+ * Atomically subtracts @i from @v.
12095+ */
12096+static inline void atomic_sub_unchecked(int i, atomic_unchecked_t *v)
12097+{
12098+ asm volatile(LOCK_PREFIX "subl %1,%0\n"
12099 : "+m" (v->counter)
12100 : "ir" (i));
12101 }
12102@@ -78,7 +145,16 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
12103 {
12104 unsigned char c;
12105
12106- asm volatile(LOCK_PREFIX "subl %2,%0; sete %1"
12107+ asm volatile(LOCK_PREFIX "subl %2,%0\n"
12108+
12109+#ifdef CONFIG_PAX_REFCOUNT
12110+ "jno 0f\n"
12111+ LOCK_PREFIX "addl %2,%0\n"
12112+ "int $4\n0:\n"
12113+ _ASM_EXTABLE(0b, 0b)
12114+#endif
12115+
12116+ "sete %1\n"
12117 : "+m" (v->counter), "=qm" (c)
12118 : "ir" (i) : "memory");
12119 return c;
12120@@ -92,7 +168,27 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
12121 */
12122 static inline void atomic_inc(atomic_t *v)
12123 {
12124- asm volatile(LOCK_PREFIX "incl %0"
12125+ asm volatile(LOCK_PREFIX "incl %0\n"
12126+
12127+#ifdef CONFIG_PAX_REFCOUNT
12128+ "jno 0f\n"
12129+ LOCK_PREFIX "decl %0\n"
12130+ "int $4\n0:\n"
12131+ _ASM_EXTABLE(0b, 0b)
12132+#endif
12133+
12134+ : "+m" (v->counter));
12135+}
12136+
12137+/**
12138+ * atomic_inc_unchecked - increment atomic variable
12139+ * @v: pointer of type atomic_unchecked_t
12140+ *
12141+ * Atomically increments @v by 1.
12142+ */
12143+static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
12144+{
12145+ asm volatile(LOCK_PREFIX "incl %0\n"
12146 : "+m" (v->counter));
12147 }
12148
12149@@ -104,7 +200,27 @@ static inline void atomic_inc(atomic_t *v)
12150 */
12151 static inline void atomic_dec(atomic_t *v)
12152 {
12153- asm volatile(LOCK_PREFIX "decl %0"
12154+ asm volatile(LOCK_PREFIX "decl %0\n"
12155+
12156+#ifdef CONFIG_PAX_REFCOUNT
12157+ "jno 0f\n"
12158+ LOCK_PREFIX "incl %0\n"
12159+ "int $4\n0:\n"
12160+ _ASM_EXTABLE(0b, 0b)
12161+#endif
12162+
12163+ : "+m" (v->counter));
12164+}
12165+
12166+/**
12167+ * atomic_dec_unchecked - decrement atomic variable
12168+ * @v: pointer of type atomic_unchecked_t
12169+ *
12170+ * Atomically decrements @v by 1.
12171+ */
12172+static inline void atomic_dec_unchecked(atomic_unchecked_t *v)
12173+{
12174+ asm volatile(LOCK_PREFIX "decl %0\n"
12175 : "+m" (v->counter));
12176 }
12177
12178@@ -120,7 +236,16 @@ static inline int atomic_dec_and_test(atomic_t *v)
12179 {
12180 unsigned char c;
12181
12182- asm volatile(LOCK_PREFIX "decl %0; sete %1"
12183+ asm volatile(LOCK_PREFIX "decl %0\n"
12184+
12185+#ifdef CONFIG_PAX_REFCOUNT
12186+ "jno 0f\n"
12187+ LOCK_PREFIX "incl %0\n"
12188+ "int $4\n0:\n"
12189+ _ASM_EXTABLE(0b, 0b)
12190+#endif
12191+
12192+ "sete %1\n"
12193 : "+m" (v->counter), "=qm" (c)
12194 : : "memory");
12195 return c != 0;
12196@@ -138,7 +263,35 @@ static inline int atomic_inc_and_test(atomic_t *v)
12197 {
12198 unsigned char c;
12199
12200- asm volatile(LOCK_PREFIX "incl %0; sete %1"
12201+ asm volatile(LOCK_PREFIX "incl %0\n"
12202+
12203+#ifdef CONFIG_PAX_REFCOUNT
12204+ "jno 0f\n"
12205+ LOCK_PREFIX "decl %0\n"
12206+ "int $4\n0:\n"
12207+ _ASM_EXTABLE(0b, 0b)
12208+#endif
12209+
12210+ "sete %1\n"
12211+ : "+m" (v->counter), "=qm" (c)
12212+ : : "memory");
12213+ return c != 0;
12214+}
12215+
12216+/**
12217+ * atomic_inc_and_test_unchecked - increment and test
12218+ * @v: pointer of type atomic_unchecked_t
12219+ *
12220+ * Atomically increments @v by 1
12221+ * and returns true if the result is zero, or false for all
12222+ * other cases.
12223+ */
12224+static inline int atomic_inc_and_test_unchecked(atomic_unchecked_t *v)
12225+{
12226+ unsigned char c;
12227+
12228+ asm volatile(LOCK_PREFIX "incl %0\n"
12229+ "sete %1\n"
12230 : "+m" (v->counter), "=qm" (c)
12231 : : "memory");
12232 return c != 0;
12233@@ -157,7 +310,16 @@ static inline int atomic_add_negative(int i, atomic_t *v)
12234 {
12235 unsigned char c;
12236
12237- asm volatile(LOCK_PREFIX "addl %2,%0; sets %1"
12238+ asm volatile(LOCK_PREFIX "addl %2,%0\n"
12239+
12240+#ifdef CONFIG_PAX_REFCOUNT
12241+ "jno 0f\n"
12242+ LOCK_PREFIX "subl %2,%0\n"
12243+ "int $4\n0:\n"
12244+ _ASM_EXTABLE(0b, 0b)
12245+#endif
12246+
12247+ "sets %1\n"
12248 : "+m" (v->counter), "=qm" (c)
12249 : "ir" (i) : "memory");
12250 return c;
12251@@ -172,6 +334,18 @@ static inline int atomic_add_negative(int i, atomic_t *v)
12252 */
12253 static inline int atomic_add_return(int i, atomic_t *v)
12254 {
12255+ return i + xadd_check_overflow(&v->counter, i);
12256+}
12257+
12258+/**
12259+ * atomic_add_return_unchecked - add integer and return
12260+ * @i: integer value to add
12261+ * @v: pointer of type atomic_unchecked_t
12262+ *
12263+ * Atomically adds @i to @v and returns @i + @v
12264+ */
12265+static inline int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
12266+{
12267 return i + xadd(&v->counter, i);
12268 }
12269
12270@@ -188,6 +362,10 @@ static inline int atomic_sub_return(int i, atomic_t *v)
12271 }
12272
12273 #define atomic_inc_return(v) (atomic_add_return(1, v))
12274+static inline int atomic_inc_return_unchecked(atomic_unchecked_t *v)
12275+{
12276+ return atomic_add_return_unchecked(1, v);
12277+}
12278 #define atomic_dec_return(v) (atomic_sub_return(1, v))
12279
12280 static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
12281@@ -195,11 +373,21 @@ static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
12282 return cmpxchg(&v->counter, old, new);
12283 }
12284
12285+static inline int atomic_cmpxchg_unchecked(atomic_unchecked_t *v, int old, int new)
12286+{
12287+ return cmpxchg(&v->counter, old, new);
12288+}
12289+
12290 static inline int atomic_xchg(atomic_t *v, int new)
12291 {
12292 return xchg(&v->counter, new);
12293 }
12294
12295+static inline int atomic_xchg_unchecked(atomic_unchecked_t *v, int new)
12296+{
12297+ return xchg(&v->counter, new);
12298+}
12299+
12300 /**
12301 * __atomic_add_unless - add unless the number is already a given value
12302 * @v: pointer of type atomic_t
12303@@ -211,12 +399,25 @@ static inline int atomic_xchg(atomic_t *v, int new)
12304 */
12305 static inline int __atomic_add_unless(atomic_t *v, int a, int u)
12306 {
12307- int c, old;
12308+ int c, old, new;
12309 c = atomic_read(v);
12310 for (;;) {
12311- if (unlikely(c == (u)))
12312+ if (unlikely(c == u))
12313 break;
12314- old = atomic_cmpxchg((v), c, c + (a));
12315+
12316+ asm volatile("addl %2,%0\n"
12317+
12318+#ifdef CONFIG_PAX_REFCOUNT
12319+ "jno 0f\n"
12320+ "subl %2,%0\n"
12321+ "int $4\n0:\n"
12322+ _ASM_EXTABLE(0b, 0b)
12323+#endif
12324+
12325+ : "=r" (new)
12326+ : "0" (c), "ir" (a));
12327+
12328+ old = atomic_cmpxchg(v, c, new);
12329 if (likely(old == c))
12330 break;
12331 c = old;
12332@@ -225,6 +426,49 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
12333 }
12334
12335 /**
12336+ * atomic_inc_not_zero_hint - increment if not null
12337+ * @v: pointer of type atomic_t
12338+ * @hint: probable value of the atomic before the increment
12339+ *
12340+ * This version of atomic_inc_not_zero() gives a hint of probable
12341+ * value of the atomic. This helps processor to not read the memory
12342+ * before doing the atomic read/modify/write cycle, lowering
12343+ * number of bus transactions on some arches.
12344+ *
12345+ * Returns: 0 if increment was not done, 1 otherwise.
12346+ */
12347+#define atomic_inc_not_zero_hint atomic_inc_not_zero_hint
12348+static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
12349+{
12350+ int val, c = hint, new;
12351+
12352+ /* sanity test, should be removed by compiler if hint is a constant */
12353+ if (!hint)
12354+ return __atomic_add_unless(v, 1, 0);
12355+
12356+ do {
12357+ asm volatile("incl %0\n"
12358+
12359+#ifdef CONFIG_PAX_REFCOUNT
12360+ "jno 0f\n"
12361+ "decl %0\n"
12362+ "int $4\n0:\n"
12363+ _ASM_EXTABLE(0b, 0b)
12364+#endif
12365+
12366+ : "=r" (new)
12367+ : "0" (c));
12368+
12369+ val = atomic_cmpxchg(v, c, new);
12370+ if (val == c)
12371+ return 1;
12372+ c = val;
12373+ } while (c);
12374+
12375+ return 0;
12376+}
12377+
12378+/**
12379 * atomic_inc_short - increment of a short integer
12380 * @v: pointer to type int
12381 *
12382@@ -253,14 +497,37 @@ static inline void atomic_or_long(unsigned long *v1, unsigned long v2)
12383 #endif
12384
12385 /* These are x86-specific, used by some header files */
12386-#define atomic_clear_mask(mask, addr) \
12387- asm volatile(LOCK_PREFIX "andl %0,%1" \
12388- : : "r" (~(mask)), "m" (*(addr)) : "memory")
12389+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
12390+{
12391+ asm volatile(LOCK_PREFIX "andl %1,%0"
12392+ : "+m" (v->counter)
12393+ : "r" (~(mask))
12394+ : "memory");
12395+}
12396
12397-#define atomic_set_mask(mask, addr) \
12398- asm volatile(LOCK_PREFIX "orl %0,%1" \
12399- : : "r" ((unsigned)(mask)), "m" (*(addr)) \
12400- : "memory")
12401+static inline void atomic_clear_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
12402+{
12403+ asm volatile(LOCK_PREFIX "andl %1,%0"
12404+ : "+m" (v->counter)
12405+ : "r" (~(mask))
12406+ : "memory");
12407+}
12408+
12409+static inline void atomic_set_mask(unsigned int mask, atomic_t *v)
12410+{
12411+ asm volatile(LOCK_PREFIX "orl %1,%0"
12412+ : "+m" (v->counter)
12413+ : "r" (mask)
12414+ : "memory");
12415+}
12416+
12417+static inline void atomic_set_mask_unchecked(unsigned int mask, atomic_unchecked_t *v)
12418+{
12419+ asm volatile(LOCK_PREFIX "orl %1,%0"
12420+ : "+m" (v->counter)
12421+ : "r" (mask)
12422+ : "memory");
12423+}
12424
12425 /* Atomic operations are already serializing on x86 */
12426 #define smp_mb__before_atomic_dec() barrier()
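
Every PAX_REFCOUNT asm block above follows the same pattern: perform the locked
operation, "jno" past the fixup when the signed result did not overflow,
otherwise undo the operation and raise "int $4" (#OF), with the _ASM_EXTABLE
entry making the trap resumable so the handler can kill the offending task.
A userspace analogue of the checked increment, assuming C11 atomics and the
GCC/Clang __builtin_add_overflow builtin in place of the inline asm:

#include <stdatomic.h>

static inline int atomic_inc_checked(atomic_int *v)
{
	int old = atomic_load(v), new;

	do {
		if (__builtin_add_overflow(old, 1, &new))
			__builtin_trap();	/* stands in for the int $4 path */
	} while (!atomic_compare_exchange_weak(v, &old, new));

	return new;
}

The _unchecked variants exist for the minority of counters (statistics and
similar) where wraparound is intended rather than a bug.
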
12427diff --git a/arch/x86/include/asm/atomic64_32.h b/arch/x86/include/asm/atomic64_32.h
12428index b154de7..aadebd8 100644
12429--- a/arch/x86/include/asm/atomic64_32.h
12430+++ b/arch/x86/include/asm/atomic64_32.h
12431@@ -12,6 +12,14 @@ typedef struct {
12432 u64 __aligned(8) counter;
12433 } atomic64_t;
12434
12435+#ifdef CONFIG_PAX_REFCOUNT
12436+typedef struct {
12437+ u64 __aligned(8) counter;
12438+} atomic64_unchecked_t;
12439+#else
12440+typedef atomic64_t atomic64_unchecked_t;
12441+#endif
12442+
12443 #define ATOMIC64_INIT(val) { (val) }
12444
12445 #define __ATOMIC64_DECL(sym) void atomic64_##sym(atomic64_t *, ...)
12446@@ -37,21 +45,31 @@ typedef struct {
12447 ATOMIC64_DECL_ONE(sym##_386)
12448
12449 ATOMIC64_DECL_ONE(add_386);
12450+ATOMIC64_DECL_ONE(add_unchecked_386);
12451 ATOMIC64_DECL_ONE(sub_386);
12452+ATOMIC64_DECL_ONE(sub_unchecked_386);
12453 ATOMIC64_DECL_ONE(inc_386);
12454+ATOMIC64_DECL_ONE(inc_unchecked_386);
12455 ATOMIC64_DECL_ONE(dec_386);
12456+ATOMIC64_DECL_ONE(dec_unchecked_386);
12457 #endif
12458
12459 #define alternative_atomic64(f, out, in...) \
12460 __alternative_atomic64(f, f, ASM_OUTPUT2(out), ## in)
12461
12462 ATOMIC64_DECL(read);
12463+ATOMIC64_DECL(read_unchecked);
12464 ATOMIC64_DECL(set);
12465+ATOMIC64_DECL(set_unchecked);
12466 ATOMIC64_DECL(xchg);
12467 ATOMIC64_DECL(add_return);
12468+ATOMIC64_DECL(add_return_unchecked);
12469 ATOMIC64_DECL(sub_return);
12470+ATOMIC64_DECL(sub_return_unchecked);
12471 ATOMIC64_DECL(inc_return);
12472+ATOMIC64_DECL(inc_return_unchecked);
12473 ATOMIC64_DECL(dec_return);
12474+ATOMIC64_DECL(dec_return_unchecked);
12475 ATOMIC64_DECL(dec_if_positive);
12476 ATOMIC64_DECL(inc_not_zero);
12477 ATOMIC64_DECL(add_unless);
12478@@ -77,6 +95,21 @@ static inline long long atomic64_cmpxchg(atomic64_t *v, long long o, long long n
12479 }
12480
12481 /**
12482+ * atomic64_cmpxchg_unchecked - cmpxchg atomic64 variable
12483+ * @p: pointer to type atomic64_unchecked_t
12484+ * @o: expected value
12485+ * @n: new value
12486+ *
12487+ * Atomically sets @v to @n if it was equal to @o and returns
12488+ * the old value.
12489+ */
12490+
12491+static inline long long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long long o, long long n)
12492+{
12493+ return cmpxchg64(&v->counter, o, n);
12494+}
12495+
12496+/**
12497 * atomic64_xchg - xchg atomic64 variable
12498 * @v: pointer to type atomic64_t
12499 * @n: value to assign
12500@@ -112,6 +145,22 @@ static inline void atomic64_set(atomic64_t *v, long long i)
12501 }
12502
12503 /**
12504+ * atomic64_set_unchecked - set atomic64 variable
12505+ * @v: pointer to type atomic64_unchecked_t
12506+ * @n: value to assign
12507+ *
12508+ * Atomically sets the value of @v to @n.
12509+ */
12510+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long long i)
12511+{
12512+ unsigned high = (unsigned)(i >> 32);
12513+ unsigned low = (unsigned)i;
12514+ alternative_atomic64(set, /* no output */,
12515+ "S" (v), "b" (low), "c" (high)
12516+ : "eax", "edx", "memory");
12517+}
12518+
12519+/**
12520 * atomic64_read - read atomic64 variable
12521 * @v: pointer to type atomic64_t
12522 *
12523@@ -125,6 +174,19 @@ static inline long long atomic64_read(const atomic64_t *v)
12524 }
12525
12526 /**
12527+ * atomic64_read_unchecked - read atomic64 variable
12528+ * @v: pointer to type atomic64_unchecked_t
12529+ *
12530+ * Atomically reads the value of @v and returns it.
12531+ */
12532+static inline long long atomic64_read_unchecked(atomic64_unchecked_t *v)
12533+{
12534+ long long r;
12535+ alternative_atomic64(read, "=&A" (r), "c" (v) : "memory");
12536+ return r;
12537+ }
12538+
12539+/**
12540 * atomic64_add_return - add and return
12541 * @i: integer value to add
12542 * @v: pointer to type atomic64_t
12543@@ -139,6 +201,21 @@ static inline long long atomic64_add_return(long long i, atomic64_t *v)
12544 return i;
12545 }
12546
12547+/**
12548+ * atomic64_add_return_unchecked - add and return
12549+ * @i: integer value to add
12550+ * @v: pointer to type atomic64_unchecked_t
12551+ *
12552+ * Atomically adds @i to @v and returns @i + *@v
12553+ */
12554+static inline long long atomic64_add_return_unchecked(long long i, atomic64_unchecked_t *v)
12555+{
12556+ alternative_atomic64(add_return_unchecked,
12557+ ASM_OUTPUT2("+A" (i), "+c" (v)),
12558+ ASM_NO_INPUT_CLOBBER("memory"));
12559+ return i;
12560+}
12561+
12562 /*
12563 * Other variants with different arithmetic operators:
12564 */
12565@@ -158,6 +235,14 @@ static inline long long atomic64_inc_return(atomic64_t *v)
12566 return a;
12567 }
12568
12569+static inline long long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
12570+{
12571+ long long a;
12572+ alternative_atomic64(inc_return_unchecked, "=&A" (a),
12573+ "S" (v) : "memory", "ecx");
12574+ return a;
12575+}
12576+
12577 static inline long long atomic64_dec_return(atomic64_t *v)
12578 {
12579 long long a;
12580@@ -182,6 +267,21 @@ static inline long long atomic64_add(long long i, atomic64_t *v)
12581 }
12582
12583 /**
12584+ * atomic64_add_unchecked - add integer to atomic64 variable
12585+ * @i: integer value to add
12586+ * @v: pointer to type atomic64_unchecked_t
12587+ *
12588+ * Atomically adds @i to @v.
12589+ */
12590+static inline long long atomic64_add_unchecked(long long i, atomic64_unchecked_t *v)
12591+{
12592+ __alternative_atomic64(add_unchecked, add_return_unchecked,
12593+ ASM_OUTPUT2("+A" (i), "+c" (v)),
12594+ ASM_NO_INPUT_CLOBBER("memory"));
12595+ return i;
12596+}
12597+
12598+/**
12599 * atomic64_sub - subtract the atomic64 variable
12600 * @i: integer value to subtract
12601 * @v: pointer to type atomic64_t
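
On 32-bit x86 the atomic64 helpers above stay out of line and are dispatched
via alternative_atomic64() to a cmpxchg8b loop (or a spinlocked fallback on
386/486). Functionally they reduce to a compare-and-swap retry loop; a plain
C11 sketch of add_return, minus the overflow trap the checked kernel versions
carry:

#include <stdatomic.h>

static long long atomic64_add_return_sketch(long long i, _Atomic long long *v)
{
	long long old = atomic_load(v);

	/* retry until the 8-byte CAS (cmpxchg8b on i586+) goes through */
	while (!atomic_compare_exchange_weak(v, &old, old + i))
		;
	return old + i;
}
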
12602diff --git a/arch/x86/include/asm/atomic64_64.h b/arch/x86/include/asm/atomic64_64.h
12603index 0e1cbfc..5623683 100644
12604--- a/arch/x86/include/asm/atomic64_64.h
12605+++ b/arch/x86/include/asm/atomic64_64.h
12606@@ -18,7 +18,19 @@
12607 */
12608 static inline long atomic64_read(const atomic64_t *v)
12609 {
12610- return (*(volatile long *)&(v)->counter);
12611+ return (*(volatile const long *)&(v)->counter);
12612+}
12613+
12614+/**
12615+ * atomic64_read_unchecked - read atomic64 variable
12616+ * @v: pointer of type atomic64_unchecked_t
12617+ *
12618+ * Atomically reads the value of @v.
12619+ * Doesn't imply a read memory barrier.
12620+ */
12621+static inline long atomic64_read_unchecked(const atomic64_unchecked_t *v)
12622+{
12623+ return (*(volatile const long *)&(v)->counter);
12624 }
12625
12626 /**
12627@@ -34,6 +46,18 @@ static inline void atomic64_set(atomic64_t *v, long i)
12628 }
12629
12630 /**
12631+ * atomic64_set_unchecked - set atomic64 variable
12632+ * @v: pointer to type atomic64_unchecked_t
12633+ * @i: required value
12634+ *
12635+ * Atomically sets the value of @v to @i.
12636+ */
12637+static inline void atomic64_set_unchecked(atomic64_unchecked_t *v, long i)
12638+{
12639+ v->counter = i;
12640+}
12641+
12642+/**
12643 * atomic64_add - add integer to atomic64 variable
12644 * @i: integer value to add
12645 * @v: pointer to type atomic64_t
12646@@ -42,6 +66,28 @@ static inline void atomic64_set(atomic64_t *v, long i)
12647 */
12648 static inline void atomic64_add(long i, atomic64_t *v)
12649 {
12650+ asm volatile(LOCK_PREFIX "addq %1,%0\n"
12651+
12652+#ifdef CONFIG_PAX_REFCOUNT
12653+ "jno 0f\n"
12654+ LOCK_PREFIX "subq %1,%0\n"
12655+ "int $4\n0:\n"
12656+ _ASM_EXTABLE(0b, 0b)
12657+#endif
12658+
12659+ : "=m" (v->counter)
12660+ : "er" (i), "m" (v->counter));
12661+}
12662+
12663+/**
12664+ * atomic64_add_unchecked - add integer to atomic64 variable
12665+ * @i: integer value to add
12666+ * @v: pointer to type atomic64_unchecked_t
12667+ *
12668+ * Atomically adds @i to @v.
12669+ */
12670+static inline void atomic64_add_unchecked(long i, atomic64_unchecked_t *v)
12671+{
12672 asm volatile(LOCK_PREFIX "addq %1,%0"
12673 : "=m" (v->counter)
12674 : "er" (i), "m" (v->counter));
12675@@ -56,7 +102,29 @@ static inline void atomic64_add(long i, atomic64_t *v)
12676 */
12677 static inline void atomic64_sub(long i, atomic64_t *v)
12678 {
12679- asm volatile(LOCK_PREFIX "subq %1,%0"
12680+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
12681+
12682+#ifdef CONFIG_PAX_REFCOUNT
12683+ "jno 0f\n"
12684+ LOCK_PREFIX "addq %1,%0\n"
12685+ "int $4\n0:\n"
12686+ _ASM_EXTABLE(0b, 0b)
12687+#endif
12688+
12689+ : "=m" (v->counter)
12690+ : "er" (i), "m" (v->counter));
12691+}
12692+
12693+/**
12694+ * atomic64_sub_unchecked - subtract the atomic64 variable
12695+ * @i: integer value to subtract
12696+ * @v: pointer to type atomic64_unchecked_t
12697+ *
12698+ * Atomically subtracts @i from @v.
12699+ */
12700+static inline void atomic64_sub_unchecked(long i, atomic64_unchecked_t *v)
12701+{
12702+ asm volatile(LOCK_PREFIX "subq %1,%0\n"
12703 : "=m" (v->counter)
12704 : "er" (i), "m" (v->counter));
12705 }
12706@@ -74,7 +142,16 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
12707 {
12708 unsigned char c;
12709
12710- asm volatile(LOCK_PREFIX "subq %2,%0; sete %1"
12711+ asm volatile(LOCK_PREFIX "subq %2,%0\n"
12712+
12713+#ifdef CONFIG_PAX_REFCOUNT
12714+ "jno 0f\n"
12715+ LOCK_PREFIX "addq %2,%0\n"
12716+ "int $4\n0:\n"
12717+ _ASM_EXTABLE(0b, 0b)
12718+#endif
12719+
12720+ "sete %1\n"
12721 : "=m" (v->counter), "=qm" (c)
12722 : "er" (i), "m" (v->counter) : "memory");
12723 return c;
12724@@ -88,6 +165,27 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
12725 */
12726 static inline void atomic64_inc(atomic64_t *v)
12727 {
12728+ asm volatile(LOCK_PREFIX "incq %0\n"
12729+
12730+#ifdef CONFIG_PAX_REFCOUNT
12731+ "jno 0f\n"
12732+ LOCK_PREFIX "decq %0\n"
12733+ "int $4\n0:\n"
12734+ _ASM_EXTABLE(0b, 0b)
12735+#endif
12736+
12737+ : "=m" (v->counter)
12738+ : "m" (v->counter));
12739+}
12740+
12741+/**
12742+ * atomic64_inc_unchecked - increment atomic64 variable
12743+ * @v: pointer to type atomic64_unchecked_t
12744+ *
12745+ * Atomically increments @v by 1.
12746+ */
12747+static inline void atomic64_inc_unchecked(atomic64_unchecked_t *v)
12748+{
12749 asm volatile(LOCK_PREFIX "incq %0"
12750 : "=m" (v->counter)
12751 : "m" (v->counter));
12752@@ -101,7 +199,28 @@ static inline void atomic64_inc(atomic64_t *v)
12753 */
12754 static inline void atomic64_dec(atomic64_t *v)
12755 {
12756- asm volatile(LOCK_PREFIX "decq %0"
12757+ asm volatile(LOCK_PREFIX "decq %0\n"
12758+
12759+#ifdef CONFIG_PAX_REFCOUNT
12760+ "jno 0f\n"
12761+ LOCK_PREFIX "incq %0\n"
12762+ "int $4\n0:\n"
12763+ _ASM_EXTABLE(0b, 0b)
12764+#endif
12765+
12766+ : "=m" (v->counter)
12767+ : "m" (v->counter));
12768+}
12769+
12770+/**
12771+ * atomic64_dec_unchecked - decrement atomic64 variable
12772+ * @v: pointer to type atomic64_t
12773+ *
12774+ * Atomically decrements @v by 1.
12775+ */
12776+static inline void atomic64_dec_unchecked(atomic64_unchecked_t *v)
12777+{
12778+ asm volatile(LOCK_PREFIX "decq %0\n"
12779 : "=m" (v->counter)
12780 : "m" (v->counter));
12781 }
12782@@ -118,7 +237,16 @@ static inline int atomic64_dec_and_test(atomic64_t *v)
12783 {
12784 unsigned char c;
12785
12786- asm volatile(LOCK_PREFIX "decq %0; sete %1"
12787+ asm volatile(LOCK_PREFIX "decq %0\n"
12788+
12789+#ifdef CONFIG_PAX_REFCOUNT
12790+ "jno 0f\n"
12791+ LOCK_PREFIX "incq %0\n"
12792+ "int $4\n0:\n"
12793+ _ASM_EXTABLE(0b, 0b)
12794+#endif
12795+
12796+ "sete %1\n"
12797 : "=m" (v->counter), "=qm" (c)
12798 : "m" (v->counter) : "memory");
12799 return c != 0;
12800@@ -136,7 +264,16 @@ static inline int atomic64_inc_and_test(atomic64_t *v)
12801 {
12802 unsigned char c;
12803
12804- asm volatile(LOCK_PREFIX "incq %0; sete %1"
12805+ asm volatile(LOCK_PREFIX "incq %0\n"
12806+
12807+#ifdef CONFIG_PAX_REFCOUNT
12808+ "jno 0f\n"
12809+ LOCK_PREFIX "decq %0\n"
12810+ "int $4\n0:\n"
12811+ _ASM_EXTABLE(0b, 0b)
12812+#endif
12813+
12814+ "sete %1\n"
12815 : "=m" (v->counter), "=qm" (c)
12816 : "m" (v->counter) : "memory");
12817 return c != 0;
12818@@ -155,7 +292,16 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
12819 {
12820 unsigned char c;
12821
12822- asm volatile(LOCK_PREFIX "addq %2,%0; sets %1"
12823+ asm volatile(LOCK_PREFIX "addq %2,%0\n"
12824+
12825+#ifdef CONFIG_PAX_REFCOUNT
12826+ "jno 0f\n"
12827+ LOCK_PREFIX "subq %2,%0\n"
12828+ "int $4\n0:\n"
12829+ _ASM_EXTABLE(0b, 0b)
12830+#endif
12831+
12832+ "sets %1\n"
12833 : "=m" (v->counter), "=qm" (c)
12834 : "er" (i), "m" (v->counter) : "memory");
12835 return c;
12836@@ -170,6 +316,18 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
12837 */
12838 static inline long atomic64_add_return(long i, atomic64_t *v)
12839 {
12840+ return i + xadd_check_overflow(&v->counter, i);
12841+}
12842+
12843+/**
12844+ * atomic64_add_return_unchecked - add and return
12845+ * @i: integer value to add
12846+ * @v: pointer to type atomic64_unchecked_t
12847+ *
12848+ * Atomically adds @i to @v and returns @i + @v
12849+ */
12850+static inline long atomic64_add_return_unchecked(long i, atomic64_unchecked_t *v)
12851+{
12852 return i + xadd(&v->counter, i);
12853 }
12854
12855@@ -179,6 +337,10 @@ static inline long atomic64_sub_return(long i, atomic64_t *v)
12856 }
12857
12858 #define atomic64_inc_return(v) (atomic64_add_return(1, (v)))
12859+static inline long atomic64_inc_return_unchecked(atomic64_unchecked_t *v)
12860+{
12861+ return atomic64_add_return_unchecked(1, v);
12862+}
12863 #define atomic64_dec_return(v) (atomic64_sub_return(1, (v)))
12864
12865 static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
12866@@ -186,6 +348,11 @@ static inline long atomic64_cmpxchg(atomic64_t *v, long old, long new)
12867 return cmpxchg(&v->counter, old, new);
12868 }
12869
12870+static inline long atomic64_cmpxchg_unchecked(atomic64_unchecked_t *v, long old, long new)
12871+{
12872+ return cmpxchg(&v->counter, old, new);
12873+}
12874+
12875 static inline long atomic64_xchg(atomic64_t *v, long new)
12876 {
12877 return xchg(&v->counter, new);
12878@@ -202,17 +369,30 @@ static inline long atomic64_xchg(atomic64_t *v, long new)
12879 */
12880 static inline int atomic64_add_unless(atomic64_t *v, long a, long u)
12881 {
12882- long c, old;
12883+ long c, old, new;
12884 c = atomic64_read(v);
12885 for (;;) {
12886- if (unlikely(c == (u)))
12887+ if (unlikely(c == u))
12888 break;
12889- old = atomic64_cmpxchg((v), c, c + (a));
12890+
12891+ asm volatile("add %2,%0\n"
12892+
12893+#ifdef CONFIG_PAX_REFCOUNT
12894+ "jno 0f\n"
12895+ "sub %2,%0\n"
12896+ "int $4\n0:\n"
12897+ _ASM_EXTABLE(0b, 0b)
12898+#endif
12899+
12900+ : "=r" (new)
12901+ : "0" (c), "ir" (a));
12902+
12903+ old = atomic64_cmpxchg(v, c, new);
12904 if (likely(old == c))
12905 break;
12906 c = old;
12907 }
12908- return c != (u);
12909+ return c != u;
12910 }
12911
12912 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
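
atomic64_add_unless(), reworked above, adds @a only while the counter differs
from @u and reports whether the add happened; the patched body routes the
addition through the same jno/int $4 overflow check before the cmpxchg
publishes it. The contract, minus that trap, in C11:

#include <stdatomic.h>
#include <stdbool.h>

static bool add_unless_sketch(_Atomic long *v, long a, long u)
{
	long c = atomic_load(v);

	while (c != u && !atomic_compare_exchange_weak(v, &c, c + a))
		;			/* a failed CAS reloads c with the current value */
	return c != u;			/* true iff the add was performed */
}
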
12913diff --git a/arch/x86/include/asm/bitops.h b/arch/x86/include/asm/bitops.h
12914index 6dfd019..28e188d 100644
12915--- a/arch/x86/include/asm/bitops.h
12916+++ b/arch/x86/include/asm/bitops.h
12917@@ -40,7 +40,7 @@
12918 * a mask operation on a byte.
12919 */
12920 #define IS_IMMEDIATE(nr) (__builtin_constant_p(nr))
12921-#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((void *)(addr) + ((nr)>>3))
12922+#define CONST_MASK_ADDR(nr, addr) BITOP_ADDR((volatile void *)(addr) + ((nr)>>3))
12923 #define CONST_MASK(nr) (1 << ((nr) & 7))
12924
12925 /**
12926@@ -486,7 +486,7 @@ static inline int fls(int x)
12927 * at position 64.
12928 */
12929 #ifdef CONFIG_X86_64
12930-static __always_inline int fls64(__u64 x)
12931+static __always_inline long fls64(__u64 x)
12932 {
12933 int bitpos = -1;
12934 /*
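
fls64() above widens its return type from int to long so the BSR-based 64-bit
version matches the register width expected at call sites under the
size_overflow plugin. The same "find last set" semantics can be written
portably with a count-leading-zeros builtin; a sketch assuming GCC/Clang:

static inline long fls64_sketch(unsigned long long x)
{
	/* fls64(0) == 0, fls64(1) == 1, ..., fls64(1ULL << 63) == 64;
	   __builtin_clzll is undefined for 0, hence the guard */
	return x ? 64 - __builtin_clzll(x) : 0;
}
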
12935diff --git a/arch/x86/include/asm/boot.h b/arch/x86/include/asm/boot.h
12936index 4fa687a..60f2d39 100644
12937--- a/arch/x86/include/asm/boot.h
12938+++ b/arch/x86/include/asm/boot.h
12939@@ -6,10 +6,15 @@
12940 #include <uapi/asm/boot.h>
12941
12942 /* Physical address where kernel should be loaded. */
12943-#define LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
12944+#define ____LOAD_PHYSICAL_ADDR ((CONFIG_PHYSICAL_START \
12945 + (CONFIG_PHYSICAL_ALIGN - 1)) \
12946 & ~(CONFIG_PHYSICAL_ALIGN - 1))
12947
12948+#ifndef __ASSEMBLY__
12949+extern unsigned char __LOAD_PHYSICAL_ADDR[];
12950+#define LOAD_PHYSICAL_ADDR ((unsigned long)__LOAD_PHYSICAL_ADDR)
12951+#endif
12952+
12953 /* Minimum kernel alignment, as a power of two */
12954 #ifdef CONFIG_X86_64
12955 #define MIN_KERNEL_ALIGN_LG2 PMD_SHIFT
12956diff --git a/arch/x86/include/asm/cache.h b/arch/x86/include/asm/cache.h
12957index 48f99f1..d78ebf9 100644
12958--- a/arch/x86/include/asm/cache.h
12959+++ b/arch/x86/include/asm/cache.h
12960@@ -5,12 +5,13 @@
12961
12962 /* L1 cache line size */
12963 #define L1_CACHE_SHIFT (CONFIG_X86_L1_CACHE_SHIFT)
12964-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
12965+#define L1_CACHE_BYTES (_AC(1,UL) << L1_CACHE_SHIFT)
12966
12967 #define __read_mostly __attribute__((__section__(".data..read_mostly")))
12968+#define __read_only __attribute__((__section__(".data..read_only")))
12969
12970 #define INTERNODE_CACHE_SHIFT CONFIG_X86_INTERNODE_CACHE_SHIFT
12971-#define INTERNODE_CACHE_BYTES (1 << INTERNODE_CACHE_SHIFT)
12972+#define INTERNODE_CACHE_BYTES (_AC(1,UL) << INTERNODE_CACHE_SHIFT)
12973
12974 #ifdef CONFIG_X86_VSMP
12975 #ifdef CONFIG_SMP
12976diff --git a/arch/x86/include/asm/cacheflush.h b/arch/x86/include/asm/cacheflush.h
12977index 9863ee3..4a1f8e1 100644
12978--- a/arch/x86/include/asm/cacheflush.h
12979+++ b/arch/x86/include/asm/cacheflush.h
12980@@ -27,7 +27,7 @@ static inline unsigned long get_page_memtype(struct page *pg)
12981 unsigned long pg_flags = pg->flags & _PGMT_MASK;
12982
12983 if (pg_flags == _PGMT_DEFAULT)
12984- return -1;
12985+ return ~0UL;
12986 else if (pg_flags == _PGMT_WC)
12987 return _PAGE_CACHE_WC;
12988 else if (pg_flags == _PGMT_UC_MINUS)
12989diff --git a/arch/x86/include/asm/checksum_32.h b/arch/x86/include/asm/checksum_32.h
12990index 46fc474..b02b0f9 100644
12991--- a/arch/x86/include/asm/checksum_32.h
12992+++ b/arch/x86/include/asm/checksum_32.h
12993@@ -31,6 +31,14 @@ asmlinkage __wsum csum_partial_copy_generic(const void *src, void *dst,
12994 int len, __wsum sum,
12995 int *src_err_ptr, int *dst_err_ptr);
12996
12997+asmlinkage __wsum csum_partial_copy_generic_to_user(const void *src, void *dst,
12998+ int len, __wsum sum,
12999+ int *src_err_ptr, int *dst_err_ptr);
13000+
13001+asmlinkage __wsum csum_partial_copy_generic_from_user(const void *src, void *dst,
13002+ int len, __wsum sum,
13003+ int *src_err_ptr, int *dst_err_ptr);
13004+
13005 /*
13006 * Note: when you get a NULL pointer exception here this means someone
13007 * passed in an incorrect kernel address to one of these functions.
13008@@ -50,7 +58,7 @@ static inline __wsum csum_partial_copy_from_user(const void __user *src,
13009 int *err_ptr)
13010 {
13011 might_sleep();
13012- return csum_partial_copy_generic((__force void *)src, dst,
13013+ return csum_partial_copy_generic_from_user((__force void *)src, dst,
13014 len, sum, err_ptr, NULL);
13015 }
13016
13017@@ -178,7 +186,7 @@ static inline __wsum csum_and_copy_to_user(const void *src,
13018 {
13019 might_sleep();
13020 if (access_ok(VERIFY_WRITE, dst, len))
13021- return csum_partial_copy_generic(src, (__force void *)dst,
13022+ return csum_partial_copy_generic_to_user(src, (__force void *)dst,
13023 len, sum, NULL, err_ptr);
13024
13025 if (len)
13026diff --git a/arch/x86/include/asm/cmpxchg.h b/arch/x86/include/asm/cmpxchg.h
13027index d47786a..ce1b05d 100644
13028--- a/arch/x86/include/asm/cmpxchg.h
13029+++ b/arch/x86/include/asm/cmpxchg.h
13030@@ -14,8 +14,12 @@ extern void __cmpxchg_wrong_size(void)
13031 __compiletime_error("Bad argument size for cmpxchg");
13032 extern void __xadd_wrong_size(void)
13033 __compiletime_error("Bad argument size for xadd");
13034+extern void __xadd_check_overflow_wrong_size(void)
13035+ __compiletime_error("Bad argument size for xadd_check_overflow");
13036 extern void __add_wrong_size(void)
13037 __compiletime_error("Bad argument size for add");
13038+extern void __add_check_overflow_wrong_size(void)
13039+ __compiletime_error("Bad argument size for add_check_overflow");
13040
13041 /*
13042 * Constants for operation sizes. On 32-bit, the 64-bit size it set to
13043@@ -67,6 +71,34 @@ extern void __add_wrong_size(void)
13044 __ret; \
13045 })
13046
13047+#define __xchg_op_check_overflow(ptr, arg, op, lock) \
13048+ ({ \
13049+ __typeof__ (*(ptr)) __ret = (arg); \
13050+ switch (sizeof(*(ptr))) { \
13051+ case __X86_CASE_L: \
13052+ asm volatile (lock #op "l %0, %1\n" \
13053+ "jno 0f\n" \
13054+ "mov %0,%1\n" \
13055+ "int $4\n0:\n" \
13056+ _ASM_EXTABLE(0b, 0b) \
13057+ : "+r" (__ret), "+m" (*(ptr)) \
13058+ : : "memory", "cc"); \
13059+ break; \
13060+ case __X86_CASE_Q: \
13061+ asm volatile (lock #op "q %q0, %1\n" \
13062+ "jno 0f\n" \
13063+ "mov %0,%1\n" \
13064+ "int $4\n0:\n" \
13065+ _ASM_EXTABLE(0b, 0b) \
13066+ : "+r" (__ret), "+m" (*(ptr)) \
13067+ : : "memory", "cc"); \
13068+ break; \
13069+ default: \
13070+ __ ## op ## _check_overflow_wrong_size(); \
13071+ } \
13072+ __ret; \
13073+ })
13074+
13075 /*
13076 * Note: no "lock" prefix even on SMP: xchg always implies lock anyway.
13077 * Since this is generally used to protect other memory information, we
13078@@ -167,6 +199,9 @@ extern void __add_wrong_size(void)
13079 #define xadd_sync(ptr, inc) __xadd((ptr), (inc), "lock; ")
13080 #define xadd_local(ptr, inc) __xadd((ptr), (inc), "")
13081
13082+#define __xadd_check_overflow(ptr, inc, lock) __xchg_op_check_overflow((ptr), (inc), xadd, lock)
13083+#define xadd_check_overflow(ptr, inc) __xadd_check_overflow((ptr), (inc), LOCK_PREFIX)
13084+
13085 #define __add(ptr, inc, lock) \
13086 ({ \
13087 __typeof__ (*(ptr)) __ret = (inc); \
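
__xchg_op_check_overflow() is the xadd counterpart of the refcount pattern:
the locked xadd executes, and if OF is set the old value is written back
(undoing the add) before int $4 fires. A hedged C rendering of what
xadd_check_overflow(ptr, inc) computes -- the rollback here is not atomic and
is for illustration only:

#include <stdatomic.h>

static inline int xadd_sketch(atomic_int *p, int inc)
{
	int old = atomic_fetch_add(p, inc);
	int sum;

	if (__builtin_add_overflow(old, inc, &sum)) {
		atomic_store(p, old);	/* roll back, like the asm's "mov %0,%1" */
		__builtin_trap();	/* stands in for int $4 */
	}
	return old;			/* xadd yields the pre-add value */
}
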
13088diff --git a/arch/x86/include/asm/compat.h b/arch/x86/include/asm/compat.h
13089index 59c6c40..5e0b22c 100644
13090--- a/arch/x86/include/asm/compat.h
13091+++ b/arch/x86/include/asm/compat.h
13092@@ -41,7 +41,7 @@ typedef s64 __attribute__((aligned(4))) compat_s64;
13093 typedef u32 compat_uint_t;
13094 typedef u32 compat_ulong_t;
13095 typedef u64 __attribute__((aligned(4))) compat_u64;
13096-typedef u32 compat_uptr_t;
13097+typedef u32 __user compat_uptr_t;
13098
13099 struct compat_timespec {
13100 compat_time_t tv_sec;
13101diff --git a/arch/x86/include/asm/cpufeature.h b/arch/x86/include/asm/cpufeature.h
13102index e99ac27..e89e28c 100644
13103--- a/arch/x86/include/asm/cpufeature.h
13104+++ b/arch/x86/include/asm/cpufeature.h
13105@@ -211,7 +211,7 @@
13106 #define X86_FEATURE_BMI1 (9*32+ 3) /* 1st group bit manipulation extensions */
13107 #define X86_FEATURE_HLE (9*32+ 4) /* Hardware Lock Elision */
13108 #define X86_FEATURE_AVX2 (9*32+ 5) /* AVX2 instructions */
13109-#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Protection */
13110+#define X86_FEATURE_SMEP (9*32+ 7) /* Supervisor Mode Execution Prevention */
13111 #define X86_FEATURE_BMI2 (9*32+ 8) /* 2nd group bit manipulation extensions */
13112 #define X86_FEATURE_ERMS (9*32+ 9) /* Enhanced REP MOVSB/STOSB */
13113 #define X86_FEATURE_INVPCID (9*32+10) /* Invalidate Processor Context ID */
13114@@ -394,7 +394,7 @@ static __always_inline __pure bool __static_cpu_has(u16 bit)
13115 ".section .discard,\"aw\",@progbits\n"
13116 " .byte 0xff + (4f-3f) - (2b-1b)\n" /* size check */
13117 ".previous\n"
13118- ".section .altinstr_replacement,\"ax\"\n"
13119+ ".section .altinstr_replacement,\"a\"\n"
13120 "3: movb $1,%0\n"
13121 "4:\n"
13122 ".previous\n"
13123diff --git a/arch/x86/include/asm/desc.h b/arch/x86/include/asm/desc.h
13124index 8bf1c06..b6ae785 100644
13125--- a/arch/x86/include/asm/desc.h
13126+++ b/arch/x86/include/asm/desc.h
13127@@ -4,6 +4,7 @@
13128 #include <asm/desc_defs.h>
13129 #include <asm/ldt.h>
13130 #include <asm/mmu.h>
13131+#include <asm/pgtable.h>
13132
13133 #include <linux/smp.h>
13134 #include <linux/percpu.h>
13135@@ -17,6 +18,7 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
13136
13137 desc->type = (info->read_exec_only ^ 1) << 1;
13138 desc->type |= info->contents << 2;
13139+ desc->type |= info->seg_not_present ^ 1;
13140
13141 desc->s = 1;
13142 desc->dpl = 0x3;
13143@@ -35,19 +37,14 @@ static inline void fill_ldt(struct desc_struct *desc, const struct user_desc *in
13144 }
13145
13146 extern struct desc_ptr idt_descr;
13147-extern gate_desc idt_table[];
13148 extern struct desc_ptr nmi_idt_descr;
13149-extern gate_desc nmi_idt_table[];
13150-
13151-struct gdt_page {
13152- struct desc_struct gdt[GDT_ENTRIES];
13153-} __attribute__((aligned(PAGE_SIZE)));
13154-
13155-DECLARE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page);
13156+extern gate_desc idt_table[256];
13157+extern gate_desc nmi_idt_table[256];
13158
13159+extern struct desc_struct cpu_gdt_table[NR_CPUS][PAGE_SIZE / sizeof(struct desc_struct)];
13160 static inline struct desc_struct *get_cpu_gdt_table(unsigned int cpu)
13161 {
13162- return per_cpu(gdt_page, cpu).gdt;
13163+ return cpu_gdt_table[cpu];
13164 }
13165
13166 #ifdef CONFIG_X86_64
13167@@ -72,8 +69,14 @@ static inline void pack_gate(gate_desc *gate, unsigned char type,
13168 unsigned long base, unsigned dpl, unsigned flags,
13169 unsigned short seg)
13170 {
13171- gate->a = (seg << 16) | (base & 0xffff);
13172- gate->b = (base & 0xffff0000) | (((0x80 | type | (dpl << 5)) & 0xff) << 8);
13173+ gate->gate.offset_low = base;
13174+ gate->gate.seg = seg;
13175+ gate->gate.reserved = 0;
13176+ gate->gate.type = type;
13177+ gate->gate.s = 0;
13178+ gate->gate.dpl = dpl;
13179+ gate->gate.p = 1;
13180+ gate->gate.offset_high = base >> 16;
13181 }
13182
13183 #endif
13184@@ -118,12 +121,16 @@ static inline void paravirt_free_ldt(struct desc_struct *ldt, unsigned entries)
13185
13186 static inline void native_write_idt_entry(gate_desc *idt, int entry, const gate_desc *gate)
13187 {
13188+ pax_open_kernel();
13189 memcpy(&idt[entry], gate, sizeof(*gate));
13190+ pax_close_kernel();
13191 }
13192
13193 static inline void native_write_ldt_entry(struct desc_struct *ldt, int entry, const void *desc)
13194 {
13195+ pax_open_kernel();
13196 memcpy(&ldt[entry], desc, 8);
13197+ pax_close_kernel();
13198 }
13199
13200 static inline void
13201@@ -137,7 +144,9 @@ native_write_gdt_entry(struct desc_struct *gdt, int entry, const void *desc, int
13202 default: size = sizeof(*gdt); break;
13203 }
13204
13205+ pax_open_kernel();
13206 memcpy(&gdt[entry], desc, size);
13207+ pax_close_kernel();
13208 }
13209
13210 static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
13211@@ -210,7 +219,9 @@ static inline void native_set_ldt(const void *addr, unsigned int entries)
13212
13213 static inline void native_load_tr_desc(void)
13214 {
13215+ pax_open_kernel();
13216 asm volatile("ltr %w0"::"q" (GDT_ENTRY_TSS*8));
13217+ pax_close_kernel();
13218 }
13219
13220 static inline void native_load_gdt(const struct desc_ptr *dtr)
13221@@ -247,8 +258,10 @@ static inline void native_load_tls(struct thread_struct *t, unsigned int cpu)
13222 struct desc_struct *gdt = get_cpu_gdt_table(cpu);
13223 unsigned int i;
13224
13225+ pax_open_kernel();
13226 for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
13227 gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
13228+ pax_close_kernel();
13229 }
13230
13231 #define _LDT_empty(info) \
13232@@ -287,7 +300,7 @@ static inline void load_LDT(mm_context_t *pc)
13233 preempt_enable();
13234 }
13235
13236-static inline unsigned long get_desc_base(const struct desc_struct *desc)
13237+static inline unsigned long __intentional_overflow(-1) get_desc_base(const struct desc_struct *desc)
13238 {
13239 return (unsigned)(desc->base0 | ((desc->base1) << 16) | ((desc->base2) << 24));
13240 }
13241@@ -311,7 +324,7 @@ static inline void set_desc_limit(struct desc_struct *desc, unsigned long limit)
13242 }
13243
13244 #ifdef CONFIG_X86_64
13245-static inline void set_nmi_gate(int gate, void *addr)
13246+static inline void set_nmi_gate(int gate, const void *addr)
13247 {
13248 gate_desc s;
13249
13250@@ -320,7 +333,7 @@ static inline void set_nmi_gate(int gate, void *addr)
13251 }
13252 #endif
13253
13254-static inline void _set_gate(int gate, unsigned type, void *addr,
13255+static inline void _set_gate(int gate, unsigned type, const void *addr,
13256 unsigned dpl, unsigned ist, unsigned seg)
13257 {
13258 gate_desc s;
13259@@ -339,7 +352,7 @@ static inline void _set_gate(int gate, unsigned type, void *addr,
13260 * Pentium F0 0F bugfix can have resulted in the mapped
13261 * IDT being write-protected.
13262 */
13263-static inline void set_intr_gate(unsigned int n, void *addr)
13264+static inline void set_intr_gate(unsigned int n, const void *addr)
13265 {
13266 BUG_ON((unsigned)n > 0xFF);
13267 _set_gate(n, GATE_INTERRUPT, addr, 0, 0, __KERNEL_CS);
13268@@ -369,19 +382,19 @@ static inline void alloc_intr_gate(unsigned int n, void *addr)
13269 /*
13270 * This routine sets up an interrupt gate at directory privilege level 3.
13271 */
13272-static inline void set_system_intr_gate(unsigned int n, void *addr)
13273+static inline void set_system_intr_gate(unsigned int n, const void *addr)
13274 {
13275 BUG_ON((unsigned)n > 0xFF);
13276 _set_gate(n, GATE_INTERRUPT, addr, 0x3, 0, __KERNEL_CS);
13277 }
13278
13279-static inline void set_system_trap_gate(unsigned int n, void *addr)
13280+static inline void set_system_trap_gate(unsigned int n, const void *addr)
13281 {
13282 BUG_ON((unsigned)n > 0xFF);
13283 _set_gate(n, GATE_TRAP, addr, 0x3, 0, __KERNEL_CS);
13284 }
13285
13286-static inline void set_trap_gate(unsigned int n, void *addr)
13287+static inline void set_trap_gate(unsigned int n, const void *addr)
13288 {
13289 BUG_ON((unsigned)n > 0xFF);
13290 _set_gate(n, GATE_TRAP, addr, 0, 0, __KERNEL_CS);
13291@@ -390,19 +403,31 @@ static inline void set_trap_gate(unsigned int n, void *addr)
13292 static inline void set_task_gate(unsigned int n, unsigned int gdt_entry)
13293 {
13294 BUG_ON((unsigned)n > 0xFF);
13295- _set_gate(n, GATE_TASK, (void *)0, 0, 0, (gdt_entry<<3));
13296+ _set_gate(n, GATE_TASK, (const void *)0, 0, 0, (gdt_entry<<3));
13297 }
13298
13299-static inline void set_intr_gate_ist(int n, void *addr, unsigned ist)
13300+static inline void set_intr_gate_ist(int n, const void *addr, unsigned ist)
13301 {
13302 BUG_ON((unsigned)n > 0xFF);
13303 _set_gate(n, GATE_INTERRUPT, addr, 0, ist, __KERNEL_CS);
13304 }
13305
13306-static inline void set_system_intr_gate_ist(int n, void *addr, unsigned ist)
13307+static inline void set_system_intr_gate_ist(int n, const void *addr, unsigned ist)
13308 {
13309 BUG_ON((unsigned)n > 0xFF);
13310 _set_gate(n, GATE_INTERRUPT, addr, 0x3, ist, __KERNEL_CS);
13311 }
13312
13313+#ifdef CONFIG_X86_32
13314+static inline void set_user_cs(unsigned long base, unsigned long limit, int cpu)
13315+{
13316+ struct desc_struct d;
13317+
13318+ if (likely(limit))
13319+ limit = (limit - 1UL) >> PAGE_SHIFT;
13320+ pack_descriptor(&d, base, limit, 0xFB, 0xC);
13321+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_DEFAULT_USER_CS, &d, DESCTYPE_S);
13322+}
13323+#endif
13324+
13325 #endif /* _ASM_X86_DESC_H */
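
With KERNEXEC the GDT, IDT and LDT live in read-only memory, so every
descriptor writer above is bracketed by pax_open_kernel()/pax_close_kernel(),
which briefly lift write protection (for instance by toggling CR0.WP) around
the memcpy. The shape of that pattern, sketched with a userspace mprotect()
analogue on a hypothetical page-aligned, normally read-only table:

#include <sys/mman.h>
#include <string.h>

static void write_protected_entry(void *table, size_t off,
				  const void *desc, size_t len)
{
	mprotect(table, 4096, PROT_READ | PROT_WRITE);	/* pax_open_kernel() */
	memcpy((char *)table + off, desc, len);
	mprotect(table, 4096, PROT_READ);		/* pax_close_kernel() */
}
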
13326diff --git a/arch/x86/include/asm/desc_defs.h b/arch/x86/include/asm/desc_defs.h
13327index 278441f..b95a174 100644
13328--- a/arch/x86/include/asm/desc_defs.h
13329+++ b/arch/x86/include/asm/desc_defs.h
13330@@ -31,6 +31,12 @@ struct desc_struct {
13331 unsigned base1: 8, type: 4, s: 1, dpl: 2, p: 1;
13332 unsigned limit: 4, avl: 1, l: 1, d: 1, g: 1, base2: 8;
13333 };
13334+ struct {
13335+ u16 offset_low;
13336+ u16 seg;
13337+ unsigned reserved: 8, type: 4, s: 1, dpl: 2, p: 1;
13338+ unsigned offset_high: 16;
13339+ } gate;
13340 };
13341 } __attribute__((packed));
13342
13343diff --git a/arch/x86/include/asm/div64.h b/arch/x86/include/asm/div64.h
13344index ced283a..ffe04cc 100644
13345--- a/arch/x86/include/asm/div64.h
13346+++ b/arch/x86/include/asm/div64.h
13347@@ -39,7 +39,7 @@
13348 __mod; \
13349 })
13350
13351-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
13352+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
13353 {
13354 union {
13355 u64 v64;
13356diff --git a/arch/x86/include/asm/elf.h b/arch/x86/include/asm/elf.h
13357index 9c999c1..3860cb8 100644
13358--- a/arch/x86/include/asm/elf.h
13359+++ b/arch/x86/include/asm/elf.h
13360@@ -243,7 +243,25 @@ extern int force_personality32;
13361 the loader. We need to make sure that it is out of the way of the program
13362 that it will "exec", and that there is sufficient room for the brk. */
13363
13364+#ifdef CONFIG_PAX_SEGMEXEC
13365+#define ELF_ET_DYN_BASE ((current->mm->pax_flags & MF_PAX_SEGMEXEC) ? SEGMEXEC_TASK_SIZE/3*2 : TASK_SIZE/3*2)
13366+#else
13367 #define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
13368+#endif
13369+
13370+#ifdef CONFIG_PAX_ASLR
13371+#ifdef CONFIG_X86_32
13372+#define PAX_ELF_ET_DYN_BASE 0x10000000UL
13373+
13374+#define PAX_DELTA_MMAP_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
13375+#define PAX_DELTA_STACK_LEN (current->mm->pax_flags & MF_PAX_SEGMEXEC ? 15 : 16)
13376+#else
13377+#define PAX_ELF_ET_DYN_BASE 0x400000UL
13378+
13379+#define PAX_DELTA_MMAP_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
13380+#define PAX_DELTA_STACK_LEN ((test_thread_flag(TIF_ADDR32)) ? 16 : TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3)
13381+#endif
13382+#endif
13383
13384 /* This yields a mask that user programs can use to figure out what
13385 instruction set this CPU supports. This could be done in user space,
13386@@ -296,16 +314,12 @@ do { \
13387
13388 #define ARCH_DLINFO \
13389 do { \
13390- if (vdso_enabled) \
13391- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
13392- (unsigned long)current->mm->context.vdso); \
13393+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
13394 } while (0)
13395
13396 #define ARCH_DLINFO_X32 \
13397 do { \
13398- if (vdso_enabled) \
13399- NEW_AUX_ENT(AT_SYSINFO_EHDR, \
13400- (unsigned long)current->mm->context.vdso); \
13401+ NEW_AUX_ENT(AT_SYSINFO_EHDR, current->mm->context.vdso); \
13402 } while (0)
13403
13404 #define AT_SYSINFO 32
13405@@ -320,7 +334,7 @@ else \
13406
13407 #endif /* !CONFIG_X86_32 */
13408
13409-#define VDSO_CURRENT_BASE ((unsigned long)current->mm->context.vdso)
13410+#define VDSO_CURRENT_BASE (current->mm->context.vdso)
13411
13412 #define VDSO_ENTRY \
13413 ((unsigned long)VDSO32_SYMBOL(VDSO_CURRENT_BASE, vsyscall))
13414@@ -336,9 +350,6 @@ extern int x32_setup_additional_pages(struct linux_binprm *bprm,
13415 extern int syscall32_setup_pages(struct linux_binprm *, int exstack);
13416 #define compat_arch_setup_additional_pages syscall32_setup_pages
13417
13418-extern unsigned long arch_randomize_brk(struct mm_struct *mm);
13419-#define arch_randomize_brk arch_randomize_brk
13420-
13421 /*
13422 * True on X86_32 or when emulating IA32 on X86_64
13423 */
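
PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN give the number of random bits PaX ASLR applies to the mmap and stack bases: 16 on plain i386, 15 under SEGMEXEC (the usable address space is halved), and up to TASK_SIZE_MAX_SHIFT - PAGE_SHIFT - 3 bits for native 64-bit tasks. A hedged sketch of how such a bit count is typically consumed (pax_delta and the random argument are stand-in names, not the patch's own helpers):

/* mask `len` random bits, then shift to page granularity */
static unsigned long pax_delta(unsigned long random, unsigned int len)
{
    return (random & ((1UL << len) - 1)) << 12 /* PAGE_SHIFT */;
}
/* e.g. len = 16 yields up to 64K pages (~256MB) of base jitter */
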
13424diff --git a/arch/x86/include/asm/emergency-restart.h b/arch/x86/include/asm/emergency-restart.h
13425index 75ce3f4..882e801 100644
13426--- a/arch/x86/include/asm/emergency-restart.h
13427+++ b/arch/x86/include/asm/emergency-restart.h
13428@@ -13,6 +13,6 @@ enum reboot_type {
13429
13430 extern enum reboot_type reboot_type;
13431
13432-extern void machine_emergency_restart(void);
13433+extern void machine_emergency_restart(void) __noreturn;
13434
13435 #endif /* _ASM_X86_EMERGENCY_RESTART_H */
13436diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
13437index e25cc33..425d099 100644
13438--- a/arch/x86/include/asm/fpu-internal.h
13439+++ b/arch/x86/include/asm/fpu-internal.h
13440@@ -127,7 +127,9 @@ static inline void sanitize_i387_state(struct task_struct *tsk)
13441 ({ \
13442 int err; \
13443 asm volatile(ASM_STAC "\n" \
13444- "1:" #insn "\n\t" \
13445+ "1:" \
13446+ __copyuser_seg \
13447+ #insn "\n\t" \
13448 "2: " ASM_CLAC "\n" \
13449 ".section .fixup,\"ax\"\n" \
13450 "3: movl $-1,%[err]\n" \
13451@@ -300,7 +302,7 @@ static inline int restore_fpu_checking(struct task_struct *tsk)
13452 "emms\n\t" /* clear stack tags */
13453 "fildl %P[addr]", /* set F?P to defined value */
13454 X86_FEATURE_FXSAVE_LEAK,
13455- [addr] "m" (tsk->thread.fpu.has_fpu));
13456+ [addr] "m" (init_tss[raw_smp_processor_id()].x86_tss.sp0));
13457
13458 return fpu_restore_checking(&tsk->thread.fpu);
13459 }
13460diff --git a/arch/x86/include/asm/futex.h b/arch/x86/include/asm/futex.h
13461index be27ba1..8f13ff9 100644
13462--- a/arch/x86/include/asm/futex.h
13463+++ b/arch/x86/include/asm/futex.h
13464@@ -12,6 +12,7 @@
13465 #include <asm/smap.h>
13466
13467 #define __futex_atomic_op1(insn, ret, oldval, uaddr, oparg) \
13468+ typecheck(u32 __user *, uaddr); \
13469 asm volatile("\t" ASM_STAC "\n" \
13470 "1:\t" insn "\n" \
13471 "2:\t" ASM_CLAC "\n" \
13472@@ -20,15 +21,16 @@
13473 "\tjmp\t2b\n" \
13474 "\t.previous\n" \
13475 _ASM_EXTABLE(1b, 3b) \
13476- : "=r" (oldval), "=r" (ret), "+m" (*uaddr) \
13477+ : "=r" (oldval), "=r" (ret), "+m" (*(u32 __user *)____m(uaddr)) \
13478 : "i" (-EFAULT), "0" (oparg), "1" (0))
13479
13480 #define __futex_atomic_op2(insn, ret, oldval, uaddr, oparg) \
13481+ typecheck(u32 __user *, uaddr); \
13482 asm volatile("\t" ASM_STAC "\n" \
13483 "1:\tmovl %2, %0\n" \
13484 "\tmovl\t%0, %3\n" \
13485 "\t" insn "\n" \
13486- "2:\t" LOCK_PREFIX "cmpxchgl %3, %2\n" \
13487+ "2:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %3, %2\n" \
13488 "\tjnz\t1b\n" \
13489 "3:\t" ASM_CLAC "\n" \
13490 "\t.section .fixup,\"ax\"\n" \
13491@@ -38,7 +40,7 @@
13492 _ASM_EXTABLE(1b, 4b) \
13493 _ASM_EXTABLE(2b, 4b) \
13494 : "=&a" (oldval), "=&r" (ret), \
13495- "+m" (*uaddr), "=&r" (tem) \
13496+ "+m" (*(u32 __user *)____m(uaddr)), "=&r" (tem) \
13497 : "r" (oparg), "i" (-EFAULT), "1" (0))
13498
13499 static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
13500@@ -59,10 +61,10 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
13501
13502 switch (op) {
13503 case FUTEX_OP_SET:
13504- __futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
13505+ __futex_atomic_op1(__copyuser_seg"xchgl %0, %2", ret, oldval, uaddr, oparg);
13506 break;
13507 case FUTEX_OP_ADD:
13508- __futex_atomic_op1(LOCK_PREFIX "xaddl %0, %2", ret, oldval,
13509+ __futex_atomic_op1(LOCK_PREFIX __copyuser_seg"xaddl %0, %2", ret, oldval,
13510 uaddr, oparg);
13511 break;
13512 case FUTEX_OP_OR:
13513@@ -116,14 +118,14 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
13514 return -EFAULT;
13515
13516 asm volatile("\t" ASM_STAC "\n"
13517- "1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n"
13518+ "1:\t" LOCK_PREFIX __copyuser_seg"cmpxchgl %4, %2\n"
13519 "2:\t" ASM_CLAC "\n"
13520 "\t.section .fixup, \"ax\"\n"
13521 "3:\tmov %3, %0\n"
13522 "\tjmp 2b\n"
13523 "\t.previous\n"
13524 _ASM_EXTABLE(1b, 3b)
13525- : "+r" (ret), "=a" (oldval), "+m" (*uaddr)
13526+ : "+r" (ret), "=a" (oldval), "+m" (*(u32 __user *)____m(uaddr))
13527 : "i" (-EFAULT), "r" (newval), "1" (oldval)
13528 : "memory"
13529 );
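
All four futex accesses are rewritten to go through __copyuser_seg and the ____m() wrapper, which is PAX_MEMORY_UDEREF's contract: on i386 userland is only reachable through a dedicated data segment, so each user-memory instruction gets a segment-override prefix; without UDEREF both helpers are no-ops. A hedged reconstruction from memory — the real definitions live in the patched uaccess headers:

#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
#define __copyuser_seg "gs;"   /* prefix user accesses with the user segment */
#else
#define __copyuser_seg         /* expands to nothing otherwise */
#endif
/* ____m(uaddr) is an identity cast in the simple case; under amd64
 * UDEREF it is believed to rebase the pointer into the user shadow
 * area (pax_user_shadow_base, declared later in this patch). */
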
13530diff --git a/arch/x86/include/asm/hw_irq.h b/arch/x86/include/asm/hw_irq.h
13531index 1da97ef..9c2ebff 100644
13532--- a/arch/x86/include/asm/hw_irq.h
13533+++ b/arch/x86/include/asm/hw_irq.h
13534@@ -148,8 +148,8 @@ extern void setup_ioapic_dest(void);
13535 extern void enable_IO_APIC(void);
13536
13537 /* Statistics */
13538-extern atomic_t irq_err_count;
13539-extern atomic_t irq_mis_count;
13540+extern atomic_unchecked_t irq_err_count;
13541+extern atomic_unchecked_t irq_mis_count;
13542
13543 /* EISA */
13544 extern void eisa_set_level_irq(unsigned int irq);
13545diff --git a/arch/x86/include/asm/i8259.h b/arch/x86/include/asm/i8259.h
13546index a203659..9889f1c 100644
13547--- a/arch/x86/include/asm/i8259.h
13548+++ b/arch/x86/include/asm/i8259.h
13549@@ -62,7 +62,7 @@ struct legacy_pic {
13550 void (*init)(int auto_eoi);
13551 int (*irq_pending)(unsigned int irq);
13552 void (*make_irq)(unsigned int irq);
13553-};
13554+} __do_const;
13555
13556 extern struct legacy_pic *legacy_pic;
13557 extern struct legacy_pic null_legacy_pic;
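
__do_const is a hook for grsecurity's constify GCC plugin: structures consisting (mostly) of function pointers, like struct legacy_pic here and struct nmiaction below, have all their instances forced into read-only memory so they cannot be repointed at runtime; __no_const (used on the paravirt ops further down) opts a struct out when instances must stay writable. A hedged sketch of the plumbing — attribute and guard names as recalled, so treat them as assumptions:

#ifdef CONSTIFY_PLUGIN
#define __do_const __attribute__((do_const))  /* force all instances const */
#define __no_const __attribute__((no_const))  /* exempt from constification */
#else
#define __do_const
#define __no_const
#endif
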
13558diff --git a/arch/x86/include/asm/io.h b/arch/x86/include/asm/io.h
13559index d8e8eef..1765f78 100644
13560--- a/arch/x86/include/asm/io.h
13561+++ b/arch/x86/include/asm/io.h
13562@@ -51,12 +51,12 @@ static inline void name(type val, volatile void __iomem *addr) \
13563 "m" (*(volatile type __force *)addr) barrier); }
13564
13565 build_mmio_read(readb, "b", unsigned char, "=q", :"memory")
13566-build_mmio_read(readw, "w", unsigned short, "=r", :"memory")
13567-build_mmio_read(readl, "l", unsigned int, "=r", :"memory")
13568+build_mmio_read(__intentional_overflow(-1) readw, "w", unsigned short, "=r", :"memory")
13569+build_mmio_read(__intentional_overflow(-1) readl, "l", unsigned int, "=r", :"memory")
13570
13571 build_mmio_read(__readb, "b", unsigned char, "=q", )
13572-build_mmio_read(__readw, "w", unsigned short, "=r", )
13573-build_mmio_read(__readl, "l", unsigned int, "=r", )
13574+build_mmio_read(__intentional_overflow(-1) __readw, "w", unsigned short, "=r", )
13575+build_mmio_read(__intentional_overflow(-1) __readl, "l", unsigned int, "=r", )
13576
13577 build_mmio_write(writeb, "b", unsigned char, "q", :"memory")
13578 build_mmio_write(writew, "w", unsigned short, "r", :"memory")
13579@@ -184,7 +184,7 @@ static inline void __iomem *ioremap(resource_size_t offset, unsigned long size)
13580 return ioremap_nocache(offset, size);
13581 }
13582
13583-extern void iounmap(volatile void __iomem *addr);
13584+extern void iounmap(const volatile void __iomem *addr);
13585
13586 extern void set_iounmap_nonlazy(void);
13587
13588@@ -194,6 +194,17 @@ extern void set_iounmap_nonlazy(void);
13589
13590 #include <linux/vmalloc.h>
13591
13592+#define ARCH_HAS_VALID_PHYS_ADDR_RANGE
13593+static inline int valid_phys_addr_range(unsigned long addr, size_t count)
13594+{
13595+ return ((addr + count + PAGE_SIZE - 1) >> PAGE_SHIFT) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
13596+}
13597+
13598+static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t count)
13599+{
13600+ return (pfn + (count >> PAGE_SHIFT)) < (1ULL << (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) ? 1 : 0;
13601+}
13602+
13603 /*
13604 * Convert a virtual cached pointer to an uncached pointer
13605 */
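
The two new helpers back ARCH_HAS_VALID_PHYS_ADDR_RANGE: physical accesses (the /dev/mem paths use these checks) must end below what the CPU reports as its physical address width, and the `+ PAGE_SIZE - 1` rounds the end of the range up to a whole page first. A worked example of the bound:

#include <stdio.h>

int main(void)
{
    /* e.g. boot_cpu_data.x86_phys_bits == 36 with 4KB pages */
    unsigned int x86_phys_bits = 36, page_shift = 12;
    unsigned long long max_pfn = 1ULL << (x86_phys_bits - page_shift);
    printf("first invalid pfn: %llu (%llu GB addressable)\n",
           max_pfn, (max_pfn << page_shift) >> 30);   /* 16777216, 64 GB */
    return 0;
}
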
13606diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
13607index bba3cf8..06bc8da 100644
13608--- a/arch/x86/include/asm/irqflags.h
13609+++ b/arch/x86/include/asm/irqflags.h
13610@@ -141,6 +141,11 @@ static inline notrace unsigned long arch_local_irq_save(void)
13611 sti; \
13612 sysexit
13613
13614+#define GET_CR0_INTO_RDI mov %cr0, %rdi
13615+#define SET_RDI_INTO_CR0 mov %rdi, %cr0
13616+#define GET_CR3_INTO_RDI mov %cr3, %rdi
13617+#define SET_RDI_INTO_CR3 mov %rdi, %cr3
13618+
13619 #else
13620 #define INTERRUPT_RETURN iret
13621 #define ENABLE_INTERRUPTS_SYSEXIT sti; sysexit
13622diff --git a/arch/x86/include/asm/kprobes.h b/arch/x86/include/asm/kprobes.h
13623index 5a6d287..f815789 100644
13624--- a/arch/x86/include/asm/kprobes.h
13625+++ b/arch/x86/include/asm/kprobes.h
13626@@ -38,13 +38,8 @@ typedef u8 kprobe_opcode_t;
13627 #define RELATIVEJUMP_SIZE 5
13628 #define RELATIVECALL_OPCODE 0xe8
13629 #define RELATIVE_ADDR_SIZE 4
13630-#define MAX_STACK_SIZE 64
13631-#define MIN_STACK_SIZE(ADDR) \
13632- (((MAX_STACK_SIZE) < (((unsigned long)current_thread_info()) + \
13633- THREAD_SIZE - (unsigned long)(ADDR))) \
13634- ? (MAX_STACK_SIZE) \
13635- : (((unsigned long)current_thread_info()) + \
13636- THREAD_SIZE - (unsigned long)(ADDR)))
13637+#define MAX_STACK_SIZE 64UL
13638+#define MIN_STACK_SIZE(ADDR) min(MAX_STACK_SIZE, current->thread.sp0 - (unsigned long)(ADDR))
13639
13640 #define flush_insn_slot(p) do { } while (0)
13641
13642diff --git a/arch/x86/include/asm/local.h b/arch/x86/include/asm/local.h
13643index 2d89e39..baee879 100644
13644--- a/arch/x86/include/asm/local.h
13645+++ b/arch/x86/include/asm/local.h
13646@@ -10,33 +10,97 @@ typedef struct {
13647 atomic_long_t a;
13648 } local_t;
13649
13650+typedef struct {
13651+ atomic_long_unchecked_t a;
13652+} local_unchecked_t;
13653+
13654 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
13655
13656 #define local_read(l) atomic_long_read(&(l)->a)
13657+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
13658 #define local_set(l, i) atomic_long_set(&(l)->a, (i))
13659+#define local_set_unchecked(l, i) atomic_long_set_unchecked(&(l)->a, (i))
13660
13661 static inline void local_inc(local_t *l)
13662 {
13663- asm volatile(_ASM_INC "%0"
13664+ asm volatile(_ASM_INC "%0\n"
13665+
13666+#ifdef CONFIG_PAX_REFCOUNT
13667+ "jno 0f\n"
13668+ _ASM_DEC "%0\n"
13669+ "int $4\n0:\n"
13670+ _ASM_EXTABLE(0b, 0b)
13671+#endif
13672+
13673+ : "+m" (l->a.counter));
13674+}
13675+
13676+static inline void local_inc_unchecked(local_unchecked_t *l)
13677+{
13678+ asm volatile(_ASM_INC "%0\n"
13679 : "+m" (l->a.counter));
13680 }
13681
13682 static inline void local_dec(local_t *l)
13683 {
13684- asm volatile(_ASM_DEC "%0"
13685+ asm volatile(_ASM_DEC "%0\n"
13686+
13687+#ifdef CONFIG_PAX_REFCOUNT
13688+ "jno 0f\n"
13689+ _ASM_INC "%0\n"
13690+ "int $4\n0:\n"
13691+ _ASM_EXTABLE(0b, 0b)
13692+#endif
13693+
13694+ : "+m" (l->a.counter));
13695+}
13696+
13697+static inline void local_dec_unchecked(local_unchecked_t *l)
13698+{
13699+ asm volatile(_ASM_DEC "%0\n"
13700 : "+m" (l->a.counter));
13701 }
13702
13703 static inline void local_add(long i, local_t *l)
13704 {
13705- asm volatile(_ASM_ADD "%1,%0"
13706+ asm volatile(_ASM_ADD "%1,%0\n"
13707+
13708+#ifdef CONFIG_PAX_REFCOUNT
13709+ "jno 0f\n"
13710+ _ASM_SUB "%1,%0\n"
13711+ "int $4\n0:\n"
13712+ _ASM_EXTABLE(0b, 0b)
13713+#endif
13714+
13715+ : "+m" (l->a.counter)
13716+ : "ir" (i));
13717+}
13718+
13719+static inline void local_add_unchecked(long i, local_unchecked_t *l)
13720+{
13721+ asm volatile(_ASM_ADD "%1,%0\n"
13722 : "+m" (l->a.counter)
13723 : "ir" (i));
13724 }
13725
13726 static inline void local_sub(long i, local_t *l)
13727 {
13728- asm volatile(_ASM_SUB "%1,%0"
13729+ asm volatile(_ASM_SUB "%1,%0\n"
13730+
13731+#ifdef CONFIG_PAX_REFCOUNT
13732+ "jno 0f\n"
13733+ _ASM_ADD "%1,%0\n"
13734+ "int $4\n0:\n"
13735+ _ASM_EXTABLE(0b, 0b)
13736+#endif
13737+
13738+ : "+m" (l->a.counter)
13739+ : "ir" (i));
13740+}
13741+
13742+static inline void local_sub_unchecked(long i, local_unchecked_t *l)
13743+{
13744+ asm volatile(_ASM_SUB "%1,%0\n"
13745 : "+m" (l->a.counter)
13746 : "ir" (i));
13747 }
13748@@ -54,7 +118,16 @@ static inline int local_sub_and_test(long i, local_t *l)
13749 {
13750 unsigned char c;
13751
13752- asm volatile(_ASM_SUB "%2,%0; sete %1"
13753+ asm volatile(_ASM_SUB "%2,%0\n"
13754+
13755+#ifdef CONFIG_PAX_REFCOUNT
13756+ "jno 0f\n"
13757+ _ASM_ADD "%2,%0\n"
13758+ "int $4\n0:\n"
13759+ _ASM_EXTABLE(0b, 0b)
13760+#endif
13761+
13762+ "sete %1\n"
13763 : "+m" (l->a.counter), "=qm" (c)
13764 : "ir" (i) : "memory");
13765 return c;
13766@@ -72,7 +145,16 @@ static inline int local_dec_and_test(local_t *l)
13767 {
13768 unsigned char c;
13769
13770- asm volatile(_ASM_DEC "%0; sete %1"
13771+ asm volatile(_ASM_DEC "%0\n"
13772+
13773+#ifdef CONFIG_PAX_REFCOUNT
13774+ "jno 0f\n"
13775+ _ASM_INC "%0\n"
13776+ "int $4\n0:\n"
13777+ _ASM_EXTABLE(0b, 0b)
13778+#endif
13779+
13780+ "sete %1\n"
13781 : "+m" (l->a.counter), "=qm" (c)
13782 : : "memory");
13783 return c != 0;
13784@@ -90,7 +172,16 @@ static inline int local_inc_and_test(local_t *l)
13785 {
13786 unsigned char c;
13787
13788- asm volatile(_ASM_INC "%0; sete %1"
13789+ asm volatile(_ASM_INC "%0\n"
13790+
13791+#ifdef CONFIG_PAX_REFCOUNT
13792+ "jno 0f\n"
13793+ _ASM_DEC "%0\n"
13794+ "int $4\n0:\n"
13795+ _ASM_EXTABLE(0b, 0b)
13796+#endif
13797+
13798+ "sete %1\n"
13799 : "+m" (l->a.counter), "=qm" (c)
13800 : : "memory");
13801 return c != 0;
13802@@ -109,7 +200,16 @@ static inline int local_add_negative(long i, local_t *l)
13803 {
13804 unsigned char c;
13805
13806- asm volatile(_ASM_ADD "%2,%0; sets %1"
13807+ asm volatile(_ASM_ADD "%2,%0\n"
13808+
13809+#ifdef CONFIG_PAX_REFCOUNT
13810+ "jno 0f\n"
13811+ _ASM_SUB "%2,%0\n"
13812+ "int $4\n0:\n"
13813+ _ASM_EXTABLE(0b, 0b)
13814+#endif
13815+
13816+ "sets %1\n"
13817 : "+m" (l->a.counter), "=qm" (c)
13818 : "ir" (i) : "memory");
13819 return c;
13820@@ -125,6 +225,30 @@ static inline int local_add_negative(long i, local_t *l)
13821 static inline long local_add_return(long i, local_t *l)
13822 {
13823 long __i = i;
13824+ asm volatile(_ASM_XADD "%0, %1\n"
13825+
13826+#ifdef CONFIG_PAX_REFCOUNT
13827+ "jno 0f\n"
13828+ _ASM_MOV "%0,%1\n"
13829+ "int $4\n0:\n"
13830+ _ASM_EXTABLE(0b, 0b)
13831+#endif
13832+
13833+ : "+r" (i), "+m" (l->a.counter)
13834+ : : "memory");
13835+ return i + __i;
13836+}
13837+
13838+/**
13839+ * local_add_return_unchecked - add and return
13840+ * @i: integer value to add
13841+ * @l: pointer to type local_unchecked_t
13842+ *
13843+ * Atomically adds @i to @l and returns @i + @l
13844+ */
13845+static inline long local_add_return_unchecked(long i, local_unchecked_t *l)
13846+{
13847+ long __i = i;
13848 asm volatile(_ASM_XADD "%0, %1;"
13849 : "+r" (i), "+m" (l->a.counter)
13850 : : "memory");
13851@@ -141,6 +265,8 @@ static inline long local_sub_return(long i, local_t *l)
13852
13853 #define local_cmpxchg(l, o, n) \
13854 (cmpxchg_local(&((l)->a.counter), (o), (n)))
13855+#define local_cmpxchg_unchecked(l, o, n) \
13856+ (cmpxchg_local(&((l)->a.counter), (o), (n)))
13857 /* Always has a lock prefix */
13858 #define local_xchg(l, n) (xchg(&((l)->a.counter), (n)))
13859
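
Every local_t mutator above now carries the PAX_REFCOUNT pattern: perform the arithmetic, jump past the recovery code with "jno" if the signed result did not overflow, otherwise undo the operation and execute "int $4" so the overflow-exception handler can terminate the offender; the new *_unchecked variants keep plain wrap-around semantics for counters where wrapping is harmless (cf. irq_err_count earlier). A runnable userspace sketch of the same add/jno/undo sequence — it raises a flag instead of "int $4" so it can run unprivileged:

#include <stdio.h>
#include <limits.h>

static long counter = LONG_MAX;   /* the next increment overflows */

/* add i to *l; on signed overflow, roll back and report it */
static int checked_add(long i, long *l)
{
    int overflowed = 0;
    asm volatile("add %2, %0\n\t"
                 "jno 1f\n\t"
                 "sub %2, %0\n\t"       /* undo, exactly as the hunks do */
                 "movl $1, %1\n"        /* stand-in for the int $4 trap */
                 "1:"
                 : "+m" (*l), "+r" (overflowed)
                 : "r" (i));
    return overflowed;
}

int main(void)
{
    if (checked_add(1, &counter))
        printf("overflow caught, counter restored to %ld\n", counter);
    return 0;
}
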
13860diff --git a/arch/x86/include/asm/mman.h b/arch/x86/include/asm/mman.h
13861new file mode 100644
13862index 0000000..2bfd3ba
13863--- /dev/null
13864+++ b/arch/x86/include/asm/mman.h
13865@@ -0,0 +1,15 @@
13866+#ifndef _X86_MMAN_H
13867+#define _X86_MMAN_H
13868+
13869+#include <uapi/asm/mman.h>
13870+
13871+#ifdef __KERNEL__
13872+#ifndef __ASSEMBLY__
13873+#ifdef CONFIG_X86_32
13874+#define arch_mmap_check i386_mmap_check
13875+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags);
13876+#endif
13877+#endif
13878+#endif
13879+
13880+#endif /* X86_MMAN_H */
13881diff --git a/arch/x86/include/asm/mmu.h b/arch/x86/include/asm/mmu.h
13882index 5f55e69..e20bfb1 100644
13883--- a/arch/x86/include/asm/mmu.h
13884+++ b/arch/x86/include/asm/mmu.h
13885@@ -9,7 +9,7 @@
13886 * we put the segment information here.
13887 */
13888 typedef struct {
13889- void *ldt;
13890+ struct desc_struct *ldt;
13891 int size;
13892
13893 #ifdef CONFIG_X86_64
13894@@ -18,7 +18,19 @@ typedef struct {
13895 #endif
13896
13897 struct mutex lock;
13898- void *vdso;
13899+ unsigned long vdso;
13900+
13901+#ifdef CONFIG_X86_32
13902+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
13903+ unsigned long user_cs_base;
13904+ unsigned long user_cs_limit;
13905+
13906+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
13907+ cpumask_t cpu_user_cs_mask;
13908+#endif
13909+
13910+#endif
13911+#endif
13912 } mm_context_t;
13913
13914 #ifdef CONFIG_SMP
13915diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
13916index cdbf367..adb37ac 100644
13917--- a/arch/x86/include/asm/mmu_context.h
13918+++ b/arch/x86/include/asm/mmu_context.h
13919@@ -24,6 +24,18 @@ void destroy_context(struct mm_struct *mm);
13920
13921 static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
13922 {
13923+
13924+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
13925+ unsigned int i;
13926+ pgd_t *pgd;
13927+
13928+ pax_open_kernel();
13929+ pgd = get_cpu_pgd(smp_processor_id());
13930+ for (i = USER_PGD_PTRS; i < 2 * USER_PGD_PTRS; ++i)
13931+ set_pgd_batched(pgd+i, native_make_pgd(0));
13932+ pax_close_kernel();
13933+#endif
13934+
13935 #ifdef CONFIG_SMP
13936 if (this_cpu_read(cpu_tlbstate.state) == TLBSTATE_OK)
13937 this_cpu_write(cpu_tlbstate.state, TLBSTATE_LAZY);
13938@@ -34,16 +46,30 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
13939 struct task_struct *tsk)
13940 {
13941 unsigned cpu = smp_processor_id();
13942+#if defined(CONFIG_X86_32) && defined(CONFIG_SMP) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
13943+ int tlbstate = TLBSTATE_OK;
13944+#endif
13945
13946 if (likely(prev != next)) {
13947 #ifdef CONFIG_SMP
13948+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
13949+ tlbstate = this_cpu_read(cpu_tlbstate.state);
13950+#endif
13951 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
13952 this_cpu_write(cpu_tlbstate.active_mm, next);
13953 #endif
13954 cpumask_set_cpu(cpu, mm_cpumask(next));
13955
13956 /* Re-load page tables */
13957+#ifdef CONFIG_PAX_PER_CPU_PGD
13958+ pax_open_kernel();
13959+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
13960+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
13961+ pax_close_kernel();
13962+ load_cr3(get_cpu_pgd(cpu));
13963+#else
13964 load_cr3(next->pgd);
13965+#endif
13966
13967 /* stop flush ipis for the previous mm */
13968 cpumask_clear_cpu(cpu, mm_cpumask(prev));
13969@@ -53,9 +79,38 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
13970 */
13971 if (unlikely(prev->context.ldt != next->context.ldt))
13972 load_LDT_nolock(&next->context);
13973- }
13974+
13975+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
13976+ if (!(__supported_pte_mask & _PAGE_NX)) {
13977+ smp_mb__before_clear_bit();
13978+ cpu_clear(cpu, prev->context.cpu_user_cs_mask);
13979+ smp_mb__after_clear_bit();
13980+ cpu_set(cpu, next->context.cpu_user_cs_mask);
13981+ }
13982+#endif
13983+
13984+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
13985+ if (unlikely(prev->context.user_cs_base != next->context.user_cs_base ||
13986+ prev->context.user_cs_limit != next->context.user_cs_limit))
13987+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
13988 #ifdef CONFIG_SMP
13989+ else if (unlikely(tlbstate != TLBSTATE_OK))
13990+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
13991+#endif
13992+#endif
13993+
13994+ }
13995 else {
13996+
13997+#ifdef CONFIG_PAX_PER_CPU_PGD
13998+ pax_open_kernel();
13999+ __clone_user_pgds(get_cpu_pgd(cpu), next->pgd);
14000+ __shadow_user_pgds(get_cpu_pgd(cpu) + USER_PGD_PTRS, next->pgd);
14001+ pax_close_kernel();
14002+ load_cr3(get_cpu_pgd(cpu));
14003+#endif
14004+
14005+#ifdef CONFIG_SMP
14006 this_cpu_write(cpu_tlbstate.state, TLBSTATE_OK);
14007 BUG_ON(this_cpu_read(cpu_tlbstate.active_mm) != next);
14008
14009@@ -64,11 +119,28 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
14010 * tlb flush IPI delivery. We must reload CR3
14011 * to make sure to use no freed page tables.
14012 */
14013+
14014+#ifndef CONFIG_PAX_PER_CPU_PGD
14015 load_cr3(next->pgd);
14016+#endif
14017+
14018 load_LDT_nolock(&next->context);
14019+
14020+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
14021+ if (!(__supported_pte_mask & _PAGE_NX))
14022+ cpu_set(cpu, next->context.cpu_user_cs_mask);
14023+#endif
14024+
14025+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC))
14026+#ifdef CONFIG_PAX_PAGEEXEC
14027+ if (!((next->pax_flags & MF_PAX_PAGEEXEC) && (__supported_pte_mask & _PAGE_NX)))
14028+#endif
14029+ set_user_cs(next->context.user_cs_base, next->context.user_cs_limit, cpu);
14030+#endif
14031+
14032 }
14033+#endif
14034 }
14035-#endif
14036 }
14037
14038 #define activate_mm(prev, next) \
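
Under PAX_PER_CPU_PGD every CPU runs on a private top-level page table: switch_mm() copies the user half of the incoming mm's pgd into the current CPU's pgd (plus, with UDEREF on amd64, a second shadow copy of that range), then points CR3 at the per-CPU pgd instead of next->pgd; enter_lazy_tlb() above zeroes the shadow slots again. A hedged sketch of the clone step (pgd_t is a stand-in for the kernel type, and the shadow copy's flag adjustments are omitted):

typedef struct { unsigned long pgd; } pgd_t;   /* stand-in */

static void clone_user_pgds(pgd_t *dst, const pgd_t *src, unsigned int n)
{
    unsigned int i;

    for (i = 0; i < n; i++)        /* n == USER_PGD_PTRS */
        dst[i] = src[i];
}
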
14039diff --git a/arch/x86/include/asm/module.h b/arch/x86/include/asm/module.h
14040index e3b7819..b257c64 100644
14041--- a/arch/x86/include/asm/module.h
14042+++ b/arch/x86/include/asm/module.h
14043@@ -5,6 +5,7 @@
14044
14045 #ifdef CONFIG_X86_64
14046 /* X86_64 does not define MODULE_PROC_FAMILY */
14047+#define MODULE_PROC_FAMILY ""
14048 #elif defined CONFIG_M486
14049 #define MODULE_PROC_FAMILY "486 "
14050 #elif defined CONFIG_M586
14051@@ -57,8 +58,20 @@
14052 #error unknown processor family
14053 #endif
14054
14055-#ifdef CONFIG_X86_32
14056-# define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY
14057+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_BTS
14058+#define MODULE_PAX_KERNEXEC "KERNEXEC_BTS "
14059+#elif defined(CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR)
14060+#define MODULE_PAX_KERNEXEC "KERNEXEC_OR "
14061+#else
14062+#define MODULE_PAX_KERNEXEC ""
14063 #endif
14064
14065+#ifdef CONFIG_PAX_MEMORY_UDEREF
14066+#define MODULE_PAX_UDEREF "UDEREF "
14067+#else
14068+#define MODULE_PAX_UDEREF ""
14069+#endif
14070+
14071+#define MODULE_ARCH_VERMAGIC MODULE_PROC_FAMILY MODULE_PAX_KERNEXEC MODULE_PAX_UDEREF
14072+
14073 #endif /* _ASM_X86_MODULE_H */
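
Folding the KERNEXEC method and UDEREF state into MODULE_ARCH_VERMAGIC means a module built without the same PaX settings fails the vermagic check instead of loading into an incompatible kernel; defining MODULE_PROC_FAMILY as "" on x86-64 lets the same concatenation work there too. For illustration, on a 686 kernel with the "OR" KERNEXEC method and UDEREF enabled the fragment expands to:

/* illustrative expansion of the macro above */
const char *arch_vermagic = "686 " "KERNEXEC_OR " "UDEREF ";
/* i.e. "686 KERNEXEC_OR UDEREF " — any mismatch rejects the module */
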
14074diff --git a/arch/x86/include/asm/nmi.h b/arch/x86/include/asm/nmi.h
14075index 86f9301..b365cda 100644
14076--- a/arch/x86/include/asm/nmi.h
14077+++ b/arch/x86/include/asm/nmi.h
14078@@ -40,11 +40,11 @@ struct nmiaction {
14079 nmi_handler_t handler;
14080 unsigned long flags;
14081 const char *name;
14082-};
14083+} __do_const;
14084
14085 #define register_nmi_handler(t, fn, fg, n, init...) \
14086 ({ \
14087- static struct nmiaction init fn##_na = { \
14088+ static const struct nmiaction init fn##_na = { \
14089 .handler = (fn), \
14090 .name = (n), \
14091 .flags = (fg), \
14092@@ -52,7 +52,7 @@ struct nmiaction {
14093 __register_nmi_handler((t), &fn##_na); \
14094 })
14095
14096-int __register_nmi_handler(unsigned int, struct nmiaction *);
14097+int __register_nmi_handler(unsigned int, const struct nmiaction *);
14098
14099 void unregister_nmi_handler(unsigned int, const char *);
14100
14101diff --git a/arch/x86/include/asm/page.h b/arch/x86/include/asm/page.h
14102index c878924..21f4889 100644
14103--- a/arch/x86/include/asm/page.h
14104+++ b/arch/x86/include/asm/page.h
14105@@ -52,6 +52,7 @@ static inline void copy_user_page(void *to, void *from, unsigned long vaddr,
14106 __phys_addr_symbol(__phys_reloc_hide((unsigned long)(x)))
14107
14108 #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
14109+#define __early_va(x) ((void *)((unsigned long)(x)+__START_KERNEL_map - phys_base))
14110
14111 #define __boot_va(x) __va(x)
14112 #define __boot_pa(x) __pa(x)
14113diff --git a/arch/x86/include/asm/page_64.h b/arch/x86/include/asm/page_64.h
14114index 0f1ddee..e2fc3d1 100644
14115--- a/arch/x86/include/asm/page_64.h
14116+++ b/arch/x86/include/asm/page_64.h
14117@@ -7,9 +7,9 @@
14118
14119 /* duplicated to the one in bootmem.h */
14120 extern unsigned long max_pfn;
14121-extern unsigned long phys_base;
14122+extern const unsigned long phys_base;
14123
14124-static inline unsigned long __phys_addr_nodebug(unsigned long x)
14125+static inline unsigned long __intentional_overflow(-1) __phys_addr_nodebug(unsigned long x)
14126 {
14127 unsigned long y = x - __START_KERNEL_map;
14128
14129diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
14130index cfdc9ee..3f7b5d6 100644
14131--- a/arch/x86/include/asm/paravirt.h
14132+++ b/arch/x86/include/asm/paravirt.h
14133@@ -560,7 +560,7 @@ static inline pmd_t __pmd(pmdval_t val)
14134 return (pmd_t) { ret };
14135 }
14136
14137-static inline pmdval_t pmd_val(pmd_t pmd)
14138+static inline __intentional_overflow(-1) pmdval_t pmd_val(pmd_t pmd)
14139 {
14140 pmdval_t ret;
14141
14142@@ -626,6 +626,18 @@ static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
14143 val);
14144 }
14145
14146+static inline void set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
14147+{
14148+ pgdval_t val = native_pgd_val(pgd);
14149+
14150+ if (sizeof(pgdval_t) > sizeof(long))
14151+ PVOP_VCALL3(pv_mmu_ops.set_pgd_batched, pgdp,
14152+ val, (u64)val >> 32);
14153+ else
14154+ PVOP_VCALL2(pv_mmu_ops.set_pgd_batched, pgdp,
14155+ val);
14156+}
14157+
14158 static inline void pgd_clear(pgd_t *pgdp)
14159 {
14160 set_pgd(pgdp, __pgd(0));
14161@@ -710,6 +722,21 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
14162 pv_mmu_ops.set_fixmap(idx, phys, flags);
14163 }
14164
14165+#ifdef CONFIG_PAX_KERNEXEC
14166+static inline unsigned long pax_open_kernel(void)
14167+{
14168+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_open_kernel);
14169+}
14170+
14171+static inline unsigned long pax_close_kernel(void)
14172+{
14173+ return PVOP_CALL0(unsigned long, pv_mmu_ops.pax_close_kernel);
14174+}
14175+#else
14176+static inline unsigned long pax_open_kernel(void) { return 0; }
14177+static inline unsigned long pax_close_kernel(void) { return 0; }
14178+#endif
14179+
14180 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
14181
14182 static inline int arch_spin_is_locked(struct arch_spinlock *lock)
14183@@ -926,7 +953,7 @@ extern void default_banner(void);
14184
14185 #define PARA_PATCH(struct, off) ((PARAVIRT_PATCH_##struct + (off)) / 4)
14186 #define PARA_SITE(ptype, clobbers, ops) _PVSITE(ptype, clobbers, ops, .long, 4)
14187-#define PARA_INDIRECT(addr) *%cs:addr
14188+#define PARA_INDIRECT(addr) *%ss:addr
14189 #endif
14190
14191 #define INTERRUPT_RETURN \
14192@@ -1001,6 +1028,21 @@ extern void default_banner(void);
14193 PARA_SITE(PARA_PATCH(pv_cpu_ops, PV_CPU_irq_enable_sysexit), \
14194 CLBR_NONE, \
14195 jmp PARA_INDIRECT(pv_cpu_ops+PV_CPU_irq_enable_sysexit))
14196+
14197+#define GET_CR0_INTO_RDI \
14198+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0); \
14199+ mov %rax,%rdi
14200+
14201+#define SET_RDI_INTO_CR0 \
14202+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
14203+
14204+#define GET_CR3_INTO_RDI \
14205+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_read_cr3); \
14206+ mov %rax,%rdi
14207+
14208+#define SET_RDI_INTO_CR3 \
14209+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_write_cr3)
14210+
14211 #endif /* CONFIG_X86_32 */
14212
14213 #endif /* __ASSEMBLY__ */
14214diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
14215index 0db1fca..52310cc 100644
14216--- a/arch/x86/include/asm/paravirt_types.h
14217+++ b/arch/x86/include/asm/paravirt_types.h
14218@@ -84,7 +84,7 @@ struct pv_init_ops {
14219 */
14220 unsigned (*patch)(u8 type, u16 clobber, void *insnbuf,
14221 unsigned long addr, unsigned len);
14222-};
14223+} __no_const;
14224
14225
14226 struct pv_lazy_ops {
14227@@ -98,7 +98,7 @@ struct pv_time_ops {
14228 unsigned long long (*sched_clock)(void);
14229 unsigned long long (*steal_clock)(int cpu);
14230 unsigned long (*get_tsc_khz)(void);
14231-};
14232+} __no_const;
14233
14234 struct pv_cpu_ops {
14235 /* hooks for various privileged instructions */
14236@@ -192,7 +192,7 @@ struct pv_cpu_ops {
14237
14238 void (*start_context_switch)(struct task_struct *prev);
14239 void (*end_context_switch)(struct task_struct *next);
14240-};
14241+} __no_const;
14242
14243 struct pv_irq_ops {
14244 /*
14245@@ -223,7 +223,7 @@ struct pv_apic_ops {
14246 unsigned long start_eip,
14247 unsigned long start_esp);
14248 #endif
14249-};
14250+} __no_const;
14251
14252 struct pv_mmu_ops {
14253 unsigned long (*read_cr2)(void);
14254@@ -313,6 +313,7 @@ struct pv_mmu_ops {
14255 struct paravirt_callee_save make_pud;
14256
14257 void (*set_pgd)(pgd_t *pudp, pgd_t pgdval);
14258+ void (*set_pgd_batched)(pgd_t *pudp, pgd_t pgdval);
14259 #endif /* PAGETABLE_LEVELS == 4 */
14260 #endif /* PAGETABLE_LEVELS >= 3 */
14261
14262@@ -324,6 +325,12 @@ struct pv_mmu_ops {
14263 an mfn. We can tell which is which from the index. */
14264 void (*set_fixmap)(unsigned /* enum fixed_addresses */ idx,
14265 phys_addr_t phys, pgprot_t flags);
14266+
14267+#ifdef CONFIG_PAX_KERNEXEC
14268+ unsigned long (*pax_open_kernel)(void);
14269+ unsigned long (*pax_close_kernel)(void);
14270+#endif
14271+
14272 };
14273
14274 struct arch_spinlock;
14275@@ -334,7 +341,7 @@ struct pv_lock_ops {
14276 void (*spin_lock_flags)(struct arch_spinlock *lock, unsigned long flags);
14277 int (*spin_trylock)(struct arch_spinlock *lock);
14278 void (*spin_unlock)(struct arch_spinlock *lock);
14279-};
14280+} __no_const;
14281
14282 /* This contains all the paravirt structures: we get a convenient
14283 * number for each function using the offset which we use to indicate
14284diff --git a/arch/x86/include/asm/pgalloc.h b/arch/x86/include/asm/pgalloc.h
14285index b4389a4..7024269 100644
14286--- a/arch/x86/include/asm/pgalloc.h
14287+++ b/arch/x86/include/asm/pgalloc.h
14288@@ -63,6 +63,13 @@ static inline void pmd_populate_kernel(struct mm_struct *mm,
14289 pmd_t *pmd, pte_t *pte)
14290 {
14291 paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
14292+ set_pmd(pmd, __pmd(__pa(pte) | _KERNPG_TABLE));
14293+}
14294+
14295+static inline void pmd_populate_user(struct mm_struct *mm,
14296+ pmd_t *pmd, pte_t *pte)
14297+{
14298+ paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
14299 set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
14300 }
14301
14302@@ -99,12 +106,22 @@ static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
14303
14304 #ifdef CONFIG_X86_PAE
14305 extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
14306+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
14307+{
14308+ pud_populate(mm, pudp, pmd);
14309+}
14310 #else /* !CONFIG_X86_PAE */
14311 static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
14312 {
14313 paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
14314 set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
14315 }
14316+
14317+static inline void pud_populate_kernel(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
14318+{
14319+ paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
14320+ set_pud(pud, __pud(_KERNPG_TABLE | __pa(pmd)));
14321+}
14322 #endif /* CONFIG_X86_PAE */
14323
14324 #if PAGETABLE_LEVELS > 3
14325@@ -114,6 +131,12 @@ static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
14326 set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
14327 }
14328
14329+static inline void pgd_populate_kernel(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
14330+{
14331+ paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
14332+ set_pgd(pgd, __pgd(_KERNPG_TABLE | __pa(pud)));
14333+}
14334+
14335 static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
14336 {
14337 return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
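
The kernel/user populate split introduced here comes down to a single flag: page tables linked with _KERNPG_TABLE lack _PAGE_USER, so the covered mappings are never reachable at CPL 3, while the new pmd_populate_user() keeps the old _PAGE_TABLE behaviour for userland tables. For reference, the two flag sets (as defined in the kernel's pgtable_types.h) differ only in that bit:

#define _PAGE_TABLE   (_PAGE_PRESENT | _PAGE_RW | _PAGE_USER | \
                       _PAGE_ACCESSED | _PAGE_DIRTY)
#define _KERNPG_TABLE (_PAGE_PRESENT | _PAGE_RW | \
                       _PAGE_ACCESSED | _PAGE_DIRTY)
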
14338diff --git a/arch/x86/include/asm/pgtable-2level.h b/arch/x86/include/asm/pgtable-2level.h
14339index f2b489c..4f7e2e5 100644
14340--- a/arch/x86/include/asm/pgtable-2level.h
14341+++ b/arch/x86/include/asm/pgtable-2level.h
14342@@ -18,7 +18,9 @@ static inline void native_set_pte(pte_t *ptep , pte_t pte)
14343
14344 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
14345 {
14346+ pax_open_kernel();
14347 *pmdp = pmd;
14348+ pax_close_kernel();
14349 }
14350
14351 static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
14352diff --git a/arch/x86/include/asm/pgtable-3level.h b/arch/x86/include/asm/pgtable-3level.h
14353index 4cc9f2b..5fd9226 100644
14354--- a/arch/x86/include/asm/pgtable-3level.h
14355+++ b/arch/x86/include/asm/pgtable-3level.h
14356@@ -92,12 +92,16 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
14357
14358 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
14359 {
14360+ pax_open_kernel();
14361 set_64bit((unsigned long long *)(pmdp), native_pmd_val(pmd));
14362+ pax_close_kernel();
14363 }
14364
14365 static inline void native_set_pud(pud_t *pudp, pud_t pud)
14366 {
14367+ pax_open_kernel();
14368 set_64bit((unsigned long long *)(pudp), native_pud_val(pud));
14369+ pax_close_kernel();
14370 }
14371
14372 /*
14373diff --git a/arch/x86/include/asm/pgtable.h b/arch/x86/include/asm/pgtable.h
14374index 1e67223..dd6e7ea 100644
14375--- a/arch/x86/include/asm/pgtable.h
14376+++ b/arch/x86/include/asm/pgtable.h
14377@@ -44,6 +44,7 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
14378
14379 #ifndef __PAGETABLE_PUD_FOLDED
14380 #define set_pgd(pgdp, pgd) native_set_pgd(pgdp, pgd)
14381+#define set_pgd_batched(pgdp, pgd) native_set_pgd_batched(pgdp, pgd)
14382 #define pgd_clear(pgd) native_pgd_clear(pgd)
14383 #endif
14384
14385@@ -81,12 +82,51 @@ extern struct mm_struct *pgd_page_get_mm(struct page *page);
14386
14387 #define arch_end_context_switch(prev) do {} while(0)
14388
14389+#define pax_open_kernel() native_pax_open_kernel()
14390+#define pax_close_kernel() native_pax_close_kernel()
14391 #endif /* CONFIG_PARAVIRT */
14392
14393+#define __HAVE_ARCH_PAX_OPEN_KERNEL
14394+#define __HAVE_ARCH_PAX_CLOSE_KERNEL
14395+
14396+#ifdef CONFIG_PAX_KERNEXEC
14397+static inline unsigned long native_pax_open_kernel(void)
14398+{
14399+ unsigned long cr0;
14400+
14401+ preempt_disable();
14402+ barrier();
14403+ cr0 = read_cr0() ^ X86_CR0_WP;
14404+ BUG_ON(cr0 & X86_CR0_WP);
14405+ write_cr0(cr0);
14406+ return cr0 ^ X86_CR0_WP;
14407+}
14408+
14409+static inline unsigned long native_pax_close_kernel(void)
14410+{
14411+ unsigned long cr0;
14412+
14413+ cr0 = read_cr0() ^ X86_CR0_WP;
14414+ BUG_ON(!(cr0 & X86_CR0_WP));
14415+ write_cr0(cr0);
14416+ barrier();
14417+ preempt_enable_no_resched();
14418+ return cr0 ^ X86_CR0_WP;
14419+}
14420+#else
14421+static inline unsigned long native_pax_open_kernel(void) { return 0; }
14422+static inline unsigned long native_pax_close_kernel(void) { return 0; }
14423+#endif
14424+
14425 /*
14426 * The following only work if pte_present() is true.
14427 * Undefined behaviour if not..
14428 */
14429+static inline int pte_user(pte_t pte)
14430+{
14431+ return pte_val(pte) & _PAGE_USER;
14432+}
14433+
14434 static inline int pte_dirty(pte_t pte)
14435 {
14436 return pte_flags(pte) & _PAGE_DIRTY;
14437@@ -147,6 +187,11 @@ static inline unsigned long pud_pfn(pud_t pud)
14438 return (pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT;
14439 }
14440
14441+static inline unsigned long pgd_pfn(pgd_t pgd)
14442+{
14443+ return (pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT;
14444+}
14445+
14446 #define pte_page(pte) pfn_to_page(pte_pfn(pte))
14447
14448 static inline int pmd_large(pmd_t pte)
14449@@ -200,9 +245,29 @@ static inline pte_t pte_wrprotect(pte_t pte)
14450 return pte_clear_flags(pte, _PAGE_RW);
14451 }
14452
14453+static inline pte_t pte_mkread(pte_t pte)
14454+{
14455+ return __pte(pte_val(pte) | _PAGE_USER);
14456+}
14457+
14458 static inline pte_t pte_mkexec(pte_t pte)
14459 {
14460- return pte_clear_flags(pte, _PAGE_NX);
14461+#ifdef CONFIG_X86_PAE
14462+ if (__supported_pte_mask & _PAGE_NX)
14463+ return pte_clear_flags(pte, _PAGE_NX);
14464+ else
14465+#endif
14466+ return pte_set_flags(pte, _PAGE_USER);
14467+}
14468+
14469+static inline pte_t pte_exprotect(pte_t pte)
14470+{
14471+#ifdef CONFIG_X86_PAE
14472+ if (__supported_pte_mask & _PAGE_NX)
14473+ return pte_set_flags(pte, _PAGE_NX);
14474+ else
14475+#endif
14476+ return pte_clear_flags(pte, _PAGE_USER);
14477 }
14478
14479 static inline pte_t pte_mkdirty(pte_t pte)
14480@@ -394,6 +459,15 @@ pte_t *populate_extra_pte(unsigned long vaddr);
14481 #endif
14482
14483 #ifndef __ASSEMBLY__
14484+
14485+#ifdef CONFIG_PAX_PER_CPU_PGD
14486+extern pgd_t cpu_pgd[NR_CPUS][PTRS_PER_PGD];
14487+static inline pgd_t *get_cpu_pgd(unsigned int cpu)
14488+{
14489+ return cpu_pgd[cpu];
14490+}
14491+#endif
14492+
14493 #include <linux/mm_types.h>
14494 #include <linux/log2.h>
14495
14496@@ -529,7 +603,7 @@ static inline unsigned long pud_page_vaddr(pud_t pud)
14497 * Currently stuck as a macro due to indirect forward reference to
14498 * linux/mmzone.h's __section_mem_map_addr() definition:
14499 */
14500-#define pud_page(pud) pfn_to_page(pud_val(pud) >> PAGE_SHIFT)
14501+#define pud_page(pud) pfn_to_page((pud_val(pud) & PTE_PFN_MASK) >> PAGE_SHIFT)
14502
14503 /* Find an entry in the second-level page table.. */
14504 static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address)
14505@@ -569,7 +643,7 @@ static inline unsigned long pgd_page_vaddr(pgd_t pgd)
14506 * Currently stuck as a macro due to indirect forward reference to
14507 * linux/mmzone.h's __section_mem_map_addr() definition:
14508 */
14509-#define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
14510+#define pgd_page(pgd) pfn_to_page((pgd_val(pgd) & PTE_PFN_MASK) >> PAGE_SHIFT)
14511
14512 /* to find an entry in a page-table-directory. */
14513 static inline unsigned long pud_index(unsigned long address)
14514@@ -584,7 +658,7 @@ static inline pud_t *pud_offset(pgd_t *pgd, unsigned long address)
14515
14516 static inline int pgd_bad(pgd_t pgd)
14517 {
14518- return (pgd_flags(pgd) & ~_PAGE_USER) != _KERNPG_TABLE;
14519+ return (pgd_flags(pgd) & ~(_PAGE_USER | _PAGE_NX)) != _KERNPG_TABLE;
14520 }
14521
14522 static inline int pgd_none(pgd_t pgd)
14523@@ -607,7 +681,12 @@ static inline int pgd_none(pgd_t pgd)
14524 * pgd_offset() returns a (pgd_t *)
14525 * pgd_index() is used get the offset into the pgd page's array of pgd_t's;
14526 */
14527-#define pgd_offset(mm, address) ((mm)->pgd + pgd_index((address)))
14528+#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
14529+
14530+#ifdef CONFIG_PAX_PER_CPU_PGD
14531+#define pgd_offset_cpu(cpu, address) (get_cpu_pgd(cpu) + pgd_index(address))
14532+#endif
14533+
14534 /*
14535 * a shortcut which implies the use of the kernel's pgd, instead
14536 * of a process's
14537@@ -618,6 +697,22 @@ static inline int pgd_none(pgd_t pgd)
14538 #define KERNEL_PGD_BOUNDARY pgd_index(PAGE_OFFSET)
14539 #define KERNEL_PGD_PTRS (PTRS_PER_PGD - KERNEL_PGD_BOUNDARY)
14540
14541+#ifdef CONFIG_X86_32
14542+#define USER_PGD_PTRS KERNEL_PGD_BOUNDARY
14543+#else
14544+#define TASK_SIZE_MAX_SHIFT CONFIG_TASK_SIZE_MAX_SHIFT
14545+#define USER_PGD_PTRS (_AC(1,UL) << (TASK_SIZE_MAX_SHIFT - PGDIR_SHIFT))
14546+
14547+#ifdef CONFIG_PAX_MEMORY_UDEREF
14548+#ifdef __ASSEMBLY__
14549+#define pax_user_shadow_base pax_user_shadow_base(%rip)
14550+#else
14551+extern unsigned long pax_user_shadow_base;
14552+#endif
14553+#endif
14554+
14555+#endif
14556+
14557 #ifndef __ASSEMBLY__
14558
14559 extern int direct_gbpages;
14560@@ -784,11 +879,24 @@ static inline void pmdp_set_wrprotect(struct mm_struct *mm,
14561 * dst and src can be on the same page, but the range must not overlap,
14562 * and must not cross a page boundary.
14563 */
14564-static inline void clone_pgd_range(pgd_t *dst, pgd_t *src, int count)
14565+static inline void clone_pgd_range(pgd_t *dst, const pgd_t *src, int count)
14566 {
14567- memcpy(dst, src, count * sizeof(pgd_t));
14568+ pax_open_kernel();
14569+ while (count--)
14570+ *dst++ = *src++;
14571+ pax_close_kernel();
14572 }
14573
14574+#ifdef CONFIG_PAX_PER_CPU_PGD
14575+extern void __clone_user_pgds(pgd_t *dst, const pgd_t *src);
14576+#endif
14577+
14578+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
14579+extern void __shadow_user_pgds(pgd_t *dst, const pgd_t *src);
14580+#else
14581+static inline void __shadow_user_pgds(pgd_t *dst, const pgd_t *src) {}
14582+#endif
14583+
14584 #define PTE_SHIFT ilog2(PTRS_PER_PTE)
14585 static inline int page_level_shift(enum pg_level level)
14586 {
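
native_pax_open_kernel()/native_pax_close_kernel() implement KERNEXEC's write window: kernel page tables (and other normally read-only data) are written by briefly clearing CR0.WP with preemption held off, and the BUG_ONs catch unbalanced nesting. Every writer in this patch brackets its store the same way, e.g. the native_set_pmd()/native_set_pud() hunks above:

/* kernel-context fragment, not standalone: */
pax_open_kernel();    /* preempt_disable() + clear CR0.WP */
*pmdp = pmd;          /* store into the otherwise read-only table */
pax_close_kernel();   /* restore CR0.WP + preempt_enable_no_resched() */
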
14587diff --git a/arch/x86/include/asm/pgtable_32.h b/arch/x86/include/asm/pgtable_32.h
14588index 9ee3221..b979c6b 100644
14589--- a/arch/x86/include/asm/pgtable_32.h
14590+++ b/arch/x86/include/asm/pgtable_32.h
14591@@ -25,9 +25,6 @@
14592 struct mm_struct;
14593 struct vm_area_struct;
14594
14595-extern pgd_t swapper_pg_dir[1024];
14596-extern pgd_t initial_page_table[1024];
14597-
14598 static inline void pgtable_cache_init(void) { }
14599 static inline void check_pgt_cache(void) { }
14600 void paging_init(void);
14601@@ -48,6 +45,12 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
14602 # include <asm/pgtable-2level.h>
14603 #endif
14604
14605+extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
14606+extern pgd_t initial_page_table[PTRS_PER_PGD];
14607+#ifdef CONFIG_X86_PAE
14608+extern pmd_t swapper_pm_dir[PTRS_PER_PGD][PTRS_PER_PMD];
14609+#endif
14610+
14611 #if defined(CONFIG_HIGHPTE)
14612 #define pte_offset_map(dir, address) \
14613 ((pte_t *)kmap_atomic(pmd_page(*(dir))) + \
14614@@ -62,12 +65,17 @@ extern void set_pmd_pfn(unsigned long, unsigned long, pgprot_t);
14615 /* Clear a kernel PTE and flush it from the TLB */
14616 #define kpte_clear_flush(ptep, vaddr) \
14617 do { \
14618+ pax_open_kernel(); \
14619 pte_clear(&init_mm, (vaddr), (ptep)); \
14620+ pax_close_kernel(); \
14621 __flush_tlb_one((vaddr)); \
14622 } while (0)
14623
14624 #endif /* !__ASSEMBLY__ */
14625
14626+#define HAVE_ARCH_UNMAPPED_AREA
14627+#define HAVE_ARCH_UNMAPPED_AREA_TOPDOWN
14628+
14629 /*
14630 * kern_addr_valid() is (1) for FLATMEM and (0) for
14631 * SPARSEMEM and DISCONTIGMEM
14632diff --git a/arch/x86/include/asm/pgtable_32_types.h b/arch/x86/include/asm/pgtable_32_types.h
14633index ed5903b..c7fe163 100644
14634--- a/arch/x86/include/asm/pgtable_32_types.h
14635+++ b/arch/x86/include/asm/pgtable_32_types.h
14636@@ -8,7 +8,7 @@
14637 */
14638 #ifdef CONFIG_X86_PAE
14639 # include <asm/pgtable-3level_types.h>
14640-# define PMD_SIZE (1UL << PMD_SHIFT)
14641+# define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
14642 # define PMD_MASK (~(PMD_SIZE - 1))
14643 #else
14644 # include <asm/pgtable-2level_types.h>
14645@@ -46,6 +46,19 @@ extern bool __vmalloc_start_set; /* set once high_memory is set */
14646 # define VMALLOC_END (FIXADDR_START - 2 * PAGE_SIZE)
14647 #endif
14648
14649+#ifdef CONFIG_PAX_KERNEXEC
14650+#ifndef __ASSEMBLY__
14651+extern unsigned char MODULES_EXEC_VADDR[];
14652+extern unsigned char MODULES_EXEC_END[];
14653+#endif
14654+#include <asm/boot.h>
14655+#define ktla_ktva(addr) (addr + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
14656+#define ktva_ktla(addr) (addr - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)
14657+#else
14658+#define ktla_ktva(addr) (addr)
14659+#define ktva_ktla(addr) (addr)
14660+#endif
14661+
14662 #define MODULES_VADDR VMALLOC_START
14663 #define MODULES_END VMALLOC_END
14664 #define MODULES_LEN (MODULES_VADDR - MODULES_END)
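
Under KERNEXEC on i386 the kernel text is reachable at two addresses, and ktla_ktva()/ktva_ktla() translate between the kernel text linear address and its virtual alias by a constant offset of LOAD_PHYSICAL_ADDR + PAGE_OFFSET (both collapse to identity without KERNEXEC, as the 64-bit variant below shows). A worked example, assuming the common defaults:

#include <stdio.h>

int main(void)
{
    /* assuming LOAD_PHYSICAL_ADDR = 16MB, PAGE_OFFSET = 0xC0000000 */
    unsigned long load_phys = 0x1000000UL, page_offset = 0xC0000000UL;
    unsigned long ktla = 0x100000UL;                  /* sample text address */
    printf("ktla %#lx -> ktva %#lx\n",                /* -> 0xc1100000 */
           ktla, ktla + load_phys + page_offset);
    return 0;
}
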
14665diff --git a/arch/x86/include/asm/pgtable_64.h b/arch/x86/include/asm/pgtable_64.h
14666index e22c1db..23a625a 100644
14667--- a/arch/x86/include/asm/pgtable_64.h
14668+++ b/arch/x86/include/asm/pgtable_64.h
14669@@ -16,10 +16,14 @@
14670
14671 extern pud_t level3_kernel_pgt[512];
14672 extern pud_t level3_ident_pgt[512];
14673+extern pud_t level3_vmalloc_start_pgt[512];
14674+extern pud_t level3_vmalloc_end_pgt[512];
14675+extern pud_t level3_vmemmap_pgt[512];
14676+extern pud_t level2_vmemmap_pgt[512];
14677 extern pmd_t level2_kernel_pgt[512];
14678 extern pmd_t level2_fixmap_pgt[512];
14679-extern pmd_t level2_ident_pgt[512];
14680-extern pgd_t init_level4_pgt[];
14681+extern pmd_t level2_ident_pgt[512*2];
14682+extern pgd_t init_level4_pgt[512];
14683
14684 #define swapper_pg_dir init_level4_pgt
14685
14686@@ -61,7 +65,9 @@ static inline void native_set_pte_atomic(pte_t *ptep, pte_t pte)
14687
14688 static inline void native_set_pmd(pmd_t *pmdp, pmd_t pmd)
14689 {
14690+ pax_open_kernel();
14691 *pmdp = pmd;
14692+ pax_close_kernel();
14693 }
14694
14695 static inline void native_pmd_clear(pmd_t *pmd)
14696@@ -97,7 +103,9 @@ static inline pmd_t native_pmdp_get_and_clear(pmd_t *xp)
14697
14698 static inline void native_set_pud(pud_t *pudp, pud_t pud)
14699 {
14700+ pax_open_kernel();
14701 *pudp = pud;
14702+ pax_close_kernel();
14703 }
14704
14705 static inline void native_pud_clear(pud_t *pud)
14706@@ -107,6 +115,13 @@ static inline void native_pud_clear(pud_t *pud)
14707
14708 static inline void native_set_pgd(pgd_t *pgdp, pgd_t pgd)
14709 {
14710+ pax_open_kernel();
14711+ *pgdp = pgd;
14712+ pax_close_kernel();
14713+}
14714+
14715+static inline void native_set_pgd_batched(pgd_t *pgdp, pgd_t pgd)
14716+{
14717 *pgdp = pgd;
14718 }
14719
14720diff --git a/arch/x86/include/asm/pgtable_64_types.h b/arch/x86/include/asm/pgtable_64_types.h
14721index 2d88344..4679fc3 100644
14722--- a/arch/x86/include/asm/pgtable_64_types.h
14723+++ b/arch/x86/include/asm/pgtable_64_types.h
14724@@ -61,6 +61,11 @@ typedef struct { pteval_t pte; } pte_t;
14725 #define MODULES_VADDR _AC(0xffffffffa0000000, UL)
14726 #define MODULES_END _AC(0xffffffffff000000, UL)
14727 #define MODULES_LEN (MODULES_END - MODULES_VADDR)
14728+#define MODULES_EXEC_VADDR MODULES_VADDR
14729+#define MODULES_EXEC_END MODULES_END
14730+
14731+#define ktla_ktva(addr) (addr)
14732+#define ktva_ktla(addr) (addr)
14733
14734 #define EARLY_DYNAMIC_PAGE_TABLES 64
14735
14736diff --git a/arch/x86/include/asm/pgtable_types.h b/arch/x86/include/asm/pgtable_types.h
14737index e642300..0ef8f31 100644
14738--- a/arch/x86/include/asm/pgtable_types.h
14739+++ b/arch/x86/include/asm/pgtable_types.h
14740@@ -16,13 +16,12 @@
14741 #define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
14742 #define _PAGE_BIT_PAT 7 /* on 4KB pages */
14743 #define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
14744-#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
14745+#define _PAGE_BIT_SPECIAL 9 /* special mappings, no associated struct page */
14746 #define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
14747 #define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
14748 #define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
14749-#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
14750-#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
14751-#define _PAGE_BIT_SPLITTING _PAGE_BIT_UNUSED1 /* only valid on a PSE pmd */
14752+#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SPECIAL
14753+#define _PAGE_BIT_SPLITTING _PAGE_BIT_SPECIAL /* only valid on a PSE pmd */
14754 #define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */
14755
14756 /* If _PAGE_BIT_PRESENT is clear, we use these: */
14757@@ -40,7 +39,6 @@
14758 #define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
14759 #define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
14760 #define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
14761-#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
14762 #define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
14763 #define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
14764 #define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
14765@@ -57,8 +55,10 @@
14766
14767 #if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
14768 #define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
14769-#else
14770+#elif defined(CONFIG_KMEMCHECK)
14771 #define _PAGE_NX (_AT(pteval_t, 0))
14772+#else
14773+#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
14774 #endif
14775
14776 #define _PAGE_FILE (_AT(pteval_t, 1) << _PAGE_BIT_FILE)
14777@@ -116,6 +116,9 @@
14778 #define PAGE_READONLY_EXEC __pgprot(_PAGE_PRESENT | _PAGE_USER | \
14779 _PAGE_ACCESSED)
14780
14781+#define PAGE_READONLY_NOEXEC PAGE_READONLY
14782+#define PAGE_SHARED_NOEXEC PAGE_SHARED
14783+
14784 #define __PAGE_KERNEL_EXEC \
14785 (_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_GLOBAL)
14786 #define __PAGE_KERNEL (__PAGE_KERNEL_EXEC | _PAGE_NX)
14787@@ -126,7 +129,7 @@
14788 #define __PAGE_KERNEL_WC (__PAGE_KERNEL | _PAGE_CACHE_WC)
14789 #define __PAGE_KERNEL_NOCACHE (__PAGE_KERNEL | _PAGE_PCD | _PAGE_PWT)
14790 #define __PAGE_KERNEL_UC_MINUS (__PAGE_KERNEL | _PAGE_PCD)
14791-#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RX | _PAGE_USER)
14792+#define __PAGE_KERNEL_VSYSCALL (__PAGE_KERNEL_RO | _PAGE_USER)
14793 #define __PAGE_KERNEL_VVAR (__PAGE_KERNEL_RO | _PAGE_USER)
14794 #define __PAGE_KERNEL_VVAR_NOCACHE (__PAGE_KERNEL_VVAR | _PAGE_PCD | _PAGE_PWT)
14795 #define __PAGE_KERNEL_LARGE (__PAGE_KERNEL | _PAGE_PSE)
14796@@ -188,8 +191,8 @@
14797 * bits are combined, this will alow user to access the high address mapped
14798 * VDSO in the presence of CONFIG_COMPAT_VDSO
14799 */
14800-#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
14801-#define PDE_IDENT_ATTR 0x067 /* PRESENT+RW+USER+DIRTY+ACCESSED */
14802+#define PTE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
14803+#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
14804 #define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
14805 #endif
14806
14807@@ -227,7 +230,17 @@ static inline pgdval_t pgd_flags(pgd_t pgd)
14808 {
14809 return native_pgd_val(pgd) & PTE_FLAGS_MASK;
14810 }
14811+#endif
14812
14813+#if PAGETABLE_LEVELS == 3
14814+#include <asm-generic/pgtable-nopud.h>
14815+#endif
14816+
14817+#if PAGETABLE_LEVELS == 2
14818+#include <asm-generic/pgtable-nopmd.h>
14819+#endif
14820+
14821+#ifndef __ASSEMBLY__
14822 #if PAGETABLE_LEVELS > 3
14823 typedef struct { pudval_t pud; } pud_t;
14824
14825@@ -241,8 +254,6 @@ static inline pudval_t native_pud_val(pud_t pud)
14826 return pud.pud;
14827 }
14828 #else
14829-#include <asm-generic/pgtable-nopud.h>
14830-
14831 static inline pudval_t native_pud_val(pud_t pud)
14832 {
14833 return native_pgd_val(pud.pgd);
14834@@ -262,8 +273,6 @@ static inline pmdval_t native_pmd_val(pmd_t pmd)
14835 return pmd.pmd;
14836 }
14837 #else
14838-#include <asm-generic/pgtable-nopmd.h>
14839-
14840 static inline pmdval_t native_pmd_val(pmd_t pmd)
14841 {
14842 return native_pgd_val(pmd.pud.pgd);
14843@@ -303,7 +312,6 @@ typedef struct page *pgtable_t;
14844
14845 extern pteval_t __supported_pte_mask;
14846 extern void set_nx(void);
14847-extern int nx_enabled;
14848
14849 #define pgprot_writecombine pgprot_writecombine
14850 extern pgprot_t pgprot_writecombine(pgprot_t prot);
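
Two things happen in this hunk: _PAGE_BIT_SPECIAL takes over the slot of the dropped _PAGE_BIT_UNUSED1, and on non-PAE 32-bit builds _PAGE_NX is redefined from 0 to the software-only "hidden" bit, so the kernel can at least track non-executability even though 2-level paging has no hardware NX bit (actual enforcement presumably falls to the segment-based schemes elsewhere in this patch). In numbers:

/* !PAE case from the hunk above: NX becomes software bit 11 */
#define _PAGE_BIT_HIDDEN 11
#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)   /* == 0x800 */
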
14851diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
14852index 22224b3..4080dab 100644
14853--- a/arch/x86/include/asm/processor.h
14854+++ b/arch/x86/include/asm/processor.h
14855@@ -282,7 +282,7 @@ struct tss_struct {
14856
14857 } ____cacheline_aligned;
14858
14859-DECLARE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss);
14860+extern struct tss_struct init_tss[NR_CPUS];
14861
14862 /*
14863 * Save the original ist values for checking stack pointers during debugging
14864@@ -823,11 +823,18 @@ static inline void spin_lock_prefetch(const void *x)
14865 */
14866 #define TASK_SIZE PAGE_OFFSET
14867 #define TASK_SIZE_MAX TASK_SIZE
14868+
14869+#ifdef CONFIG_PAX_SEGMEXEC
14870+#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)
14871+#define STACK_TOP ((current->mm->pax_flags & MF_PAX_SEGMEXEC)?SEGMEXEC_TASK_SIZE:TASK_SIZE)
14872+#else
14873 #define STACK_TOP TASK_SIZE
14874-#define STACK_TOP_MAX STACK_TOP
14875+#endif
14876+
14877+#define STACK_TOP_MAX TASK_SIZE
14878
14879 #define INIT_THREAD { \
14880- .sp0 = sizeof(init_stack) + (long)&init_stack, \
14881+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
14882 .vm86_info = NULL, \
14883 .sysenter_cs = __KERNEL_CS, \
14884 .io_bitmap_ptr = NULL, \
14885@@ -841,7 +848,7 @@ static inline void spin_lock_prefetch(const void *x)
14886 */
14887 #define INIT_TSS { \
14888 .x86_tss = { \
14889- .sp0 = sizeof(init_stack) + (long)&init_stack, \
14890+ .sp0 = sizeof(init_stack) + (long)&init_stack - 8, \
14891 .ss0 = __KERNEL_DS, \
14892 .ss1 = __KERNEL_CS, \
14893 .io_bitmap_base = INVALID_IO_BITMAP_OFFSET, \
14894@@ -852,11 +859,7 @@ static inline void spin_lock_prefetch(const void *x)
14895 extern unsigned long thread_saved_pc(struct task_struct *tsk);
14896
14897 #define THREAD_SIZE_LONGS (THREAD_SIZE/sizeof(unsigned long))
14898-#define KSTK_TOP(info) \
14899-({ \
14900- unsigned long *__ptr = (unsigned long *)(info); \
14901- (unsigned long)(&__ptr[THREAD_SIZE_LONGS]); \
14902-})
14903+#define KSTK_TOP(info) ((container_of(info, struct task_struct, tinfo))->thread.sp0)
14904
14905 /*
14906 * The below -8 is to reserve 8 bytes on top of the ring0 stack.
14907@@ -871,7 +874,7 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
14908 #define task_pt_regs(task) \
14909 ({ \
14910 struct pt_regs *__regs__; \
14911- __regs__ = (struct pt_regs *)(KSTK_TOP(task_stack_page(task))-8); \
14912+ __regs__ = (struct pt_regs *)((task)->thread.sp0); \
14913 __regs__ - 1; \
14914 })
14915
14916@@ -881,13 +884,13 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
14917 /*
14918 * User space process size. 47bits minus one guard page.
14919 */
14920-#define TASK_SIZE_MAX ((1UL << 47) - PAGE_SIZE)
14921+#define TASK_SIZE_MAX ((1UL << TASK_SIZE_MAX_SHIFT) - PAGE_SIZE)
14922
14923 /* This decides where the kernel will search for a free chunk of vm
14924 * space during mmap's.
14925 */
14926 #define IA32_PAGE_OFFSET ((current->personality & ADDR_LIMIT_3GB) ? \
14927- 0xc0000000 : 0xFFFFe000)
14928+ 0xc0000000 : 0xFFFFf000)
14929
14930 #define TASK_SIZE (test_thread_flag(TIF_ADDR32) ? \
14931 IA32_PAGE_OFFSET : TASK_SIZE_MAX)
14932@@ -898,11 +901,11 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
14933 #define STACK_TOP_MAX TASK_SIZE_MAX
14934
14935 #define INIT_THREAD { \
14936- .sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
14937+ .sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
14938 }
14939
14940 #define INIT_TSS { \
14941- .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) \
14942+ .x86_tss.sp0 = (unsigned long)&init_stack + sizeof(init_stack) - 16 \
14943 }
14944
14945 /*
14946@@ -930,6 +933,10 @@ extern void start_thread(struct pt_regs *regs, unsigned long new_ip,
14947 */
14948 #define TASK_UNMAPPED_BASE (PAGE_ALIGN(TASK_SIZE / 3))
14949
14950+#ifdef CONFIG_PAX_SEGMEXEC
14951+#define SEGMEXEC_TASK_UNMAPPED_BASE (PAGE_ALIGN(SEGMEXEC_TASK_SIZE / 3))
14952+#endif
14953+
14954 #define KSTK_EIP(task) (task_pt_regs(task)->ip)
14955
14956 /* Get/set a process' ability to use the timestamp counter instruction */
14957@@ -970,7 +977,7 @@ unsigned long calc_aperfmperf_ratio(struct aperfmperf *old,
14958 return ratio;
14959 }
14960
14961-extern unsigned long arch_align_stack(unsigned long sp);
14962+#define arch_align_stack(x) ((x) & ~0xfUL)
14963 extern void free_init_pages(char *what, unsigned long begin, unsigned long end);
14964
14965 void default_idle(void);
14966@@ -980,6 +987,6 @@ bool xen_set_default_idle(void);
14967 #define xen_set_default_idle 0
14968 #endif
14969
14970-void stop_this_cpu(void *dummy);
14971+void stop_this_cpu(void *dummy) __noreturn;
14972
14973 #endif /* _ASM_X86_PROCESSOR_H */
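
On i386, SEGMEXEC implements non-executable pages without NX hardware by halving the 3GB user address space: data mappings live in the lower half and executable mappings are mirrored into the upper half, reached only through the shrunken user CS installed by set_user_cs() earlier — hence SEGMEXEC_TASK_SIZE = TASK_SIZE / 2, the halved STACK_TOP and the SEGMEXEC_TASK_UNMAPPED_BASE here, and the one-bit-smaller ASLR deltas back in elf.h. The split in numbers:

#include <stdio.h>

int main(void)
{
    unsigned long task_size = 0xC0000000UL;   /* TASK_SIZE == PAGE_OFFSET */
    printf("data: 0..%#lx  code mirror: %#lx..%#lx\n",
           task_size / 2 - 1, task_size / 2, task_size - 1);
    return 0;
}
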
14974diff --git a/arch/x86/include/asm/ptrace.h b/arch/x86/include/asm/ptrace.h
14975index 942a086..6c26446 100644
14976--- a/arch/x86/include/asm/ptrace.h
14977+++ b/arch/x86/include/asm/ptrace.h
14978@@ -85,28 +85,29 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
14979 }
14980
14981 /*
14982- * user_mode_vm(regs) determines whether a register set came from user mode.
14983+ * user_mode(regs) determines whether a register set came from user mode.
14984 * This is true if V8086 mode was enabled OR if the register set was from
14985 * protected mode with RPL-3 CS value. This tricky test checks that with
14986 * one comparison. Many places in the kernel can bypass this full check
14987- * if they have already ruled out V8086 mode, so user_mode(regs) can be used.
14988+ * if they have already ruled out V8086 mode, so user_mode_novm(regs) can
14989+ * be used.
14990 */
14991-static inline int user_mode(struct pt_regs *regs)
14992+static inline int user_mode_novm(struct pt_regs *regs)
14993 {
14994 #ifdef CONFIG_X86_32
14995 return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
14996 #else
14997- return !!(regs->cs & 3);
14998+ return !!(regs->cs & SEGMENT_RPL_MASK);
14999 #endif
15000 }
15001
15002-static inline int user_mode_vm(struct pt_regs *regs)
15003+static inline int user_mode(struct pt_regs *regs)
15004 {
15005 #ifdef CONFIG_X86_32
15006 return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >=
15007 USER_RPL;
15008 #else
15009- return user_mode(regs);
15010+ return user_mode_novm(regs);
15011 #endif
15012 }
15013
15014@@ -122,15 +123,16 @@ static inline int v8086_mode(struct pt_regs *regs)
15015 #ifdef CONFIG_X86_64
15016 static inline bool user_64bit_mode(struct pt_regs *regs)
15017 {
15018+ unsigned long cs = regs->cs & 0xffff;
15019 #ifndef CONFIG_PARAVIRT
15020 /*
15021 * On non-paravirt systems, this is the only long mode CPL 3
15022 * selector. We do not allow long mode selectors in the LDT.
15023 */
15024- return regs->cs == __USER_CS;
15025+ return cs == __USER_CS;
15026 #else
15027 /* Headers are too twisted for this to go in paravirt.h. */
15028- return regs->cs == __USER_CS || regs->cs == pv_info.extra_user_64bit_cs;
15029+ return cs == __USER_CS || cs == pv_info.extra_user_64bit_cs;
15030 #endif
15031 }
15032
15033@@ -181,9 +183,11 @@ static inline unsigned long regs_get_register(struct pt_regs *regs,
15034 * Traps from the kernel do not save sp and ss.
15035 * Use the helper function to retrieve sp.
15036 */
15037- if (offset == offsetof(struct pt_regs, sp) &&
15038- regs->cs == __KERNEL_CS)
15039- return kernel_stack_pointer(regs);
15040+ if (offset == offsetof(struct pt_regs, sp)) {
15041+ unsigned long cs = regs->cs & 0xffff;
15042+ if (cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS)
15043+ return kernel_stack_pointer(regs);
15044+ }
15045 #endif
15046 return *(unsigned long *)((unsigned long)regs + offset);
15047 }
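
The swapped user_mode()/user_mode_novm() definitions preserve the single-comparison trick described in the comment: OR-ing the CS RPL bits with the EFLAGS VM bit makes the result reach USER_RPL for either an RPL-3 selector or V8086 mode. The 64-bit hunks additionally mask regs->cs to its low 16 bits before comparing, since the patch evidently does not trust the upper bits of the padded cs slot to be clean. A userspace sketch of the combined test, with the kernel's constant values hard-coded:

/* One-comparison user-mode test on 32-bit: true for an RPL-3 CS
 * *or* for V8086 mode, where CS is a real-mode segment with
 * arbitrary low bits but EFLAGS.VM is set. */
#include <stdio.h>

#define SEGMENT_RPL_MASK 0x3UL
#define USER_RPL         0x3UL
#define X86_VM_MASK      0x00020000UL   /* EFLAGS.VM */

static int is_user_mode(unsigned long cs, unsigned long eflags)
{
        return ((cs & SEGMENT_RPL_MASK) | (eflags & X86_VM_MASK)) >= USER_RPL;
}

int main(void)
{
        printf("%d\n", is_user_mode(0x73, 0));           /* RPL 3: user   */
        printf("%d\n", is_user_mode(0x10, 0));           /* RPL 0: kernel */
        printf("%d\n", is_user_mode(0x10, X86_VM_MASK)); /* V8086: user   */
        return 0;
}
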
15048diff --git a/arch/x86/include/asm/realmode.h b/arch/x86/include/asm/realmode.h
15049index 9c6b890..5305f53 100644
15050--- a/arch/x86/include/asm/realmode.h
15051+++ b/arch/x86/include/asm/realmode.h
15052@@ -22,16 +22,14 @@ struct real_mode_header {
15053 #endif
15054 /* APM/BIOS reboot */
15055 u32 machine_real_restart_asm;
15056-#ifdef CONFIG_X86_64
15057 u32 machine_real_restart_seg;
15058-#endif
15059 };
15060
15061 /* This must match data at trampoline_32/64.S */
15062 struct trampoline_header {
15063 #ifdef CONFIG_X86_32
15064 u32 start;
15065- u16 gdt_pad;
15066+ u16 boot_cs;
15067 u16 gdt_limit;
15068 u32 gdt_base;
15069 #else
15070diff --git a/arch/x86/include/asm/reboot.h b/arch/x86/include/asm/reboot.h
15071index a82c4f1..ac45053 100644
15072--- a/arch/x86/include/asm/reboot.h
15073+++ b/arch/x86/include/asm/reboot.h
15074@@ -6,13 +6,13 @@
15075 struct pt_regs;
15076
15077 struct machine_ops {
15078- void (*restart)(char *cmd);
15079- void (*halt)(void);
15080- void (*power_off)(void);
15081+ void (* __noreturn restart)(char *cmd);
15082+ void (* __noreturn halt)(void);
15083+ void (* __noreturn power_off)(void);
15084 void (*shutdown)(void);
15085 void (*crash_shutdown)(struct pt_regs *);
15086- void (*emergency_restart)(void);
15087-};
15088+ void (* __noreturn emergency_restart)(void);
15089+} __no_const;
15090
15091 extern struct machine_ops machine_ops;
15092
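
Annotating the restart/halt/power_off members __noreturn lets the compiler treat code after an indirect machine_ops call as unreachable, and __no_const (a grsecurity annotation) exempts the structure from the constification plugin discussed at the x86_init.h hunks below. A minimal sketch of the attribute placement the patch uses, with invented names and the attribute spelled out directly:

#include <stdlib.h>

struct machine_ops_sketch {
        void (* __attribute__((noreturn)) power_off)(void);
};

static void __attribute__((noreturn)) do_power_off(void)
{
        exit(0);
}

static struct machine_ops_sketch machine = { .power_off = do_power_off };

int main(void)
{
        machine.power_off();
        /* not reached: the noreturn annotation lets the compiler and
         * static checkers treat everything past the call as dead code */
}
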
15093diff --git a/arch/x86/include/asm/rwsem.h b/arch/x86/include/asm/rwsem.h
15094index cad82c9..2e5c5c1 100644
15095--- a/arch/x86/include/asm/rwsem.h
15096+++ b/arch/x86/include/asm/rwsem.h
15097@@ -64,6 +64,14 @@ static inline void __down_read(struct rw_semaphore *sem)
15098 {
15099 asm volatile("# beginning down_read\n\t"
15100 LOCK_PREFIX _ASM_INC "(%1)\n\t"
15101+
15102+#ifdef CONFIG_PAX_REFCOUNT
15103+ "jno 0f\n"
15104+ LOCK_PREFIX _ASM_DEC "(%1)\n"
15105+ "int $4\n0:\n"
15106+ _ASM_EXTABLE(0b, 0b)
15107+#endif
15108+
15109 /* adds 0x00000001 */
15110 " jns 1f\n"
15111 " call call_rwsem_down_read_failed\n"
15112@@ -85,6 +93,14 @@ static inline int __down_read_trylock(struct rw_semaphore *sem)
15113 "1:\n\t"
15114 " mov %1,%2\n\t"
15115 " add %3,%2\n\t"
15116+
15117+#ifdef CONFIG_PAX_REFCOUNT
15118+ "jno 0f\n"
15119+ "sub %3,%2\n"
15120+ "int $4\n0:\n"
15121+ _ASM_EXTABLE(0b, 0b)
15122+#endif
15123+
15124 " jle 2f\n\t"
15125 LOCK_PREFIX " cmpxchg %2,%0\n\t"
15126 " jnz 1b\n\t"
15127@@ -104,6 +120,14 @@ static inline void __down_write_nested(struct rw_semaphore *sem, int subclass)
15128 long tmp;
15129 asm volatile("# beginning down_write\n\t"
15130 LOCK_PREFIX " xadd %1,(%2)\n\t"
15131+
15132+#ifdef CONFIG_PAX_REFCOUNT
15133+ "jno 0f\n"
15134+ "mov %1,(%2)\n"
15135+ "int $4\n0:\n"
15136+ _ASM_EXTABLE(0b, 0b)
15137+#endif
15138+
15139 /* adds 0xffff0001, returns the old value */
15140 " test " __ASM_SEL(%w1,%k1) "," __ASM_SEL(%w1,%k1) "\n\t"
15141 /* was the active mask 0 before? */
15142@@ -155,6 +179,14 @@ static inline void __up_read(struct rw_semaphore *sem)
15143 long tmp;
15144 asm volatile("# beginning __up_read\n\t"
15145 LOCK_PREFIX " xadd %1,(%2)\n\t"
15146+
15147+#ifdef CONFIG_PAX_REFCOUNT
15148+ "jno 0f\n"
15149+ "mov %1,(%2)\n"
15150+ "int $4\n0:\n"
15151+ _ASM_EXTABLE(0b, 0b)
15152+#endif
15153+
15154 /* subtracts 1, returns the old value */
15155 " jns 1f\n\t"
15156 " call call_rwsem_wake\n" /* expects old value in %edx */
15157@@ -173,6 +205,14 @@ static inline void __up_write(struct rw_semaphore *sem)
15158 long tmp;
15159 asm volatile("# beginning __up_write\n\t"
15160 LOCK_PREFIX " xadd %1,(%2)\n\t"
15161+
15162+#ifdef CONFIG_PAX_REFCOUNT
15163+ "jno 0f\n"
15164+ "mov %1,(%2)\n"
15165+ "int $4\n0:\n"
15166+ _ASM_EXTABLE(0b, 0b)
15167+#endif
15168+
15169 /* subtracts 0xffff0001, returns the old value */
15170 " jns 1f\n\t"
15171 " call call_rwsem_wake\n" /* expects old value in %edx */
15172@@ -190,6 +230,14 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
15173 {
15174 asm volatile("# beginning __downgrade_write\n\t"
15175 LOCK_PREFIX _ASM_ADD "%2,(%1)\n\t"
15176+
15177+#ifdef CONFIG_PAX_REFCOUNT
15178+ "jno 0f\n"
15179+ LOCK_PREFIX _ASM_SUB "%2,(%1)\n"
15180+ "int $4\n0:\n"
15181+ _ASM_EXTABLE(0b, 0b)
15182+#endif
15183+
15184 /*
15185 * transitions 0xZZZZ0001 -> 0xYYYY0001 (i386)
15186 * 0xZZZZZZZZ00000001 -> 0xYYYYYYYY00000001 (x86_64)
15187@@ -208,7 +256,15 @@ static inline void __downgrade_write(struct rw_semaphore *sem)
15188 */
15189 static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
15190 {
15191- asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0"
15192+ asm volatile(LOCK_PREFIX _ASM_ADD "%1,%0\n"
15193+
15194+#ifdef CONFIG_PAX_REFCOUNT
15195+ "jno 0f\n"
15196+ LOCK_PREFIX _ASM_SUB "%1,%0\n"
15197+ "int $4\n0:\n"
15198+ _ASM_EXTABLE(0b, 0b)
15199+#endif
15200+
15201 : "+m" (sem->count)
15202 : "er" (delta));
15203 }
15204@@ -218,7 +274,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
15205 */
15206 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
15207 {
15208- return delta + xadd(&sem->count, delta);
15209+ return delta + xadd_check_overflow(&sem->count, delta);
15210 }
15211
15212 #endif /* __KERNEL__ */
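
Every CONFIG_PAX_REFCOUNT hunk in this file (and in spinlock.h below) has the same shape: perform the locked update, jump past the fixup with "jno" when the overflow flag is clear, otherwise undo the update and execute "int $4" to raise the overflow exception; the _ASM_EXTABLE(0b, 0b) entry lets the kernel's #OF handler resume right after the trapping instruction. A runnable sketch of the pattern on a plain counter (the exception-table plumbing is kernel-only and omitted here):

#include <stdio.h>

/* PAX_REFCOUNT-style checked increment: on signed overflow,
 * roll the operation back and trap. */
static inline void counter_inc_checked(int *counter)
{
        asm volatile("lock incl %0\n\t"
                     "jno 0f\n\t"        /* OF clear: normal path        */
                     "lock decl %0\n\t"  /* overflow: undo the increment */
                     "int $4\n"          /* and raise #OF for the kernel */
                     "0:\n"
                     : "+m" (*counter));
}

int main(void)
{
        int c = 41;

        counter_inc_checked(&c);
        printf("%d\n", c);      /* 42; the fixup path never ran */
        return 0;
}
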
15213diff --git a/arch/x86/include/asm/segment.h b/arch/x86/include/asm/segment.h
15214index c48a950..c6d7468 100644
15215--- a/arch/x86/include/asm/segment.h
15216+++ b/arch/x86/include/asm/segment.h
15217@@ -64,10 +64,15 @@
15218 * 26 - ESPFIX small SS
15219 * 27 - per-cpu [ offset to per-cpu data area ]
15220 * 28 - stack_canary-20 [ for stack protector ]
15221- * 29 - unused
15222- * 30 - unused
15223+ * 29 - PCI BIOS CS
15224+ * 30 - PCI BIOS DS
15225 * 31 - TSS for double fault handler
15226 */
15227+#define GDT_ENTRY_KERNEXEC_EFI_CS (1)
15228+#define GDT_ENTRY_KERNEXEC_EFI_DS (2)
15229+#define __KERNEXEC_EFI_CS (GDT_ENTRY_KERNEXEC_EFI_CS*8)
15230+#define __KERNEXEC_EFI_DS (GDT_ENTRY_KERNEXEC_EFI_DS*8)
15231+
15232 #define GDT_ENTRY_TLS_MIN 6
15233 #define GDT_ENTRY_TLS_MAX (GDT_ENTRY_TLS_MIN + GDT_ENTRY_TLS_ENTRIES - 1)
15234
15235@@ -79,6 +84,8 @@
15236
15237 #define GDT_ENTRY_KERNEL_CS (GDT_ENTRY_KERNEL_BASE+0)
15238
15239+#define GDT_ENTRY_KERNEXEC_KERNEL_CS (4)
15240+
15241 #define GDT_ENTRY_KERNEL_DS (GDT_ENTRY_KERNEL_BASE+1)
15242
15243 #define GDT_ENTRY_TSS (GDT_ENTRY_KERNEL_BASE+4)
15244@@ -104,6 +111,12 @@
15245 #define __KERNEL_STACK_CANARY 0
15246 #endif
15247
15248+#define GDT_ENTRY_PCIBIOS_CS (GDT_ENTRY_KERNEL_BASE+17)
15249+#define __PCIBIOS_CS (GDT_ENTRY_PCIBIOS_CS * 8)
15250+
15251+#define GDT_ENTRY_PCIBIOS_DS (GDT_ENTRY_KERNEL_BASE+18)
15252+#define __PCIBIOS_DS (GDT_ENTRY_PCIBIOS_DS * 8)
15253+
15254 #define GDT_ENTRY_DOUBLEFAULT_TSS 31
15255
15256 /*
15257@@ -141,7 +154,7 @@
15258 */
15259
15260 /* Matches PNP_CS32 and PNP_CS16 (they must be consecutive) */
15261-#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xf4) == GDT_ENTRY_PNPBIOS_BASE * 8)
15262+#define SEGMENT_IS_PNP_CODE(x) (((x) & 0xFFFCU) == PNP_CS32 || ((x) & 0xFFFCU) == PNP_CS16)
15263
15264
15265 #else
15266@@ -165,6 +178,8 @@
15267 #define __USER32_CS (GDT_ENTRY_DEFAULT_USER32_CS*8+3)
15268 #define __USER32_DS __USER_DS
15269
15270+#define GDT_ENTRY_KERNEXEC_KERNEL_CS 7
15271+
15272 #define GDT_ENTRY_TSS 8 /* needs two entries */
15273 #define GDT_ENTRY_LDT 10 /* needs two entries */
15274 #define GDT_ENTRY_TLS_MIN 12
15275@@ -185,6 +200,7 @@
15276 #endif
15277
15278 #define __KERNEL_CS (GDT_ENTRY_KERNEL_CS*8)
15279+#define __KERNEXEC_KERNEL_CS (GDT_ENTRY_KERNEXEC_KERNEL_CS*8)
15280 #define __KERNEL_DS (GDT_ENTRY_KERNEL_DS*8)
15281 #define __USER_DS (GDT_ENTRY_DEFAULT_USER_DS*8+3)
15282 #define __USER_CS (GDT_ENTRY_DEFAULT_USER_CS*8+3)
15283@@ -265,7 +281,7 @@ static inline unsigned long get_limit(unsigned long segment)
15284 {
15285 unsigned long __limit;
15286 asm("lsll %1,%0" : "=r" (__limit) : "r" (segment));
15287- return __limit + 1;
15288+ return __limit;
15289 }
15290
15291 #endif /* !__ASSEMBLY__ */
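
All the new selector macros follow the usual x86 encoding: a selector is the GDT index shifted left by 3, with the table-indicator bit and the requested privilege level in the low bits (user selectors carry the "+3"). The reworked SEGMENT_IS_PNP_CODE likewise compares whole selectors with only the RPL bits masked off (& 0xFFFC) instead of the looser 0xf4 mask. A tiny sketch of the encoding; values mirror the 32-bit defines in this hunk:

#include <stdio.h>

#define SEL(idx, rpl)   (((idx) << 3) | (rpl))

int main(void)
{
        printf("__KERNEXEC_EFI_CS    = %#x\n", SEL(1, 0));  /* 0x08 */
        printf("__KERNEXEC_EFI_DS    = %#x\n", SEL(2, 0));  /* 0x10 */
        printf("__KERNEXEC_KERNEL_CS = %#x\n", SEL(4, 0));  /* 0x20 */
        printf("32-bit __USER_CS     = %#x\n", SEL(14, 3)); /* 0x73 */
        return 0;
}
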
15292diff --git a/arch/x86/include/asm/smp.h b/arch/x86/include/asm/smp.h
15293index b073aae..39f9bdd 100644
15294--- a/arch/x86/include/asm/smp.h
15295+++ b/arch/x86/include/asm/smp.h
15296@@ -36,7 +36,7 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
15297 /* cpus sharing the last level cache: */
15298 DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
15299 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
15300-DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
15301+DECLARE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
15302
15303 static inline struct cpumask *cpu_sibling_mask(int cpu)
15304 {
15305@@ -79,7 +79,7 @@ struct smp_ops {
15306
15307 void (*send_call_func_ipi)(const struct cpumask *mask);
15308 void (*send_call_func_single_ipi)(int cpu);
15309-};
15310+} __no_const;
15311
15312 /* Globals due to paravirt */
15313 extern void set_cpu_sibling_map(int cpu);
15314@@ -191,14 +191,8 @@ extern unsigned disabled_cpus __cpuinitdata;
15315 extern int safe_smp_processor_id(void);
15316
15317 #elif defined(CONFIG_X86_64_SMP)
15318-#define raw_smp_processor_id() (this_cpu_read(cpu_number))
15319-
15320-#define stack_smp_processor_id() \
15321-({ \
15322- struct thread_info *ti; \
15323- __asm__("andq %%rsp,%0; ":"=r" (ti) : "0" (CURRENT_MASK)); \
15324- ti->cpu; \
15325-})
15326+#define raw_smp_processor_id() (this_cpu_read(cpu_number))
15327+#define stack_smp_processor_id() raw_smp_processor_id()
15328 #define safe_smp_processor_id() smp_processor_id()
15329
15330 #endif
15331diff --git a/arch/x86/include/asm/spinlock.h b/arch/x86/include/asm/spinlock.h
15332index 33692ea..350a534 100644
15333--- a/arch/x86/include/asm/spinlock.h
15334+++ b/arch/x86/include/asm/spinlock.h
15335@@ -172,6 +172,14 @@ static inline int arch_write_can_lock(arch_rwlock_t *lock)
15336 static inline void arch_read_lock(arch_rwlock_t *rw)
15337 {
15338 asm volatile(LOCK_PREFIX READ_LOCK_SIZE(dec) " (%0)\n\t"
15339+
15340+#ifdef CONFIG_PAX_REFCOUNT
15341+ "jno 0f\n"
15342+ LOCK_PREFIX READ_LOCK_SIZE(inc) " (%0)\n"
15343+ "int $4\n0:\n"
15344+ _ASM_EXTABLE(0b, 0b)
15345+#endif
15346+
15347 "jns 1f\n"
15348 "call __read_lock_failed\n\t"
15349 "1:\n"
15350@@ -181,6 +189,14 @@ static inline void arch_read_lock(arch_rwlock_t *rw)
15351 static inline void arch_write_lock(arch_rwlock_t *rw)
15352 {
15353 asm volatile(LOCK_PREFIX WRITE_LOCK_SUB(%1) "(%0)\n\t"
15354+
15355+#ifdef CONFIG_PAX_REFCOUNT
15356+ "jno 0f\n"
15357+ LOCK_PREFIX WRITE_LOCK_ADD(%1) "(%0)\n"
15358+ "int $4\n0:\n"
15359+ _ASM_EXTABLE(0b, 0b)
15360+#endif
15361+
15362 "jz 1f\n"
15363 "call __write_lock_failed\n\t"
15364 "1:\n"
15365@@ -210,13 +226,29 @@ static inline int arch_write_trylock(arch_rwlock_t *lock)
15366
15367 static inline void arch_read_unlock(arch_rwlock_t *rw)
15368 {
15369- asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0"
15370+ asm volatile(LOCK_PREFIX READ_LOCK_SIZE(inc) " %0\n"
15371+
15372+#ifdef CONFIG_PAX_REFCOUNT
15373+ "jno 0f\n"
15374+ LOCK_PREFIX READ_LOCK_SIZE(dec) " %0\n"
15375+ "int $4\n0:\n"
15376+ _ASM_EXTABLE(0b, 0b)
15377+#endif
15378+
15379 :"+m" (rw->lock) : : "memory");
15380 }
15381
15382 static inline void arch_write_unlock(arch_rwlock_t *rw)
15383 {
15384- asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0"
15385+ asm volatile(LOCK_PREFIX WRITE_LOCK_ADD(%1) "%0\n"
15386+
15387+#ifdef CONFIG_PAX_REFCOUNT
15388+ "jno 0f\n"
15389+ LOCK_PREFIX WRITE_LOCK_SUB(%1) "%0\n"
15390+ "int $4\n0:\n"
15391+ _ASM_EXTABLE(0b, 0b)
15392+#endif
15393+
15394 : "+m" (rw->write) : "i" (RW_LOCK_BIAS) : "memory");
15395 }
15396
15397diff --git a/arch/x86/include/asm/stackprotector.h b/arch/x86/include/asm/stackprotector.h
15398index 6a99859..03cb807 100644
15399--- a/arch/x86/include/asm/stackprotector.h
15400+++ b/arch/x86/include/asm/stackprotector.h
15401@@ -47,7 +47,7 @@
15402 * head_32 for boot CPU and setup_per_cpu_areas() for others.
15403 */
15404 #define GDT_STACK_CANARY_INIT \
15405- [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x18),
15406+ [GDT_ENTRY_STACK_CANARY] = GDT_ENTRY_INIT(0x4090, 0, 0x17),
15407
15408 /*
15409 * Initialize the stackprotector canary value.
15410@@ -112,7 +112,7 @@ static inline void setup_stack_canary_segment(int cpu)
15411
15412 static inline void load_stack_canary_segment(void)
15413 {
15414-#ifdef CONFIG_X86_32
15415+#if defined(CONFIG_X86_32) && !defined(CONFIG_PAX_MEMORY_UDEREF)
15416 asm volatile ("mov %0, %%gs" : : "r" (0));
15417 #endif
15418 }
15419diff --git a/arch/x86/include/asm/stacktrace.h b/arch/x86/include/asm/stacktrace.h
15420index 70bbe39..4ae2bd4 100644
15421--- a/arch/x86/include/asm/stacktrace.h
15422+++ b/arch/x86/include/asm/stacktrace.h
15423@@ -11,28 +11,20 @@
15424
15425 extern int kstack_depth_to_print;
15426
15427-struct thread_info;
15428+struct task_struct;
15429 struct stacktrace_ops;
15430
15431-typedef unsigned long (*walk_stack_t)(struct thread_info *tinfo,
15432- unsigned long *stack,
15433- unsigned long bp,
15434- const struct stacktrace_ops *ops,
15435- void *data,
15436- unsigned long *end,
15437- int *graph);
15438+typedef unsigned long walk_stack_t(struct task_struct *task,
15439+ void *stack_start,
15440+ unsigned long *stack,
15441+ unsigned long bp,
15442+ const struct stacktrace_ops *ops,
15443+ void *data,
15444+ unsigned long *end,
15445+ int *graph);
15446
15447-extern unsigned long
15448-print_context_stack(struct thread_info *tinfo,
15449- unsigned long *stack, unsigned long bp,
15450- const struct stacktrace_ops *ops, void *data,
15451- unsigned long *end, int *graph);
15452-
15453-extern unsigned long
15454-print_context_stack_bp(struct thread_info *tinfo,
15455- unsigned long *stack, unsigned long bp,
15456- const struct stacktrace_ops *ops, void *data,
15457- unsigned long *end, int *graph);
15458+extern walk_stack_t print_context_stack;
15459+extern walk_stack_t print_context_stack_bp;
15460
15461 /* Generic stack tracer with callbacks */
15462
15463@@ -40,7 +32,7 @@ struct stacktrace_ops {
15464 void (*address)(void *data, unsigned long address, int reliable);
15465 /* On negative return stop dumping */
15466 int (*stack)(void *data, char *name);
15467- walk_stack_t walk_stack;
15468+ walk_stack_t *walk_stack;
15469 };
15470
15471 void dump_trace(struct task_struct *tsk, struct pt_regs *regs,
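
The walk_stack_t change is a C idiom worth spelling out: the typedef now names a function *type* rather than a pointer type, so the same name both declares the two walkers ("extern walk_stack_t print_context_stack;") and, with an explicit "*", the struct member. A compact sketch of the idiom with invented names:

#include <stdio.h>

typedef int handler_t(int);     /* function type, not pointer */

handler_t doubler;              /* declares a function */

int doubler(int x) { return 2 * x; }

struct ops_sketch { handler_t *fn; };   /* pointer is spelled out */

int main(void)
{
        struct ops_sketch o = { .fn = doubler };

        printf("%d\n", o.fn(21));
        return 0;
}
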
15472diff --git a/arch/x86/include/asm/switch_to.h b/arch/x86/include/asm/switch_to.h
15473index 4ec45b3..a4f0a8a 100644
15474--- a/arch/x86/include/asm/switch_to.h
15475+++ b/arch/x86/include/asm/switch_to.h
15476@@ -108,7 +108,7 @@ do { \
15477 "call __switch_to\n\t" \
15478 "movq "__percpu_arg([current_task])",%%rsi\n\t" \
15479 __switch_canary \
15480- "movq %P[thread_info](%%rsi),%%r8\n\t" \
15481+ "movq "__percpu_arg([thread_info])",%%r8\n\t" \
15482 "movq %%rax,%%rdi\n\t" \
15483 "testl %[_tif_fork],%P[ti_flags](%%r8)\n\t" \
15484 "jnz ret_from_fork\n\t" \
15485@@ -119,7 +119,7 @@ do { \
15486 [threadrsp] "i" (offsetof(struct task_struct, thread.sp)), \
15487 [ti_flags] "i" (offsetof(struct thread_info, flags)), \
15488 [_tif_fork] "i" (_TIF_FORK), \
15489- [thread_info] "i" (offsetof(struct task_struct, stack)), \
15490+ [thread_info] "m" (current_tinfo), \
15491 [current_task] "m" (current_task) \
15492 __switch_canary_iparam \
15493 : "memory", "cc" __EXTRA_CLOBBER)
15494diff --git a/arch/x86/include/asm/thread_info.h b/arch/x86/include/asm/thread_info.h
15495index a1df6e8..e002940 100644
15496--- a/arch/x86/include/asm/thread_info.h
15497+++ b/arch/x86/include/asm/thread_info.h
15498@@ -10,6 +10,7 @@
15499 #include <linux/compiler.h>
15500 #include <asm/page.h>
15501 #include <asm/types.h>
15502+#include <asm/percpu.h>
15503
15504 /*
15505 * low level task data that entry.S needs immediate access to
15506@@ -23,7 +24,6 @@ struct exec_domain;
15507 #include <linux/atomic.h>
15508
15509 struct thread_info {
15510- struct task_struct *task; /* main task structure */
15511 struct exec_domain *exec_domain; /* execution domain */
15512 __u32 flags; /* low level flags */
15513 __u32 status; /* thread synchronous flags */
15514@@ -33,19 +33,13 @@ struct thread_info {
15515 mm_segment_t addr_limit;
15516 struct restart_block restart_block;
15517 void __user *sysenter_return;
15518-#ifdef CONFIG_X86_32
15519- unsigned long previous_esp; /* ESP of the previous stack in
15520- case of nested (IRQ) stacks
15521- */
15522- __u8 supervisor_stack[0];
15523-#endif
15524+ unsigned long lowest_stack;
15525 unsigned int sig_on_uaccess_error:1;
15526 unsigned int uaccess_err:1; /* uaccess failed */
15527 };
15528
15529-#define INIT_THREAD_INFO(tsk) \
15530+#define INIT_THREAD_INFO \
15531 { \
15532- .task = &tsk, \
15533 .exec_domain = &default_exec_domain, \
15534 .flags = 0, \
15535 .cpu = 0, \
15536@@ -56,7 +50,7 @@ struct thread_info {
15537 }, \
15538 }
15539
15540-#define init_thread_info (init_thread_union.thread_info)
15541+#define init_thread_info (init_thread_union.stack)
15542 #define init_stack (init_thread_union.stack)
15543
15544 #else /* !__ASSEMBLY__ */
15545@@ -97,6 +91,7 @@ struct thread_info {
15546 #define TIF_SYSCALL_TRACEPOINT 28 /* syscall tracepoint instrumentation */
15547 #define TIF_ADDR32 29 /* 32-bit address space on 64 bits */
15548 #define TIF_X32 30 /* 32-bit native x86-64 binary */
15549+#define TIF_GRSEC_SETXID 31 /* update credentials on syscall entry/exit */
15550
15551 #define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
15552 #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
15553@@ -121,17 +116,18 @@ struct thread_info {
15554 #define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
15555 #define _TIF_ADDR32 (1 << TIF_ADDR32)
15556 #define _TIF_X32 (1 << TIF_X32)
15557+#define _TIF_GRSEC_SETXID (1 << TIF_GRSEC_SETXID)
15558
15559 /* work to do in syscall_trace_enter() */
15560 #define _TIF_WORK_SYSCALL_ENTRY \
15561 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_EMU | _TIF_SYSCALL_AUDIT | \
15562 _TIF_SECCOMP | _TIF_SINGLESTEP | _TIF_SYSCALL_TRACEPOINT | \
15563- _TIF_NOHZ)
15564+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
15565
15566 /* work to do in syscall_trace_leave() */
15567 #define _TIF_WORK_SYSCALL_EXIT \
15568 (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | _TIF_SINGLESTEP | \
15569- _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ)
15570+ _TIF_SYSCALL_TRACEPOINT | _TIF_NOHZ | _TIF_GRSEC_SETXID)
15571
15572 /* work to do on interrupt/exception return */
15573 #define _TIF_WORK_MASK \
15574@@ -142,7 +138,7 @@ struct thread_info {
15575 /* work to do on any return to user space */
15576 #define _TIF_ALLWORK_MASK \
15577 ((0x0000FFFF & ~_TIF_SECCOMP) | _TIF_SYSCALL_TRACEPOINT | \
15578- _TIF_NOHZ)
15579+ _TIF_NOHZ | _TIF_GRSEC_SETXID)
15580
15581 /* Only used for 64 bit */
15582 #define _TIF_DO_NOTIFY_MASK \
15583@@ -158,45 +154,40 @@ struct thread_info {
15584
15585 #define PREEMPT_ACTIVE 0x10000000
15586
15587-#ifdef CONFIG_X86_32
15588-
15589-#define STACK_WARN (THREAD_SIZE/8)
15590-/*
15591- * macros/functions for gaining access to the thread information structure
15592- *
15593- * preempt_count needs to be 1 initially, until the scheduler is functional.
15594- */
15595-#ifndef __ASSEMBLY__
15596-
15597-
15598-/* how to get the current stack pointer from C */
15599-register unsigned long current_stack_pointer asm("esp") __used;
15600-
15601-/* how to get the thread information struct from C */
15602-static inline struct thread_info *current_thread_info(void)
15603-{
15604- return (struct thread_info *)
15605- (current_stack_pointer & ~(THREAD_SIZE - 1));
15606-}
15607-
15608-#else /* !__ASSEMBLY__ */
15609-
15610+#ifdef __ASSEMBLY__
15611 /* how to get the thread information struct from ASM */
15612 #define GET_THREAD_INFO(reg) \
15613- movl $-THREAD_SIZE, reg; \
15614- andl %esp, reg
15615+ mov PER_CPU_VAR(current_tinfo), reg
15616
15617 /* use this one if reg already contains %esp */
15618-#define GET_THREAD_INFO_WITH_ESP(reg) \
15619- andl $-THREAD_SIZE, reg
15620+#define GET_THREAD_INFO_WITH_ESP(reg) GET_THREAD_INFO(reg)
15621+#else
15622+/* how to get the thread information struct from C */
15623+DECLARE_PER_CPU(struct thread_info *, current_tinfo);
15624+
15625+static __always_inline struct thread_info *current_thread_info(void)
15626+{
15627+ return this_cpu_read_stable(current_tinfo);
15628+}
15629+#endif
15630+
15631+#ifdef CONFIG_X86_32
15632+
15633+#define STACK_WARN (THREAD_SIZE/8)
15634+/*
15635+ * macros/functions for gaining access to the thread information structure
15636+ *
15637+ * preempt_count needs to be 1 initially, until the scheduler is functional.
15638+ */
15639+#ifndef __ASSEMBLY__
15640+
15641+/* how to get the current stack pointer from C */
15642+register unsigned long current_stack_pointer asm("esp") __used;
15643
15644 #endif
15645
15646 #else /* X86_32 */
15647
15648-#include <asm/percpu.h>
15649-#define KERNEL_STACK_OFFSET (5*8)
15650-
15651 /*
15652 * macros/functions for gaining access to the thread information structure
15653 * preempt_count needs to be 1 initially, until the scheduler is functional.
15654@@ -204,27 +195,8 @@ static inline struct thread_info *current_thread_info(void)
15655 #ifndef __ASSEMBLY__
15656 DECLARE_PER_CPU(unsigned long, kernel_stack);
15657
15658-static inline struct thread_info *current_thread_info(void)
15659-{
15660- struct thread_info *ti;
15661- ti = (void *)(this_cpu_read_stable(kernel_stack) +
15662- KERNEL_STACK_OFFSET - THREAD_SIZE);
15663- return ti;
15664-}
15665-
15666-#else /* !__ASSEMBLY__ */
15667-
15668-/* how to get the thread information struct from ASM */
15669-#define GET_THREAD_INFO(reg) \
15670- movq PER_CPU_VAR(kernel_stack),reg ; \
15671- subq $(THREAD_SIZE-KERNEL_STACK_OFFSET),reg
15672-
15673-/*
15674- * Same if PER_CPU_VAR(kernel_stack) is, perhaps with some offset, already in
15675- * a certain register (to be used in assembler memory operands).
15676- */
15677-#define THREAD_INFO(reg, off) KERNEL_STACK_OFFSET+(off)-THREAD_SIZE(reg)
15678-
15679+/* how to get the current stack pointer from C */
15680+register unsigned long current_stack_pointer asm("rsp") __used;
15681 #endif
15682
15683 #endif /* !X86_32 */
15684@@ -283,5 +255,12 @@ static inline bool is_ia32_task(void)
15685 extern void arch_task_cache_init(void);
15686 extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
15687 extern void arch_release_task_struct(struct task_struct *tsk);
15688+
15689+#define __HAVE_THREAD_FUNCTIONS
15690+#define task_thread_info(task) (&(task)->tinfo)
15691+#define task_stack_page(task) ((task)->stack)
15692+#define setup_thread_stack(p, org) do {} while (0)
15693+#define end_of_stack(p) ((unsigned long *)task_stack_page(p) + 1)
15694+
15695 #endif
15696 #endif /* _ASM_X86_THREAD_INFO_H */
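
With task, previous_esp and supervisor_stack gone from thread_info and __HAVE_THREAD_FUNCTIONS defined, thread_info no longer has to live at the base of the kernel stack, so current_thread_info() becomes a per-cpu pointer load instead of masking the stack pointer. A userspace sketch contrasting the two lookups; this_cpu_read_stable() is simulated with a plain global, and THREAD_SIZE and the layout are illustrative:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define THREAD_SIZE 8192UL

struct thread_info_sketch { int cpu; };

/* stand-in for the kernel's per-cpu current_tinfo slot */
static struct thread_info_sketch *current_tinfo;

/* old scheme: thread_info sits at the stack base, so masking any
 * in-stack address by THREAD_SIZE finds it */
static struct thread_info_sketch *cti_stack_mask(uintptr_t sp)
{
        return (struct thread_info_sketch *)(sp & ~(THREAD_SIZE - 1));
}

/* new scheme: a single pointer load, with no assumption that
 * thread_info shares pages with the (overflowable) stack */
static struct thread_info_sketch *cti_percpu(void)
{
        return current_tinfo;
}

int main(void)
{
        void *stack = aligned_alloc(THREAD_SIZE, THREAD_SIZE);
        uintptr_t sp = (uintptr_t)stack + THREAD_SIZE - 64;

        current_tinfo = stack;          /* pretend it sits at the base */
        printf("mask:   %p\npercpu: %p\n",
               (void *)cti_stack_mask(sp), (void *)cti_percpu());
        free(stack);
        return 0;
}
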
15697diff --git a/arch/x86/include/asm/uaccess.h b/arch/x86/include/asm/uaccess.h
15698index 5ee2687..70d5895 100644
15699--- a/arch/x86/include/asm/uaccess.h
15700+++ b/arch/x86/include/asm/uaccess.h
15701@@ -7,6 +7,7 @@
15702 #include <linux/compiler.h>
15703 #include <linux/thread_info.h>
15704 #include <linux/string.h>
15705+#include <linux/sched.h>
15706 #include <asm/asm.h>
15707 #include <asm/page.h>
15708 #include <asm/smap.h>
15709@@ -29,7 +30,12 @@
15710
15711 #define get_ds() (KERNEL_DS)
15712 #define get_fs() (current_thread_info()->addr_limit)
15713+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15714+void __set_fs(mm_segment_t x);
15715+void set_fs(mm_segment_t x);
15716+#else
15717 #define set_fs(x) (current_thread_info()->addr_limit = (x))
15718+#endif
15719
15720 #define segment_eq(a, b) ((a).seg == (b).seg)
15721
15722@@ -77,8 +83,33 @@
15723 * checks that the pointer is in the user space range - after calling
15724 * this function, memory access functions may still return -EFAULT.
15725 */
15726-#define access_ok(type, addr, size) \
15727- (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
15728+#define __access_ok(type, addr, size) (likely(__range_not_ok(addr, size, user_addr_max()) == 0))
15729+#define access_ok(type, addr, size) \
15730+({ \
15731+ long __size = size; \
15732+ unsigned long __addr = (unsigned long)addr; \
15733+ unsigned long __addr_ao = __addr & PAGE_MASK; \
15734+ unsigned long __end_ao = __addr + __size - 1; \
15735+ bool __ret_ao = __range_not_ok(__addr, __size, user_addr_max()) == 0;\
15736+ if (__ret_ao && unlikely((__end_ao ^ __addr_ao) & PAGE_MASK)) { \
15737+ while(__addr_ao <= __end_ao) { \
15738+ char __c_ao; \
15739+ __addr_ao += PAGE_SIZE; \
15740+ if (__size > PAGE_SIZE) \
15741+ cond_resched(); \
15742+ if (__get_user(__c_ao, (char __user *)__addr)) \
15743+ break; \
15744+ if (type != VERIFY_WRITE) { \
15745+ __addr = __addr_ao; \
15746+ continue; \
15747+ } \
15748+ if (__put_user(__c_ao, (char __user *)__addr)) \
15749+ break; \
15750+ __addr = __addr_ao; \
15751+ } \
15752+ } \
15753+ __ret_ao; \
15754+})
15755
15756 /*
15757 * The exception table consists of pairs of addresses relative to the
15758@@ -176,13 +207,21 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
15759 asm volatile("call __put_user_" #size : "=a" (__ret_pu) \
15760 : "0" ((typeof(*(ptr)))(x)), "c" (ptr) : "ebx")
15761
15762-
15763+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
15764+#define __copyuser_seg "gs;"
15765+#define __COPYUSER_SET_ES "pushl %%gs; popl %%es\n"
15766+#define __COPYUSER_RESTORE_ES "pushl %%ss; popl %%es\n"
15767+#else
15768+#define __copyuser_seg
15769+#define __COPYUSER_SET_ES
15770+#define __COPYUSER_RESTORE_ES
15771+#endif
15772
15773 #ifdef CONFIG_X86_32
15774 #define __put_user_asm_u64(x, addr, err, errret) \
15775 asm volatile(ASM_STAC "\n" \
15776- "1: movl %%eax,0(%2)\n" \
15777- "2: movl %%edx,4(%2)\n" \
15778+ "1: "__copyuser_seg"movl %%eax,0(%2)\n" \
15779+ "2: "__copyuser_seg"movl %%edx,4(%2)\n" \
15780 "3: " ASM_CLAC "\n" \
15781 ".section .fixup,\"ax\"\n" \
15782 "4: movl %3,%0\n" \
15783@@ -195,8 +234,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
15784
15785 #define __put_user_asm_ex_u64(x, addr) \
15786 asm volatile(ASM_STAC "\n" \
15787- "1: movl %%eax,0(%1)\n" \
15788- "2: movl %%edx,4(%1)\n" \
15789+ "1: "__copyuser_seg"movl %%eax,0(%1)\n" \
15790+ "2: "__copyuser_seg"movl %%edx,4(%1)\n" \
15791 "3: " ASM_CLAC "\n" \
15792 _ASM_EXTABLE_EX(1b, 2b) \
15793 _ASM_EXTABLE_EX(2b, 3b) \
15794@@ -246,7 +285,7 @@ extern void __put_user_8(void);
15795 __typeof__(*(ptr)) __pu_val; \
15796 __chk_user_ptr(ptr); \
15797 might_fault(); \
15798- __pu_val = x; \
15799+ __pu_val = (x); \
15800 switch (sizeof(*(ptr))) { \
15801 case 1: \
15802 __put_user_x(1, __pu_val, ptr, __ret_pu); \
15803@@ -345,7 +384,7 @@ do { \
15804
15805 #define __get_user_asm(x, addr, err, itype, rtype, ltype, errret) \
15806 asm volatile(ASM_STAC "\n" \
15807- "1: mov"itype" %2,%"rtype"1\n" \
15808+ "1: "__copyuser_seg"mov"itype" %2,%"rtype"1\n"\
15809 "2: " ASM_CLAC "\n" \
15810 ".section .fixup,\"ax\"\n" \
15811 "3: mov %3,%0\n" \
15812@@ -353,7 +392,7 @@ do { \
15813 " jmp 2b\n" \
15814 ".previous\n" \
15815 _ASM_EXTABLE(1b, 3b) \
15816- : "=r" (err), ltype(x) \
15817+ : "=r" (err), ltype (x) \
15818 : "m" (__m(addr)), "i" (errret), "0" (err))
15819
15820 #define __get_user_size_ex(x, ptr, size) \
15821@@ -378,7 +417,7 @@ do { \
15822 } while (0)
15823
15824 #define __get_user_asm_ex(x, addr, itype, rtype, ltype) \
15825- asm volatile("1: mov"itype" %1,%"rtype"0\n" \
15826+ asm volatile("1: "__copyuser_seg"mov"itype" %1,%"rtype"0\n"\
15827 "2:\n" \
15828 _ASM_EXTABLE_EX(1b, 2b) \
15829 : ltype(x) : "m" (__m(addr)))
15830@@ -395,13 +434,24 @@ do { \
15831 int __gu_err; \
15832 unsigned long __gu_val; \
15833 __get_user_size(__gu_val, (ptr), (size), __gu_err, -EFAULT); \
15834- (x) = (__force __typeof__(*(ptr)))__gu_val; \
15835+ (x) = (__typeof__(*(ptr)))__gu_val; \
15836 __gu_err; \
15837 })
15838
15839 /* FIXME: this hack is definitely wrong -AK */
15840 struct __large_struct { unsigned long buf[100]; };
15841-#define __m(x) (*(struct __large_struct __user *)(x))
15842+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15843+#define ____m(x) \
15844+({ \
15845+ unsigned long ____x = (unsigned long)(x); \
15846+ if (____x < pax_user_shadow_base) \
15847+ ____x += pax_user_shadow_base; \
15848+ (typeof(x))____x; \
15849+})
15850+#else
15851+#define ____m(x) (x)
15852+#endif
15853+#define __m(x) (*(struct __large_struct __user *)____m(x))
15854
15855 /*
15856 * Tell gcc we read from memory instead of writing: this is because
15857@@ -410,7 +460,7 @@ struct __large_struct { unsigned long buf[100]; };
15858 */
15859 #define __put_user_asm(x, addr, err, itype, rtype, ltype, errret) \
15860 asm volatile(ASM_STAC "\n" \
15861- "1: mov"itype" %"rtype"1,%2\n" \
15862+ "1: "__copyuser_seg"mov"itype" %"rtype"1,%2\n"\
15863 "2: " ASM_CLAC "\n" \
15864 ".section .fixup,\"ax\"\n" \
15865 "3: mov %3,%0\n" \
15866@@ -418,10 +468,10 @@ struct __large_struct { unsigned long buf[100]; };
15867 ".previous\n" \
15868 _ASM_EXTABLE(1b, 3b) \
15869 : "=r"(err) \
15870- : ltype(x), "m" (__m(addr)), "i" (errret), "0" (err))
15871+ : ltype (x), "m" (__m(addr)), "i" (errret), "0" (err))
15872
15873 #define __put_user_asm_ex(x, addr, itype, rtype, ltype) \
15874- asm volatile("1: mov"itype" %"rtype"0,%1\n" \
15875+ asm volatile("1: "__copyuser_seg"mov"itype" %"rtype"0,%1\n"\
15876 "2:\n" \
15877 _ASM_EXTABLE_EX(1b, 2b) \
15878 : : ltype(x), "m" (__m(addr)))
15879@@ -460,8 +510,12 @@ struct __large_struct { unsigned long buf[100]; };
15880 * On error, the variable @x is set to zero.
15881 */
15882
15883+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15884+#define __get_user(x, ptr) get_user((x), (ptr))
15885+#else
15886 #define __get_user(x, ptr) \
15887 __get_user_nocheck((x), (ptr), sizeof(*(ptr)))
15888+#endif
15889
15890 /**
15891 * __put_user: - Write a simple value into user space, with less checking.
15892@@ -483,8 +537,12 @@ struct __large_struct { unsigned long buf[100]; };
15893 * Returns zero on success, or -EFAULT on error.
15894 */
15895
15896+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
15897+#define __put_user(x, ptr) put_user((x), (ptr))
15898+#else
15899 #define __put_user(x, ptr) \
15900 __put_user_nocheck((__typeof__(*(ptr)))(x), (ptr), sizeof(*(ptr)))
15901+#endif
15902
15903 #define __get_user_unaligned __get_user
15904 #define __put_user_unaligned __put_user
15905@@ -502,7 +560,7 @@ struct __large_struct { unsigned long buf[100]; };
15906 #define get_user_ex(x, ptr) do { \
15907 unsigned long __gue_val; \
15908 __get_user_size_ex((__gue_val), (ptr), (sizeof(*(ptr)))); \
15909- (x) = (__force __typeof__(*(ptr)))__gue_val; \
15910+ (x) = (__typeof__(*(ptr)))__gue_val; \
15911 } while (0)
15912
15913 #define put_user_try uaccess_try
15914@@ -519,8 +577,8 @@ strncpy_from_user(char *dst, const char __user *src, long count);
15915 extern __must_check long strlen_user(const char __user *str);
15916 extern __must_check long strnlen_user(const char __user *str, long n);
15917
15918-unsigned long __must_check clear_user(void __user *mem, unsigned long len);
15919-unsigned long __must_check __clear_user(void __user *mem, unsigned long len);
15920+unsigned long __must_check clear_user(void __user *mem, unsigned long len) __size_overflow(2);
15921+unsigned long __must_check __clear_user(void __user *mem, unsigned long len) __size_overflow(2);
15922
15923 /*
15924 * movsl can be slow when source and dest are not both 8-byte aligned
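
The rewritten access_ok() is the heart of the userland checks: besides the range test, when a range crosses a page boundary it walks every page, faulting each one in with __get_user() and, for VERIFY_WRITE, rewriting the byte with __put_user(), with a cond_resched() between pages on copies larger than a page. A simplified, runnable sketch of that walk, with the user accessors stubbed out as always-succeeding probes:

#include <stdio.h>

#define PAGE_SIZE 4096UL
#define PAGE_MASK (~(PAGE_SIZE - 1))

/* stand-ins for __get_user/__put_user: always succeed here */
static int probe_read(unsigned long addr, char *c)  { (void)addr; *c = 0; return 0; }
static int probe_write(unsigned long addr, char c)  { (void)addr; (void)c; return 0; }

static void prefault_range(unsigned long addr, unsigned long size, int verify_write)
{
        unsigned long page = addr & PAGE_MASK;
        unsigned long end  = addr + size - 1;
        char c;

        /* only needed when the range crosses a page boundary */
        if (!((end ^ page) & PAGE_MASK))
                return;

        while (page <= end) {
                page += PAGE_SIZE;              /* next page boundary  */
                if (probe_read(addr, &c))       /* fault it in (read)  */
                        break;
                if (verify_write && probe_write(addr, c))
                        break;                  /* ...and dirty it     */
                addr = page;
        }
}

int main(void)
{
        prefault_range(0x10000f00UL, 2 * PAGE_SIZE, 1);
        puts("prefault walk completed");
        return 0;
}

The __copyuser_seg/__COPYUSER_SET_ES machinery added in the same file serves the 32-bit side of UDEREF: user accesses are routed through a %gs-based segment override so they cannot silently reach kernel addresses.
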
15925diff --git a/arch/x86/include/asm/uaccess_32.h b/arch/x86/include/asm/uaccess_32.h
15926index 7f760a9..04b1c65 100644
15927--- a/arch/x86/include/asm/uaccess_32.h
15928+++ b/arch/x86/include/asm/uaccess_32.h
15929@@ -11,15 +11,15 @@
15930 #include <asm/page.h>
15931
15932 unsigned long __must_check __copy_to_user_ll
15933- (void __user *to, const void *from, unsigned long n);
15934+ (void __user *to, const void *from, unsigned long n) __size_overflow(3);
15935 unsigned long __must_check __copy_from_user_ll
15936- (void *to, const void __user *from, unsigned long n);
15937+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
15938 unsigned long __must_check __copy_from_user_ll_nozero
15939- (void *to, const void __user *from, unsigned long n);
15940+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
15941 unsigned long __must_check __copy_from_user_ll_nocache
15942- (void *to, const void __user *from, unsigned long n);
15943+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
15944 unsigned long __must_check __copy_from_user_ll_nocache_nozero
15945- (void *to, const void __user *from, unsigned long n);
15946+ (void *to, const void __user *from, unsigned long n) __size_overflow(3);
15947
15948 /**
15949 * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
15950@@ -43,6 +43,11 @@ unsigned long __must_check __copy_from_user_ll_nocache_nozero
15951 static __always_inline unsigned long __must_check
15952 __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
15953 {
15954+ if ((long)n < 0)
15955+ return n;
15956+
15957+ check_object_size(from, n, true);
15958+
15959 if (__builtin_constant_p(n)) {
15960 unsigned long ret;
15961
15962@@ -82,12 +87,16 @@ static __always_inline unsigned long __must_check
15963 __copy_to_user(void __user *to, const void *from, unsigned long n)
15964 {
15965 might_fault();
15966+
15967 return __copy_to_user_inatomic(to, from, n);
15968 }
15969
15970 static __always_inline unsigned long
15971 __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
15972 {
15973+ if ((long)n < 0)
15974+ return n;
15975+
15976 /* Avoid zeroing the tail if the copy fails..
15977 * If 'n' is constant and 1, 2, or 4, we do still zero on a failure,
15978 * but as the zeroing behaviour is only significant when n is not
15979@@ -137,6 +146,12 @@ static __always_inline unsigned long
15980 __copy_from_user(void *to, const void __user *from, unsigned long n)
15981 {
15982 might_fault();
15983+
15984+ if ((long)n < 0)
15985+ return n;
15986+
15987+ check_object_size(to, n, false);
15988+
15989 if (__builtin_constant_p(n)) {
15990 unsigned long ret;
15991
15992@@ -159,6 +174,10 @@ static __always_inline unsigned long __copy_from_user_nocache(void *to,
15993 const void __user *from, unsigned long n)
15994 {
15995 might_fault();
15996+
15997+ if ((long)n < 0)
15998+ return n;
15999+
16000 if (__builtin_constant_p(n)) {
16001 unsigned long ret;
16002
16003@@ -181,15 +200,19 @@ static __always_inline unsigned long
16004 __copy_from_user_inatomic_nocache(void *to, const void __user *from,
16005 unsigned long n)
16006 {
16007- return __copy_from_user_ll_nocache_nozero(to, from, n);
16008+ if ((long)n < 0)
16009+ return n;
16010+
16011+ return __copy_from_user_ll_nocache_nozero(to, from, n);
16012 }
16013
16014-unsigned long __must_check copy_to_user(void __user *to,
16015- const void *from, unsigned long n);
16016-unsigned long __must_check _copy_from_user(void *to,
16017- const void __user *from,
16018- unsigned long n);
16019-
16020+extern void copy_to_user_overflow(void)
16021+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
16022+ __compiletime_error("copy_to_user() buffer size is not provably correct")
16023+#else
16024+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
16025+#endif
16026+;
16027
16028 extern void copy_from_user_overflow(void)
16029 #ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
16030@@ -199,17 +222,60 @@ extern void copy_from_user_overflow(void)
16031 #endif
16032 ;
16033
16034-static inline unsigned long __must_check copy_from_user(void *to,
16035- const void __user *from,
16036- unsigned long n)
16037+/**
16038+ * copy_to_user: - Copy a block of data into user space.
16039+ * @to: Destination address, in user space.
16040+ * @from: Source address, in kernel space.
16041+ * @n: Number of bytes to copy.
16042+ *
16043+ * Context: User context only. This function may sleep.
16044+ *
16045+ * Copy data from kernel space to user space.
16046+ *
16047+ * Returns number of bytes that could not be copied.
16048+ * On success, this will be zero.
16049+ */
16050+static inline unsigned long __must_check
16051+copy_to_user(void __user *to, const void *from, unsigned long n)
16052 {
16053- int sz = __compiletime_object_size(to);
16054+ size_t sz = __compiletime_object_size(from);
16055
16056- if (likely(sz == -1 || sz >= n))
16057- n = _copy_from_user(to, from, n);
16058- else
16059+ if (unlikely(sz != (size_t)-1 && sz < n))
16060+ copy_to_user_overflow();
16061+ else if (access_ok(VERIFY_WRITE, to, n))
16062+ n = __copy_to_user(to, from, n);
16063+ return n;
16064+}
16065+
16066+/**
16067+ * copy_from_user: - Copy a block of data from user space.
16068+ * @to: Destination address, in kernel space.
16069+ * @from: Source address, in user space.
16070+ * @n: Number of bytes to copy.
16071+ *
16072+ * Context: User context only. This function may sleep.
16073+ *
16074+ * Copy data from user space to kernel space.
16075+ *
16076+ * Returns number of bytes that could not be copied.
16077+ * On success, this will be zero.
16078+ *
16079+ * If some data could not be copied, this function will pad the copied
16080+ * data to the requested size using zero bytes.
16081+ */
16082+static inline unsigned long __must_check
16083+copy_from_user(void *to, const void __user *from, unsigned long n)
16084+{
16085+ size_t sz = __compiletime_object_size(to);
16086+
16087+ check_object_size(to, n, false);
16088+
16089+ if (unlikely(sz != (size_t)-1 && sz < n))
16090 copy_from_user_overflow();
16091-
16092+ else if (access_ok(VERIFY_READ, from, n))
16093+ n = __copy_from_user(to, from, n);
16094+ else if ((long)n > 0)
16095+ memset(to, 0, n);
16096 return n;
16097 }
16098
16099diff --git a/arch/x86/include/asm/uaccess_64.h b/arch/x86/include/asm/uaccess_64.h
16100index 142810c..1f2a0a7 100644
16101--- a/arch/x86/include/asm/uaccess_64.h
16102+++ b/arch/x86/include/asm/uaccess_64.h
16103@@ -10,6 +10,9 @@
16104 #include <asm/alternative.h>
16105 #include <asm/cpufeature.h>
16106 #include <asm/page.h>
16107+#include <asm/pgtable.h>
16108+
16109+#define set_fs(x) (current_thread_info()->addr_limit = (x))
16110
16111 /*
16112 * Copy To/From Userspace
16113@@ -17,13 +20,13 @@
16114
16115 /* Handles exceptions in both to and from, but doesn't do access_ok */
16116 __must_check unsigned long
16117-copy_user_enhanced_fast_string(void *to, const void *from, unsigned len);
16118+copy_user_enhanced_fast_string(void *to, const void *from, unsigned len) __size_overflow(3);
16119 __must_check unsigned long
16120-copy_user_generic_string(void *to, const void *from, unsigned len);
16121+copy_user_generic_string(void *to, const void *from, unsigned len) __size_overflow(3);
16122 __must_check unsigned long
16123-copy_user_generic_unrolled(void *to, const void *from, unsigned len);
16124+copy_user_generic_unrolled(void *to, const void *from, unsigned len) __size_overflow(3);
16125
16126-static __always_inline __must_check unsigned long
16127+static __always_inline __must_check __size_overflow(3) unsigned long
16128 copy_user_generic(void *to, const void *from, unsigned len)
16129 {
16130 unsigned ret;
16131@@ -41,142 +44,204 @@ copy_user_generic(void *to, const void *from, unsigned len)
16132 ASM_OUTPUT2("=a" (ret), "=D" (to), "=S" (from),
16133 "=d" (len)),
16134 "1" (to), "2" (from), "3" (len)
16135- : "memory", "rcx", "r8", "r9", "r10", "r11");
16136+ : "memory", "rcx", "r8", "r9", "r11");
16137 return ret;
16138 }
16139
16140+static __always_inline __must_check unsigned long
16141+__copy_to_user(void __user *to, const void *from, unsigned long len);
16142+static __always_inline __must_check unsigned long
16143+__copy_from_user(void *to, const void __user *from, unsigned long len);
16144 __must_check unsigned long
16145-_copy_to_user(void __user *to, const void *from, unsigned len);
16146-__must_check unsigned long
16147-_copy_from_user(void *to, const void __user *from, unsigned len);
16148-__must_check unsigned long
16149-copy_in_user(void __user *to, const void __user *from, unsigned len);
16150+copy_in_user(void __user *to, const void __user *from, unsigned long len);
16151+
16152+extern void copy_to_user_overflow(void)
16153+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
16154+ __compiletime_error("copy_to_user() buffer size is not provably correct")
16155+#else
16156+ __compiletime_warning("copy_to_user() buffer size is not provably correct")
16157+#endif
16158+;
16159+
16160+extern void copy_from_user_overflow(void)
16161+#ifdef CONFIG_DEBUG_STRICT_USER_COPY_CHECKS
16162+ __compiletime_error("copy_from_user() buffer size is not provably correct")
16163+#else
16164+ __compiletime_warning("copy_from_user() buffer size is not provably correct")
16165+#endif
16166+;
16167
16168 static inline unsigned long __must_check copy_from_user(void *to,
16169 const void __user *from,
16170 unsigned long n)
16171 {
16172- int sz = __compiletime_object_size(to);
16173-
16174 might_fault();
16175- if (likely(sz == -1 || sz >= n))
16176- n = _copy_from_user(to, from, n);
16177-#ifdef CONFIG_DEBUG_VM
16178- else
16179- WARN(1, "Buffer overflow detected!\n");
16180-#endif
16181+
16182+ check_object_size(to, n, false);
16183+
16184+ if (access_ok(VERIFY_READ, from, n))
16185+ n = __copy_from_user(to, from, n);
16186+ else if (n < INT_MAX)
16187+ memset(to, 0, n);
16188 return n;
16189 }
16190
16191 static __always_inline __must_check
16192-int copy_to_user(void __user *dst, const void *src, unsigned size)
16193+int copy_to_user(void __user *dst, const void *src, unsigned long size)
16194 {
16195 might_fault();
16196
16197- return _copy_to_user(dst, src, size);
16198+ if (access_ok(VERIFY_WRITE, dst, size))
16199+ size = __copy_to_user(dst, src, size);
16200+ return size;
16201 }
16202
16203 static __always_inline __must_check
16204-int __copy_from_user(void *dst, const void __user *src, unsigned size)
16205+unsigned long __copy_from_user(void *dst, const void __user *src, unsigned long size)
16206 {
16207- int ret = 0;
16208+ size_t sz = __compiletime_object_size(dst);
16209+ unsigned ret = 0;
16210
16211 might_fault();
16212+
16213+ if (size > INT_MAX)
16214+ return size;
16215+
16216+ check_object_size(dst, size, false);
16217+
16218+#ifdef CONFIG_PAX_MEMORY_UDEREF
16219+ if (!__access_ok(VERIFY_READ, src, size))
16220+ return size;
16221+#endif
16222+
16223+ if (unlikely(sz != (size_t)-1 && sz < size)) {
16224+ copy_from_user_overflow();
16225+ return size;
16226+ }
16227+
16228 if (!__builtin_constant_p(size))
16229- return copy_user_generic(dst, (__force void *)src, size);
16230+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
16231 switch (size) {
16232- case 1:__get_user_asm(*(u8 *)dst, (u8 __user *)src,
16233+ case 1:__get_user_asm(*(u8 *)dst, (const u8 __user *)src,
16234 ret, "b", "b", "=q", 1);
16235 return ret;
16236- case 2:__get_user_asm(*(u16 *)dst, (u16 __user *)src,
16237+ case 2:__get_user_asm(*(u16 *)dst, (const u16 __user *)src,
16238 ret, "w", "w", "=r", 2);
16239 return ret;
16240- case 4:__get_user_asm(*(u32 *)dst, (u32 __user *)src,
16241+ case 4:__get_user_asm(*(u32 *)dst, (const u32 __user *)src,
16242 ret, "l", "k", "=r", 4);
16243 return ret;
16244- case 8:__get_user_asm(*(u64 *)dst, (u64 __user *)src,
16245+ case 8:__get_user_asm(*(u64 *)dst, (const u64 __user *)src,
16246 ret, "q", "", "=r", 8);
16247 return ret;
16248 case 10:
16249- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
16250+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
16251 ret, "q", "", "=r", 10);
16252 if (unlikely(ret))
16253 return ret;
16254 __get_user_asm(*(u16 *)(8 + (char *)dst),
16255- (u16 __user *)(8 + (char __user *)src),
16256+ (const u16 __user *)(8 + (const char __user *)src),
16257 ret, "w", "w", "=r", 2);
16258 return ret;
16259 case 16:
16260- __get_user_asm(*(u64 *)dst, (u64 __user *)src,
16261+ __get_user_asm(*(u64 *)dst, (const u64 __user *)src,
16262 ret, "q", "", "=r", 16);
16263 if (unlikely(ret))
16264 return ret;
16265 __get_user_asm(*(u64 *)(8 + (char *)dst),
16266- (u64 __user *)(8 + (char __user *)src),
16267+ (const u64 __user *)(8 + (const char __user *)src),
16268 ret, "q", "", "=r", 8);
16269 return ret;
16270 default:
16271- return copy_user_generic(dst, (__force void *)src, size);
16272+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
16273 }
16274 }
16275
16276 static __always_inline __must_check
16277-int __copy_to_user(void __user *dst, const void *src, unsigned size)
16278+unsigned long __copy_to_user(void __user *dst, const void *src, unsigned long size)
16279 {
16280- int ret = 0;
16281+ size_t sz = __compiletime_object_size(src);
16282+ unsigned ret = 0;
16283
16284 might_fault();
16285+
16286+ if (size > INT_MAX)
16287+ return size;
16288+
16289+ check_object_size(src, size, true);
16290+
16291+#ifdef CONFIG_PAX_MEMORY_UDEREF
16292+ if (!__access_ok(VERIFY_WRITE, dst, size))
16293+ return size;
16294+#endif
16295+
16296+ if (unlikely(sz != (size_t)-1 && sz < size)) {
16297+ copy_to_user_overflow();
16298+ return size;
16299+ }
16300+
16301 if (!__builtin_constant_p(size))
16302- return copy_user_generic((__force void *)dst, src, size);
16303+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
16304 switch (size) {
16305- case 1:__put_user_asm(*(u8 *)src, (u8 __user *)dst,
16306+ case 1:__put_user_asm(*(const u8 *)src, (u8 __user *)dst,
16307 ret, "b", "b", "iq", 1);
16308 return ret;
16309- case 2:__put_user_asm(*(u16 *)src, (u16 __user *)dst,
16310+ case 2:__put_user_asm(*(const u16 *)src, (u16 __user *)dst,
16311 ret, "w", "w", "ir", 2);
16312 return ret;
16313- case 4:__put_user_asm(*(u32 *)src, (u32 __user *)dst,
16314+ case 4:__put_user_asm(*(const u32 *)src, (u32 __user *)dst,
16315 ret, "l", "k", "ir", 4);
16316 return ret;
16317- case 8:__put_user_asm(*(u64 *)src, (u64 __user *)dst,
16318+ case 8:__put_user_asm(*(const u64 *)src, (u64 __user *)dst,
16319 ret, "q", "", "er", 8);
16320 return ret;
16321 case 10:
16322- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
16323+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
16324 ret, "q", "", "er", 10);
16325 if (unlikely(ret))
16326 return ret;
16327 asm("":::"memory");
16328- __put_user_asm(4[(u16 *)src], 4 + (u16 __user *)dst,
16329+ __put_user_asm(4[(const u16 *)src], 4 + (u16 __user *)dst,
16330 ret, "w", "w", "ir", 2);
16331 return ret;
16332 case 16:
16333- __put_user_asm(*(u64 *)src, (u64 __user *)dst,
16334+ __put_user_asm(*(const u64 *)src, (u64 __user *)dst,
16335 ret, "q", "", "er", 16);
16336 if (unlikely(ret))
16337 return ret;
16338 asm("":::"memory");
16339- __put_user_asm(1[(u64 *)src], 1 + (u64 __user *)dst,
16340+ __put_user_asm(1[(const u64 *)src], 1 + (u64 __user *)dst,
16341 ret, "q", "", "er", 8);
16342 return ret;
16343 default:
16344- return copy_user_generic((__force void *)dst, src, size);
16345+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
16346 }
16347 }
16348
16349 static __always_inline __must_check
16350-int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
16351+unsigned long __copy_in_user(void __user *dst, const void __user *src, unsigned long size)
16352 {
16353- int ret = 0;
16354+ unsigned ret = 0;
16355
16356 might_fault();
16357+
16358+ if (size > INT_MAX)
16359+ return size;
16360+
16361+#ifdef CONFIG_PAX_MEMORY_UDEREF
16362+ if (!__access_ok(VERIFY_READ, src, size))
16363+ return size;
16364+ if (!__access_ok(VERIFY_WRITE, dst, size))
16365+ return size;
16366+#endif
16367+
16368 if (!__builtin_constant_p(size))
16369- return copy_user_generic((__force void *)dst,
16370- (__force void *)src, size);
16371+ return copy_user_generic((__force_kernel void *)____m(dst),
16372+ (__force_kernel const void *)____m(src), size);
16373 switch (size) {
16374 case 1: {
16375 u8 tmp;
16376- __get_user_asm(tmp, (u8 __user *)src,
16377+ __get_user_asm(tmp, (const u8 __user *)src,
16378 ret, "b", "b", "=q", 1);
16379 if (likely(!ret))
16380 __put_user_asm(tmp, (u8 __user *)dst,
16381@@ -185,7 +250,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
16382 }
16383 case 2: {
16384 u16 tmp;
16385- __get_user_asm(tmp, (u16 __user *)src,
16386+ __get_user_asm(tmp, (const u16 __user *)src,
16387 ret, "w", "w", "=r", 2);
16388 if (likely(!ret))
16389 __put_user_asm(tmp, (u16 __user *)dst,
16390@@ -195,7 +260,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
16391
16392 case 4: {
16393 u32 tmp;
16394- __get_user_asm(tmp, (u32 __user *)src,
16395+ __get_user_asm(tmp, (const u32 __user *)src,
16396 ret, "l", "k", "=r", 4);
16397 if (likely(!ret))
16398 __put_user_asm(tmp, (u32 __user *)dst,
16399@@ -204,7 +269,7 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
16400 }
16401 case 8: {
16402 u64 tmp;
16403- __get_user_asm(tmp, (u64 __user *)src,
16404+ __get_user_asm(tmp, (const u64 __user *)src,
16405 ret, "q", "", "=r", 8);
16406 if (likely(!ret))
16407 __put_user_asm(tmp, (u64 __user *)dst,
16408@@ -212,41 +277,72 @@ int __copy_in_user(void __user *dst, const void __user *src, unsigned size)
16409 return ret;
16410 }
16411 default:
16412- return copy_user_generic((__force void *)dst,
16413- (__force void *)src, size);
16414+ return copy_user_generic((__force_kernel void *)____m(dst),
16415+ (__force_kernel const void *)____m(src), size);
16416 }
16417 }
16418
16419 static __must_check __always_inline int
16420-__copy_from_user_inatomic(void *dst, const void __user *src, unsigned size)
16421+__copy_from_user_inatomic(void *dst, const void __user *src, unsigned long size)
16422 {
16423- return copy_user_generic(dst, (__force const void *)src, size);
16424+ if (size > INT_MAX)
16425+ return size;
16426+
16427+#ifdef CONFIG_PAX_MEMORY_UDEREF
16428+ if (!__access_ok(VERIFY_READ, src, size))
16429+ return size;
16430+#endif
16431+
16432+ return copy_user_generic(dst, (__force_kernel const void *)____m(src), size);
16433 }
16434
16435-static __must_check __always_inline int
16436-__copy_to_user_inatomic(void __user *dst, const void *src, unsigned size)
16437+static __must_check __always_inline unsigned long
16438+__copy_to_user_inatomic(void __user *dst, const void *src, unsigned long size)
16439 {
16440- return copy_user_generic((__force void *)dst, src, size);
16441+ if (size > INT_MAX)
16442+ return size;
16443+
16444+#ifdef CONFIG_PAX_MEMORY_UDEREF
16445+ if (!__access_ok(VERIFY_WRITE, dst, size))
16446+ return size;
16447+#endif
16448+
16449+ return copy_user_generic((__force_kernel void *)____m(dst), src, size);
16450 }
16451
16452-extern long __copy_user_nocache(void *dst, const void __user *src,
16453- unsigned size, int zerorest);
16454+extern unsigned long __copy_user_nocache(void *dst, const void __user *src,
16455+ unsigned long size, int zerorest) __size_overflow(3);
16456
16457-static inline int
16458-__copy_from_user_nocache(void *dst, const void __user *src, unsigned size)
16459+static inline unsigned long __copy_from_user_nocache(void *dst, const void __user *src, unsigned long size)
16460 {
16461 might_sleep();
16462+
16463+ if (size > INT_MAX)
16464+ return size;
16465+
16466+#ifdef CONFIG_PAX_MEMORY_UDEREF
16467+ if (!__access_ok(VERIFY_READ, src, size))
16468+ return size;
16469+#endif
16470+
16471 return __copy_user_nocache(dst, src, size, 1);
16472 }
16473
16474-static inline int
16475-__copy_from_user_inatomic_nocache(void *dst, const void __user *src,
16476- unsigned size)
16477+static inline unsigned long __copy_from_user_inatomic_nocache(void *dst, const void __user *src,
16478+ unsigned long size)
16479 {
16480+ if (size > INT_MAX)
16481+ return size;
16482+
16483+#ifdef CONFIG_PAX_MEMORY_UDEREF
16484+ if (!__access_ok(VERIFY_READ, src, size))
16485+ return size;
16486+#endif
16487+
16488 return __copy_user_nocache(dst, src, size, 0);
16489 }
16490
16491-unsigned long
16492-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest);
16493+extern unsigned long
16494+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest) __size_overflow(3);
16495
16496 #endif /* _ASM_X86_UACCESS_64_H */
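
Two ideas recur through this 64-bit rewrite: every size is rejected early when it exceeds INT_MAX (closing off the sign-confusion bugs the old int-returning prototypes invited), and every raw user pointer is rebased through ____m() before the kernel touches it, because under UDEREF on amd64 userland is also mapped at a shadow offset, pax_user_shadow_base. A sketch of the rebasing with an arbitrary, purely illustrative base value:

#include <stdio.h>

static unsigned long pax_user_shadow_base = 0x1000000000000UL;

/* addresses below the shadow base are shifted up into the
 * kernel's alias of userland; already-rebased ones pass through */
#define ____m(x)                                                \
({                                                              \
        unsigned long ____x = (unsigned long)(x);               \
        if (____x < pax_user_shadow_base)                       \
                ____x += pax_user_shadow_base;                  \
        (void *)____x;                                          \
})

int main(void)
{
        printf("user %#lx -> kernel alias %p\n",
               0x400000UL, ____m(0x400000UL));
        printf("already rebased %#lx -> %p\n",
               0x1000000400000UL, ____m(0x1000000400000UL));
        return 0;
}
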
16497diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
16498index 5b238981..77fdd78 100644
16499--- a/arch/x86/include/asm/word-at-a-time.h
16500+++ b/arch/x86/include/asm/word-at-a-time.h
16501@@ -11,7 +11,7 @@
16502 * and shift, for example.
16503 */
16504 struct word_at_a_time {
16505- const unsigned long one_bits, high_bits;
16506+ unsigned long one_bits, high_bits;
16507 };
16508
16509 #define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
16510diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
16511index d8d9922..bf6cecb 100644
16512--- a/arch/x86/include/asm/x86_init.h
16513+++ b/arch/x86/include/asm/x86_init.h
16514@@ -129,7 +129,7 @@ struct x86_init_ops {
16515 struct x86_init_timers timers;
16516 struct x86_init_iommu iommu;
16517 struct x86_init_pci pci;
16518-};
16519+} __no_const;
16520
16521 /**
16522 * struct x86_cpuinit_ops - platform specific cpu hotplug setups
16523@@ -140,7 +140,7 @@ struct x86_cpuinit_ops {
16524 void (*setup_percpu_clockev)(void);
16525 void (*early_percpu_clock_init)(void);
16526 void (*fixup_cpu_id)(struct cpuinfo_x86 *c, int node);
16527-};
16528+} __no_const;
16529
16530 /**
16531 * struct x86_platform_ops - platform specific runtime functions
16532@@ -166,7 +166,7 @@ struct x86_platform_ops {
16533 void (*save_sched_clock_state)(void);
16534 void (*restore_sched_clock_state)(void);
16535 void (*apic_post_init)(void);
16536-};
16537+} __no_const;
16538
16539 struct pci_dev;
16540 struct msi_msg;
16541@@ -180,7 +180,7 @@ struct x86_msi_ops {
16542 void (*teardown_msi_irqs)(struct pci_dev *dev);
16543 void (*restore_msi_irqs)(struct pci_dev *dev, int irq);
16544 int (*setup_hpet_msi)(unsigned int irq, unsigned int id);
16545-};
16546+} __no_const;
16547
16548 struct IO_APIC_route_entry;
16549 struct io_apic_irq_attr;
16550@@ -201,7 +201,7 @@ struct x86_io_apic_ops {
16551 unsigned int destination, int vector,
16552 struct io_apic_irq_attr *attr);
16553 void (*eoi_ioapic_pin)(int apic, int pin, int vector);
16554-};
16555+} __no_const;
16556
16557 extern struct x86_init_ops x86_init;
16558 extern struct x86_cpuinit_ops x86_cpuinit;
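
Every ops table in this header is now tagged __no_const: these are function-pointer tables that boot code fills in at runtime, so the constify gcc plugin (which would otherwise force-const any all-function-pointer struct) must be told to leave them writable. A sketch of the usual wiring — the attribute spelling is an assumption based on grsecurity's compiler.h, so verify against the tree you build:

    #ifdef CONSTIFY_PLUGIN
    #define __no_const __attribute__((no_const))  /* plugin-specific attribute */
    #else
    #define __no_const                             /* no-op without the plugin */
    #endif

    struct demo_ops {
            void (*setup)(void);
            int  (*probe)(int id);
    } __no_const;                                  /* assigned at runtime */
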
16559diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
16560index 0415cda..b43d877 100644
16561--- a/arch/x86/include/asm/xsave.h
16562+++ b/arch/x86/include/asm/xsave.h
16563@@ -71,7 +71,9 @@ static inline int xsave_user(struct xsave_struct __user *buf)
16564 return -EFAULT;
16565
16566 __asm__ __volatile__(ASM_STAC "\n"
16567- "1: .byte " REX_PREFIX "0x0f,0xae,0x27\n"
16568+ "1:"
16569+ __copyuser_seg
16570+ ".byte " REX_PREFIX "0x0f,0xae,0x27\n"
16571 "2: " ASM_CLAC "\n"
16572 ".section .fixup,\"ax\"\n"
16573 "3: movl $-1,%[err]\n"
16574@@ -87,12 +89,14 @@ static inline int xsave_user(struct xsave_struct __user *buf)
16575 static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
16576 {
16577 int err;
16578- struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
16579+ struct xsave_struct *xstate = ((__force_kernel struct xsave_struct *)buf);
16580 u32 lmask = mask;
16581 u32 hmask = mask >> 32;
16582
16583 __asm__ __volatile__(ASM_STAC "\n"
16584- "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
16585+ "1:"
16586+ __copyuser_seg
16587+ ".byte " REX_PREFIX "0x0f,0xae,0x2f\n"
16588 "2: " ASM_CLAC "\n"
16589 ".section .fixup,\"ax\"\n"
16590 "3: movl $-1,%[err]\n"
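
Both xsave/xrstor sequences above gain a __copyuser_seg prefix on the instruction that touches the user buffer. Under UDEREF on 32-bit, user space is only reachable through a dedicated segment register, so every user access needs a segment-override prefix; on other configurations the macro expands to nothing. The expansion below is an assumption (the real definition lives in the uaccess.h hunks of the full patch):

    #if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
    #define __copyuser_seg "gs;"    /* %gs holds the user-space segment */
    #else
    #define __copyuser_seg          /* flat layout: no prefix needed */
    #endif
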
16591diff --git a/arch/x86/include/uapi/asm/e820.h b/arch/x86/include/uapi/asm/e820.h
16592index bbae024..e1528f9 100644
16593--- a/arch/x86/include/uapi/asm/e820.h
16594+++ b/arch/x86/include/uapi/asm/e820.h
16595@@ -63,7 +63,7 @@ struct e820map {
16596 #define ISA_START_ADDRESS 0xa0000
16597 #define ISA_END_ADDRESS 0x100000
16598
16599-#define BIOS_BEGIN 0x000a0000
16600+#define BIOS_BEGIN 0x000c0000
16601 #define BIOS_END 0x00100000
16602
16603 #define BIOS_ROM_BASE 0xffe00000
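
Raising BIOS_BEGIN from 0xa0000 to 0xc0000 narrows the range the kernel later treats — and, per the KERNEXEC changes elsewhere in this patch, write-protects — as BIOS ROM, so the legacy VGA window stays out of it: video RAM must remain writable for the text console. For reference, the legacy map below 1 MiB:

    #define VGA_RAM_BEGIN 0x000a0000UL  /* video RAM: must stay writable */
    #define VGA_RAM_END   0x000c0000UL
    #define ROM_BEGIN     0x000c0000UL  /* option ROMs + system BIOS:    */
    #define ROM_END       0x00100000UL  /*   safe to map read-only       */
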
16604diff --git a/arch/x86/kernel/Makefile b/arch/x86/kernel/Makefile
16605index 7bd3bd3..5dac791 100644
16606--- a/arch/x86/kernel/Makefile
16607+++ b/arch/x86/kernel/Makefile
16608@@ -22,7 +22,7 @@ obj-y += time.o ioport.o ldt.o dumpstack.o nmi.o
16609 obj-y += setup.o x86_init.o i8259.o irqinit.o jump_label.o
16610 obj-$(CONFIG_IRQ_WORK) += irq_work.o
16611 obj-y += probe_roms.o
16612-obj-$(CONFIG_X86_32) += i386_ksyms_32.o
16613+obj-$(CONFIG_X86_32) += sys_i386_32.o i386_ksyms_32.o
16614 obj-$(CONFIG_X86_64) += sys_x86_64.o x8664_ksyms_64.o
16615 obj-y += syscall_$(BITS).o
16616 obj-$(CONFIG_X86_64) += vsyscall_64.o
16617diff --git a/arch/x86/kernel/acpi/boot.c b/arch/x86/kernel/acpi/boot.c
16618index 230c8ea..f915130 100644
16619--- a/arch/x86/kernel/acpi/boot.c
16620+++ b/arch/x86/kernel/acpi/boot.c
16621@@ -1361,7 +1361,7 @@ static int __init dmi_ignore_irq0_timer_override(const struct dmi_system_id *d)
16622 * If your system is blacklisted here, but you find that acpi=force
16623 * works for you, please contact linux-acpi@vger.kernel.org
16624 */
16625-static struct dmi_system_id __initdata acpi_dmi_table[] = {
16626+static const struct dmi_system_id __initconst acpi_dmi_table[] = {
16627 /*
16628 * Boxes that need ACPI disabled
16629 */
16630@@ -1436,7 +1436,7 @@ static struct dmi_system_id __initdata acpi_dmi_table[] = {
16631 };
16632
16633 /* second table for DMI checks that should run after early-quirks */
16634-static struct dmi_system_id __initdata acpi_dmi_table_late[] = {
16635+static const struct dmi_system_id __initconst acpi_dmi_table_late[] = {
16636 /*
16637 * HP laptops which use a DSDT reporting as HP/SB400/10000,
16638 * which includes some code which overrides all temperature
16639diff --git a/arch/x86/kernel/acpi/sleep.c b/arch/x86/kernel/acpi/sleep.c
16640index b44577b..27d8443 100644
16641--- a/arch/x86/kernel/acpi/sleep.c
16642+++ b/arch/x86/kernel/acpi/sleep.c
16643@@ -74,8 +74,12 @@ int acpi_suspend_lowlevel(void)
16644 #else /* CONFIG_64BIT */
16645 #ifdef CONFIG_SMP
16646 stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
16647+
16648+ pax_open_kernel();
16649 early_gdt_descr.address =
16650 (unsigned long)get_cpu_gdt_table(smp_processor_id());
16651+ pax_close_kernel();
16652+
16653 initial_gs = per_cpu_offset(smp_processor_id());
16654 #endif
16655 initial_code = (unsigned long)wakeup_long64;
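
early_gdt_descr lives in data that KERNEXEC makes read-only, so the store is bracketed by pax_open_kernel()/pax_close_kernel(), which briefly lift kernel write protection around the one legitimate write. A sketch of the native mechanism, assuming the CR0.WP-based implementation (paravirt guests route this through pv_cpu_ops; my_open/close_kernel are illustrative names):

    static inline unsigned long my_open_kernel(void)
    {
            unsigned long cr0;

            preempt_disable();              /* WP state is per-CPU */
            cr0 = read_cr0();
            write_cr0(cr0 & ~X86_CR0_WP);   /* allow writes to RO pages */
            barrier();
            return cr0;
    }

    static inline void my_close_kernel(unsigned long cr0)
    {
            barrier();
            write_cr0(cr0 | X86_CR0_WP);    /* re-arm write protection */
            preempt_enable();
    }

The same open/write/close pattern recurs throughout this patch wherever a formerly writable table (GDT entries, machine_check_vector, ...) has become read-only.
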
16656diff --git a/arch/x86/kernel/acpi/wakeup_32.S b/arch/x86/kernel/acpi/wakeup_32.S
16657index d1daa66..59fecba 100644
16658--- a/arch/x86/kernel/acpi/wakeup_32.S
16659+++ b/arch/x86/kernel/acpi/wakeup_32.S
16660@@ -29,13 +29,11 @@ wakeup_pmode_return:
16661 # and restore the stack ... but you need gdt for this to work
16662 movl saved_context_esp, %esp
16663
16664- movl %cs:saved_magic, %eax
16665- cmpl $0x12345678, %eax
16666+ cmpl $0x12345678, saved_magic
16667 jne bogus_magic
16668
16669 # jump to place where we left off
16670- movl saved_eip, %eax
16671- jmp *%eax
16672+ jmp *(saved_eip)
16673
16674 bogus_magic:
16675 jmp bogus_magic
16676diff --git a/arch/x86/kernel/alternative.c b/arch/x86/kernel/alternative.c
16677index c15cf9a..0e63558 100644
16678--- a/arch/x86/kernel/alternative.c
16679+++ b/arch/x86/kernel/alternative.c
16680@@ -268,6 +268,13 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
16681 */
16682 for (a = start; a < end; a++) {
16683 instr = (u8 *)&a->instr_offset + a->instr_offset;
16684+
16685+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16686+ instr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16687+ if (instr < (u8 *)_text || (u8 *)_einittext <= instr)
16688+ instr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16689+#endif
16690+
16691 replacement = (u8 *)&a->repl_offset + a->repl_offset;
16692 BUG_ON(a->replacementlen > a->instrlen);
16693 BUG_ON(a->instrlen > sizeof(insnbuf));
16694@@ -299,10 +306,16 @@ static void alternatives_smp_lock(const s32 *start, const s32 *end,
16695 for (poff = start; poff < end; poff++) {
16696 u8 *ptr = (u8 *)poff + *poff;
16697
16698+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16699+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16700+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
16701+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16702+#endif
16703+
16704 if (!*poff || ptr < text || ptr >= text_end)
16705 continue;
16706 /* turn DS segment override prefix into lock prefix */
16707- if (*ptr == 0x3e)
16708+ if (*ktla_ktva(ptr) == 0x3e)
16709 text_poke(ptr, ((unsigned char []){0xf0}), 1);
16710 }
16711 mutex_unlock(&text_mutex);
16712@@ -317,10 +330,16 @@ static void alternatives_smp_unlock(const s32 *start, const s32 *end,
16713 for (poff = start; poff < end; poff++) {
16714 u8 *ptr = (u8 *)poff + *poff;
16715
16716+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
16717+ ptr += ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16718+ if (ptr < (u8 *)_text || (u8 *)_einittext <= ptr)
16719+ ptr -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
16720+#endif
16721+
16722 if (!*poff || ptr < text || ptr >= text_end)
16723 continue;
16724 /* turn lock prefix into DS segment override prefix */
16725- if (*ptr == 0xf0)
16726+ if (*ktla_ktva(ptr) == 0xf0)
16727 text_poke(ptr, ((unsigned char []){0x3E}), 1);
16728 }
16729 mutex_unlock(&text_mutex);
16730@@ -468,7 +487,7 @@ void __init_or_module apply_paravirt(struct paravirt_patch_site *start,
16731
16732 BUG_ON(p->len > MAX_PATCH_LEN);
16733 /* prep the buffer with the original instructions */
16734- memcpy(insnbuf, p->instr, p->len);
16735+ memcpy(insnbuf, ktla_ktva(p->instr), p->len);
16736 used = pv_init_ops.patch(p->instrtype, p->clobbers, insnbuf,
16737 (unsigned long)p->instr, p->len);
16738
16739@@ -515,7 +534,7 @@ void __init alternative_instructions(void)
16740 if (!uniproc_patched || num_possible_cpus() == 1)
16741 free_init_pages("SMP alternatives",
16742 (unsigned long)__smp_locks,
16743- (unsigned long)__smp_locks_end);
16744+ PAGE_ALIGN((unsigned long)__smp_locks_end));
16745 #endif
16746
16747 apply_paravirt(__parainstructions, __parainstructions_end);
16748@@ -535,13 +554,17 @@ void __init alternative_instructions(void)
16749 * instructions. And on the local CPU you need to be protected again NMI or MCE
16750 * handlers seeing an inconsistent instruction while you patch.
16751 */
16752-void *__init_or_module text_poke_early(void *addr, const void *opcode,
16753+void *__kprobes text_poke_early(void *addr, const void *opcode,
16754 size_t len)
16755 {
16756 unsigned long flags;
16757 local_irq_save(flags);
16758- memcpy(addr, opcode, len);
16759+
16760+ pax_open_kernel();
16761+ memcpy(ktla_ktva(addr), opcode, len);
16762 sync_core();
16763+ pax_close_kernel();
16764+
16765 local_irq_restore(flags);
16766 /* Could also do a CLFLUSH here to speed up CPU recovery; but
16767 that causes hangs on some VIA CPUs. */
16768@@ -563,36 +586,22 @@ void *__init_or_module text_poke_early(void *addr, const void *opcode,
16769 */
16770 void *__kprobes text_poke(void *addr, const void *opcode, size_t len)
16771 {
16772- unsigned long flags;
16773- char *vaddr;
16774+ unsigned char *vaddr = ktla_ktva(addr);
16775 struct page *pages[2];
16776- int i;
16777+ size_t i;
16778
16779 if (!core_kernel_text((unsigned long)addr)) {
16780- pages[0] = vmalloc_to_page(addr);
16781- pages[1] = vmalloc_to_page(addr + PAGE_SIZE);
16782+ pages[0] = vmalloc_to_page(vaddr);
16783+ pages[1] = vmalloc_to_page(vaddr + PAGE_SIZE);
16784 } else {
16785- pages[0] = virt_to_page(addr);
16786+ pages[0] = virt_to_page(vaddr);
16787 WARN_ON(!PageReserved(pages[0]));
16788- pages[1] = virt_to_page(addr + PAGE_SIZE);
16789+ pages[1] = virt_to_page(vaddr + PAGE_SIZE);
16790 }
16791 BUG_ON(!pages[0]);
16792- local_irq_save(flags);
16793- set_fixmap(FIX_TEXT_POKE0, page_to_phys(pages[0]));
16794- if (pages[1])
16795- set_fixmap(FIX_TEXT_POKE1, page_to_phys(pages[1]));
16796- vaddr = (char *)fix_to_virt(FIX_TEXT_POKE0);
16797- memcpy(&vaddr[(unsigned long)addr & ~PAGE_MASK], opcode, len);
16798- clear_fixmap(FIX_TEXT_POKE0);
16799- if (pages[1])
16800- clear_fixmap(FIX_TEXT_POKE1);
16801- local_flush_tlb();
16802- sync_core();
16803- /* Could also do a CLFLUSH here to speed up CPU recovery; but
16804- that causes hangs on some VIA CPUs. */
16805+ text_poke_early(addr, opcode, len);
16806 for (i = 0; i < len; i++)
16807- BUG_ON(((char *)addr)[i] != ((char *)opcode)[i]);
16808- local_irq_restore(flags);
16809+ BUG_ON((vaddr)[i] != ((const unsigned char *)opcode)[i]);
16810 return addr;
16811 }
16812
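
The common thread in the alternative.c changes: with KERNEXEC on i386 the kernel executes its text through a different mapping than the one it can write, so every patch-site address is translated with ktla_ktva() before being read or written, and text_poke() now simply delegates to text_poke_early(), which opens the kernel for writing and copies through the writable alias. The assumed shape of the translation helpers (identity without KERNEXEC; see the pgtable headers in the full patch):

    #if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
    #define ktla_ktva(addr) ((addr) + __KERNEL_TEXT_OFFSET) /* text -> writable alias */
    #define ktva_ktla(addr) ((addr) - __KERNEL_TEXT_OFFSET)
    #else
    #define ktla_ktva(addr) (addr)
    #define ktva_ktla(addr) (addr)
    #endif

The ____LOAD_PHYSICAL_ADDR adjustments in apply_alternatives() apply the same idea: recorded addresses are rebased into the executable mapping, and the adjustment is reverted when the result falls outside the kernel text range.
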
16813diff --git a/arch/x86/kernel/apic/apic.c b/arch/x86/kernel/apic/apic.c
16814index 904611b..004dde6 100644
16815--- a/arch/x86/kernel/apic/apic.c
16816+++ b/arch/x86/kernel/apic/apic.c
16817@@ -189,7 +189,7 @@ int first_system_vector = 0xfe;
16818 /*
16819 * Debug level, exported for io_apic.c
16820 */
16821-unsigned int apic_verbosity;
16822+int apic_verbosity;
16823
16824 int pic_mode;
16825
16826@@ -1955,7 +1955,7 @@ void smp_error_interrupt(struct pt_regs *regs)
16827 apic_write(APIC_ESR, 0);
16828 v1 = apic_read(APIC_ESR);
16829 ack_APIC_irq();
16830- atomic_inc(&irq_err_count);
16831+ atomic_inc_unchecked(&irq_err_count);
16832
16833 apic_printk(APIC_DEBUG, KERN_DEBUG "APIC error on CPU%d: %02x(%02x)",
16834 smp_processor_id(), v0 , v1);
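
irq_err_count is a pure statistic, so it moves to atomic_unchecked_t: PaX's REFCOUNT feature instruments ordinary atomic_t so that an increment past INT_MAX traps (closing the classic refcount-overflow exploit primitive), and counters that may legitimately wrap have to opt out explicitly. In miniature:

    static atomic_t           file_refcnt = ATOMIC_INIT(1); /* overflow traps   */
    static atomic_unchecked_t err_stat    = ATOMIC_INIT(0); /* wrapping is fine */

    static void on_error(void)
    {
            atomic_inc_unchecked(&err_stat);  /* uninstrumented increment */
    }

The same split shows up below for irq_mis_count and the mce panic counters.
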
16835diff --git a/arch/x86/kernel/apic/apic_flat_64.c b/arch/x86/kernel/apic/apic_flat_64.c
16836index 00c77cf..2dc6a2d 100644
16837--- a/arch/x86/kernel/apic/apic_flat_64.c
16838+++ b/arch/x86/kernel/apic/apic_flat_64.c
16839@@ -157,7 +157,7 @@ static int flat_probe(void)
16840 return 1;
16841 }
16842
16843-static struct apic apic_flat = {
16844+static struct apic apic_flat __read_only = {
16845 .name = "flat",
16846 .probe = flat_probe,
16847 .acpi_madt_oem_check = flat_acpi_madt_oem_check,
16848@@ -271,7 +271,7 @@ static int physflat_probe(void)
16849 return 0;
16850 }
16851
16852-static struct apic apic_physflat = {
16853+static struct apic apic_physflat __read_only = {
16854
16855 .name = "physical flat",
16856 .probe = physflat_probe,
16857diff --git a/arch/x86/kernel/apic/apic_noop.c b/arch/x86/kernel/apic/apic_noop.c
16858index e145f28..2752888 100644
16859--- a/arch/x86/kernel/apic/apic_noop.c
16860+++ b/arch/x86/kernel/apic/apic_noop.c
16861@@ -119,7 +119,7 @@ static void noop_apic_write(u32 reg, u32 v)
16862 WARN_ON_ONCE(cpu_has_apic && !disable_apic);
16863 }
16864
16865-struct apic apic_noop = {
16866+struct apic apic_noop __read_only = {
16867 .name = "noop",
16868 .probe = noop_probe,
16869 .acpi_madt_oem_check = NULL,
16870diff --git a/arch/x86/kernel/apic/bigsmp_32.c b/arch/x86/kernel/apic/bigsmp_32.c
16871index d50e364..543bee3 100644
16872--- a/arch/x86/kernel/apic/bigsmp_32.c
16873+++ b/arch/x86/kernel/apic/bigsmp_32.c
16874@@ -152,7 +152,7 @@ static int probe_bigsmp(void)
16875 return dmi_bigsmp;
16876 }
16877
16878-static struct apic apic_bigsmp = {
16879+static struct apic apic_bigsmp __read_only = {
16880
16881 .name = "bigsmp",
16882 .probe = probe_bigsmp,
16883diff --git a/arch/x86/kernel/apic/es7000_32.c b/arch/x86/kernel/apic/es7000_32.c
16884index 0874799..a7a7892 100644
16885--- a/arch/x86/kernel/apic/es7000_32.c
16886+++ b/arch/x86/kernel/apic/es7000_32.c
16887@@ -608,8 +608,7 @@ static int es7000_mps_oem_check_cluster(struct mpc_table *mpc, char *oem,
16888 return ret && es7000_apic_is_cluster();
16889 }
16890
16891-/* We've been warned by a false positive warning.Use __refdata to keep calm. */
16892-static struct apic __refdata apic_es7000_cluster = {
16893+static struct apic apic_es7000_cluster __read_only = {
16894
16895 .name = "es7000",
16896 .probe = probe_es7000,
16897@@ -675,7 +674,7 @@ static struct apic __refdata apic_es7000_cluster = {
16898 .x86_32_early_logical_apicid = es7000_early_logical_apicid,
16899 };
16900
16901-static struct apic __refdata apic_es7000 = {
16902+static struct apic apic_es7000 __read_only = {
16903
16904 .name = "es7000",
16905 .probe = probe_es7000,
16906diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c
16907index 9ed796c..e930fe4 100644
16908--- a/arch/x86/kernel/apic/io_apic.c
16909+++ b/arch/x86/kernel/apic/io_apic.c
16910@@ -1060,7 +1060,7 @@ int IO_APIC_get_PCI_irq_vector(int bus, int slot, int pin,
16911 }
16912 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
16913
16914-void lock_vector_lock(void)
16915+void lock_vector_lock(void) __acquires(vector_lock)
16916 {
16917 /* Used so that the online set of cpus does not change
16918 * during assign_irq_vector.
16919@@ -1068,7 +1068,7 @@ void lock_vector_lock(void)
16920 raw_spin_lock(&vector_lock);
16921 }
16922
16923-void unlock_vector_lock(void)
16924+void unlock_vector_lock(void) __releases(vector_lock)
16925 {
16926 raw_spin_unlock(&vector_lock);
16927 }
16928@@ -2362,7 +2362,7 @@ static void ack_apic_edge(struct irq_data *data)
16929 ack_APIC_irq();
16930 }
16931
16932-atomic_t irq_mis_count;
16933+atomic_unchecked_t irq_mis_count;
16934
16935 #ifdef CONFIG_GENERIC_PENDING_IRQ
16936 static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
16937@@ -2503,7 +2503,7 @@ static void ack_apic_level(struct irq_data *data)
16938 * at the cpu.
16939 */
16940 if (!(v & (1 << (i & 0x1f)))) {
16941- atomic_inc(&irq_mis_count);
16942+ atomic_inc_unchecked(&irq_mis_count);
16943
16944 eoi_ioapic_irq(irq, cfg);
16945 }
16946diff --git a/arch/x86/kernel/apic/numaq_32.c b/arch/x86/kernel/apic/numaq_32.c
16947index d661ee9..791fd33 100644
16948--- a/arch/x86/kernel/apic/numaq_32.c
16949+++ b/arch/x86/kernel/apic/numaq_32.c
16950@@ -455,8 +455,7 @@ static void numaq_setup_portio_remap(void)
16951 (u_long) xquad_portio, (u_long) num_quads*XQUAD_PORTIO_QUAD);
16952 }
16953
16954-/* Use __refdata to keep false positive warning calm. */
16955-static struct apic __refdata apic_numaq = {
16956+static struct apic apic_numaq __read_only = {
16957
16958 .name = "NUMAQ",
16959 .probe = probe_numaq,
16960diff --git a/arch/x86/kernel/apic/probe_32.c b/arch/x86/kernel/apic/probe_32.c
16961index eb35ef9..f184a21 100644
16962--- a/arch/x86/kernel/apic/probe_32.c
16963+++ b/arch/x86/kernel/apic/probe_32.c
16964@@ -72,7 +72,7 @@ static int probe_default(void)
16965 return 1;
16966 }
16967
16968-static struct apic apic_default = {
16969+static struct apic apic_default __read_only = {
16970
16971 .name = "default",
16972 .probe = probe_default,
16973diff --git a/arch/x86/kernel/apic/summit_32.c b/arch/x86/kernel/apic/summit_32.c
16974index 77c95c0..434f8a4 100644
16975--- a/arch/x86/kernel/apic/summit_32.c
16976+++ b/arch/x86/kernel/apic/summit_32.c
16977@@ -486,7 +486,7 @@ void setup_summit(void)
16978 }
16979 #endif
16980
16981-static struct apic apic_summit = {
16982+static struct apic apic_summit __read_only = {
16983
16984 .name = "summit",
16985 .probe = probe_summit,
16986diff --git a/arch/x86/kernel/apic/x2apic_cluster.c b/arch/x86/kernel/apic/x2apic_cluster.c
16987index c88baa4..757aee1 100644
16988--- a/arch/x86/kernel/apic/x2apic_cluster.c
16989+++ b/arch/x86/kernel/apic/x2apic_cluster.c
16990@@ -183,7 +183,7 @@ update_clusterinfo(struct notifier_block *nfb, unsigned long action, void *hcpu)
16991 return notifier_from_errno(err);
16992 }
16993
16994-static struct notifier_block __refdata x2apic_cpu_notifier = {
16995+static struct notifier_block x2apic_cpu_notifier = {
16996 .notifier_call = update_clusterinfo,
16997 };
16998
16999@@ -235,7 +235,7 @@ static void cluster_vector_allocation_domain(int cpu, struct cpumask *retmask,
17000 cpumask_and(retmask, mask, per_cpu(cpus_in_cluster, cpu));
17001 }
17002
17003-static struct apic apic_x2apic_cluster = {
17004+static struct apic apic_x2apic_cluster __read_only = {
17005
17006 .name = "cluster x2apic",
17007 .probe = x2apic_cluster_probe,
17008diff --git a/arch/x86/kernel/apic/x2apic_phys.c b/arch/x86/kernel/apic/x2apic_phys.c
17009index 562a76d..a003c0f 100644
17010--- a/arch/x86/kernel/apic/x2apic_phys.c
17011+++ b/arch/x86/kernel/apic/x2apic_phys.c
17012@@ -89,7 +89,7 @@ static int x2apic_phys_probe(void)
17013 return apic == &apic_x2apic_phys;
17014 }
17015
17016-static struct apic apic_x2apic_phys = {
17017+static struct apic apic_x2apic_phys __read_only = {
17018
17019 .name = "physical x2apic",
17020 .probe = x2apic_phys_probe,
17021diff --git a/arch/x86/kernel/apic/x2apic_uv_x.c b/arch/x86/kernel/apic/x2apic_uv_x.c
17022index 794f6eb..67e1db2 100644
17023--- a/arch/x86/kernel/apic/x2apic_uv_x.c
17024+++ b/arch/x86/kernel/apic/x2apic_uv_x.c
17025@@ -342,7 +342,7 @@ static int uv_probe(void)
17026 return apic == &apic_x2apic_uv_x;
17027 }
17028
17029-static struct apic __refdata apic_x2apic_uv_x = {
17030+static struct apic apic_x2apic_uv_x __read_only = {
17031
17032 .name = "UV large system",
17033 .probe = uv_probe,
17034diff --git a/arch/x86/kernel/apm_32.c b/arch/x86/kernel/apm_32.c
17035index 53a4e27..038760a 100644
17036--- a/arch/x86/kernel/apm_32.c
17037+++ b/arch/x86/kernel/apm_32.c
17038@@ -433,7 +433,7 @@ static DEFINE_MUTEX(apm_mutex);
17039 * This is for buggy BIOS's that refer to (real mode) segment 0x40
17040 * even though they are called in protected mode.
17041 */
17042-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
17043+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
17044 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
17045
17046 static const char driver_version[] = "1.16ac"; /* no spaces */
17047@@ -611,7 +611,10 @@ static long __apm_bios_call(void *_call)
17048 BUG_ON(cpu != 0);
17049 gdt = get_cpu_gdt_table(cpu);
17050 save_desc_40 = gdt[0x40 / 8];
17051+
17052+ pax_open_kernel();
17053 gdt[0x40 / 8] = bad_bios_desc;
17054+ pax_close_kernel();
17055
17056 apm_irq_save(flags);
17057 APM_DO_SAVE_SEGS;
17058@@ -620,7 +623,11 @@ static long __apm_bios_call(void *_call)
17059 &call->esi);
17060 APM_DO_RESTORE_SEGS;
17061 apm_irq_restore(flags);
17062+
17063+ pax_open_kernel();
17064 gdt[0x40 / 8] = save_desc_40;
17065+ pax_close_kernel();
17066+
17067 put_cpu();
17068
17069 return call->eax & 0xff;
17070@@ -687,7 +694,10 @@ static long __apm_bios_call_simple(void *_call)
17071 BUG_ON(cpu != 0);
17072 gdt = get_cpu_gdt_table(cpu);
17073 save_desc_40 = gdt[0x40 / 8];
17074+
17075+ pax_open_kernel();
17076 gdt[0x40 / 8] = bad_bios_desc;
17077+ pax_close_kernel();
17078
17079 apm_irq_save(flags);
17080 APM_DO_SAVE_SEGS;
17081@@ -695,7 +705,11 @@ static long __apm_bios_call_simple(void *_call)
17082 &call->eax);
17083 APM_DO_RESTORE_SEGS;
17084 apm_irq_restore(flags);
17085+
17086+ pax_open_kernel();
17087 gdt[0x40 / 8] = save_desc_40;
17088+ pax_close_kernel();
17089+
17090 put_cpu();
17091 return error;
17092 }
17093@@ -2362,12 +2376,15 @@ static int __init apm_init(void)
17094 * code to that CPU.
17095 */
17096 gdt = get_cpu_gdt_table(0);
17097+
17098+ pax_open_kernel();
17099 set_desc_base(&gdt[APM_CS >> 3],
17100 (unsigned long)__va((unsigned long)apm_info.bios.cseg << 4));
17101 set_desc_base(&gdt[APM_CS_16 >> 3],
17102 (unsigned long)__va((unsigned long)apm_info.bios.cseg_16 << 4));
17103 set_desc_base(&gdt[APM_DS >> 3],
17104 (unsigned long)__va((unsigned long)apm_info.bios.dseg << 4));
17105+ pax_close_kernel();
17106
17107 proc_create("apm", 0, NULL, &apm_file_ops);
17108
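
Two details in the apm_32.c hunks: every write to the (now read-only) GDT is wrapped in pax_open_kernel()/pax_close_kernel(), and bad_bios_desc's type byte changes from 0x92 to 0x93. The latter sets the descriptor's accessed bit in advance — when a segment register is loaded, the CPU itself writes that bit into the descriptor, which would fault against a read-only GDT. The access-byte layout, for reference:

    #define SEG_PRESENT  0x80   /* P: segment present              */
    #define SEG_DATA_W   0x12   /* S=1, type=0010: data, writable  */
    #define SEG_ACCESSED 0x01   /* A: set by the CPU on first load */
    /* 0x92 = SEG_PRESENT | SEG_DATA_W; 0x93 adds SEG_ACCESSED up front */
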
17109diff --git a/arch/x86/kernel/asm-offsets.c b/arch/x86/kernel/asm-offsets.c
17110index 2861082..6d4718e 100644
17111--- a/arch/x86/kernel/asm-offsets.c
17112+++ b/arch/x86/kernel/asm-offsets.c
17113@@ -33,6 +33,8 @@ void common(void) {
17114 OFFSET(TI_status, thread_info, status);
17115 OFFSET(TI_addr_limit, thread_info, addr_limit);
17116 OFFSET(TI_preempt_count, thread_info, preempt_count);
17117+ OFFSET(TI_lowest_stack, thread_info, lowest_stack);
17118+ DEFINE(TI_task_thread_sp0, offsetof(struct task_struct, thread.sp0) - offsetof(struct task_struct, tinfo));
17119
17120 BLANK();
17121 OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
17122@@ -53,8 +55,26 @@ void common(void) {
17123 OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
17124 OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
17125 OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
17126+
17127+#ifdef CONFIG_PAX_KERNEXEC
17128+ OFFSET(PV_CPU_write_cr0, pv_cpu_ops, write_cr0);
17129 #endif
17130
17131+#ifdef CONFIG_PAX_MEMORY_UDEREF
17132+ OFFSET(PV_MMU_read_cr3, pv_mmu_ops, read_cr3);
17133+ OFFSET(PV_MMU_write_cr3, pv_mmu_ops, write_cr3);
17134+#ifdef CONFIG_X86_64
17135+ OFFSET(PV_MMU_set_pgd_batched, pv_mmu_ops, set_pgd_batched);
17136+#endif
17137+#endif
17138+
17139+#endif
17140+
17141+ BLANK();
17142+ DEFINE(PAGE_SIZE_asm, PAGE_SIZE);
17143+ DEFINE(PAGE_SHIFT_asm, PAGE_SHIFT);
17144+ DEFINE(THREAD_SIZE_asm, THREAD_SIZE);
17145+
17146 #ifdef CONFIG_XEN
17147 BLANK();
17148 OFFSET(XEN_vcpu_info_mask, vcpu_info, evtchn_upcall_mask);
17149diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
17150index e7c798b..2b2019b 100644
17151--- a/arch/x86/kernel/asm-offsets_64.c
17152+++ b/arch/x86/kernel/asm-offsets_64.c
17153@@ -77,6 +77,7 @@ int main(void)
17154 BLANK();
17155 #undef ENTRY
17156
17157+ DEFINE(TSS_size, sizeof(struct tss_struct));
17158 OFFSET(TSS_ist, tss_struct, x86_tss.ist);
17159 BLANK();
17160
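
asm-offsets.c is never linked; it is compiled only to assembly, and the OFFSET()/DEFINE() markers in that output are turned into #define lines of the generated asm-offsets.h, which is how entry code written in assembly learns structure layouts. The new constants (TI_lowest_stack, TSS_size, PAGE_SIZE_asm, ...) feed the PaX assembly paths added elsewhere in this patch. The mechanism in miniature (include/linux/kbuild.h defines the macros like this):

    #include <stddef.h>

    #define DEFINE(sym, val) \
            asm volatile("\n->" #sym " %0 " #val : : "i" (val))
    #define OFFSET(sym, str, mem) DEFINE(sym, offsetof(struct str, mem))

    struct demo { int a; long b; };

    void emit(void)
    {
            OFFSET(DEMO_b, demo, b);  /* leaves "->DEMO_b $8 demo->b" in the .s */
    }
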
17161diff --git a/arch/x86/kernel/cpu/Makefile b/arch/x86/kernel/cpu/Makefile
17162index b0684e4..22ccfd7 100644
17163--- a/arch/x86/kernel/cpu/Makefile
17164+++ b/arch/x86/kernel/cpu/Makefile
17165@@ -8,10 +8,6 @@ CFLAGS_REMOVE_common.o = -pg
17166 CFLAGS_REMOVE_perf_event.o = -pg
17167 endif
17168
17169-# Make sure load_percpu_segment has no stackprotector
17170-nostackp := $(call cc-option, -fno-stack-protector)
17171-CFLAGS_common.o := $(nostackp)
17172-
17173 obj-y := intel_cacheinfo.o scattered.o topology.o
17174 obj-y += proc.o capflags.o powerflags.o common.o
17175 obj-y += rdrand.o
17176diff --git a/arch/x86/kernel/cpu/amd.c b/arch/x86/kernel/cpu/amd.c
17177index 5013a48..0782c53 100644
17178--- a/arch/x86/kernel/cpu/amd.c
17179+++ b/arch/x86/kernel/cpu/amd.c
17180@@ -744,7 +744,7 @@ static unsigned int __cpuinit amd_size_cache(struct cpuinfo_x86 *c,
17181 unsigned int size)
17182 {
17183 /* AMD errata T13 (order #21922) */
17184- if ((c->x86 == 6)) {
17185+ if (c->x86 == 6) {
17186 /* Duron Rev A0 */
17187 if (c->x86_model == 3 && c->x86_mask == 0)
17188 size = 64;
17189diff --git a/arch/x86/kernel/cpu/common.c b/arch/x86/kernel/cpu/common.c
17190index 22018f7..bc6f5e3 100644
17191--- a/arch/x86/kernel/cpu/common.c
17192+++ b/arch/x86/kernel/cpu/common.c
17193@@ -88,60 +88,6 @@ static const struct cpu_dev __cpuinitconst default_cpu = {
17194
17195 static const struct cpu_dev *this_cpu __cpuinitdata = &default_cpu;
17196
17197-DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
17198-#ifdef CONFIG_X86_64
17199- /*
17200- * We need valid kernel segments for data and code in long mode too
17201- * IRET will check the segment types kkeil 2000/10/28
17202- * Also sysret mandates a special GDT layout
17203- *
17204- * TLS descriptors are currently at a different place compared to i386.
17205- * Hopefully nobody expects them at a fixed place (Wine?)
17206- */
17207- [GDT_ENTRY_KERNEL32_CS] = GDT_ENTRY_INIT(0xc09b, 0, 0xfffff),
17208- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xa09b, 0, 0xfffff),
17209- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc093, 0, 0xfffff),
17210- [GDT_ENTRY_DEFAULT_USER32_CS] = GDT_ENTRY_INIT(0xc0fb, 0, 0xfffff),
17211- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f3, 0, 0xfffff),
17212- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xa0fb, 0, 0xfffff),
17213-#else
17214- [GDT_ENTRY_KERNEL_CS] = GDT_ENTRY_INIT(0xc09a, 0, 0xfffff),
17215- [GDT_ENTRY_KERNEL_DS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
17216- [GDT_ENTRY_DEFAULT_USER_CS] = GDT_ENTRY_INIT(0xc0fa, 0, 0xfffff),
17217- [GDT_ENTRY_DEFAULT_USER_DS] = GDT_ENTRY_INIT(0xc0f2, 0, 0xfffff),
17218- /*
17219- * Segments used for calling PnP BIOS have byte granularity.
17220- * They code segments and data segments have fixed 64k limits,
17221- * the transfer segment sizes are set at run time.
17222- */
17223- /* 32-bit code */
17224- [GDT_ENTRY_PNPBIOS_CS32] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
17225- /* 16-bit code */
17226- [GDT_ENTRY_PNPBIOS_CS16] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
17227- /* 16-bit data */
17228- [GDT_ENTRY_PNPBIOS_DS] = GDT_ENTRY_INIT(0x0092, 0, 0xffff),
17229- /* 16-bit data */
17230- [GDT_ENTRY_PNPBIOS_TS1] = GDT_ENTRY_INIT(0x0092, 0, 0),
17231- /* 16-bit data */
17232- [GDT_ENTRY_PNPBIOS_TS2] = GDT_ENTRY_INIT(0x0092, 0, 0),
17233- /*
17234- * The APM segments have byte granularity and their bases
17235- * are set at run time. All have 64k limits.
17236- */
17237- /* 32-bit code */
17238- [GDT_ENTRY_APMBIOS_BASE] = GDT_ENTRY_INIT(0x409a, 0, 0xffff),
17239- /* 16-bit code */
17240- [GDT_ENTRY_APMBIOS_BASE+1] = GDT_ENTRY_INIT(0x009a, 0, 0xffff),
17241- /* data */
17242- [GDT_ENTRY_APMBIOS_BASE+2] = GDT_ENTRY_INIT(0x4092, 0, 0xffff),
17243-
17244- [GDT_ENTRY_ESPFIX_SS] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
17245- [GDT_ENTRY_PERCPU] = GDT_ENTRY_INIT(0xc092, 0, 0xfffff),
17246- GDT_STACK_CANARY_INIT
17247-#endif
17248-} };
17249-EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
17250-
17251 static int __init x86_xsave_setup(char *s)
17252 {
17253 setup_clear_cpu_cap(X86_FEATURE_XSAVE);
17254@@ -386,7 +332,7 @@ void switch_to_new_gdt(int cpu)
17255 {
17256 struct desc_ptr gdt_descr;
17257
17258- gdt_descr.address = (long)get_cpu_gdt_table(cpu);
17259+ gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
17260 gdt_descr.size = GDT_SIZE - 1;
17261 load_gdt(&gdt_descr);
17262 /* Reload the per-cpu base */
17263@@ -882,6 +828,10 @@ static void __cpuinit identify_cpu(struct cpuinfo_x86 *c)
17264 /* Filter out anything that depends on CPUID levels we don't have */
17265 filter_cpuid_features(c, true);
17266
17267+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
17268+ setup_clear_cpu_cap(X86_FEATURE_SEP);
17269+#endif
17270+
17271 /* If the model name is still unset, do table lookup. */
17272 if (!c->x86_model_id[0]) {
17273 const char *p;
17274@@ -1069,10 +1019,12 @@ static __init int setup_disablecpuid(char *arg)
17275 }
17276 __setup("clearcpuid=", setup_disablecpuid);
17277
17278+DEFINE_PER_CPU(struct thread_info *, current_tinfo) = &init_task.tinfo;
17279+EXPORT_PER_CPU_SYMBOL(current_tinfo);
17280+
17281 #ifdef CONFIG_X86_64
17282 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
17283-struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1,
17284- (unsigned long) nmi_idt_table };
17285+struct desc_ptr nmi_idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) nmi_idt_table };
17286
17287 DEFINE_PER_CPU_FIRST(union irq_stack_union,
17288 irq_stack_union) __aligned(PAGE_SIZE);
17289@@ -1086,7 +1038,7 @@ DEFINE_PER_CPU(struct task_struct *, current_task) ____cacheline_aligned =
17290 EXPORT_PER_CPU_SYMBOL(current_task);
17291
17292 DEFINE_PER_CPU(unsigned long, kernel_stack) =
17293- (unsigned long)&init_thread_union - KERNEL_STACK_OFFSET + THREAD_SIZE;
17294+ (unsigned long)&init_thread_union - 16 + THREAD_SIZE;
17295 EXPORT_PER_CPU_SYMBOL(kernel_stack);
17296
17297 DEFINE_PER_CPU(char *, irq_stack_ptr) =
17298@@ -1231,7 +1183,7 @@ void __cpuinit cpu_init(void)
17299 load_ucode_ap();
17300
17301 cpu = stack_smp_processor_id();
17302- t = &per_cpu(init_tss, cpu);
17303+ t = init_tss + cpu;
17304 oist = &per_cpu(orig_ist, cpu);
17305
17306 #ifdef CONFIG_NUMA
17307@@ -1257,7 +1209,7 @@ void __cpuinit cpu_init(void)
17308 switch_to_new_gdt(cpu);
17309 loadsegment(fs, 0);
17310
17311- load_idt((const struct desc_ptr *)&idt_descr);
17312+ load_idt(&idt_descr);
17313
17314 memset(me->thread.tls_array, 0, GDT_ENTRY_TLS_ENTRIES * 8);
17315 syscall_init();
17316@@ -1266,7 +1218,6 @@ void __cpuinit cpu_init(void)
17317 wrmsrl(MSR_KERNEL_GS_BASE, 0);
17318 barrier();
17319
17320- x86_configure_nx();
17321 enable_x2apic();
17322
17323 /*
17324@@ -1318,7 +1269,7 @@ void __cpuinit cpu_init(void)
17325 {
17326 int cpu = smp_processor_id();
17327 struct task_struct *curr = current;
17328- struct tss_struct *t = &per_cpu(init_tss, cpu);
17329+ struct tss_struct *t = init_tss + cpu;
17330 struct thread_struct *thread = &curr->thread;
17331
17332 show_ucode_info_early();
17333diff --git a/arch/x86/kernel/cpu/intel_cacheinfo.c b/arch/x86/kernel/cpu/intel_cacheinfo.c
17334index 7c6f7d5..8cac382 100644
17335--- a/arch/x86/kernel/cpu/intel_cacheinfo.c
17336+++ b/arch/x86/kernel/cpu/intel_cacheinfo.c
17337@@ -1017,6 +1017,22 @@ static struct attribute *default_attrs[] = {
17338 };
17339
17340 #ifdef CONFIG_AMD_NB
17341+static struct attribute *default_attrs_amd_nb[] = {
17342+ &type.attr,
17343+ &level.attr,
17344+ &coherency_line_size.attr,
17345+ &physical_line_partition.attr,
17346+ &ways_of_associativity.attr,
17347+ &number_of_sets.attr,
17348+ &size.attr,
17349+ &shared_cpu_map.attr,
17350+ &shared_cpu_list.attr,
17351+ NULL,
17352+ NULL,
17353+ NULL,
17354+ NULL
17355+};
17356+
17357 static struct attribute ** __cpuinit amd_l3_attrs(void)
17358 {
17359 static struct attribute **attrs;
17360@@ -1027,18 +1043,7 @@ static struct attribute ** __cpuinit amd_l3_attrs(void)
17361
17362 n = ARRAY_SIZE(default_attrs);
17363
17364- if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
17365- n += 2;
17366-
17367- if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
17368- n += 1;
17369-
17370- attrs = kzalloc(n * sizeof (struct attribute *), GFP_KERNEL);
17371- if (attrs == NULL)
17372- return attrs = default_attrs;
17373-
17374- for (n = 0; default_attrs[n]; n++)
17375- attrs[n] = default_attrs[n];
17376+ attrs = default_attrs_amd_nb;
17377
17378 if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
17379 attrs[n++] = &cache_disable_0.attr;
17380@@ -1089,6 +1094,13 @@ static struct kobj_type ktype_cache = {
17381 .default_attrs = default_attrs,
17382 };
17383
17384+#ifdef CONFIG_AMD_NB
17385+static struct kobj_type ktype_cache_amd_nb = {
17386+ .sysfs_ops = &sysfs_ops,
17387+ .default_attrs = default_attrs_amd_nb,
17388+};
17389+#endif
17390+
17391 static struct kobj_type ktype_percpu_entry = {
17392 .sysfs_ops = &sysfs_ops,
17393 };
17394@@ -1154,20 +1166,26 @@ static int __cpuinit cache_add_dev(struct device *dev)
17395 return retval;
17396 }
17397
17398+#ifdef CONFIG_AMD_NB
17399+ amd_l3_attrs();
17400+#endif
17401+
17402 for (i = 0; i < num_cache_leaves; i++) {
17403+ struct kobj_type *ktype;
17404+
17405 this_object = INDEX_KOBJECT_PTR(cpu, i);
17406 this_object->cpu = cpu;
17407 this_object->index = i;
17408
17409 this_leaf = CPUID4_INFO_IDX(cpu, i);
17410
17411- ktype_cache.default_attrs = default_attrs;
17412+ ktype = &ktype_cache;
17413 #ifdef CONFIG_AMD_NB
17414 if (this_leaf->base.nb)
17415- ktype_cache.default_attrs = amd_l3_attrs();
17416+ ktype = &ktype_cache_amd_nb;
17417 #endif
17418 retval = kobject_init_and_add(&(this_object->kobj),
17419- &ktype_cache,
17420+ ktype,
17421 per_cpu(ici_cache_kobject, cpu),
17422 "index%1lu", i);
17423 if (unlikely(retval)) {
17424@@ -1222,7 +1240,7 @@ static int __cpuinit cacheinfo_cpu_callback(struct notifier_block *nfb,
17425 return NOTIFY_OK;
17426 }
17427
17428-static struct notifier_block __cpuinitdata cacheinfo_cpu_notifier = {
17429+static struct notifier_block cacheinfo_cpu_notifier = {
17430 .notifier_call = cacheinfo_cpu_callback,
17431 };
17432
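
The intel_cacheinfo.c rework replaces the old approach — kzalloc a larger copy of default_attrs and repoint the shared ktype_cache at it — with a second static array carrying pre-reserved NULL slots, plus a dedicated kobj_type per variant. That keeps both kobj_types constifiable while amd_l3_attrs() fills the optional entries exactly once. The reserved-slot pattern, in miniature:

    static struct attribute *attrs_amd_nb[] = {
            &type.attr,
            /* ... the common attributes ... */
            NULL,   /* slot: cache_disable_0 (AMD_NB_L3_INDEX_DISABLE) */
            NULL,   /* slot: cache_disable_1 (AMD_NB_L3_INDEX_DISABLE) */
            NULL,   /* slot: subcaches       (AMD_NB_L3_PARTITIONING)  */
            NULL    /* terminator: always stays NULL                   */
    };
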
17433diff --git a/arch/x86/kernel/cpu/mcheck/mce.c b/arch/x86/kernel/cpu/mcheck/mce.c
17434index 9239504..b2471ce 100644
17435--- a/arch/x86/kernel/cpu/mcheck/mce.c
17436+++ b/arch/x86/kernel/cpu/mcheck/mce.c
17437@@ -45,6 +45,7 @@
17438 #include <asm/processor.h>
17439 #include <asm/mce.h>
17440 #include <asm/msr.h>
17441+#include <asm/local.h>
17442
17443 #include "mce-internal.h"
17444
17445@@ -246,7 +247,7 @@ static void print_mce(struct mce *m)
17446 !(m->mcgstatus & MCG_STATUS_EIPV) ? " !INEXACT!" : "",
17447 m->cs, m->ip);
17448
17449- if (m->cs == __KERNEL_CS)
17450+ if (m->cs == __KERNEL_CS || m->cs == __KERNEXEC_KERNEL_CS)
17451 print_symbol("{%s}", m->ip);
17452 pr_cont("\n");
17453 }
17454@@ -279,10 +280,10 @@ static void print_mce(struct mce *m)
17455
17456 #define PANIC_TIMEOUT 5 /* 5 seconds */
17457
17458-static atomic_t mce_paniced;
17459+static atomic_unchecked_t mce_paniced;
17460
17461 static int fake_panic;
17462-static atomic_t mce_fake_paniced;
17463+static atomic_unchecked_t mce_fake_paniced;
17464
17465 /* Panic in progress. Enable interrupts and wait for final IPI */
17466 static void wait_for_panic(void)
17467@@ -306,7 +307,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
17468 /*
17469 * Make sure only one CPU runs in machine check panic
17470 */
17471- if (atomic_inc_return(&mce_paniced) > 1)
17472+ if (atomic_inc_return_unchecked(&mce_paniced) > 1)
17473 wait_for_panic();
17474 barrier();
17475
17476@@ -314,7 +315,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
17477 console_verbose();
17478 } else {
17479 /* Don't log too much for fake panic */
17480- if (atomic_inc_return(&mce_fake_paniced) > 1)
17481+ if (atomic_inc_return_unchecked(&mce_fake_paniced) > 1)
17482 return;
17483 }
17484 /* First print corrected ones that are still unlogged */
17485@@ -353,7 +354,7 @@ static void mce_panic(char *msg, struct mce *final, char *exp)
17486 if (!fake_panic) {
17487 if (panic_timeout == 0)
17488 panic_timeout = mca_cfg.panic_timeout;
17489- panic(msg);
17490+ panic("%s", msg);
17491 } else
17492 pr_emerg(HW_ERR "Fake kernel panic: %s\n", msg);
17493 }
17494@@ -683,7 +684,7 @@ static int mce_timed_out(u64 *t)
17495 * might have been modified by someone else.
17496 */
17497 rmb();
17498- if (atomic_read(&mce_paniced))
17499+ if (atomic_read_unchecked(&mce_paniced))
17500 wait_for_panic();
17501 if (!mca_cfg.monarch_timeout)
17502 goto out;
17503@@ -1654,7 +1655,7 @@ static void unexpected_machine_check(struct pt_regs *regs, long error_code)
17504 }
17505
17506 /* Call the installed machine check handler for this CPU setup. */
17507-void (*machine_check_vector)(struct pt_regs *, long error_code) =
17508+void (*machine_check_vector)(struct pt_regs *, long error_code) __read_only =
17509 unexpected_machine_check;
17510
17511 /*
17512@@ -1677,7 +1678,9 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
17513 return;
17514 }
17515
17516+ pax_open_kernel();
17517 machine_check_vector = do_machine_check;
17518+ pax_close_kernel();
17519
17520 __mcheck_cpu_init_generic();
17521 __mcheck_cpu_init_vendor(c);
17522@@ -1691,7 +1694,7 @@ void __cpuinit mcheck_cpu_init(struct cpuinfo_x86 *c)
17523 */
17524
17525 static DEFINE_SPINLOCK(mce_chrdev_state_lock);
17526-static int mce_chrdev_open_count; /* #times opened */
17527+static local_t mce_chrdev_open_count; /* #times opened */
17528 static int mce_chrdev_open_exclu; /* already open exclusive? */
17529
17530 static int mce_chrdev_open(struct inode *inode, struct file *file)
17531@@ -1699,7 +1702,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
17532 spin_lock(&mce_chrdev_state_lock);
17533
17534 if (mce_chrdev_open_exclu ||
17535- (mce_chrdev_open_count && (file->f_flags & O_EXCL))) {
17536+ (local_read(&mce_chrdev_open_count) && (file->f_flags & O_EXCL))) {
17537 spin_unlock(&mce_chrdev_state_lock);
17538
17539 return -EBUSY;
17540@@ -1707,7 +1710,7 @@ static int mce_chrdev_open(struct inode *inode, struct file *file)
17541
17542 if (file->f_flags & O_EXCL)
17543 mce_chrdev_open_exclu = 1;
17544- mce_chrdev_open_count++;
17545+ local_inc(&mce_chrdev_open_count);
17546
17547 spin_unlock(&mce_chrdev_state_lock);
17548
17549@@ -1718,7 +1721,7 @@ static int mce_chrdev_release(struct inode *inode, struct file *file)
17550 {
17551 spin_lock(&mce_chrdev_state_lock);
17552
17553- mce_chrdev_open_count--;
17554+ local_dec(&mce_chrdev_open_count);
17555 mce_chrdev_open_exclu = 0;
17556
17557 spin_unlock(&mce_chrdev_state_lock);
17558@@ -2364,7 +2367,7 @@ mce_cpu_callback(struct notifier_block *nfb, unsigned long action, void *hcpu)
17559 return NOTIFY_OK;
17560 }
17561
17562-static struct notifier_block mce_cpu_notifier __cpuinitdata = {
17563+static struct notifier_block mce_cpu_notifier = {
17564 .notifier_call = mce_cpu_callback,
17565 };
17566
17567@@ -2374,7 +2377,7 @@ static __init void mce_init_banks(void)
17568
17569 for (i = 0; i < mca_cfg.banks; i++) {
17570 struct mce_bank *b = &mce_banks[i];
17571- struct device_attribute *a = &b->attr;
17572+ device_attribute_no_const *a = &b->attr;
17573
17574 sysfs_attr_init(&a->attr);
17575 a->attr.name = b->attrname;
17576@@ -2442,7 +2445,7 @@ struct dentry *mce_get_debugfs_dir(void)
17577 static void mce_reset(void)
17578 {
17579 cpu_missing = 0;
17580- atomic_set(&mce_fake_paniced, 0);
17581+ atomic_set_unchecked(&mce_fake_paniced, 0);
17582 atomic_set(&mce_executing, 0);
17583 atomic_set(&mce_callin, 0);
17584 atomic_set(&global_nwo, 0);
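
Besides the counter conversions, note the mce_panic() fix: panic(msg) passes a runtime string as the format argument, so any '%' in it would be interpreted and panic() would walk nonexistent varargs; panic("%s", msg) prints the same text safely. The general form of the hardening:

    void report(const char *text)
    {
            printk(KERN_ERR "%s\n", text);  /* safe: text is treated as data    */
            /* printk(text);                   unsafe: text is a format string  */
    }

The mce_chrdev_open_count switch from int to local_t similarly gives the open counter atomic update primitives (local_inc/local_dec/local_read).
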
17585diff --git a/arch/x86/kernel/cpu/mcheck/p5.c b/arch/x86/kernel/cpu/mcheck/p5.c
17586index 1c044b1..37a2a43 100644
17587--- a/arch/x86/kernel/cpu/mcheck/p5.c
17588+++ b/arch/x86/kernel/cpu/mcheck/p5.c
17589@@ -11,6 +11,7 @@
17590 #include <asm/processor.h>
17591 #include <asm/mce.h>
17592 #include <asm/msr.h>
17593+#include <asm/pgtable.h>
17594
17595 /* By default disabled */
17596 int mce_p5_enabled __read_mostly;
17597@@ -49,7 +50,9 @@ void intel_p5_mcheck_init(struct cpuinfo_x86 *c)
17598 if (!cpu_has(c, X86_FEATURE_MCE))
17599 return;
17600
17601+ pax_open_kernel();
17602 machine_check_vector = pentium_machine_check;
17603+ pax_close_kernel();
17604 /* Make sure the vector pointer is visible before we enable MCEs: */
17605 wmb();
17606
17607diff --git a/arch/x86/kernel/cpu/mcheck/therm_throt.c b/arch/x86/kernel/cpu/mcheck/therm_throt.c
17608index 47a1870..8c019a7 100644
17609--- a/arch/x86/kernel/cpu/mcheck/therm_throt.c
17610+++ b/arch/x86/kernel/cpu/mcheck/therm_throt.c
17611@@ -288,7 +288,7 @@ thermal_throttle_cpu_callback(struct notifier_block *nfb,
17612 return notifier_from_errno(err);
17613 }
17614
17615-static struct notifier_block thermal_throttle_cpu_notifier __cpuinitdata =
17616+static struct notifier_block thermal_throttle_cpu_notifier =
17617 {
17618 .notifier_call = thermal_throttle_cpu_callback,
17619 };
17620diff --git a/arch/x86/kernel/cpu/mcheck/winchip.c b/arch/x86/kernel/cpu/mcheck/winchip.c
17621index e9a701a..35317d6 100644
17622--- a/arch/x86/kernel/cpu/mcheck/winchip.c
17623+++ b/arch/x86/kernel/cpu/mcheck/winchip.c
17624@@ -10,6 +10,7 @@
17625 #include <asm/processor.h>
17626 #include <asm/mce.h>
17627 #include <asm/msr.h>
17628+#include <asm/pgtable.h>
17629
17630 /* Machine check handler for WinChip C6: */
17631 static void winchip_machine_check(struct pt_regs *regs, long error_code)
17632@@ -23,7 +24,9 @@ void winchip_mcheck_init(struct cpuinfo_x86 *c)
17633 {
17634 u32 lo, hi;
17635
17636+ pax_open_kernel();
17637 machine_check_vector = winchip_machine_check;
17638+ pax_close_kernel();
17639 /* Make sure the vector pointer is visible before we enable MCEs: */
17640 wmb();
17641
17642diff --git a/arch/x86/kernel/cpu/mtrr/main.c b/arch/x86/kernel/cpu/mtrr/main.c
17643index 726bf96..81f0526 100644
17644--- a/arch/x86/kernel/cpu/mtrr/main.c
17645+++ b/arch/x86/kernel/cpu/mtrr/main.c
17646@@ -62,7 +62,7 @@ static DEFINE_MUTEX(mtrr_mutex);
17647 u64 size_or_mask, size_and_mask;
17648 static bool mtrr_aps_delayed_init;
17649
17650-static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM];
17651+static const struct mtrr_ops *mtrr_ops[X86_VENDOR_NUM] __read_only;
17652
17653 const struct mtrr_ops *mtrr_if;
17654
17655diff --git a/arch/x86/kernel/cpu/mtrr/mtrr.h b/arch/x86/kernel/cpu/mtrr/mtrr.h
17656index df5e41f..816c719 100644
17657--- a/arch/x86/kernel/cpu/mtrr/mtrr.h
17658+++ b/arch/x86/kernel/cpu/mtrr/mtrr.h
17659@@ -25,7 +25,7 @@ struct mtrr_ops {
17660 int (*validate_add_page)(unsigned long base, unsigned long size,
17661 unsigned int type);
17662 int (*have_wrcomb)(void);
17663-};
17664+} __do_const;
17665
17666 extern int generic_get_free_region(unsigned long base, unsigned long size,
17667 int replace_reg);
17668diff --git a/arch/x86/kernel/cpu/perf_event.c b/arch/x86/kernel/cpu/perf_event.c
17669index 1025f3c..824f677 100644
17670--- a/arch/x86/kernel/cpu/perf_event.c
17671+++ b/arch/x86/kernel/cpu/perf_event.c
17672@@ -1311,7 +1311,7 @@ static void __init pmu_check_apic(void)
17673 pr_info("no hardware sampling interrupt available.\n");
17674 }
17675
17676-static struct attribute_group x86_pmu_format_group = {
17677+static attribute_group_no_const x86_pmu_format_group = {
17678 .name = "format",
17679 .attrs = NULL,
17680 };
17681@@ -1410,7 +1410,7 @@ static struct attribute *events_attr[] = {
17682 NULL,
17683 };
17684
17685-static struct attribute_group x86_pmu_events_group = {
17686+static attribute_group_no_const x86_pmu_events_group = {
17687 .name = "events",
17688 .attrs = events_attr,
17689 };
17690@@ -1920,7 +1920,7 @@ static unsigned long get_segment_base(unsigned int segment)
17691 if (idx > GDT_ENTRIES)
17692 return 0;
17693
17694- desc = __this_cpu_ptr(&gdt_page.gdt[0]);
17695+ desc = get_cpu_gdt_table(smp_processor_id());
17696 }
17697
17698 return get_desc_base(desc + idx);
17699@@ -2010,7 +2010,7 @@ perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
17700 break;
17701
17702 perf_callchain_store(entry, frame.return_address);
17703- fp = frame.next_frame;
17704+ fp = (const void __force_user *)frame.next_frame;
17705 }
17706 }
17707
17708diff --git a/arch/x86/kernel/cpu/perf_event_intel.c b/arch/x86/kernel/cpu/perf_event_intel.c
17709index a9e2207..d70c83a 100644
17710--- a/arch/x86/kernel/cpu/perf_event_intel.c
17711+++ b/arch/x86/kernel/cpu/perf_event_intel.c
17712@@ -2022,10 +2022,10 @@ __init int intel_pmu_init(void)
17713 * v2 and above have a perf capabilities MSR
17714 */
17715 if (version > 1) {
17716- u64 capabilities;
17717+ u64 capabilities = x86_pmu.intel_cap.capabilities;
17718
17719- rdmsrl(MSR_IA32_PERF_CAPABILITIES, capabilities);
17720- x86_pmu.intel_cap.capabilities = capabilities;
17721+ if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &x86_pmu.intel_cap.capabilities))
17722+ x86_pmu.intel_cap.capabilities = capabilities;
17723 }
17724
17725 intel_ds_init();
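
rdmsrl() raises #GP — and, unhandled, an oops — when an MSR is unimplemented, which some hypervisor vCPUs are for MSR_IA32_PERF_CAPABILITIES despite advertising version > 1. rdmsrl_safe() traps the fault and returns non-zero, so the hunk above keeps the previously initialised capabilities value when the read fails. The pattern:

    u64 caps = FALLBACK_CAPS;       /* FALLBACK_CAPS: illustrative default */

    if (rdmsrl_safe(MSR_IA32_PERF_CAPABILITIES, &caps))
            caps = FALLBACK_CAPS;   /* read faulted: keep the fallback */
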
17726diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.c b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
17727index 52441a2..f94fae8 100644
17728--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.c
17729+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.c
17730@@ -3093,7 +3093,7 @@ static void __init uncore_types_exit(struct intel_uncore_type **types)
17731 static int __init uncore_type_init(struct intel_uncore_type *type)
17732 {
17733 struct intel_uncore_pmu *pmus;
17734- struct attribute_group *attr_group;
17735+ attribute_group_no_const *attr_group;
17736 struct attribute **attrs;
17737 int i, j;
17738
17739@@ -3518,7 +3518,7 @@ static int
17740 return NOTIFY_OK;
17741 }
17742
17743-static struct notifier_block uncore_cpu_nb __cpuinitdata = {
17744+static struct notifier_block uncore_cpu_nb = {
17745 .notifier_call = uncore_cpu_notifier,
17746 /*
17747 * to migrate uncore events, our notifier should be executed
17748diff --git a/arch/x86/kernel/cpu/perf_event_intel_uncore.h b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
17749index f952891..4722ad4 100644
17750--- a/arch/x86/kernel/cpu/perf_event_intel_uncore.h
17751+++ b/arch/x86/kernel/cpu/perf_event_intel_uncore.h
17752@@ -488,7 +488,7 @@ struct intel_uncore_box {
17753 struct uncore_event_desc {
17754 struct kobj_attribute attr;
17755 const char *config;
17756-};
17757+} __do_const;
17758
17759 #define INTEL_UNCORE_EVENT_DESC(_name, _config) \
17760 { \
17761diff --git a/arch/x86/kernel/cpuid.c b/arch/x86/kernel/cpuid.c
17762index 1e4dbcf..b9a34c2 100644
17763--- a/arch/x86/kernel/cpuid.c
17764+++ b/arch/x86/kernel/cpuid.c
17765@@ -171,7 +171,7 @@ static int __cpuinit cpuid_class_cpu_callback(struct notifier_block *nfb,
17766 return notifier_from_errno(err);
17767 }
17768
17769-static struct notifier_block __refdata cpuid_class_cpu_notifier =
17770+static struct notifier_block cpuid_class_cpu_notifier =
17771 {
17772 .notifier_call = cpuid_class_cpu_callback,
17773 };
17774diff --git a/arch/x86/kernel/crash.c b/arch/x86/kernel/crash.c
17775index 74467fe..18793d5 100644
17776--- a/arch/x86/kernel/crash.c
17777+++ b/arch/x86/kernel/crash.c
17778@@ -58,10 +58,8 @@ static void kdump_nmi_callback(int cpu, struct pt_regs *regs)
17779 {
17780 #ifdef CONFIG_X86_32
17781 struct pt_regs fixed_regs;
17782-#endif
17783
17784-#ifdef CONFIG_X86_32
17785- if (!user_mode_vm(regs)) {
17786+ if (!user_mode(regs)) {
17787 crash_fixup_ss_esp(&fixed_regs, regs);
17788 regs = &fixed_regs;
17789 }
17790diff --git a/arch/x86/kernel/crash_dump_64.c b/arch/x86/kernel/crash_dump_64.c
17791index afa64ad..dce67dd 100644
17792--- a/arch/x86/kernel/crash_dump_64.c
17793+++ b/arch/x86/kernel/crash_dump_64.c
17794@@ -36,7 +36,7 @@ ssize_t copy_oldmem_page(unsigned long pfn, char *buf,
17795 return -ENOMEM;
17796
17797 if (userbuf) {
17798- if (copy_to_user(buf, vaddr + offset, csize)) {
17799+ if (copy_to_user((char __force_user *)buf, vaddr + offset, csize)) {
17800 iounmap(vaddr);
17801 return -EFAULT;
17802 }
17803diff --git a/arch/x86/kernel/doublefault_32.c b/arch/x86/kernel/doublefault_32.c
17804index 155a13f..1672b9b 100644
17805--- a/arch/x86/kernel/doublefault_32.c
17806+++ b/arch/x86/kernel/doublefault_32.c
17807@@ -11,7 +11,7 @@
17808
17809 #define DOUBLEFAULT_STACKSIZE (1024)
17810 static unsigned long doublefault_stack[DOUBLEFAULT_STACKSIZE];
17811-#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE)
17812+#define STACK_START (unsigned long)(doublefault_stack+DOUBLEFAULT_STACKSIZE-2)
17813
17814 #define ptr_ok(x) ((x) > PAGE_OFFSET && (x) < PAGE_OFFSET + MAXMEM)
17815
17816@@ -21,7 +21,7 @@ static void doublefault_fn(void)
17817 unsigned long gdt, tss;
17818
17819 native_store_gdt(&gdt_desc);
17820- gdt = gdt_desc.address;
17821+ gdt = (unsigned long)gdt_desc.address;
17822
17823 printk(KERN_EMERG "PANIC: double fault, gdt at %08lx [%d bytes]\n", gdt, gdt_desc.size);
17824
17825@@ -58,10 +58,10 @@ struct tss_struct doublefault_tss __cacheline_aligned = {
17826 /* 0x2 bit is always set */
17827 .flags = X86_EFLAGS_SF | 0x2,
17828 .sp = STACK_START,
17829- .es = __USER_DS,
17830+ .es = __KERNEL_DS,
17831 .cs = __KERNEL_CS,
17832 .ss = __KERNEL_DS,
17833- .ds = __USER_DS,
17834+ .ds = __KERNEL_DS,
17835 .fs = __KERNEL_PERCPU,
17836
17837 .__cr3 = __pa_nodebug(swapper_pg_dir),
17838diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
17839index deb6421..622e0ed 100644
17840--- a/arch/x86/kernel/dumpstack.c
17841+++ b/arch/x86/kernel/dumpstack.c
17842@@ -2,6 +2,9 @@
17843 * Copyright (C) 1991, 1992 Linus Torvalds
17844 * Copyright (C) 2000, 2001, 2002 Andi Kleen, SuSE Labs
17845 */
17846+#ifdef CONFIG_GRKERNSEC_HIDESYM
17847+#define __INCLUDED_BY_HIDESYM 1
17848+#endif
17849 #include <linux/kallsyms.h>
17850 #include <linux/kprobes.h>
17851 #include <linux/uaccess.h>
17852@@ -35,16 +38,14 @@ void printk_address(unsigned long address, int reliable)
17853 static void
17854 print_ftrace_graph_addr(unsigned long addr, void *data,
17855 const struct stacktrace_ops *ops,
17856- struct thread_info *tinfo, int *graph)
17857+ struct task_struct *task, int *graph)
17858 {
17859- struct task_struct *task;
17860 unsigned long ret_addr;
17861 int index;
17862
17863 if (addr != (unsigned long)return_to_handler)
17864 return;
17865
17866- task = tinfo->task;
17867 index = task->curr_ret_stack;
17868
17869 if (!task->ret_stack || index < *graph)
17870@@ -61,7 +62,7 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
17871 static inline void
17872 print_ftrace_graph_addr(unsigned long addr, void *data,
17873 const struct stacktrace_ops *ops,
17874- struct thread_info *tinfo, int *graph)
17875+ struct task_struct *task, int *graph)
17876 { }
17877 #endif
17878
17879@@ -72,10 +73,8 @@ print_ftrace_graph_addr(unsigned long addr, void *data,
17880 * severe exception (double fault, nmi, stack fault, debug, mce) hardware stack
17881 */
17882
17883-static inline int valid_stack_ptr(struct thread_info *tinfo,
17884- void *p, unsigned int size, void *end)
17885+static inline int valid_stack_ptr(void *t, void *p, unsigned int size, void *end)
17886 {
17887- void *t = tinfo;
17888 if (end) {
17889 if (p < end && p >= (end-THREAD_SIZE))
17890 return 1;
17891@@ -86,14 +85,14 @@ static inline int valid_stack_ptr(struct thread_info *tinfo,
17892 }
17893
17894 unsigned long
17895-print_context_stack(struct thread_info *tinfo,
17896+print_context_stack(struct task_struct *task, void *stack_start,
17897 unsigned long *stack, unsigned long bp,
17898 const struct stacktrace_ops *ops, void *data,
17899 unsigned long *end, int *graph)
17900 {
17901 struct stack_frame *frame = (struct stack_frame *)bp;
17902
17903- while (valid_stack_ptr(tinfo, stack, sizeof(*stack), end)) {
17904+ while (valid_stack_ptr(stack_start, stack, sizeof(*stack), end)) {
17905 unsigned long addr;
17906
17907 addr = *stack;
17908@@ -105,7 +104,7 @@ print_context_stack(struct thread_info *tinfo,
17909 } else {
17910 ops->address(data, addr, 0);
17911 }
17912- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
17913+ print_ftrace_graph_addr(addr, data, ops, task, graph);
17914 }
17915 stack++;
17916 }
17917@@ -114,7 +113,7 @@ print_context_stack(struct thread_info *tinfo,
17918 EXPORT_SYMBOL_GPL(print_context_stack);
17919
17920 unsigned long
17921-print_context_stack_bp(struct thread_info *tinfo,
17922+print_context_stack_bp(struct task_struct *task, void *stack_start,
17923 unsigned long *stack, unsigned long bp,
17924 const struct stacktrace_ops *ops, void *data,
17925 unsigned long *end, int *graph)
17926@@ -122,7 +121,7 @@ print_context_stack_bp(struct thread_info *tinfo,
17927 struct stack_frame *frame = (struct stack_frame *)bp;
17928 unsigned long *ret_addr = &frame->return_address;
17929
17930- while (valid_stack_ptr(tinfo, ret_addr, sizeof(*ret_addr), end)) {
17931+ while (valid_stack_ptr(stack_start, ret_addr, sizeof(*ret_addr), end)) {
17932 unsigned long addr = *ret_addr;
17933
17934 if (!__kernel_text_address(addr))
17935@@ -131,7 +130,7 @@ print_context_stack_bp(struct thread_info *tinfo,
17936 ops->address(data, addr, 1);
17937 frame = frame->next_frame;
17938 ret_addr = &frame->return_address;
17939- print_ftrace_graph_addr(addr, data, ops, tinfo, graph);
17940+ print_ftrace_graph_addr(addr, data, ops, task, graph);
17941 }
17942
17943 return (unsigned long)frame;
17944@@ -219,6 +218,8 @@ unsigned __kprobes long oops_begin(void)
17945 }
17946 EXPORT_SYMBOL_GPL(oops_begin);
17947
17948+extern void gr_handle_kernel_exploit(void);
17949+
17950 void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
17951 {
17952 if (regs && kexec_should_crash(current))
17953@@ -240,7 +241,10 @@ void __kprobes oops_end(unsigned long flags, struct pt_regs *regs, int signr)
17954 panic("Fatal exception in interrupt");
17955 if (panic_on_oops)
17956 panic("Fatal exception");
17957- do_exit(signr);
17958+
17959+ gr_handle_kernel_exploit();
17960+
17961+ do_group_exit(signr);
17962 }
17963
17964 int __kprobes __die(const char *str, struct pt_regs *regs, long err)
17965@@ -268,7 +272,7 @@ int __kprobes __die(const char *str, struct pt_regs *regs, long err)
17966 print_modules();
17967 show_regs(regs);
17968 #ifdef CONFIG_X86_32
17969- if (user_mode_vm(regs)) {
17970+ if (user_mode(regs)) {
17971 sp = regs->sp;
17972 ss = regs->ss & 0xffff;
17973 } else {
17974@@ -296,7 +300,7 @@ void die(const char *str, struct pt_regs *regs, long err)
17975 unsigned long flags = oops_begin();
17976 int sig = SIGSEGV;
17977
17978- if (!user_mode_vm(regs))
17979+ if (!user_mode(regs))
17980 report_bug(regs->ip, regs);
17981
17982 if (__die(str, regs, err))
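
Two behavioural changes in oops_end(): gr_handle_kernel_exploit() is grsecurity's reaction hook (it can penalise the offending user when an oops looks attacker-triggered), and the stock do_exit(signr) becomes do_group_exit(signr), so the whole thread group of the faulting task dies rather than leaving sibling threads running over possibly corrupted state. Schematically:

    static void fatal_oops_exit(int signr)
    {
            gr_handle_kernel_exploit();  /* grsecurity reaction hook        */
            do_group_exit(signr);        /* kills every thread in the group,
                                            not just the faulting one       */
    }
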
17983diff --git a/arch/x86/kernel/dumpstack_32.c b/arch/x86/kernel/dumpstack_32.c
17984index f2a1770..540657f 100644
17985--- a/arch/x86/kernel/dumpstack_32.c
17986+++ b/arch/x86/kernel/dumpstack_32.c
17987@@ -38,15 +38,13 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
17988 bp = stack_frame(task, regs);
17989
17990 for (;;) {
17991- struct thread_info *context;
17992+ void *stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
17993
17994- context = (struct thread_info *)
17995- ((unsigned long)stack & (~(THREAD_SIZE - 1)));
17996- bp = ops->walk_stack(context, stack, bp, ops, data, NULL, &graph);
17997+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
17998
17999- stack = (unsigned long *)context->previous_esp;
18000- if (!stack)
18001+ if (stack_start == task_stack_page(task))
18002 break;
18003+ stack = *(unsigned long **)stack_start;
18004 if (ops->stack(data, "IRQ") < 0)
18005 break;
18006 touch_nmi_watchdog();
18007@@ -87,27 +85,28 @@ void show_regs(struct pt_regs *regs)
18008 int i;
18009
18010 show_regs_print_info(KERN_EMERG);
18011- __show_regs(regs, !user_mode_vm(regs));
18012+ __show_regs(regs, !user_mode(regs));
18013
18014 /*
18015 * When in-kernel, we also print out the stack and code at the
18016 * time of the fault..
18017 */
18018- if (!user_mode_vm(regs)) {
18019+ if (!user_mode(regs)) {
18020 unsigned int code_prologue = code_bytes * 43 / 64;
18021 unsigned int code_len = code_bytes;
18022 unsigned char c;
18023 u8 *ip;
18024+ unsigned long cs_base = get_desc_base(&get_cpu_gdt_table(0)[(0xffff & regs->cs) >> 3]);
18025
18026 pr_emerg("Stack:\n");
18027 show_stack_log_lvl(NULL, regs, &regs->sp, 0, KERN_EMERG);
18028
18029 pr_emerg("Code:");
18030
18031- ip = (u8 *)regs->ip - code_prologue;
18032+ ip = (u8 *)regs->ip - code_prologue + cs_base;
18033 if (ip < (u8 *)PAGE_OFFSET || probe_kernel_address(ip, c)) {
18034 /* try starting at IP */
18035- ip = (u8 *)regs->ip;
18036+ ip = (u8 *)regs->ip + cs_base;
18037 code_len = code_len - code_prologue + 1;
18038 }
18039 for (i = 0; i < code_len; i++, ip++) {
18040@@ -116,7 +115,7 @@ void show_regs(struct pt_regs *regs)
18041 pr_cont(" Bad EIP value.");
18042 break;
18043 }
18044- if (ip == (u8 *)regs->ip)
18045+ if (ip == (u8 *)regs->ip + cs_base)
18046 pr_cont(" <%02x>", c);
18047 else
18048 pr_cont(" %02x", c);
18049@@ -129,6 +128,7 @@ int is_valid_bugaddr(unsigned long ip)
18050 {
18051 unsigned short ud2;
18052
18053+ ip = ktla_ktva(ip);
18054 if (ip < PAGE_OFFSET)
18055 return 0;
18056 if (probe_kernel_address((unsigned short *)ip, ud2))
18057@@ -136,3 +136,15 @@ int is_valid_bugaddr(unsigned long ip)
18058
18059 return ud2 == 0x0b0f;
18060 }
18061+
18062+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
18063+void pax_check_alloca(unsigned long size)
18064+{
18065+ unsigned long sp = (unsigned long)&sp, stack_left;
18066+
18067+ /* all kernel stacks are of the same size */
18068+ stack_left = sp & (THREAD_SIZE - 1);
18069+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
18070+}
18071+EXPORT_SYMBOL(pax_check_alloca);
18072+#endif
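
pax_check_alloca() is the runtime half of PaX's STACKLEAK: the gcc plugin inserts a call before every variable-length stack allocation, and since 32-bit kernel stacks are THREAD_SIZE-sized and THREAD_SIZE-aligned, the remaining room is simply sp masked with THREAD_SIZE - 1; the check BUGs if the allocation would dip into a 256-byte guard. What the instrumented code effectively looks like, as a sketch:

    void example(size_t n)
    {
            long *buf;

            pax_check_alloca(n * sizeof(long));       /* inserted by the plugin */
            buf = __builtin_alloca(n * sizeof(long));
            /* ... use buf ... */
    }

The 64-bit variant that begins below is longer only because an x86-64 task may be running on an IRQ or exception stack, each with its own size and alignment.
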
18073diff --git a/arch/x86/kernel/dumpstack_64.c b/arch/x86/kernel/dumpstack_64.c
18074index addb207..99635fa 100644
18075--- a/arch/x86/kernel/dumpstack_64.c
18076+++ b/arch/x86/kernel/dumpstack_64.c
18077@@ -119,9 +119,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18078 unsigned long *irq_stack_end =
18079 (unsigned long *)per_cpu(irq_stack_ptr, cpu);
18080 unsigned used = 0;
18081- struct thread_info *tinfo;
18082 int graph = 0;
18083 unsigned long dummy;
18084+ void *stack_start;
18085
18086 if (!task)
18087 task = current;
18088@@ -142,10 +142,10 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18089 * current stack address. If the stacks consist of nested
18090 * exceptions
18091 */
18092- tinfo = task_thread_info(task);
18093 for (;;) {
18094 char *id;
18095 unsigned long *estack_end;
18096+
18097 estack_end = in_exception_stack(cpu, (unsigned long)stack,
18098 &used, &id);
18099
18100@@ -153,7 +153,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18101 if (ops->stack(data, id) < 0)
18102 break;
18103
18104- bp = ops->walk_stack(tinfo, stack, bp, ops,
18105+ bp = ops->walk_stack(task, estack_end - EXCEPTION_STKSZ, stack, bp, ops,
18106 data, estack_end, &graph);
18107 ops->stack(data, "<EOE>");
18108 /*
18109@@ -161,6 +161,8 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18110 * second-to-last pointer (index -2 to end) in the
18111 * exception stack:
18112 */
18113+ if ((u16)estack_end[-1] != __KERNEL_DS)
18114+ goto out;
18115 stack = (unsigned long *) estack_end[-2];
18116 continue;
18117 }
18118@@ -172,7 +174,7 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18119 if (in_irq_stack(stack, irq_stack, irq_stack_end)) {
18120 if (ops->stack(data, "IRQ") < 0)
18121 break;
18122- bp = ops->walk_stack(tinfo, stack, bp,
18123+ bp = ops->walk_stack(task, irq_stack, stack, bp,
18124 ops, data, irq_stack_end, &graph);
18125 /*
18126 * We link to the next stack (which would be
18127@@ -191,7 +193,9 @@ void dump_trace(struct task_struct *task, struct pt_regs *regs,
18128 /*
18129 * This handles the process stack:
18130 */
18131- bp = ops->walk_stack(tinfo, stack, bp, ops, data, NULL, &graph);
18132+ stack_start = (void *)((unsigned long)stack & ~(THREAD_SIZE-1));
18133+ bp = ops->walk_stack(task, stack_start, stack, bp, ops, data, NULL, &graph);
18134+out:
18135 put_cpu();
18136 }
18137 EXPORT_SYMBOL(dump_trace);
18138@@ -300,3 +304,50 @@ int is_valid_bugaddr(unsigned long ip)
18139
18140 return ud2 == 0x0b0f;
18141 }
18142+
18143+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
18144+void pax_check_alloca(unsigned long size)
18145+{
18146+ unsigned long sp = (unsigned long)&sp, stack_start, stack_end;
18147+ unsigned cpu, used;
18148+ char *id;
18149+
18150+ /* check the process stack first */
18151+ stack_start = (unsigned long)task_stack_page(current);
18152+ stack_end = stack_start + THREAD_SIZE;
18153+ if (likely(stack_start <= sp && sp < stack_end)) {
18154+ unsigned long stack_left = sp & (THREAD_SIZE - 1);
18155+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
18156+ return;
18157+ }
18158+
18159+ cpu = get_cpu();
18160+
18161+ /* check the irq stacks */
18162+ stack_end = (unsigned long)per_cpu(irq_stack_ptr, cpu);
18163+ stack_start = stack_end - IRQ_STACK_SIZE;
18164+ if (stack_start <= sp && sp < stack_end) {
18165+ unsigned long stack_left = sp & (IRQ_STACK_SIZE - 1);
18166+ put_cpu();
18167+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
18168+ return;
18169+ }
18170+
18171+ /* check the exception stacks */
18172+ used = 0;
18173+ stack_end = (unsigned long)in_exception_stack(cpu, sp, &used, &id);
18174+ stack_start = stack_end - EXCEPTION_STKSZ;
18175+ if (stack_end && stack_start <= sp && sp < stack_end) {
18176+ unsigned long stack_left = sp & (EXCEPTION_STKSZ - 1);
18177+ put_cpu();
18178+ BUG_ON(stack_left < 256 || size >= stack_left - 256);
18179+ return;
18180+ }
18181+
18182+ put_cpu();
18183+
18184+ /* unknown stack */
18185+ BUG();
18186+}
18187+EXPORT_SYMBOL(pax_check_alloca);
18188+#endif
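
All three range checks in the 64-bit variant above rest on the same
alignment trick as the 32-bit code: each stack type is assumed to be
naturally aligned to its own size, so masking the stack pointer yields
the free space directly. A standalone demonstration (the addresses are
made up):

#include <assert.h>

int main(void)
{
        unsigned long stack_base = 0xffff880000448000UL; /* 4 KiB aligned */
        unsigned long sp = stack_base + 0xd0;

        /* with a 4 KiB exception stack, sp & (size - 1) counts the
         * bytes still free below the stack pointer */
        assert((sp & (4096 - 1)) == 0xd0);
        return 0;
}
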
18189diff --git a/arch/x86/kernel/e820.c b/arch/x86/kernel/e820.c
18190index d32abea..74daf4f 100644
18191--- a/arch/x86/kernel/e820.c
18192+++ b/arch/x86/kernel/e820.c
18193@@ -800,8 +800,8 @@ unsigned long __init e820_end_of_low_ram_pfn(void)
18194
18195 static void early_panic(char *msg)
18196 {
18197- early_printk(msg);
18198- panic(msg);
18199+ early_printk("%s", msg);
18200+ panic("%s", msg);
18201 }
18202
18203 static int userdef __initdata;
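
The early_panic() change above is a format-string hardening fix: passing
msg as the format means any '%' sequence inside it would be parsed as a
conversion, while an explicit "%s" treats the whole string as data. A
userspace illustration of the difference:

#include <stdio.h>

int main(void)
{
        /* a message that happens to contain a conversion specifier */
        const char *msg = "e820 map has 100%s coverage";

        /* printf(msg); would read a bogus char * for the %s */
        printf("%s\n", msg);    /* safe: msg is data, not a format */
        return 0;
}
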
18204diff --git a/arch/x86/kernel/early_printk.c b/arch/x86/kernel/early_printk.c
18205index d15f575..d692043 100644
18206--- a/arch/x86/kernel/early_printk.c
18207+++ b/arch/x86/kernel/early_printk.c
18208@@ -7,6 +7,7 @@
18209 #include <linux/pci_regs.h>
18210 #include <linux/pci_ids.h>
18211 #include <linux/errno.h>
18212+#include <linux/sched.h>
18213 #include <asm/io.h>
18214 #include <asm/processor.h>
18215 #include <asm/fcntl.h>
18216diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
18217index 8f3e2de..caecc4e 100644
18218--- a/arch/x86/kernel/entry_32.S
18219+++ b/arch/x86/kernel/entry_32.S
18220@@ -177,13 +177,153 @@
18221 /*CFI_REL_OFFSET gs, PT_GS*/
18222 .endm
18223 .macro SET_KERNEL_GS reg
18224+
18225+#ifdef CONFIG_CC_STACKPROTECTOR
18226 movl $(__KERNEL_STACK_CANARY), \reg
18227+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
18228+ movl $(__USER_DS), \reg
18229+#else
18230+ xorl \reg, \reg
18231+#endif
18232+
18233 movl \reg, %gs
18234 .endm
18235
18236 #endif /* CONFIG_X86_32_LAZY_GS */
18237
18238-.macro SAVE_ALL
18239+.macro pax_enter_kernel
18240+#ifdef CONFIG_PAX_KERNEXEC
18241+ call pax_enter_kernel
18242+#endif
18243+.endm
18244+
18245+.macro pax_exit_kernel
18246+#ifdef CONFIG_PAX_KERNEXEC
18247+ call pax_exit_kernel
18248+#endif
18249+.endm
18250+
18251+#ifdef CONFIG_PAX_KERNEXEC
18252+ENTRY(pax_enter_kernel)
18253+#ifdef CONFIG_PARAVIRT
18254+ pushl %eax
18255+ pushl %ecx
18256+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0)
18257+ mov %eax, %esi
18258+#else
18259+ mov %cr0, %esi
18260+#endif
18261+ bts $16, %esi
18262+ jnc 1f
18263+ mov %cs, %esi
18264+ cmp $__KERNEL_CS, %esi
18265+ jz 3f
18266+ ljmp $__KERNEL_CS, $3f
18267+1: ljmp $__KERNEXEC_KERNEL_CS, $2f
18268+2:
18269+#ifdef CONFIG_PARAVIRT
18270+ mov %esi, %eax
18271+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0)
18272+#else
18273+ mov %esi, %cr0
18274+#endif
18275+3:
18276+#ifdef CONFIG_PARAVIRT
18277+ popl %ecx
18278+ popl %eax
18279+#endif
18280+ ret
18281+ENDPROC(pax_enter_kernel)
18282+
18283+ENTRY(pax_exit_kernel)
18284+#ifdef CONFIG_PARAVIRT
18285+ pushl %eax
18286+ pushl %ecx
18287+#endif
18288+ mov %cs, %esi
18289+ cmp $__KERNEXEC_KERNEL_CS, %esi
18290+ jnz 2f
18291+#ifdef CONFIG_PARAVIRT
18292+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_read_cr0);
18293+ mov %eax, %esi
18294+#else
18295+ mov %cr0, %esi
18296+#endif
18297+ btr $16, %esi
18298+ ljmp $__KERNEL_CS, $1f
18299+1:
18300+#ifdef CONFIG_PARAVIRT
18301+ mov %esi, %eax
18302+ call PARA_INDIRECT(pv_cpu_ops+PV_CPU_write_cr0);
18303+#else
18304+ mov %esi, %cr0
18305+#endif
18306+2:
18307+#ifdef CONFIG_PARAVIRT
18308+ popl %ecx
18309+ popl %eax
18310+#endif
18311+ ret
18312+ENDPROC(pax_exit_kernel)
18313+#endif
18314+
18315+ .macro pax_erase_kstack
18316+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
18317+ call pax_erase_kstack
18318+#endif
18319+ .endm
18320+
18321+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
18322+/*
18323+ * ebp: thread_info
18324+ */
18325+ENTRY(pax_erase_kstack)
18326+ pushl %edi
18327+ pushl %ecx
18328+ pushl %eax
18329+
18330+ mov TI_lowest_stack(%ebp), %edi
18331+ mov $-0xBEEF, %eax
18332+ std
18333+
18334+1: mov %edi, %ecx
18335+ and $THREAD_SIZE_asm - 1, %ecx
18336+ shr $2, %ecx
18337+ repne scasl
18338+ jecxz 2f
18339+
18340+ cmp $2*16, %ecx
18341+ jc 2f
18342+
18343+ mov $2*16, %ecx
18344+ repe scasl
18345+ jecxz 2f
18346+ jne 1b
18347+
18348+2: cld
18349+ mov %esp, %ecx
18350+ sub %edi, %ecx
18351+
18352+ cmp $THREAD_SIZE_asm, %ecx
18353+ jb 3f
18354+ ud2
18355+3:
18356+
18357+ shr $2, %ecx
18358+ rep stosl
18359+
18360+ mov TI_task_thread_sp0(%ebp), %edi
18361+ sub $128, %edi
18362+ mov %edi, TI_lowest_stack(%ebp)
18363+
18364+ popl %eax
18365+ popl %ecx
18366+ popl %edi
18367+ ret
18368+ENDPROC(pax_erase_kstack)
18369+#endif
18370+
18371+.macro __SAVE_ALL _DS
18372 cld
18373 PUSH_GS
18374 pushl_cfi %fs
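
The scan loop in pax_erase_kstack above (scasl with the direction flag
set) walks down from the recorded low-water mark until it finds a
128-byte run of the -0xBEEF poison pattern, then refills everything from
that point up to the live stack pointer. A rough C rendering of the same
algorithm, assuming word pointers and ignoring the register juggling
(the function and parameter names are invented for illustration):

#define POISON  0xffff4111u     /* what "mov $-0xBEEF, %eax" loads */
#define RUN     32              /* 2*16 longs == 128 bytes, as in the asm */

static void erase_kstack_model(unsigned int *stack_base,
                               unsigned int *lowest, unsigned int *sp)
{
        unsigned int *p = lowest;

        while (p - stack_base >= RUN) {
                if (*p != POISON) {     /* used word: keep descending */
                        p--;
                        continue;
                }
                int run = 1;
                while (run < RUN && p[-run] == POISON)
                        run++;
                if (run == RUN)
                        break;          /* genuinely untouched region found */
                p -= run;               /* stray poison value: go deeper */
        }
        while (p < sp)                  /* re-poison the used range */
                *p++ = POISON;
}
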
18375@@ -206,7 +346,7 @@
18376 CFI_REL_OFFSET ecx, 0
18377 pushl_cfi %ebx
18378 CFI_REL_OFFSET ebx, 0
18379- movl $(__USER_DS), %edx
18380+ movl $\_DS, %edx
18381 movl %edx, %ds
18382 movl %edx, %es
18383 movl $(__KERNEL_PERCPU), %edx
18384@@ -214,6 +354,15 @@
18385 SET_KERNEL_GS %edx
18386 .endm
18387
18388+.macro SAVE_ALL
18389+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
18390+ __SAVE_ALL __KERNEL_DS
18391+ pax_enter_kernel
18392+#else
18393+ __SAVE_ALL __USER_DS
18394+#endif
18395+.endm
18396+
18397 .macro RESTORE_INT_REGS
18398 popl_cfi %ebx
18399 CFI_RESTORE ebx
18400@@ -297,7 +446,7 @@ ENTRY(ret_from_fork)
18401 popfl_cfi
18402 jmp syscall_exit
18403 CFI_ENDPROC
18404-END(ret_from_fork)
18405+ENDPROC(ret_from_fork)
18406
18407 ENTRY(ret_from_kernel_thread)
18408 CFI_STARTPROC
18409@@ -344,7 +493,15 @@ ret_from_intr:
18410 andl $SEGMENT_RPL_MASK, %eax
18411 #endif
18412 cmpl $USER_RPL, %eax
18413+
18414+#ifdef CONFIG_PAX_KERNEXEC
18415+ jae resume_userspace
18416+
18417+ pax_exit_kernel
18418+ jmp resume_kernel
18419+#else
18420 jb resume_kernel # not returning to v8086 or userspace
18421+#endif
18422
18423 ENTRY(resume_userspace)
18424 LOCKDEP_SYS_EXIT
18425@@ -356,8 +513,8 @@ ENTRY(resume_userspace)
18426 andl $_TIF_WORK_MASK, %ecx # is there any work to be done on
18427 # int/exception return?
18428 jne work_pending
18429- jmp restore_all
18430-END(ret_from_exception)
18431+ jmp restore_all_pax
18432+ENDPROC(ret_from_exception)
18433
18434 #ifdef CONFIG_PREEMPT
18435 ENTRY(resume_kernel)
18436@@ -372,7 +529,7 @@ need_resched:
18437 jz restore_all
18438 call preempt_schedule_irq
18439 jmp need_resched
18440-END(resume_kernel)
18441+ENDPROC(resume_kernel)
18442 #endif
18443 CFI_ENDPROC
18444 /*
18445@@ -406,30 +563,45 @@ sysenter_past_esp:
18446 /*CFI_REL_OFFSET cs, 0*/
18447 /*
18448 * Push current_thread_info()->sysenter_return to the stack.
18449- * A tiny bit of offset fixup is necessary - 4*4 means the 4 words
18450- * pushed above; +8 corresponds to copy_thread's esp0 setting.
18451 */
18452- pushl_cfi ((TI_sysenter_return)-THREAD_SIZE+8+4*4)(%esp)
18453+ pushl_cfi $0
18454 CFI_REL_OFFSET eip, 0
18455
18456 pushl_cfi %eax
18457 SAVE_ALL
18458+ GET_THREAD_INFO(%ebp)
18459+ movl TI_sysenter_return(%ebp),%ebp
18460+ movl %ebp,PT_EIP(%esp)
18461 ENABLE_INTERRUPTS(CLBR_NONE)
18462
18463 /*
18464 * Load the potential sixth argument from user stack.
18465 * Careful about security.
18466 */
18467+ movl PT_OLDESP(%esp),%ebp
18468+
18469+#ifdef CONFIG_PAX_MEMORY_UDEREF
18470+ mov PT_OLDSS(%esp),%ds
18471+1: movl %ds:(%ebp),%ebp
18472+ push %ss
18473+ pop %ds
18474+#else
18475 cmpl $__PAGE_OFFSET-3,%ebp
18476 jae syscall_fault
18477 ASM_STAC
18478 1: movl (%ebp),%ebp
18479 ASM_CLAC
18480+#endif
18481+
18482 movl %ebp,PT_EBP(%esp)
18483 _ASM_EXTABLE(1b,syscall_fault)
18484
18485 GET_THREAD_INFO(%ebp)
18486
18487+#ifdef CONFIG_PAX_RANDKSTACK
18488+ pax_erase_kstack
18489+#endif
18490+
18491 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
18492 jnz sysenter_audit
18493 sysenter_do_call:
18494@@ -444,12 +616,24 @@ sysenter_do_call:
18495 testl $_TIF_ALLWORK_MASK, %ecx
18496 jne sysexit_audit
18497 sysenter_exit:
18498+
18499+#ifdef CONFIG_PAX_RANDKSTACK
18500+ pushl_cfi %eax
18501+ movl %esp, %eax
18502+ call pax_randomize_kstack
18503+ popl_cfi %eax
18504+#endif
18505+
18506+ pax_erase_kstack
18507+
18508 /* if something modifies registers it must also disable sysexit */
18509 movl PT_EIP(%esp), %edx
18510 movl PT_OLDESP(%esp), %ecx
18511 xorl %ebp,%ebp
18512 TRACE_IRQS_ON
18513 1: mov PT_FS(%esp), %fs
18514+2: mov PT_DS(%esp), %ds
18515+3: mov PT_ES(%esp), %es
18516 PTGS_TO_GS
18517 ENABLE_INTERRUPTS_SYSEXIT
18518
18519@@ -466,6 +650,9 @@ sysenter_audit:
18520 movl %eax,%edx /* 2nd arg: syscall number */
18521 movl $AUDIT_ARCH_I386,%eax /* 1st arg: audit arch */
18522 call __audit_syscall_entry
18523+
18524+ pax_erase_kstack
18525+
18526 pushl_cfi %ebx
18527 movl PT_EAX(%esp),%eax /* reload syscall number */
18528 jmp sysenter_do_call
18529@@ -491,10 +678,16 @@ sysexit_audit:
18530
18531 CFI_ENDPROC
18532 .pushsection .fixup,"ax"
18533-2: movl $0,PT_FS(%esp)
18534+4: movl $0,PT_FS(%esp)
18535+ jmp 1b
18536+5: movl $0,PT_DS(%esp)
18537+ jmp 1b
18538+6: movl $0,PT_ES(%esp)
18539 jmp 1b
18540 .popsection
18541- _ASM_EXTABLE(1b,2b)
18542+ _ASM_EXTABLE(1b,4b)
18543+ _ASM_EXTABLE(2b,5b)
18544+ _ASM_EXTABLE(3b,6b)
18545 PTGS_TO_GS_EX
18546 ENDPROC(ia32_sysenter_target)
18547
18548@@ -509,6 +702,11 @@ ENTRY(system_call)
18549 pushl_cfi %eax # save orig_eax
18550 SAVE_ALL
18551 GET_THREAD_INFO(%ebp)
18552+
18553+#ifdef CONFIG_PAX_RANDKSTACK
18554+ pax_erase_kstack
18555+#endif
18556+
18557 # system call tracing in operation / emulation
18558 testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
18559 jnz syscall_trace_entry
18560@@ -527,6 +725,15 @@ syscall_exit:
18561 testl $_TIF_ALLWORK_MASK, %ecx # current->work
18562 jne syscall_exit_work
18563
18564+restore_all_pax:
18565+
18566+#ifdef CONFIG_PAX_RANDKSTACK
18567+ movl %esp, %eax
18568+ call pax_randomize_kstack
18569+#endif
18570+
18571+ pax_erase_kstack
18572+
18573 restore_all:
18574 TRACE_IRQS_IRET
18575 restore_all_notrace:
18576@@ -583,14 +790,34 @@ ldt_ss:
18577 * compensating for the offset by changing to the ESPFIX segment with
18578 * a base address that matches for the difference.
18579 */
18580-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
18581+#define GDT_ESPFIX_SS (GDT_ENTRY_ESPFIX_SS * 8)(%ebx)
18582 mov %esp, %edx /* load kernel esp */
18583 mov PT_OLDESP(%esp), %eax /* load userspace esp */
18584 mov %dx, %ax /* eax: new kernel esp */
18585 sub %eax, %edx /* offset (low word is 0) */
18586+#ifdef CONFIG_SMP
18587+ movl PER_CPU_VAR(cpu_number), %ebx
18588+ shll $PAGE_SHIFT_asm, %ebx
18589+ addl $cpu_gdt_table, %ebx
18590+#else
18591+ movl $cpu_gdt_table, %ebx
18592+#endif
18593 shr $16, %edx
18594- mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
18595- mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
18596+
18597+#ifdef CONFIG_PAX_KERNEXEC
18598+ mov %cr0, %esi
18599+ btr $16, %esi
18600+ mov %esi, %cr0
18601+#endif
18602+
18603+ mov %dl, 4 + GDT_ESPFIX_SS /* bits 16..23 */
18604+ mov %dh, 7 + GDT_ESPFIX_SS /* bits 24..31 */
18605+
18606+#ifdef CONFIG_PAX_KERNEXEC
18607+ bts $16, %esi
18608+ mov %esi, %cr0
18609+#endif
18610+
18611 pushl_cfi $__ESPFIX_SS
18612 pushl_cfi %eax /* new kernel esp */
18613 /* Disable interrupts, but do not irqtrace this section: we
18614@@ -619,20 +846,18 @@ work_resched:
18615 movl TI_flags(%ebp), %ecx
18616 andl $_TIF_WORK_MASK, %ecx # is there any work to be done other
18617 # than syscall tracing?
18618- jz restore_all
18619+ jz restore_all_pax
18620 testb $_TIF_NEED_RESCHED, %cl
18621 jnz work_resched
18622
18623 work_notifysig: # deal with pending signals and
18624 # notify-resume requests
18625+ movl %esp, %eax
18626 #ifdef CONFIG_VM86
18627 testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
18628- movl %esp, %eax
18629 jne work_notifysig_v86 # returning to kernel-space or
18630 # vm86-space
18631 1:
18632-#else
18633- movl %esp, %eax
18634 #endif
18635 TRACE_IRQS_ON
18636 ENABLE_INTERRUPTS(CLBR_NONE)
18637@@ -653,7 +878,7 @@ work_notifysig_v86:
18638 movl %eax, %esp
18639 jmp 1b
18640 #endif
18641-END(work_pending)
18642+ENDPROC(work_pending)
18643
18644 # perform syscall exit tracing
18645 ALIGN
18646@@ -661,11 +886,14 @@ syscall_trace_entry:
18647 movl $-ENOSYS,PT_EAX(%esp)
18648 movl %esp, %eax
18649 call syscall_trace_enter
18650+
18651+ pax_erase_kstack
18652+
18653 /* What it returned is what we'll actually use. */
18654 cmpl $(NR_syscalls), %eax
18655 jnae syscall_call
18656 jmp syscall_exit
18657-END(syscall_trace_entry)
18658+ENDPROC(syscall_trace_entry)
18659
18660 # perform syscall exit tracing
18661 ALIGN
18662@@ -678,21 +906,25 @@ syscall_exit_work:
18663 movl %esp, %eax
18664 call syscall_trace_leave
18665 jmp resume_userspace
18666-END(syscall_exit_work)
18667+ENDPROC(syscall_exit_work)
18668 CFI_ENDPROC
18669
18670 RING0_INT_FRAME # can't unwind into user space anyway
18671 syscall_fault:
18672+#ifdef CONFIG_PAX_MEMORY_UDEREF
18673+ push %ss
18674+ pop %ds
18675+#endif
18676 ASM_CLAC
18677 GET_THREAD_INFO(%ebp)
18678 movl $-EFAULT,PT_EAX(%esp)
18679 jmp resume_userspace
18680-END(syscall_fault)
18681+ENDPROC(syscall_fault)
18682
18683 syscall_badsys:
18684 movl $-ENOSYS,PT_EAX(%esp)
18685 jmp resume_userspace
18686-END(syscall_badsys)
18687+ENDPROC(syscall_badsys)
18688 CFI_ENDPROC
18689 /*
18690 * End of kprobes section
18691@@ -708,8 +940,15 @@ END(syscall_badsys)
18692 * normal stack and adjusts ESP with the matching offset.
18693 */
18694 /* fixup the stack */
18695- mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
18696- mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
18697+#ifdef CONFIG_SMP
18698+ movl PER_CPU_VAR(cpu_number), %ebx
18699+ shll $PAGE_SHIFT_asm, %ebx
18700+ addl $cpu_gdt_table, %ebx
18701+#else
18702+ movl $cpu_gdt_table, %ebx
18703+#endif
18704+ mov 4 + GDT_ESPFIX_SS, %al /* bits 16..23 */
18705+ mov 7 + GDT_ESPFIX_SS, %ah /* bits 24..31 */
18706 shl $16, %eax
18707 addl %esp, %eax /* the adjusted stack pointer */
18708 pushl_cfi $__KERNEL_DS
18709@@ -762,7 +1001,7 @@ vector=vector+1
18710 .endr
18711 2: jmp common_interrupt
18712 .endr
18713-END(irq_entries_start)
18714+ENDPROC(irq_entries_start)
18715
18716 .previous
18717 END(interrupt)
18718@@ -813,7 +1052,7 @@ ENTRY(coprocessor_error)
18719 pushl_cfi $do_coprocessor_error
18720 jmp error_code
18721 CFI_ENDPROC
18722-END(coprocessor_error)
18723+ENDPROC(coprocessor_error)
18724
18725 ENTRY(simd_coprocessor_error)
18726 RING0_INT_FRAME
18727@@ -835,7 +1074,7 @@ ENTRY(simd_coprocessor_error)
18728 #endif
18729 jmp error_code
18730 CFI_ENDPROC
18731-END(simd_coprocessor_error)
18732+ENDPROC(simd_coprocessor_error)
18733
18734 ENTRY(device_not_available)
18735 RING0_INT_FRAME
18736@@ -844,18 +1083,18 @@ ENTRY(device_not_available)
18737 pushl_cfi $do_device_not_available
18738 jmp error_code
18739 CFI_ENDPROC
18740-END(device_not_available)
18741+ENDPROC(device_not_available)
18742
18743 #ifdef CONFIG_PARAVIRT
18744 ENTRY(native_iret)
18745 iret
18746 _ASM_EXTABLE(native_iret, iret_exc)
18747-END(native_iret)
18748+ENDPROC(native_iret)
18749
18750 ENTRY(native_irq_enable_sysexit)
18751 sti
18752 sysexit
18753-END(native_irq_enable_sysexit)
18754+ENDPROC(native_irq_enable_sysexit)
18755 #endif
18756
18757 ENTRY(overflow)
18758@@ -865,7 +1104,7 @@ ENTRY(overflow)
18759 pushl_cfi $do_overflow
18760 jmp error_code
18761 CFI_ENDPROC
18762-END(overflow)
18763+ENDPROC(overflow)
18764
18765 ENTRY(bounds)
18766 RING0_INT_FRAME
18767@@ -874,7 +1113,7 @@ ENTRY(bounds)
18768 pushl_cfi $do_bounds
18769 jmp error_code
18770 CFI_ENDPROC
18771-END(bounds)
18772+ENDPROC(bounds)
18773
18774 ENTRY(invalid_op)
18775 RING0_INT_FRAME
18776@@ -883,7 +1122,7 @@ ENTRY(invalid_op)
18777 pushl_cfi $do_invalid_op
18778 jmp error_code
18779 CFI_ENDPROC
18780-END(invalid_op)
18781+ENDPROC(invalid_op)
18782
18783 ENTRY(coprocessor_segment_overrun)
18784 RING0_INT_FRAME
18785@@ -892,7 +1131,7 @@ ENTRY(coprocessor_segment_overrun)
18786 pushl_cfi $do_coprocessor_segment_overrun
18787 jmp error_code
18788 CFI_ENDPROC
18789-END(coprocessor_segment_overrun)
18790+ENDPROC(coprocessor_segment_overrun)
18791
18792 ENTRY(invalid_TSS)
18793 RING0_EC_FRAME
18794@@ -900,7 +1139,7 @@ ENTRY(invalid_TSS)
18795 pushl_cfi $do_invalid_TSS
18796 jmp error_code
18797 CFI_ENDPROC
18798-END(invalid_TSS)
18799+ENDPROC(invalid_TSS)
18800
18801 ENTRY(segment_not_present)
18802 RING0_EC_FRAME
18803@@ -908,7 +1147,7 @@ ENTRY(segment_not_present)
18804 pushl_cfi $do_segment_not_present
18805 jmp error_code
18806 CFI_ENDPROC
18807-END(segment_not_present)
18808+ENDPROC(segment_not_present)
18809
18810 ENTRY(stack_segment)
18811 RING0_EC_FRAME
18812@@ -916,7 +1155,7 @@ ENTRY(stack_segment)
18813 pushl_cfi $do_stack_segment
18814 jmp error_code
18815 CFI_ENDPROC
18816-END(stack_segment)
18817+ENDPROC(stack_segment)
18818
18819 ENTRY(alignment_check)
18820 RING0_EC_FRAME
18821@@ -924,7 +1163,7 @@ ENTRY(alignment_check)
18822 pushl_cfi $do_alignment_check
18823 jmp error_code
18824 CFI_ENDPROC
18825-END(alignment_check)
18826+ENDPROC(alignment_check)
18827
18828 ENTRY(divide_error)
18829 RING0_INT_FRAME
18830@@ -933,7 +1172,7 @@ ENTRY(divide_error)
18831 pushl_cfi $do_divide_error
18832 jmp error_code
18833 CFI_ENDPROC
18834-END(divide_error)
18835+ENDPROC(divide_error)
18836
18837 #ifdef CONFIG_X86_MCE
18838 ENTRY(machine_check)
18839@@ -943,7 +1182,7 @@ ENTRY(machine_check)
18840 pushl_cfi machine_check_vector
18841 jmp error_code
18842 CFI_ENDPROC
18843-END(machine_check)
18844+ENDPROC(machine_check)
18845 #endif
18846
18847 ENTRY(spurious_interrupt_bug)
18848@@ -953,7 +1192,7 @@ ENTRY(spurious_interrupt_bug)
18849 pushl_cfi $do_spurious_interrupt_bug
18850 jmp error_code
18851 CFI_ENDPROC
18852-END(spurious_interrupt_bug)
18853+ENDPROC(spurious_interrupt_bug)
18854 /*
18855 * End of kprobes section
18856 */
18857@@ -1063,7 +1302,7 @@ BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
18858
18859 ENTRY(mcount)
18860 ret
18861-END(mcount)
18862+ENDPROC(mcount)
18863
18864 ENTRY(ftrace_caller)
18865 cmpl $0, function_trace_stop
18866@@ -1096,7 +1335,7 @@ ftrace_graph_call:
18867 .globl ftrace_stub
18868 ftrace_stub:
18869 ret
18870-END(ftrace_caller)
18871+ENDPROC(ftrace_caller)
18872
18873 ENTRY(ftrace_regs_caller)
18874 pushf /* push flags before compare (in cs location) */
18875@@ -1197,7 +1436,7 @@ trace:
18876 popl %ecx
18877 popl %eax
18878 jmp ftrace_stub
18879-END(mcount)
18880+ENDPROC(mcount)
18881 #endif /* CONFIG_DYNAMIC_FTRACE */
18882 #endif /* CONFIG_FUNCTION_TRACER */
18883
18884@@ -1215,7 +1454,7 @@ ENTRY(ftrace_graph_caller)
18885 popl %ecx
18886 popl %eax
18887 ret
18888-END(ftrace_graph_caller)
18889+ENDPROC(ftrace_graph_caller)
18890
18891 .globl return_to_handler
18892 return_to_handler:
18893@@ -1271,15 +1510,18 @@ error_code:
18894 movl $-1, PT_ORIG_EAX(%esp) # no syscall to restart
18895 REG_TO_PTGS %ecx
18896 SET_KERNEL_GS %ecx
18897- movl $(__USER_DS), %ecx
18898+ movl $(__KERNEL_DS), %ecx
18899 movl %ecx, %ds
18900 movl %ecx, %es
18901+
18902+ pax_enter_kernel
18903+
18904 TRACE_IRQS_OFF
18905 movl %esp,%eax # pt_regs pointer
18906 call *%edi
18907 jmp ret_from_exception
18908 CFI_ENDPROC
18909-END(page_fault)
18910+ENDPROC(page_fault)
18911
18912 /*
18913 * Debug traps and NMI can happen at the one SYSENTER instruction
18914@@ -1322,7 +1564,7 @@ debug_stack_correct:
18915 call do_debug
18916 jmp ret_from_exception
18917 CFI_ENDPROC
18918-END(debug)
18919+ENDPROC(debug)
18920
18921 /*
18922 * NMI is doubly nasty. It can happen _while_ we're handling
18923@@ -1360,6 +1602,9 @@ nmi_stack_correct:
18924 xorl %edx,%edx # zero error code
18925 movl %esp,%eax # pt_regs pointer
18926 call do_nmi
18927+
18928+ pax_exit_kernel
18929+
18930 jmp restore_all_notrace
18931 CFI_ENDPROC
18932
18933@@ -1396,12 +1641,15 @@ nmi_espfix_stack:
18934 FIXUP_ESPFIX_STACK # %eax == %esp
18935 xorl %edx,%edx # zero error code
18936 call do_nmi
18937+
18938+ pax_exit_kernel
18939+
18940 RESTORE_REGS
18941 lss 12+4(%esp), %esp # back to espfix stack
18942 CFI_ADJUST_CFA_OFFSET -24
18943 jmp irq_return
18944 CFI_ENDPROC
18945-END(nmi)
18946+ENDPROC(nmi)
18947
18948 ENTRY(int3)
18949 RING0_INT_FRAME
18950@@ -1414,14 +1662,14 @@ ENTRY(int3)
18951 call do_int3
18952 jmp ret_from_exception
18953 CFI_ENDPROC
18954-END(int3)
18955+ENDPROC(int3)
18956
18957 ENTRY(general_protection)
18958 RING0_EC_FRAME
18959 pushl_cfi $do_general_protection
18960 jmp error_code
18961 CFI_ENDPROC
18962-END(general_protection)
18963+ENDPROC(general_protection)
18964
18965 #ifdef CONFIG_KVM_GUEST
18966 ENTRY(async_page_fault)
18967@@ -1430,7 +1678,7 @@ ENTRY(async_page_fault)
18968 pushl_cfi $do_async_page_fault
18969 jmp error_code
18970 CFI_ENDPROC
18971-END(async_page_fault)
18972+ENDPROC(async_page_fault)
18973 #endif
18974
18975 /*
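
A recurring change throughout this file (and in entry_64.S below) is
converting END() to ENDPROC() on code symbols. The distinction, per
include/linux/linkage.h in mainline kernels of this era (quoted
approximately), is that ENDPROC additionally marks the symbol as a
function in the ELF symbol table, which matters to tooling and plugins
that inspect symbol types:

/* include/linux/linkage.h (3.10-era, abridged) */
#define END(name) \
        .size name, .-name

#define ENDPROC(name) \
        .type name, @function; \
        END(name)
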
18976diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
18977index 7272089..6204f9c5 100644
18978--- a/arch/x86/kernel/entry_64.S
18979+++ b/arch/x86/kernel/entry_64.S
18980@@ -59,6 +59,8 @@
18981 #include <asm/context_tracking.h>
18982 #include <asm/smap.h>
18983 #include <linux/err.h>
18984+#include <asm/pgtable.h>
18985+#include <asm/alternative-asm.h>
18986
18987 /* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this. */
18988 #include <linux/elf-em.h>
18989@@ -80,8 +82,9 @@
18990 #ifdef CONFIG_DYNAMIC_FTRACE
18991
18992 ENTRY(function_hook)
18993+ pax_force_retaddr
18994 retq
18995-END(function_hook)
18996+ENDPROC(function_hook)
18997
18998 /* skip is set if stack has been adjusted */
18999 .macro ftrace_caller_setup skip=0
19000@@ -122,8 +125,9 @@ GLOBAL(ftrace_graph_call)
19001 #endif
19002
19003 GLOBAL(ftrace_stub)
19004+ pax_force_retaddr
19005 retq
19006-END(ftrace_caller)
19007+ENDPROC(ftrace_caller)
19008
19009 ENTRY(ftrace_regs_caller)
19010 /* Save the current flags before compare (in SS location)*/
19011@@ -191,7 +195,7 @@ ftrace_restore_flags:
19012 popfq
19013 jmp ftrace_stub
19014
19015-END(ftrace_regs_caller)
19016+ENDPROC(ftrace_regs_caller)
19017
19018
19019 #else /* ! CONFIG_DYNAMIC_FTRACE */
19020@@ -212,6 +216,7 @@ ENTRY(function_hook)
19021 #endif
19022
19023 GLOBAL(ftrace_stub)
19024+ pax_force_retaddr
19025 retq
19026
19027 trace:
19028@@ -225,12 +230,13 @@ trace:
19029 #endif
19030 subq $MCOUNT_INSN_SIZE, %rdi
19031
19032+ pax_force_fptr ftrace_trace_function
19033 call *ftrace_trace_function
19034
19035 MCOUNT_RESTORE_FRAME
19036
19037 jmp ftrace_stub
19038-END(function_hook)
19039+ENDPROC(function_hook)
19040 #endif /* CONFIG_DYNAMIC_FTRACE */
19041 #endif /* CONFIG_FUNCTION_TRACER */
19042
19043@@ -252,8 +258,9 @@ ENTRY(ftrace_graph_caller)
19044
19045 MCOUNT_RESTORE_FRAME
19046
19047+ pax_force_retaddr
19048 retq
19049-END(ftrace_graph_caller)
19050+ENDPROC(ftrace_graph_caller)
19051
19052 GLOBAL(return_to_handler)
19053 subq $24, %rsp
19054@@ -269,7 +276,9 @@ GLOBAL(return_to_handler)
19055 movq 8(%rsp), %rdx
19056 movq (%rsp), %rax
19057 addq $24, %rsp
19058+ pax_force_fptr %rdi
19059 jmp *%rdi
19060+ENDPROC(return_to_handler)
19061 #endif
19062
19063
19064@@ -284,6 +293,309 @@ ENTRY(native_usergs_sysret64)
19065 ENDPROC(native_usergs_sysret64)
19066 #endif /* CONFIG_PARAVIRT */
19067
19068+ .macro ljmpq sel, off
19069+#if defined(CONFIG_MPSC) || defined(CONFIG_MCORE2) || defined (CONFIG_MATOM)
19070+ .byte 0x48; ljmp *1234f(%rip)
19071+ .pushsection .rodata
19072+ .align 16
19073+ 1234: .quad \off; .word \sel
19074+ .popsection
19075+#else
19076+ pushq $\sel
19077+ pushq $\off
19078+ lretq
19079+#endif
19080+ .endm
19081+
19082+ .macro pax_enter_kernel
19083+ pax_set_fptr_mask
19084+#ifdef CONFIG_PAX_KERNEXEC
19085+ call pax_enter_kernel
19086+#endif
19087+ .endm
19088+
19089+ .macro pax_exit_kernel
19090+#ifdef CONFIG_PAX_KERNEXEC
19091+ call pax_exit_kernel
19092+#endif
19093+ .endm
19094+
19095+#ifdef CONFIG_PAX_KERNEXEC
19096+ENTRY(pax_enter_kernel)
19097+ pushq %rdi
19098+
19099+#ifdef CONFIG_PARAVIRT
19100+ PV_SAVE_REGS(CLBR_RDI)
19101+#endif
19102+
19103+ GET_CR0_INTO_RDI
19104+ bts $16,%rdi
19105+ jnc 3f
19106+ mov %cs,%edi
19107+ cmp $__KERNEL_CS,%edi
19108+ jnz 2f
19109+1:
19110+
19111+#ifdef CONFIG_PARAVIRT
19112+ PV_RESTORE_REGS(CLBR_RDI)
19113+#endif
19114+
19115+ popq %rdi
19116+ pax_force_retaddr
19117+ retq
19118+
19119+2: ljmpq __KERNEL_CS,1b
19120+3: ljmpq __KERNEXEC_KERNEL_CS,4f
19121+4: SET_RDI_INTO_CR0
19122+ jmp 1b
19123+ENDPROC(pax_enter_kernel)
19124+
19125+ENTRY(pax_exit_kernel)
19126+ pushq %rdi
19127+
19128+#ifdef CONFIG_PARAVIRT
19129+ PV_SAVE_REGS(CLBR_RDI)
19130+#endif
19131+
19132+ mov %cs,%rdi
19133+ cmp $__KERNEXEC_KERNEL_CS,%edi
19134+ jz 2f
19135+ GET_CR0_INTO_RDI
19136+ bts $16,%rdi
19137+ jnc 4f
19138+1:
19139+
19140+#ifdef CONFIG_PARAVIRT
19141+ PV_RESTORE_REGS(CLBR_RDI);
19142+#endif
19143+
19144+ popq %rdi
19145+ pax_force_retaddr
19146+ retq
19147+
19148+2: GET_CR0_INTO_RDI
19149+ btr $16,%rdi
19150+ jnc 4f
19151+ ljmpq __KERNEL_CS,3f
19152+3: SET_RDI_INTO_CR0
19153+ jmp 1b
19154+4: ud2
19155+ jmp 4b
19156+ENDPROC(pax_exit_kernel)
19157+#endif
19158+
19159+ .macro pax_enter_kernel_user
19160+ pax_set_fptr_mask
19161+#ifdef CONFIG_PAX_MEMORY_UDEREF
19162+ call pax_enter_kernel_user
19163+#endif
19164+ .endm
19165+
19166+ .macro pax_exit_kernel_user
19167+#ifdef CONFIG_PAX_MEMORY_UDEREF
19168+ call pax_exit_kernel_user
19169+#endif
19170+#ifdef CONFIG_PAX_RANDKSTACK
19171+ pushq %rax
19172+ pushq %r11
19173+ call pax_randomize_kstack
19174+ popq %r11
19175+ popq %rax
19176+#endif
19177+ .endm
19178+
19179+#ifdef CONFIG_PAX_MEMORY_UDEREF
19180+ENTRY(pax_enter_kernel_user)
19181+ pushq %rdi
19182+ pushq %rbx
19183+
19184+#ifdef CONFIG_PARAVIRT
19185+ PV_SAVE_REGS(CLBR_RDI)
19186+#endif
19187+
19188+ GET_CR3_INTO_RDI
19189+ mov %rdi,%rbx
19190+ add $__START_KERNEL_map,%rbx
19191+ sub phys_base(%rip),%rbx
19192+
19193+#ifdef CONFIG_PARAVIRT
19194+ cmpl $0, pv_info+PARAVIRT_enabled
19195+ jz 1f
19196+ pushq %rdi
19197+ i = 0
19198+ .rept USER_PGD_PTRS
19199+ mov i*8(%rbx),%rsi
19200+ mov $0,%sil
19201+ lea i*8(%rbx),%rdi
19202+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
19203+ i = i + 1
19204+ .endr
19205+ popq %rdi
19206+ jmp 2f
19207+1:
19208+#endif
19209+
19210+ i = 0
19211+ .rept USER_PGD_PTRS
19212+ movb $0,i*8(%rbx)
19213+ i = i + 1
19214+ .endr
19215+
19216+#ifdef CONFIG_PARAVIRT
19217+2:
19218+#endif
19219+ SET_RDI_INTO_CR3
19220+
19221+#ifdef CONFIG_PAX_KERNEXEC
19222+ GET_CR0_INTO_RDI
19223+ bts $16,%rdi
19224+ SET_RDI_INTO_CR0
19225+#endif
19226+
19227+#ifdef CONFIG_PARAVIRT
19228+ PV_RESTORE_REGS(CLBR_RDI)
19229+#endif
19230+
19231+ popq %rbx
19232+ popq %rdi
19233+ pax_force_retaddr
19234+ retq
19235+ENDPROC(pax_enter_kernel_user)
19236+
19237+ENTRY(pax_exit_kernel_user)
19238+ pushq %rdi
19239+ pushq %rbx
19240+
19241+#ifdef CONFIG_PARAVIRT
19242+ PV_SAVE_REGS(CLBR_RDI)
19243+#endif
19244+
19245+#ifdef CONFIG_PAX_KERNEXEC
19246+ GET_CR0_INTO_RDI
19247+ btr $16,%rdi
19248+ jnc 3f
19249+ SET_RDI_INTO_CR0
19250+#endif
19251+
19252+ GET_CR3_INTO_RDI
19253+ mov %rdi,%rbx
19254+ add $__START_KERNEL_map,%rbx
19255+ sub phys_base(%rip),%rbx
19256+
19257+#ifdef CONFIG_PARAVIRT
19258+ cmpl $0, pv_info+PARAVIRT_enabled
19259+ jz 1f
19260+ i = 0
19261+ .rept USER_PGD_PTRS
19262+ mov i*8(%rbx),%rsi
19263+ mov $0x67,%sil
19264+ lea i*8(%rbx),%rdi
19265+ call PARA_INDIRECT(pv_mmu_ops+PV_MMU_set_pgd_batched)
19266+ i = i + 1
19267+ .endr
19268+ jmp 2f
19269+1:
19270+#endif
19271+
19272+ i = 0
19273+ .rept USER_PGD_PTRS
19274+ movb $0x67,i*8(%rbx)
19275+ i = i + 1
19276+ .endr
19277+
19278+#ifdef CONFIG_PARAVIRT
19279+2: PV_RESTORE_REGS(CLBR_RDI)
19280+#endif
19281+
19282+ popq %rbx
19283+ popq %rdi
19284+ pax_force_retaddr
19285+ retq
19286+3: ud2
19287+ jmp 3b
19288+ENDPROC(pax_exit_kernel_user)
19289+#endif
19290+
19291+ .macro pax_enter_kernel_nmi
19292+ pax_set_fptr_mask
19293+
19294+#ifdef CONFIG_PAX_KERNEXEC
19295+ GET_CR0_INTO_RDI
19296+ bts $16,%rdi
19297+ jc 110f
19298+ SET_RDI_INTO_CR0
19299+ or $2,%ebx
19300+110:
19301+#endif
19302+ .endm
19303+
19304+ .macro pax_exit_kernel_nmi
19305+#ifdef CONFIG_PAX_KERNEXEC
19306+ btr $1,%ebx
19307+ jnc 110f
19308+ GET_CR0_INTO_RDI
19309+ btr $16,%rdi
19310+ SET_RDI_INTO_CR0
19311+110:
19312+#endif
19313+ .endm
19314+
19315+ .macro pax_erase_kstack
19316+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
19317+ call pax_erase_kstack
19318+#endif
19319+ .endm
19320+
19321+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
19322+ENTRY(pax_erase_kstack)
19323+ pushq %rdi
19324+ pushq %rcx
19325+ pushq %rax
19326+ pushq %r11
19327+
19328+ GET_THREAD_INFO(%r11)
19329+ mov TI_lowest_stack(%r11), %rdi
19330+ mov $-0xBEEF, %rax
19331+ std
19332+
19333+1: mov %edi, %ecx
19334+ and $THREAD_SIZE_asm - 1, %ecx
19335+ shr $3, %ecx
19336+ repne scasq
19337+ jecxz 2f
19338+
19339+ cmp $2*8, %ecx
19340+ jc 2f
19341+
19342+ mov $2*8, %ecx
19343+ repe scasq
19344+ jecxz 2f
19345+ jne 1b
19346+
19347+2: cld
19348+ mov %esp, %ecx
19349+ sub %edi, %ecx
19350+
19351+ cmp $THREAD_SIZE_asm, %rcx
19352+ jb 3f
19353+ ud2
19354+3:
19355+
19356+ shr $3, %ecx
19357+ rep stosq
19358+
19359+ mov TI_task_thread_sp0(%r11), %rdi
19360+ sub $256, %rdi
19361+ mov %rdi, TI_lowest_stack(%r11)
19362+
19363+ popq %r11
19364+ popq %rax
19365+ popq %rcx
19366+ popq %rdi
19367+ pax_force_retaddr
19368+ ret
19369+ENDPROC(pax_erase_kstack)
19370+#endif
19371
19372 .macro TRACE_IRQS_IRETQ offset=ARGOFFSET
19373 #ifdef CONFIG_TRACE_IRQFLAGS
19374@@ -375,8 +687,8 @@ ENDPROC(native_usergs_sysret64)
19375 .endm
19376
19377 .macro UNFAKE_STACK_FRAME
19378- addq $8*6, %rsp
19379- CFI_ADJUST_CFA_OFFSET -(6*8)
19380+ addq $8*6 + ARG_SKIP, %rsp
19381+ CFI_ADJUST_CFA_OFFSET -(6*8 + ARG_SKIP)
19382 .endm
19383
19384 /*
19385@@ -463,7 +775,7 @@ ENDPROC(native_usergs_sysret64)
19386 movq %rsp, %rsi
19387
19388 leaq -RBP(%rsp),%rdi /* arg1 for handler */
19389- testl $3, CS-RBP(%rsi)
19390+ testb $3, CS-RBP(%rsi)
19391 je 1f
19392 SWAPGS
19393 /*
19394@@ -498,9 +810,10 @@ ENTRY(save_rest)
19395 movq_cfi r15, R15+16
19396 movq %r11, 8(%rsp) /* return address */
19397 FIXUP_TOP_OF_STACK %r11, 16
19398+ pax_force_retaddr
19399 ret
19400 CFI_ENDPROC
19401-END(save_rest)
19402+ENDPROC(save_rest)
19403
19404 /* save complete stack frame */
19405 .pushsection .kprobes.text, "ax"
19406@@ -529,9 +842,10 @@ ENTRY(save_paranoid)
19407 js 1f /* negative -> in kernel */
19408 SWAPGS
19409 xorl %ebx,%ebx
19410-1: ret
19411+1: pax_force_retaddr_bts
19412+ ret
19413 CFI_ENDPROC
19414-END(save_paranoid)
19415+ENDPROC(save_paranoid)
19416 .popsection
19417
19418 /*
19419@@ -553,7 +867,7 @@ ENTRY(ret_from_fork)
19420
19421 RESTORE_REST
19422
19423- testl $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
19424+ testb $3, CS-ARGOFFSET(%rsp) # from kernel_thread?
19425 jz 1f
19426
19427 testl $_TIF_IA32, TI_flags(%rcx) # 32-bit compat task needs IRET
19428@@ -571,7 +885,7 @@ ENTRY(ret_from_fork)
19429 RESTORE_REST
19430 jmp int_ret_from_sys_call
19431 CFI_ENDPROC
19432-END(ret_from_fork)
19433+ENDPROC(ret_from_fork)
19434
19435 /*
19436 * System call entry. Up to 6 arguments in registers are supported.
19437@@ -608,7 +922,7 @@ END(ret_from_fork)
19438 ENTRY(system_call)
19439 CFI_STARTPROC simple
19440 CFI_SIGNAL_FRAME
19441- CFI_DEF_CFA rsp,KERNEL_STACK_OFFSET
19442+ CFI_DEF_CFA rsp,0
19443 CFI_REGISTER rip,rcx
19444 /*CFI_REGISTER rflags,r11*/
19445 SWAPGS_UNSAFE_STACK
19446@@ -621,16 +935,23 @@ GLOBAL(system_call_after_swapgs)
19447
19448 movq %rsp,PER_CPU_VAR(old_rsp)
19449 movq PER_CPU_VAR(kernel_stack),%rsp
19450+ SAVE_ARGS 8*6,0
19451+ pax_enter_kernel_user
19452+
19453+#ifdef CONFIG_PAX_RANDKSTACK
19454+ pax_erase_kstack
19455+#endif
19456+
19457 /*
19458 * No need to follow this irqs off/on section - it's straight
19459 * and short:
19460 */
19461 ENABLE_INTERRUPTS(CLBR_NONE)
19462- SAVE_ARGS 8,0
19463 movq %rax,ORIG_RAX-ARGOFFSET(%rsp)
19464 movq %rcx,RIP-ARGOFFSET(%rsp)
19465 CFI_REL_OFFSET rip,RIP-ARGOFFSET
19466- testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
19467+ GET_THREAD_INFO(%rcx)
19468+ testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%rcx)
19469 jnz tracesys
19470 system_call_fastpath:
19471 #if __SYSCALL_MASK == ~0
19472@@ -640,7 +961,7 @@ system_call_fastpath:
19473 cmpl $__NR_syscall_max,%eax
19474 #endif
19475 ja badsys
19476- movq %r10,%rcx
19477+ movq R10-ARGOFFSET(%rsp),%rcx
19478 call *sys_call_table(,%rax,8) # XXX: rip relative
19479 movq %rax,RAX-ARGOFFSET(%rsp)
19480 /*
19481@@ -654,10 +975,13 @@ sysret_check:
19482 LOCKDEP_SYS_EXIT
19483 DISABLE_INTERRUPTS(CLBR_NONE)
19484 TRACE_IRQS_OFF
19485- movl TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET),%edx
19486+ GET_THREAD_INFO(%rcx)
19487+ movl TI_flags(%rcx),%edx
19488 andl %edi,%edx
19489 jnz sysret_careful
19490 CFI_REMEMBER_STATE
19491+ pax_exit_kernel_user
19492+ pax_erase_kstack
19493 /*
19494 * sysretq will re-enable interrupts:
19495 */
19496@@ -709,14 +1033,18 @@ badsys:
19497 * jump back to the normal fast path.
19498 */
19499 auditsys:
19500- movq %r10,%r9 /* 6th arg: 4th syscall arg */
19501+ movq R10-ARGOFFSET(%rsp),%r9 /* 6th arg: 4th syscall arg */
19502 movq %rdx,%r8 /* 5th arg: 3rd syscall arg */
19503 movq %rsi,%rcx /* 4th arg: 2nd syscall arg */
19504 movq %rdi,%rdx /* 3rd arg: 1st syscall arg */
19505 movq %rax,%rsi /* 2nd arg: syscall number */
19506 movl $AUDIT_ARCH_X86_64,%edi /* 1st arg: audit arch */
19507 call __audit_syscall_entry
19508+
19509+ pax_erase_kstack
19510+
19511 LOAD_ARGS 0 /* reload call-clobbered registers */
19512+ pax_set_fptr_mask
19513 jmp system_call_fastpath
19514
19515 /*
19516@@ -737,7 +1065,7 @@ sysret_audit:
19517 /* Do syscall tracing */
19518 tracesys:
19519 #ifdef CONFIG_AUDITSYSCALL
19520- testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags+THREAD_INFO(%rsp,RIP-ARGOFFSET)
19521+ testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
19522 jz auditsys
19523 #endif
19524 SAVE_REST
19525@@ -745,12 +1073,16 @@ tracesys:
19526 FIXUP_TOP_OF_STACK %rdi
19527 movq %rsp,%rdi
19528 call syscall_trace_enter
19529+
19530+ pax_erase_kstack
19531+
19532 /*
19533 * Reload arg registers from stack in case ptrace changed them.
19534 * We don't reload %rax because syscall_trace_enter() returned
19535 * the value it wants us to use in the table lookup.
19536 */
19537 LOAD_ARGS ARGOFFSET, 1
19538+ pax_set_fptr_mask
19539 RESTORE_REST
19540 #if __SYSCALL_MASK == ~0
19541 cmpq $__NR_syscall_max,%rax
19542@@ -759,7 +1091,7 @@ tracesys:
19543 cmpl $__NR_syscall_max,%eax
19544 #endif
19545 ja int_ret_from_sys_call /* RAX(%rsp) set to -ENOSYS above */
19546- movq %r10,%rcx /* fixup for C */
19547+ movq R10-ARGOFFSET(%rsp),%rcx /* fixup for C */
19548 call *sys_call_table(,%rax,8)
19549 movq %rax,RAX-ARGOFFSET(%rsp)
19550 /* Use IRET because user could have changed frame */
19551@@ -780,7 +1112,9 @@ GLOBAL(int_with_check)
19552 andl %edi,%edx
19553 jnz int_careful
19554 andl $~TS_COMPAT,TI_status(%rcx)
19555- jmp retint_swapgs
19556+ pax_exit_kernel_user
19557+ pax_erase_kstack
19558+ jmp retint_swapgs_pax
19559
19560 /* Either reschedule or signal or syscall exit tracking needed. */
19561 /* First do a reschedule test. */
19562@@ -826,7 +1160,7 @@ int_restore_rest:
19563 TRACE_IRQS_OFF
19564 jmp int_with_check
19565 CFI_ENDPROC
19566-END(system_call)
19567+ENDPROC(system_call)
19568
19569 .macro FORK_LIKE func
19570 ENTRY(stub_\func)
19571@@ -839,9 +1173,10 @@ ENTRY(stub_\func)
19572 DEFAULT_FRAME 0 8 /* offset 8: return address */
19573 call sys_\func
19574 RESTORE_TOP_OF_STACK %r11, 8
19575+ pax_force_retaddr
19576 ret $REST_SKIP /* pop extended registers */
19577 CFI_ENDPROC
19578-END(stub_\func)
19579+ENDPROC(stub_\func)
19580 .endm
19581
19582 .macro FIXED_FRAME label,func
19583@@ -851,9 +1186,10 @@ ENTRY(\label)
19584 FIXUP_TOP_OF_STACK %r11, 8-ARGOFFSET
19585 call \func
19586 RESTORE_TOP_OF_STACK %r11, 8-ARGOFFSET
19587+ pax_force_retaddr
19588 ret
19589 CFI_ENDPROC
19590-END(\label)
19591+ENDPROC(\label)
19592 .endm
19593
19594 FORK_LIKE clone
19595@@ -870,9 +1206,10 @@ ENTRY(ptregscall_common)
19596 movq_cfi_restore R12+8, r12
19597 movq_cfi_restore RBP+8, rbp
19598 movq_cfi_restore RBX+8, rbx
19599+ pax_force_retaddr
19600 ret $REST_SKIP /* pop extended registers */
19601 CFI_ENDPROC
19602-END(ptregscall_common)
19603+ENDPROC(ptregscall_common)
19604
19605 ENTRY(stub_execve)
19606 CFI_STARTPROC
19607@@ -885,7 +1222,7 @@ ENTRY(stub_execve)
19608 RESTORE_REST
19609 jmp int_ret_from_sys_call
19610 CFI_ENDPROC
19611-END(stub_execve)
19612+ENDPROC(stub_execve)
19613
19614 /*
19615 * sigreturn is special because it needs to restore all registers on return.
19616@@ -902,7 +1239,7 @@ ENTRY(stub_rt_sigreturn)
19617 RESTORE_REST
19618 jmp int_ret_from_sys_call
19619 CFI_ENDPROC
19620-END(stub_rt_sigreturn)
19621+ENDPROC(stub_rt_sigreturn)
19622
19623 #ifdef CONFIG_X86_X32_ABI
19624 ENTRY(stub_x32_rt_sigreturn)
19625@@ -916,7 +1253,7 @@ ENTRY(stub_x32_rt_sigreturn)
19626 RESTORE_REST
19627 jmp int_ret_from_sys_call
19628 CFI_ENDPROC
19629-END(stub_x32_rt_sigreturn)
19630+ENDPROC(stub_x32_rt_sigreturn)
19631
19632 ENTRY(stub_x32_execve)
19633 CFI_STARTPROC
19634@@ -930,7 +1267,7 @@ ENTRY(stub_x32_execve)
19635 RESTORE_REST
19636 jmp int_ret_from_sys_call
19637 CFI_ENDPROC
19638-END(stub_x32_execve)
19639+ENDPROC(stub_x32_execve)
19640
19641 #endif
19642
19643@@ -967,7 +1304,7 @@ vector=vector+1
19644 2: jmp common_interrupt
19645 .endr
19646 CFI_ENDPROC
19647-END(irq_entries_start)
19648+ENDPROC(irq_entries_start)
19649
19650 .previous
19651 END(interrupt)
19652@@ -987,6 +1324,16 @@ END(interrupt)
19653 subq $ORIG_RAX-RBP, %rsp
19654 CFI_ADJUST_CFA_OFFSET ORIG_RAX-RBP
19655 SAVE_ARGS_IRQ
19656+#ifdef CONFIG_PAX_MEMORY_UDEREF
19657+ testb $3, CS(%rdi)
19658+ jnz 1f
19659+ pax_enter_kernel
19660+ jmp 2f
19661+1: pax_enter_kernel_user
19662+2:
19663+#else
19664+ pax_enter_kernel
19665+#endif
19666 call \func
19667 .endm
19668
19669@@ -1019,7 +1366,7 @@ ret_from_intr:
19670
19671 exit_intr:
19672 GET_THREAD_INFO(%rcx)
19673- testl $3,CS-ARGOFFSET(%rsp)
19674+ testb $3,CS-ARGOFFSET(%rsp)
19675 je retint_kernel
19676
19677 /* Interrupt came from user space */
19678@@ -1041,12 +1388,16 @@ retint_swapgs: /* return to user-space */
19679 * The iretq could re-enable interrupts:
19680 */
19681 DISABLE_INTERRUPTS(CLBR_ANY)
19682+ pax_exit_kernel_user
19683+retint_swapgs_pax:
19684 TRACE_IRQS_IRETQ
19685 SWAPGS
19686 jmp restore_args
19687
19688 retint_restore_args: /* return to kernel space */
19689 DISABLE_INTERRUPTS(CLBR_ANY)
19690+ pax_exit_kernel
19691+ pax_force_retaddr (RIP-ARGOFFSET)
19692 /*
19693 * The iretq could re-enable interrupts:
19694 */
19695@@ -1129,7 +1480,7 @@ ENTRY(retint_kernel)
19696 #endif
19697
19698 CFI_ENDPROC
19699-END(common_interrupt)
19700+ENDPROC(common_interrupt)
19701 /*
19702 * End of kprobes section
19703 */
19704@@ -1147,7 +1498,7 @@ ENTRY(\sym)
19705 interrupt \do_sym
19706 jmp ret_from_intr
19707 CFI_ENDPROC
19708-END(\sym)
19709+ENDPROC(\sym)
19710 .endm
19711
19712 #ifdef CONFIG_SMP
19713@@ -1208,12 +1559,22 @@ ENTRY(\sym)
19714 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
19715 call error_entry
19716 DEFAULT_FRAME 0
19717+#ifdef CONFIG_PAX_MEMORY_UDEREF
19718+ testb $3, CS(%rsp)
19719+ jnz 1f
19720+ pax_enter_kernel
19721+ jmp 2f
19722+1: pax_enter_kernel_user
19723+2:
19724+#else
19725+ pax_enter_kernel
19726+#endif
19727 movq %rsp,%rdi /* pt_regs pointer */
19728 xorl %esi,%esi /* no error code */
19729 call \do_sym
19730 jmp error_exit /* %ebx: no swapgs flag */
19731 CFI_ENDPROC
19732-END(\sym)
19733+ENDPROC(\sym)
19734 .endm
19735
19736 .macro paranoidzeroentry sym do_sym
19737@@ -1226,15 +1587,25 @@ ENTRY(\sym)
19738 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
19739 call save_paranoid
19740 TRACE_IRQS_OFF
19741+#ifdef CONFIG_PAX_MEMORY_UDEREF
19742+ testb $3, CS(%rsp)
19743+ jnz 1f
19744+ pax_enter_kernel
19745+ jmp 2f
19746+1: pax_enter_kernel_user
19747+2:
19748+#else
19749+ pax_enter_kernel
19750+#endif
19751 movq %rsp,%rdi /* pt_regs pointer */
19752 xorl %esi,%esi /* no error code */
19753 call \do_sym
19754 jmp paranoid_exit /* %ebx: no swapgs flag */
19755 CFI_ENDPROC
19756-END(\sym)
19757+ENDPROC(\sym)
19758 .endm
19759
19760-#define INIT_TSS_IST(x) PER_CPU_VAR(init_tss) + (TSS_ist + ((x) - 1) * 8)
19761+#define INIT_TSS_IST(x) (TSS_ist + ((x) - 1) * 8)(%r12)
19762 .macro paranoidzeroentry_ist sym do_sym ist
19763 ENTRY(\sym)
19764 INTR_FRAME
19765@@ -1245,14 +1616,30 @@ ENTRY(\sym)
19766 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
19767 call save_paranoid
19768 TRACE_IRQS_OFF_DEBUG
19769+#ifdef CONFIG_PAX_MEMORY_UDEREF
19770+ testb $3, CS(%rsp)
19771+ jnz 1f
19772+ pax_enter_kernel
19773+ jmp 2f
19774+1: pax_enter_kernel_user
19775+2:
19776+#else
19777+ pax_enter_kernel
19778+#endif
19779 movq %rsp,%rdi /* pt_regs pointer */
19780 xorl %esi,%esi /* no error code */
19781+#ifdef CONFIG_SMP
19782+ imul $TSS_size, PER_CPU_VAR(cpu_number), %r12d
19783+ lea init_tss(%r12), %r12
19784+#else
19785+ lea init_tss(%rip), %r12
19786+#endif
19787 subq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
19788 call \do_sym
19789 addq $EXCEPTION_STKSZ, INIT_TSS_IST(\ist)
19790 jmp paranoid_exit /* %ebx: no swapgs flag */
19791 CFI_ENDPROC
19792-END(\sym)
19793+ENDPROC(\sym)
19794 .endm
19795
19796 .macro errorentry sym do_sym
19797@@ -1264,13 +1651,23 @@ ENTRY(\sym)
19798 CFI_ADJUST_CFA_OFFSET ORIG_RAX-R15
19799 call error_entry
19800 DEFAULT_FRAME 0
19801+#ifdef CONFIG_PAX_MEMORY_UDEREF
19802+ testb $3, CS(%rsp)
19803+ jnz 1f
19804+ pax_enter_kernel
19805+ jmp 2f
19806+1: pax_enter_kernel_user
19807+2:
19808+#else
19809+ pax_enter_kernel
19810+#endif
19811 movq %rsp,%rdi /* pt_regs pointer */
19812 movq ORIG_RAX(%rsp),%rsi /* get error code */
19813 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
19814 call \do_sym
19815 jmp error_exit /* %ebx: no swapgs flag */
19816 CFI_ENDPROC
19817-END(\sym)
19818+ENDPROC(\sym)
19819 .endm
19820
19821 /* error code is on the stack already */
19822@@ -1284,13 +1681,23 @@ ENTRY(\sym)
19823 call save_paranoid
19824 DEFAULT_FRAME 0
19825 TRACE_IRQS_OFF
19826+#ifdef CONFIG_PAX_MEMORY_UDEREF
19827+ testb $3, CS(%rsp)
19828+ jnz 1f
19829+ pax_enter_kernel
19830+ jmp 2f
19831+1: pax_enter_kernel_user
19832+2:
19833+#else
19834+ pax_enter_kernel
19835+#endif
19836 movq %rsp,%rdi /* pt_regs pointer */
19837 movq ORIG_RAX(%rsp),%rsi /* get error code */
19838 movq $-1,ORIG_RAX(%rsp) /* no syscall to restart */
19839 call \do_sym
19840 jmp paranoid_exit /* %ebx: no swapgs flag */
19841 CFI_ENDPROC
19842-END(\sym)
19843+ENDPROC(\sym)
19844 .endm
19845
19846 zeroentry divide_error do_divide_error
19847@@ -1320,9 +1727,10 @@ gs_change:
19848 2: mfence /* workaround */
19849 SWAPGS
19850 popfq_cfi
19851+ pax_force_retaddr
19852 ret
19853 CFI_ENDPROC
19854-END(native_load_gs_index)
19855+ENDPROC(native_load_gs_index)
19856
19857 _ASM_EXTABLE(gs_change,bad_gs)
19858 .section .fixup,"ax"
19859@@ -1350,9 +1758,10 @@ ENTRY(call_softirq)
19860 CFI_DEF_CFA_REGISTER rsp
19861 CFI_ADJUST_CFA_OFFSET -8
19862 decl PER_CPU_VAR(irq_count)
19863+ pax_force_retaddr
19864 ret
19865 CFI_ENDPROC
19866-END(call_softirq)
19867+ENDPROC(call_softirq)
19868
19869 #ifdef CONFIG_XEN
19870 zeroentry xen_hypervisor_callback xen_do_hypervisor_callback
19871@@ -1390,7 +1799,7 @@ ENTRY(xen_do_hypervisor_callback) # do_hypervisor_callback(struct *pt_regs)
19872 decl PER_CPU_VAR(irq_count)
19873 jmp error_exit
19874 CFI_ENDPROC
19875-END(xen_do_hypervisor_callback)
19876+ENDPROC(xen_do_hypervisor_callback)
19877
19878 /*
19879 * Hypervisor uses this for application faults while it executes.
19880@@ -1449,7 +1858,7 @@ ENTRY(xen_failsafe_callback)
19881 SAVE_ALL
19882 jmp error_exit
19883 CFI_ENDPROC
19884-END(xen_failsafe_callback)
19885+ENDPROC(xen_failsafe_callback)
19886
19887 apicinterrupt HYPERVISOR_CALLBACK_VECTOR \
19888 xen_hvm_callback_vector xen_evtchn_do_upcall
19889@@ -1501,18 +1910,33 @@ ENTRY(paranoid_exit)
19890 DEFAULT_FRAME
19891 DISABLE_INTERRUPTS(CLBR_NONE)
19892 TRACE_IRQS_OFF_DEBUG
19893- testl %ebx,%ebx /* swapgs needed? */
19894+ testl $1,%ebx /* swapgs needed? */
19895 jnz paranoid_restore
19896- testl $3,CS(%rsp)
19897+ testb $3,CS(%rsp)
19898 jnz paranoid_userspace
19899+#ifdef CONFIG_PAX_MEMORY_UDEREF
19900+ pax_exit_kernel
19901+ TRACE_IRQS_IRETQ 0
19902+ SWAPGS_UNSAFE_STACK
19903+ RESTORE_ALL 8
19904+ pax_force_retaddr_bts
19905+ jmp irq_return
19906+#endif
19907 paranoid_swapgs:
19908+#ifdef CONFIG_PAX_MEMORY_UDEREF
19909+ pax_exit_kernel_user
19910+#else
19911+ pax_exit_kernel
19912+#endif
19913 TRACE_IRQS_IRETQ 0
19914 SWAPGS_UNSAFE_STACK
19915 RESTORE_ALL 8
19916 jmp irq_return
19917 paranoid_restore:
19918+ pax_exit_kernel
19919 TRACE_IRQS_IRETQ_DEBUG 0
19920 RESTORE_ALL 8
19921+ pax_force_retaddr_bts
19922 jmp irq_return
19923 paranoid_userspace:
19924 GET_THREAD_INFO(%rcx)
19925@@ -1541,7 +1965,7 @@ paranoid_schedule:
19926 TRACE_IRQS_OFF
19927 jmp paranoid_userspace
19928 CFI_ENDPROC
19929-END(paranoid_exit)
19930+ENDPROC(paranoid_exit)
19931
19932 /*
19933 * Exception entry point. This expects an error code/orig_rax on the stack.
19934@@ -1568,12 +1992,13 @@ ENTRY(error_entry)
19935 movq_cfi r14, R14+8
19936 movq_cfi r15, R15+8
19937 xorl %ebx,%ebx
19938- testl $3,CS+8(%rsp)
19939+ testb $3,CS+8(%rsp)
19940 je error_kernelspace
19941 error_swapgs:
19942 SWAPGS
19943 error_sti:
19944 TRACE_IRQS_OFF
19945+ pax_force_retaddr_bts
19946 ret
19947
19948 /*
19949@@ -1600,7 +2025,7 @@ bstep_iret:
19950 movq %rcx,RIP+8(%rsp)
19951 jmp error_swapgs
19952 CFI_ENDPROC
19953-END(error_entry)
19954+ENDPROC(error_entry)
19955
19956
19957 /* ebx: no swapgs flag (1: don't need swapgs, 0: need it) */
19958@@ -1611,7 +2036,7 @@ ENTRY(error_exit)
19959 DISABLE_INTERRUPTS(CLBR_NONE)
19960 TRACE_IRQS_OFF
19961 GET_THREAD_INFO(%rcx)
19962- testl %eax,%eax
19963+ testl $1,%eax
19964 jne retint_kernel
19965 LOCKDEP_SYS_EXIT_IRQ
19966 movl TI_flags(%rcx),%edx
19967@@ -1620,7 +2045,7 @@ ENTRY(error_exit)
19968 jnz retint_careful
19969 jmp retint_swapgs
19970 CFI_ENDPROC
19971-END(error_exit)
19972+ENDPROC(error_exit)
19973
19974 /*
19975 * Test if a given stack is an NMI stack or not.
19976@@ -1678,9 +2103,11 @@ ENTRY(nmi)
19977 * If %cs was not the kernel segment, then the NMI triggered in user
19978 * space, which means it is definitely not nested.
19979 */
19980+ cmpl $__KERNEXEC_KERNEL_CS, 16(%rsp)
19981+ je 1f
19982 cmpl $__KERNEL_CS, 16(%rsp)
19983 jne first_nmi
19984-
19985+1:
19986 /*
19987 * Check the special variable on the stack to see if NMIs are
19988 * executing.
19989@@ -1714,8 +2141,7 @@ nested_nmi:
19990
19991 1:
19992 /* Set up the interrupted NMIs stack to jump to repeat_nmi */
19993- leaq -1*8(%rsp), %rdx
19994- movq %rdx, %rsp
19995+ subq $8, %rsp
19996 CFI_ADJUST_CFA_OFFSET 1*8
19997 leaq -10*8(%rsp), %rdx
19998 pushq_cfi $__KERNEL_DS
19999@@ -1733,6 +2159,7 @@ nested_nmi_out:
20000 CFI_RESTORE rdx
20001
20002 /* No need to check faults here */
20003+# pax_force_retaddr_bts
20004 INTERRUPT_RETURN
20005
20006 CFI_RESTORE_STATE
20007@@ -1849,6 +2276,8 @@ end_repeat_nmi:
20008 */
20009 movq %cr2, %r12
20010
20011+ pax_enter_kernel_nmi
20012+
20013 /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
20014 movq %rsp,%rdi
20015 movq $-1,%rsi
20016@@ -1861,26 +2290,31 @@ end_repeat_nmi:
20017 movq %r12, %cr2
20018 1:
20019
20020- testl %ebx,%ebx /* swapgs needed? */
20021+ testl $1,%ebx /* swapgs needed? */
20022 jnz nmi_restore
20023 nmi_swapgs:
20024 SWAPGS_UNSAFE_STACK
20025 nmi_restore:
20026+ pax_exit_kernel_nmi
20027 /* Pop the extra iret frame at once */
20028 RESTORE_ALL 6*8
20029+ testb $3, 8(%rsp)
20030+ jnz 1f
20031+ pax_force_retaddr_bts
20032+1:
20033
20034 /* Clear the NMI executing stack variable */
20035 movq $0, 5*8(%rsp)
20036 jmp irq_return
20037 CFI_ENDPROC
20038-END(nmi)
20039+ENDPROC(nmi)
20040
20041 ENTRY(ignore_sysret)
20042 CFI_STARTPROC
20043 mov $-ENOSYS,%eax
20044 sysret
20045 CFI_ENDPROC
20046-END(ignore_sysret)
20047+ENDPROC(ignore_sysret)
20048
20049 /*
20050 * End of kprobes section
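
The heart of the UDEREF entry/exit helpers added above is flipping the
userland half of the top-level page table: on kernel entry the low byte
of every user PGD entry is cleared (dropping _PAGE_PRESENT, so userland
becomes unreachable from kernel mode), and on exit it is restored to
0x67 (PRESENT|RW|USER|ACCESSED|DIRTY). A compact C model, assuming the
usual 256-entry user half of the 512-entry x86_64 PGD:

#define USER_PGD_PTRS   256     /* assumed: user half of the PGD */

static void uderef_enter_model(unsigned char *pgd_bytes)
{
        unsigned int i;

        /* movb $0, i*8(%rbx): clear the present bit and friends */
        for (i = 0; i < USER_PGD_PTRS; i++)
                pgd_bytes[i * 8] = 0x00;
}

static void uderef_exit_model(unsigned char *pgd_bytes)
{
        unsigned int i;

        /* movb $0x67, i*8(%rbx): PRESENT|RW|USER|ACCESSED|DIRTY */
        for (i = 0; i < USER_PGD_PTRS; i++)
                pgd_bytes[i * 8] = 0x67;
}
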
20051diff --git a/arch/x86/kernel/ftrace.c b/arch/x86/kernel/ftrace.c
20052index 42a392a..fbbd930 100644
20053--- a/arch/x86/kernel/ftrace.c
20054+++ b/arch/x86/kernel/ftrace.c
20055@@ -105,6 +105,8 @@ ftrace_modify_code_direct(unsigned long ip, unsigned const char *old_code,
20056 {
20057 unsigned char replaced[MCOUNT_INSN_SIZE];
20058
20059+ ip = ktla_ktva(ip);
20060+
20061 /*
20062 * Note: Due to modules and __init, code can
20063 * disappear and change, we need to protect against faulting
20064@@ -227,7 +229,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
20065 unsigned char old[MCOUNT_INSN_SIZE], *new;
20066 int ret;
20067
20068- memcpy(old, &ftrace_call, MCOUNT_INSN_SIZE);
20069+ memcpy(old, (void *)ktla_ktva((unsigned long)ftrace_call), MCOUNT_INSN_SIZE);
20070 new = ftrace_call_replace(ip, (unsigned long)func);
20071
20072 /* See comment above by declaration of modifying_ftrace_code */
20073@@ -238,7 +240,7 @@ int ftrace_update_ftrace_func(ftrace_func_t func)
20074 /* Also update the regs callback function */
20075 if (!ret) {
20076 ip = (unsigned long)(&ftrace_regs_call);
20077- memcpy(old, &ftrace_regs_call, MCOUNT_INSN_SIZE);
20078+ memcpy(old, ktla_ktva((void *)&ftrace_regs_call), MCOUNT_INSN_SIZE);
20079 new = ftrace_call_replace(ip, (unsigned long)func);
20080 ret = ftrace_modify_code(ip, old, new);
20081 }
20082@@ -279,7 +281,7 @@ static int ftrace_write(unsigned long ip, const char *val, int size)
20083 * kernel identity mapping to modify code.
20084 */
20085 if (within(ip, (unsigned long)_text, (unsigned long)_etext))
20086- ip = (unsigned long)__va(__pa_symbol(ip));
20087+ ip = (unsigned long)__va(__pa_symbol(ktla_ktva(ip)));
20088
20089 return probe_kernel_write((void *)ip, val, size);
20090 }
20091@@ -289,7 +291,7 @@ static int add_break(unsigned long ip, const char *old)
20092 unsigned char replaced[MCOUNT_INSN_SIZE];
20093 unsigned char brk = BREAKPOINT_INSTRUCTION;
20094
20095- if (probe_kernel_read(replaced, (void *)ip, MCOUNT_INSN_SIZE))
20096+ if (probe_kernel_read(replaced, (void *)ktla_ktva(ip), MCOUNT_INSN_SIZE))
20097 return -EFAULT;
20098
20099 /* Make sure it is what we expect it to be */
20100@@ -637,7 +639,7 @@ ftrace_modify_code(unsigned long ip, unsigned const char *old_code,
20101 return ret;
20102
20103 fail_update:
20104- probe_kernel_write((void *)ip, &old_code[0], 1);
20105+ probe_kernel_write((void *)ktla_ktva(ip), &old_code[0], 1);
20106 goto out;
20107 }
20108
20109@@ -670,6 +672,8 @@ static int ftrace_mod_jmp(unsigned long ip,
20110 {
20111 unsigned char code[MCOUNT_INSN_SIZE];
20112
20113+ ip = ktla_ktva(ip);
20114+
20115 if (probe_kernel_read(code, (void *)ip, MCOUNT_INSN_SIZE))
20116 return -EFAULT;
20117
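
Every ktla_ktva() added in this file serves the same purpose: under
KERNEXEC the kernel text is reached through a different virtual alias
than the one ftrace computes, so instruction addresses have to be
translated before probe_kernel_read()/probe_kernel_write() can touch the
bytes. Schematically (KTLA_KTVA_DELTA is a placeholder for the
KERNEXEC-specific displacement, not the real PaX definition):

#ifndef KTLA_KTVA_DELTA
#define KTLA_KTVA_DELTA 0UL     /* placeholder; 0 when KERNEXEC is off */
#endif

static inline unsigned long ktla_ktva_model(unsigned long addr)
{
        return addr + KTLA_KTVA_DELTA;
}
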
20118diff --git a/arch/x86/kernel/head64.c b/arch/x86/kernel/head64.c
20119index 55b6761..a6456fc 100644
20120--- a/arch/x86/kernel/head64.c
20121+++ b/arch/x86/kernel/head64.c
20122@@ -67,12 +67,12 @@ again:
20123 pgd = *pgd_p;
20124
20125 /*
20126- * The use of __START_KERNEL_map rather than __PAGE_OFFSET here is
20127- * critical -- __PAGE_OFFSET would point us back into the dynamic
20128+ * The use of __early_va rather than __va here is critical:
20129+ * __va would point us back into the dynamic
20130 * range and we might end up looping forever...
20131 */
20132 if (pgd)
20133- pud_p = (pudval_t *)((pgd & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
20134+ pud_p = (pudval_t *)(__early_va(pgd & PTE_PFN_MASK));
20135 else {
20136 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
20137 reset_early_page_tables();
20138@@ -82,13 +82,13 @@ again:
20139 pud_p = (pudval_t *)early_dynamic_pgts[next_early_pgt++];
20140 for (i = 0; i < PTRS_PER_PUD; i++)
20141 pud_p[i] = 0;
20142- *pgd_p = (pgdval_t)pud_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
20143+ *pgd_p = (pgdval_t)__pa(pud_p) + _KERNPG_TABLE;
20144 }
20145 pud_p += pud_index(address);
20146 pud = *pud_p;
20147
20148 if (pud)
20149- pmd_p = (pmdval_t *)((pud & PTE_PFN_MASK) + __START_KERNEL_map - phys_base);
20150+ pmd_p = (pmdval_t *)(__early_va(pud & PTE_PFN_MASK));
20151 else {
20152 if (next_early_pgt >= EARLY_DYNAMIC_PAGE_TABLES) {
20153 reset_early_page_tables();
20154@@ -98,7 +98,7 @@ again:
20155 pmd_p = (pmdval_t *)early_dynamic_pgts[next_early_pgt++];
20156 for (i = 0; i < PTRS_PER_PMD; i++)
20157 pmd_p[i] = 0;
20158- *pud_p = (pudval_t)pmd_p - __START_KERNEL_map + phys_base + _KERNPG_TABLE;
20159+ *pud_p = (pudval_t)__pa(pmd_p) + _KERNPG_TABLE;
20160 }
20161 pmd = (physaddr & PMD_MASK) + early_pmd_flags;
20162 pmd_p[pmd_index(address)] = pmd;
20163@@ -175,7 +175,6 @@ void __init x86_64_start_kernel(char * real_mode_data)
20164 if (console_loglevel == 10)
20165 early_printk("Kernel alive\n");
20166
20167- clear_page(init_level4_pgt);
20168 /* set init_level4_pgt kernel high mapping*/
20169 init_level4_pgt[511] = early_level4_pgt[511];
20170
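
The head64.c hunks replace an open-coded physical-to-virtual conversion
with the __early_va() helper; the removed lines show the arithmetic
being abstracted away: add __START_KERNEL_map to go through the kernel
high mapping, then subtract phys_base to compensate for a relocated
kernel. A standalone sketch of that (removed) arithmetic, with an
invented helper name:

#define __START_KERNEL_map      0xffffffff80000000ULL

static inline void *early_highmap_va(unsigned long long pa,
                                     unsigned long long phys_base)
{
        /* pa + __START_KERNEL_map - phys_base, as in the old code */
        return (void *)(unsigned long)(pa + __START_KERNEL_map - phys_base);
}
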
20171diff --git a/arch/x86/kernel/head_32.S b/arch/x86/kernel/head_32.S
20172index 73afd11..d1670f5 100644
20173--- a/arch/x86/kernel/head_32.S
20174+++ b/arch/x86/kernel/head_32.S
20175@@ -26,6 +26,12 @@
20176 /* Physical address */
20177 #define pa(X) ((X) - __PAGE_OFFSET)
20178
20179+#ifdef CONFIG_PAX_KERNEXEC
20180+#define ta(X) (X)
20181+#else
20182+#define ta(X) ((X) - __PAGE_OFFSET)
20183+#endif
20184+
20185 /*
20186 * References to members of the new_cpu_data structure.
20187 */
20188@@ -55,11 +61,7 @@
20189 * and small than max_low_pfn, otherwise will waste some page table entries
20190 */
20191
20192-#if PTRS_PER_PMD > 1
20193-#define PAGE_TABLE_SIZE(pages) (((pages) / PTRS_PER_PMD) + PTRS_PER_PGD)
20194-#else
20195-#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
20196-#endif
20197+#define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PTE)
20198
20199 /* Number of possible pages in the lowmem region */
20200 LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
20201@@ -78,6 +80,12 @@ INIT_MAP_SIZE = PAGE_TABLE_SIZE(KERNEL_PAGES) * PAGE_SIZE
20202 RESERVE_BRK(pagetables, INIT_MAP_SIZE)
20203
20204 /*
20205+ * Real beginning of normal "text" segment
20206+ */
20207+ENTRY(stext)
20208+ENTRY(_stext)
20209+
20210+/*
20211 * 32-bit kernel entrypoint; only used by the boot CPU. On entry,
20212 * %esi points to the real-mode code as a 32-bit pointer.
20213 * CS and DS must be 4 GB flat segments, but we don't depend on
20214@@ -85,6 +93,13 @@ RESERVE_BRK(pagetables, INIT_MAP_SIZE)
20215 * can.
20216 */
20217 __HEAD
20218+
20219+#ifdef CONFIG_PAX_KERNEXEC
20220+ jmp startup_32
20221+/* PaX: fill first page in .text with int3 to catch NULL derefs in kernel mode */
20222+.fill PAGE_SIZE-5,1,0xcc
20223+#endif
20224+
20225 ENTRY(startup_32)
20226 movl pa(stack_start),%ecx
20227
20228@@ -106,6 +121,59 @@ ENTRY(startup_32)
20229 2:
20230 leal -__PAGE_OFFSET(%ecx),%esp
20231
20232+#ifdef CONFIG_SMP
20233+ movl $pa(cpu_gdt_table),%edi
20234+ movl $__per_cpu_load,%eax
20235+ movw %ax,GDT_ENTRY_PERCPU * 8 + 2(%edi)
20236+ rorl $16,%eax
20237+ movb %al,GDT_ENTRY_PERCPU * 8 + 4(%edi)
20238+ movb %ah,GDT_ENTRY_PERCPU * 8 + 7(%edi)
20239+ movl $__per_cpu_end - 1,%eax
20240+ subl $__per_cpu_start,%eax
20241+ movw %ax,GDT_ENTRY_PERCPU * 8 + 0(%edi)
20242+#endif
20243+
20244+#ifdef CONFIG_PAX_MEMORY_UDEREF
20245+ movl $NR_CPUS,%ecx
20246+ movl $pa(cpu_gdt_table),%edi
20247+1:
20248+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c09700),GDT_ENTRY_KERNEL_DS * 8 + 4(%edi)
20249+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0fb00),GDT_ENTRY_DEFAULT_USER_CS * 8 + 4(%edi)
20250+ movl $((((__PAGE_OFFSET-1) & 0xf0000000) >> 12) | 0x00c0f300),GDT_ENTRY_DEFAULT_USER_DS * 8 + 4(%edi)
20251+ addl $PAGE_SIZE_asm,%edi
20252+ loop 1b
20253+#endif
20254+
20255+#ifdef CONFIG_PAX_KERNEXEC
20256+ movl $pa(boot_gdt),%edi
20257+ movl $__LOAD_PHYSICAL_ADDR,%eax
20258+ movw %ax,GDT_ENTRY_BOOT_CS * 8 + 2(%edi)
20259+ rorl $16,%eax
20260+ movb %al,GDT_ENTRY_BOOT_CS * 8 + 4(%edi)
20261+ movb %ah,GDT_ENTRY_BOOT_CS * 8 + 7(%edi)
20262+ rorl $16,%eax
20263+
20264+ ljmp $(__BOOT_CS),$1f
20265+1:
20266+
20267+ movl $NR_CPUS,%ecx
20268+ movl $pa(cpu_gdt_table),%edi
20269+ addl $__PAGE_OFFSET,%eax
20270+1:
20271+ movb $0xc0,GDT_ENTRY_KERNEL_CS * 8 + 6(%edi)
20272+ movb $0xc0,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 6(%edi)
20273+ movw %ax,GDT_ENTRY_KERNEL_CS * 8 + 2(%edi)
20274+ movw %ax,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 2(%edi)
20275+ rorl $16,%eax
20276+ movb %al,GDT_ENTRY_KERNEL_CS * 8 + 4(%edi)
20277+ movb %al,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 4(%edi)
20278+ movb %ah,GDT_ENTRY_KERNEL_CS * 8 + 7(%edi)
20279+ movb %ah,GDT_ENTRY_KERNEXEC_KERNEL_CS * 8 + 7(%edi)
20280+ rorl $16,%eax
20281+ addl $PAGE_SIZE_asm,%edi
20282+ loop 1b
20283+#endif
20284+
20285 /*
20286 * Clear BSS first so that there are no surprises...
20287 */
20288@@ -201,8 +269,11 @@ ENTRY(startup_32)
20289 movl %eax, pa(max_pfn_mapped)
20290
20291 /* Do early initialization of the fixmap area */
20292- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
20293- movl %eax,pa(initial_pg_pmd+0x1000*KPMDS-8)
20294+#ifdef CONFIG_COMPAT_VDSO
20295+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_pg_pmd+0x1000*KPMDS-8)
20296+#else
20297+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_pg_pmd+0x1000*KPMDS-8)
20298+#endif
20299 #else /* Not PAE */
20300
20301 page_pde_offset = (__PAGE_OFFSET >> 20);
20302@@ -232,8 +303,11 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
20303 movl %eax, pa(max_pfn_mapped)
20304
20305 /* Do early initialization of the fixmap area */
20306- movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,%eax
20307- movl %eax,pa(initial_page_table+0xffc)
20308+#ifdef CONFIG_COMPAT_VDSO
20309+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR+_PAGE_USER,pa(initial_page_table+0xffc)
20310+#else
20311+ movl $pa(initial_pg_fixmap)+PDE_IDENT_ATTR,pa(initial_page_table+0xffc)
20312+#endif
20313 #endif
20314
20315 #ifdef CONFIG_PARAVIRT
20316@@ -247,9 +321,7 @@ page_pde_offset = (__PAGE_OFFSET >> 20);
20317 cmpl $num_subarch_entries, %eax
20318 jae bad_subarch
20319
20320- movl pa(subarch_entries)(,%eax,4), %eax
20321- subl $__PAGE_OFFSET, %eax
20322- jmp *%eax
20323+ jmp *pa(subarch_entries)(,%eax,4)
20324
20325 bad_subarch:
20326 WEAK(lguest_entry)
20327@@ -261,10 +333,10 @@ WEAK(xen_entry)
20328 __INITDATA
20329
20330 subarch_entries:
20331- .long default_entry /* normal x86/PC */
20332- .long lguest_entry /* lguest hypervisor */
20333- .long xen_entry /* Xen hypervisor */
20334- .long default_entry /* Moorestown MID */
20335+ .long ta(default_entry) /* normal x86/PC */
20336+ .long ta(lguest_entry) /* lguest hypervisor */
20337+ .long ta(xen_entry) /* Xen hypervisor */
20338+ .long ta(default_entry) /* Moorestown MID */
20339 num_subarch_entries = (. - subarch_entries) / 4
20340 .previous
20341 #else
20342@@ -355,6 +427,7 @@ default_entry:
20343 movl pa(mmu_cr4_features),%eax
20344 movl %eax,%cr4
20345
20346+#ifdef CONFIG_X86_PAE
20347 testb $X86_CR4_PAE, %al # check if PAE is enabled
20348 jz enable_paging
20349
20350@@ -383,6 +456,9 @@ default_entry:
20351 /* Make changes effective */
20352 wrmsr
20353
20354+ btsl $_PAGE_BIT_NX-32,pa(__supported_pte_mask+4)
20355+#endif
20356+
20357 enable_paging:
20358
20359 /*
20360@@ -451,14 +527,20 @@ is486:
20361 1: movl $(__KERNEL_DS),%eax # reload all the segment registers
20362 movl %eax,%ss # after changing gdt.
20363
20364- movl $(__USER_DS),%eax # DS/ES contains default USER segment
20365+# movl $(__KERNEL_DS),%eax # DS/ES contains default KERNEL segment
20366 movl %eax,%ds
20367 movl %eax,%es
20368
20369 movl $(__KERNEL_PERCPU), %eax
20370 movl %eax,%fs # set this cpu's percpu
20371
20372+#ifdef CONFIG_CC_STACKPROTECTOR
20373 movl $(__KERNEL_STACK_CANARY),%eax
20374+#elif defined(CONFIG_PAX_MEMORY_UDEREF)
20375+ movl $(__USER_DS),%eax
20376+#else
20377+ xorl %eax,%eax
20378+#endif
20379 movl %eax,%gs
20380
20381 xorl %eax,%eax # Clear LDT
20382@@ -534,8 +616,11 @@ setup_once:
20383 * relocation. Manually set base address in stack canary
20384 * segment descriptor.
20385 */
20386- movl $gdt_page,%eax
20387+ movl $cpu_gdt_table,%eax
20388 movl $stack_canary,%ecx
20389+#ifdef CONFIG_SMP
20390+ addl $__per_cpu_load,%ecx
20391+#endif
20392 movw %cx, 8 * GDT_ENTRY_STACK_CANARY + 2(%eax)
20393 shrl $16, %ecx
20394 movb %cl, 8 * GDT_ENTRY_STACK_CANARY + 4(%eax)
20395@@ -566,7 +651,7 @@ ENDPROC(early_idt_handlers)
20396 /* This is global to keep gas from relaxing the jumps */
20397 ENTRY(early_idt_handler)
20398 cld
20399- cmpl $2,%ss:early_recursion_flag
20400+ cmpl $1,%ss:early_recursion_flag
20401 je hlt_loop
20402 incl %ss:early_recursion_flag
20403
20404@@ -604,8 +689,8 @@ ENTRY(early_idt_handler)
20405 pushl (20+6*4)(%esp) /* trapno */
20406 pushl $fault_msg
20407 call printk
20408-#endif
20409 call dump_stack
20410+#endif
20411 hlt_loop:
20412 hlt
20413 jmp hlt_loop
20414@@ -624,8 +709,11 @@ ENDPROC(early_idt_handler)
20415 /* This is the default interrupt "handler" :-) */
20416 ALIGN
20417 ignore_int:
20418- cld
20419 #ifdef CONFIG_PRINTK
20420+ cmpl $2,%ss:early_recursion_flag
20421+ je hlt_loop
20422+ incl %ss:early_recursion_flag
20423+ cld
20424 pushl %eax
20425 pushl %ecx
20426 pushl %edx
20427@@ -634,9 +722,6 @@ ignore_int:
20428 movl $(__KERNEL_DS),%eax
20429 movl %eax,%ds
20430 movl %eax,%es
20431- cmpl $2,early_recursion_flag
20432- je hlt_loop
20433- incl early_recursion_flag
20434 pushl 16(%esp)
20435 pushl 24(%esp)
20436 pushl 32(%esp)
20437@@ -670,29 +755,43 @@ ENTRY(setup_once_ref)
20438 /*
20439 * BSS section
20440 */
20441-__PAGE_ALIGNED_BSS
20442- .align PAGE_SIZE
20443 #ifdef CONFIG_X86_PAE
20444+.section .initial_pg_pmd,"a",@progbits
20445 initial_pg_pmd:
20446 .fill 1024*KPMDS,4,0
20447 #else
20448+.section .initial_page_table,"a",@progbits
20449 ENTRY(initial_page_table)
20450 .fill 1024,4,0
20451 #endif
20452+.section .initial_pg_fixmap,"a",@progbits
20453 initial_pg_fixmap:
20454 .fill 1024,4,0
20455+.section .empty_zero_page,"a",@progbits
20456 ENTRY(empty_zero_page)
20457 .fill 4096,1,0
20458+.section .swapper_pg_dir,"a",@progbits
20459 ENTRY(swapper_pg_dir)
20460+#ifdef CONFIG_X86_PAE
20461+ .fill 4,8,0
20462+#else
20463 .fill 1024,4,0
20464+#endif
20465+
20466+/*
20467+ * The IDT has to be page-aligned to simplify the Pentium
20468+ * F0 0F bug workaround. We have a special link segment
20469+ * for this.
20470+ */
20471+.section .idt,"a",@progbits
20472+ENTRY(idt_table)
20473+ .fill 256,8,0
20474
20475 /*
20476 * This starts the data section.
20477 */
20478 #ifdef CONFIG_X86_PAE
20479-__PAGE_ALIGNED_DATA
20480- /* Page-aligned for the benefit of paravirt? */
20481- .align PAGE_SIZE
20482+.section .initial_page_table,"a",@progbits
20483 ENTRY(initial_page_table)
20484 .long pa(initial_pg_pmd+PGD_IDENT_ATTR),0 /* low identity map */
20485 # if KPMDS == 3
20486@@ -711,12 +810,20 @@ ENTRY(initial_page_table)
20487 # error "Kernel PMDs should be 1, 2 or 3"
20488 # endif
20489 .align PAGE_SIZE /* needs to be page-sized too */
20490+
20491+#ifdef CONFIG_PAX_PER_CPU_PGD
20492+ENTRY(cpu_pgd)
20493+ .rept NR_CPUS
20494+ .fill 4,8,0
20495+ .endr
20496+#endif
20497+
20498 #endif
20499
20500 .data
20501 .balign 4
20502 ENTRY(stack_start)
20503- .long init_thread_union+THREAD_SIZE
20504+ .long init_thread_union+THREAD_SIZE-8
20505
20506 __INITRODATA
20507 int_msg:
20508@@ -744,7 +851,7 @@ fault_msg:
20509 * segment size, and 32-bit linear address value:
20510 */
20511
20512- .data
20513+.section .rodata,"a",@progbits
20514 .globl boot_gdt_descr
20515 .globl idt_descr
20516
20517@@ -753,7 +860,7 @@ fault_msg:
20518 .word 0 # 32 bit align gdt_desc.address
20519 boot_gdt_descr:
20520 .word __BOOT_DS+7
20521- .long boot_gdt - __PAGE_OFFSET
20522+ .long pa(boot_gdt)
20523
20524 .word 0 # 32-bit align idt_desc.address
20525 idt_descr:
20526@@ -764,7 +871,7 @@ idt_descr:
20527 .word 0 # 32 bit align gdt_desc.address
20528 ENTRY(early_gdt_descr)
20529 .word GDT_ENTRIES*8-1
20530- .long gdt_page /* Overwritten for secondary CPUs */
20531+ .long cpu_gdt_table /* Overwritten for secondary CPUs */
20532
20533 /*
20534 * The boot_gdt must mirror the equivalent in setup.S and is
20535@@ -773,5 +880,65 @@ ENTRY(early_gdt_descr)
20536 .align L1_CACHE_BYTES
20537 ENTRY(boot_gdt)
20538 .fill GDT_ENTRY_BOOT_CS,8,0
20539- .quad 0x00cf9a000000ffff /* kernel 4GB code at 0x00000000 */
20540- .quad 0x00cf92000000ffff /* kernel 4GB data at 0x00000000 */
20541+ .quad 0x00cf9b000000ffff /* kernel 4GB code at 0x00000000 */
20542+ .quad 0x00cf93000000ffff /* kernel 4GB data at 0x00000000 */
20543+
20544+ .align PAGE_SIZE_asm
20545+ENTRY(cpu_gdt_table)
20546+ .rept NR_CPUS
20547+ .quad 0x0000000000000000 /* NULL descriptor */
20548+ .quad 0x0000000000000000 /* 0x0b reserved */
20549+ .quad 0x0000000000000000 /* 0x13 reserved */
20550+ .quad 0x0000000000000000 /* 0x1b reserved */
20551+
20552+#ifdef CONFIG_PAX_KERNEXEC
20553+ .quad 0x00cf9b000000ffff /* 0x20 alternate kernel 4GB code at 0x00000000 */
20554+#else
20555+ .quad 0x0000000000000000 /* 0x20 unused */
20556+#endif
20557+
20558+ .quad 0x0000000000000000 /* 0x28 unused */
20559+ .quad 0x0000000000000000 /* 0x33 TLS entry 1 */
20560+ .quad 0x0000000000000000 /* 0x3b TLS entry 2 */
20561+ .quad 0x0000000000000000 /* 0x43 TLS entry 3 */
20562+ .quad 0x0000000000000000 /* 0x4b reserved */
20563+ .quad 0x0000000000000000 /* 0x53 reserved */
20564+ .quad 0x0000000000000000 /* 0x5b reserved */
20565+
20566+ .quad 0x00cf9b000000ffff /* 0x60 kernel 4GB code at 0x00000000 */
20567+ .quad 0x00cf93000000ffff /* 0x68 kernel 4GB data at 0x00000000 */
20568+ .quad 0x00cffb000000ffff /* 0x73 user 4GB code at 0x00000000 */
20569+ .quad 0x00cff3000000ffff /* 0x7b user 4GB data at 0x00000000 */
20570+
20571+ .quad 0x0000000000000000 /* 0x80 TSS descriptor */
20572+ .quad 0x0000000000000000 /* 0x88 LDT descriptor */
20573+
20574+ /*
20575+ * Segments used for calling PnP BIOS have byte granularity.
20576+ * The code segments and data segments have fixed 64k limits,
20577+ * the transfer segment sizes are set at run time.
20578+ */
20579+ .quad 0x00409b000000ffff /* 0x90 32-bit code */
20580+ .quad 0x00009b000000ffff /* 0x98 16-bit code */
20581+ .quad 0x000093000000ffff /* 0xa0 16-bit data */
20582+ .quad 0x0000930000000000 /* 0xa8 16-bit data */
20583+ .quad 0x0000930000000000 /* 0xb0 16-bit data */
20584+
20585+ /*
20586+ * The APM segments have byte granularity and their bases
20587+ * are set at run time. All have 64k limits.
20588+ */
20589+ .quad 0x00409b000000ffff /* 0xb8 APM CS code */
20590+ .quad 0x00009b000000ffff /* 0xc0 APM CS 16 code (16 bit) */
20591+ .quad 0x004093000000ffff /* 0xc8 APM DS data */
20592+
20593+ .quad 0x00c0930000000000 /* 0xd0 - ESPFIX SS */
20594+ .quad 0x0040930000000000 /* 0xd8 - PERCPU */
20595+ .quad 0x0040910000000017 /* 0xe0 - STACK_CANARY */
20596+ .quad 0x0000000000000000 /* 0xe8 - PCIBIOS_CS */
20597+ .quad 0x0000000000000000 /* 0xf0 - PCIBIOS_DS */
20598+ .quad 0x0000000000000000 /* 0xf8 - GDT entry 31: double-fault TSS */
20599+
20600+ /* Be sure this is zeroed to avoid false validations in Xen */
20601+ .fill PAGE_SIZE_asm - GDT_SIZE,1,0
20602+ .endr
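The hand-packed quads in cpu_gdt_table above follow the architectural x86 segment-descriptor layout, so they can be sanity-checked mechanically. A small stand-alone decoder for the 8-byte format (nothing here is patch-specific): 0x00cf9b000000ffff comes out as base 0, 20-bit limit 0xfffff with 4K granularity (i.e. 4 GB), access byte 0x9b (present, DPL 0, code, readable, accessed), which matches the "kernel 4GB code" comment.

#include <stdio.h>
#include <stdint.h>

static void decode_gdt_desc(uint64_t d)
{
	uint32_t base  = ((d >> 16) & 0xffffff) | (((d >> 56) & 0xff) << 24);
	uint32_t limit = (d & 0xffff) | (((d >> 48) & 0xf) << 16);
	uint8_t  access = (d >> 40) & 0xff;	/* P, DPL, S, type */
	uint8_t  flags  = (d >> 52) & 0xf;	/* G, D/B, L, AVL */

	printf("%#018llx: base=%#010x limit=%#07x%s access=%#04x\n",
	       (unsigned long long)d, base, limit,
	       (flags & 0x8) ? " (4K pages)" : " (bytes)", access);
}

int main(void)
{
	decode_gdt_desc(0x00cf9b000000ffffULL);	/* kernel 4GB code */
	decode_gdt_desc(0x00cf93000000ffffULL);	/* kernel 4GB data */
	decode_gdt_desc(0x00cffb000000ffffULL);	/* user 4GB code, DPL 3 */
	return 0;
}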
20603diff --git a/arch/x86/kernel/head_64.S b/arch/x86/kernel/head_64.S
20604index 321d65e..863089b 100644
20605--- a/arch/x86/kernel/head_64.S
20606+++ b/arch/x86/kernel/head_64.S
20607@@ -20,6 +20,8 @@
20608 #include <asm/processor-flags.h>
20609 #include <asm/percpu.h>
20610 #include <asm/nops.h>
20611+#include <asm/cpufeature.h>
20612+#include <asm/alternative-asm.h>
20613
20614 #ifdef CONFIG_PARAVIRT
20615 #include <asm/asm-offsets.h>
20616@@ -41,6 +43,12 @@ L4_PAGE_OFFSET = pgd_index(__PAGE_OFFSET)
20617 L3_PAGE_OFFSET = pud_index(__PAGE_OFFSET)
20618 L4_START_KERNEL = pgd_index(__START_KERNEL_map)
20619 L3_START_KERNEL = pud_index(__START_KERNEL_map)
20620+L4_VMALLOC_START = pgd_index(VMALLOC_START)
20621+L3_VMALLOC_START = pud_index(VMALLOC_START)
20622+L4_VMALLOC_END = pgd_index(VMALLOC_END)
20623+L3_VMALLOC_END = pud_index(VMALLOC_END)
20624+L4_VMEMMAP_START = pgd_index(VMEMMAP_START)
20625+L3_VMEMMAP_START = pud_index(VMEMMAP_START)
20626
20627 .text
20628 __HEAD
20629@@ -89,11 +97,23 @@ startup_64:
20630 * Fixup the physical addresses in the page table
20631 */
20632 addq %rbp, early_level4_pgt + (L4_START_KERNEL*8)(%rip)
20633+ addq %rbp, init_level4_pgt + (L4_PAGE_OFFSET*8)(%rip)
20634+ addq %rbp, init_level4_pgt + (L4_VMALLOC_START*8)(%rip)
20635+ addq %rbp, init_level4_pgt + (L4_VMALLOC_END*8)(%rip)
20636+ addq %rbp, init_level4_pgt + (L4_VMEMMAP_START*8)(%rip)
20637+ addq %rbp, init_level4_pgt + (L4_START_KERNEL*8)(%rip)
20638
20639- addq %rbp, level3_kernel_pgt + (510*8)(%rip)
20640- addq %rbp, level3_kernel_pgt + (511*8)(%rip)
20641+ addq %rbp, level3_ident_pgt + (0*8)(%rip)
20642+#ifndef CONFIG_XEN
20643+ addq %rbp, level3_ident_pgt + (1*8)(%rip)
20644+#endif
20645
20646- addq %rbp, level2_fixmap_pgt + (506*8)(%rip)
20647+ addq %rbp, level3_vmemmap_pgt + (L3_VMEMMAP_START*8)(%rip)
20648+
20649+ addq %rbp, level3_kernel_pgt + (L3_START_KERNEL*8)(%rip)
20650+ addq %rbp, level3_kernel_pgt + ((L3_START_KERNEL+1)*8)(%rip)
20651+
20652+ addq %rbp, level2_fixmap_pgt + (507*8)(%rip)
20653
20654 /*
20655 * Set up the identity mapping for the switchover. These
20656@@ -177,8 +197,8 @@ ENTRY(secondary_startup_64)
20657 movq $(init_level4_pgt - __START_KERNEL_map), %rax
20658 1:
20659
20660- /* Enable PAE mode and PGE */
20661- movl $(X86_CR4_PAE | X86_CR4_PGE), %ecx
20662+ /* Enable PAE mode and PSE/PGE */
20663+ movl $(X86_CR4_PSE | X86_CR4_PAE | X86_CR4_PGE), %ecx
20664 movq %rcx, %cr4
20665
20666 /* Setup early boot stage 4 level pagetables. */
20667@@ -199,10 +219,18 @@ ENTRY(secondary_startup_64)
20668 movl $MSR_EFER, %ecx
20669 rdmsr
20670 btsl $_EFER_SCE, %eax /* Enable System Call */
20671- btl $20,%edi /* No Execute supported? */
20672+ btl $(X86_FEATURE_NX & 31),%edi /* No Execute supported? */
20673 jnc 1f
20674 btsl $_EFER_NX, %eax
20675 btsq $_PAGE_BIT_NX,early_pmd_flags(%rip)
20676+ leaq init_level4_pgt(%rip), %rdi
20677+#ifndef CONFIG_EFI
20678+ btsq $_PAGE_BIT_NX, 8*L4_PAGE_OFFSET(%rdi)
20679+#endif
20680+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_START(%rdi)
20681+ btsq $_PAGE_BIT_NX, 8*L4_VMALLOC_END(%rdi)
20682+ btsq $_PAGE_BIT_NX, 8*L4_VMEMMAP_START(%rdi)
20683+ btsq $_PAGE_BIT_NX, __supported_pte_mask(%rip)
20684 1: wrmsr /* Make changes effective */
20685
20686 /* Setup cr0 */
20687@@ -282,6 +310,7 @@ ENTRY(secondary_startup_64)
20688 * REX.W + FF /5 JMP m16:64 Jump far, absolute indirect,
20689 * address given in m16:64.
20690 */
20691+ pax_set_fptr_mask
20692 movq initial_code(%rip),%rax
20693 pushq $0 # fake return address to stop unwinder
20694 pushq $__KERNEL_CS # set correct cs
20695@@ -388,7 +417,7 @@ ENTRY(early_idt_handler)
20696 call dump_stack
20697 #ifdef CONFIG_KALLSYMS
20698 leaq early_idt_ripmsg(%rip),%rdi
20699- movq 40(%rsp),%rsi # %rip again
20700+ movq 88(%rsp),%rsi # %rip again
20701 call __print_symbol
20702 #endif
20703 #endif /* EARLY_PRINTK */
20704@@ -416,6 +445,7 @@ ENDPROC(early_idt_handler)
20705 early_recursion_flag:
20706 .long 0
20707
20708+ .section .rodata,"a",@progbits
20709 #ifdef CONFIG_EARLY_PRINTK
20710 early_idt_msg:
20711 .asciz "PANIC: early exception %02lx rip %lx:%lx error %lx cr2 %lx\n"
20712@@ -443,29 +473,52 @@ NEXT_PAGE(early_level4_pgt)
20713 NEXT_PAGE(early_dynamic_pgts)
20714 .fill 512*EARLY_DYNAMIC_PAGE_TABLES,8,0
20715
20716- .data
20717+ .section .rodata,"a",@progbits
20718
20719-#ifndef CONFIG_XEN
20720 NEXT_PAGE(init_level4_pgt)
20721- .fill 512,8,0
20722-#else
20723-NEXT_PAGE(init_level4_pgt)
20724- .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
20725 .org init_level4_pgt + L4_PAGE_OFFSET*8, 0
20726 .quad level3_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
20727+ .org init_level4_pgt + L4_VMALLOC_START*8, 0
20728+ .quad level3_vmalloc_start_pgt - __START_KERNEL_map + _KERNPG_TABLE
20729+ .org init_level4_pgt + L4_VMALLOC_END*8, 0
20730+ .quad level3_vmalloc_end_pgt - __START_KERNEL_map + _KERNPG_TABLE
20731+ .org init_level4_pgt + L4_VMEMMAP_START*8, 0
20732+ .quad level3_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
20733 .org init_level4_pgt + L4_START_KERNEL*8, 0
20734 /* (2^48-(2*1024*1024*1024))/(2^39) = 511 */
20735 .quad level3_kernel_pgt - __START_KERNEL_map + _PAGE_TABLE
20736
20737+#ifdef CONFIG_PAX_PER_CPU_PGD
20738+NEXT_PAGE(cpu_pgd)
20739+ .rept NR_CPUS
20740+ .fill 512,8,0
20741+ .endr
20742+#endif
20743+
20744 NEXT_PAGE(level3_ident_pgt)
20745 .quad level2_ident_pgt - __START_KERNEL_map + _KERNPG_TABLE
20746+#ifdef CONFIG_XEN
20747 .fill 511, 8, 0
20748+#else
20749+ .quad level2_ident_pgt + PAGE_SIZE - __START_KERNEL_map + _KERNPG_TABLE
20750+ .fill 510,8,0
20751+#endif
20752+
20753+NEXT_PAGE(level3_vmalloc_start_pgt)
20754+ .fill 512,8,0
20755+
20756+NEXT_PAGE(level3_vmalloc_end_pgt)
20757+ .fill 512,8,0
20758+
20759+NEXT_PAGE(level3_vmemmap_pgt)
20760+ .fill L3_VMEMMAP_START,8,0
20761+ .quad level2_vmemmap_pgt - __START_KERNEL_map + _KERNPG_TABLE
20762+
20763 NEXT_PAGE(level2_ident_pgt)
20764- /* Since I easily can, map the first 1G.
20765+ /* Since I easily can, map the first 2G.
20766 * Don't set NX because code runs from these pages.
20767 */
20768- PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, PTRS_PER_PMD)
20769-#endif
20770+ PMDS(0, __PAGE_KERNEL_IDENT_LARGE_EXEC, 2*PTRS_PER_PMD)
20771
20772 NEXT_PAGE(level3_kernel_pgt)
20773 .fill L3_START_KERNEL,8,0
20774@@ -473,6 +526,9 @@ NEXT_PAGE(level3_kernel_pgt)
20775 .quad level2_kernel_pgt - __START_KERNEL_map + _KERNPG_TABLE
20776 .quad level2_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
20777
20778+NEXT_PAGE(level2_vmemmap_pgt)
20779+ .fill 512,8,0
20780+
20781 NEXT_PAGE(level2_kernel_pgt)
20782 /*
20783 * 512 MB kernel mapping. We spend a full page on this pagetable
20784@@ -488,39 +544,64 @@ NEXT_PAGE(level2_kernel_pgt)
20785 KERNEL_IMAGE_SIZE/PMD_SIZE)
20786
20787 NEXT_PAGE(level2_fixmap_pgt)
20788- .fill 506,8,0
20789- .quad level1_fixmap_pgt - __START_KERNEL_map + _PAGE_TABLE
20790- /* 8MB reserved for vsyscalls + a 2MB hole = 4 + 1 entries */
20791- .fill 5,8,0
20792+ .fill 507,8,0
20793+ .quad level1_vsyscall_pgt - __START_KERNEL_map + _PAGE_TABLE
20794+ /* 6MB reserved for vsyscalls + a 2MB hole = 3 + 1 entries */
20795+ .fill 4,8,0
20796
20797-NEXT_PAGE(level1_fixmap_pgt)
20798+NEXT_PAGE(level1_vsyscall_pgt)
20799 .fill 512,8,0
20800
20801 #undef PMDS
20802
20803- .data
20804+ .align PAGE_SIZE
20805+ENTRY(cpu_gdt_table)
20806+ .rept NR_CPUS
20807+ .quad 0x0000000000000000 /* NULL descriptor */
20808+ .quad 0x00cf9b000000ffff /* __KERNEL32_CS */
20809+ .quad 0x00af9b000000ffff /* __KERNEL_CS */
20810+ .quad 0x00cf93000000ffff /* __KERNEL_DS */
20811+ .quad 0x00cffb000000ffff /* __USER32_CS */
20812+ .quad 0x00cff3000000ffff /* __USER_DS, __USER32_DS */
20813+ .quad 0x00affb000000ffff /* __USER_CS */
20814+
20815+#ifdef CONFIG_PAX_KERNEXEC
20816+ .quad 0x00af9b000000ffff /* __KERNEXEC_KERNEL_CS */
20817+#else
20818+ .quad 0x0 /* unused */
20819+#endif
20820+
20821+ .quad 0,0 /* TSS */
20822+ .quad 0,0 /* LDT */
20823+ .quad 0,0,0 /* three TLS descriptors */
20824+ .quad 0x0000f40000000000 /* node/CPU stored in limit */
20825+ /* asm/segment.h:GDT_ENTRIES must match this */
20826+
20827+ /* zero the remaining page */
20828+ .fill PAGE_SIZE / 8 - GDT_ENTRIES,8,0
20829+ .endr
20830+
20831 .align 16
20832 .globl early_gdt_descr
20833 early_gdt_descr:
20834 .word GDT_ENTRIES*8-1
20835 early_gdt_descr_base:
20836- .quad INIT_PER_CPU_VAR(gdt_page)
20837+ .quad cpu_gdt_table
20838
20839 ENTRY(phys_base)
20840 /* This must match the first entry in level2_kernel_pgt */
20841 .quad 0x0000000000000000
20842
20843 #include "../../x86/xen/xen-head.S"
20844-
20845- .section .bss, "aw", @nobits
20846- .align L1_CACHE_BYTES
20847-ENTRY(idt_table)
20848- .skip IDT_ENTRIES * 16
20849
20850- .align L1_CACHE_BYTES
20851-ENTRY(nmi_idt_table)
20852- .skip IDT_ENTRIES * 16
20853-
20854- __PAGE_ALIGNED_BSS
20855+ .section .rodata,"a",@progbits
20856 NEXT_PAGE(empty_zero_page)
20857 .skip PAGE_SIZE
20858+
20859+ .align L1_CACHE_BYTES
20860+ENTRY(idt_table)
20861+ .fill 512,8,0
20862+
20863+ .align L1_CACHE_BYTES
20864+ENTRY(nmi_idt_table)
20865+ .fill 512,8,0
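The head_64.S changes above hang the new PGD and PUD slots off pgd_index()/pud_index() of the vmalloc and vmemmap regions. The index arithmetic is purely architectural; computing it stand-alone with the 3.10-era x86_64 region bases (quoted here for illustration) reproduces the constants the patch relies on, including the 511 and 510 noted in the comments:

#include <stdio.h>
#include <stdint.h>

#define PGDIR_SHIFT  39
#define PUD_SHIFT    30
#define PTRS_PER_PGD 512

static unsigned pgd_index(uint64_t va)
{
	return (va >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1);
}

static unsigned pud_index(uint64_t va)
{
	return (va >> PUD_SHIFT) & 511;
}

int main(void)
{
	/* x86_64 layout as of 3.10, for illustration */
	uint64_t page_offset   = 0xffff880000000000ULL;
	uint64_t vmalloc_start = 0xffffc90000000000ULL;
	uint64_t vmemmap_start = 0xffffea0000000000ULL;
	uint64_t start_kernel  = 0xffffffff80000000ULL;

	printf("L4_PAGE_OFFSET   = %u\n", pgd_index(page_offset));	/* 272 */
	printf("L4_VMALLOC_START = %u\n", pgd_index(vmalloc_start));	/* 402 */
	printf("L4_VMEMMAP_START = %u\n", pgd_index(vmemmap_start));	/* 468 */
	printf("L4_START_KERNEL  = %u\n", pgd_index(start_kernel));	/* 511 */
	printf("L3_START_KERNEL  = %u\n", pud_index(start_kernel));	/* 510 */
	return 0;
}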
20866diff --git a/arch/x86/kernel/i386_ksyms_32.c b/arch/x86/kernel/i386_ksyms_32.c
20867index 0fa6912..37fce70 100644
20868--- a/arch/x86/kernel/i386_ksyms_32.c
20869+++ b/arch/x86/kernel/i386_ksyms_32.c
20870@@ -20,8 +20,12 @@ extern void cmpxchg8b_emu(void);
20871 EXPORT_SYMBOL(cmpxchg8b_emu);
20872 #endif
20873
20874+EXPORT_SYMBOL_GPL(cpu_gdt_table);
20875+
20876 /* Networking helper routines. */
20877 EXPORT_SYMBOL(csum_partial_copy_generic);
20878+EXPORT_SYMBOL(csum_partial_copy_generic_to_user);
20879+EXPORT_SYMBOL(csum_partial_copy_generic_from_user);
20880
20881 EXPORT_SYMBOL(__get_user_1);
20882 EXPORT_SYMBOL(__get_user_2);
20883@@ -37,3 +41,7 @@ EXPORT_SYMBOL(strstr);
20884
20885 EXPORT_SYMBOL(csum_partial);
20886 EXPORT_SYMBOL(empty_zero_page);
20887+
20888+#ifdef CONFIG_PAX_KERNEXEC
20889+EXPORT_SYMBOL(__LOAD_PHYSICAL_ADDR);
20890+#endif
20891diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
20892index cb33909..1163b40 100644
20893--- a/arch/x86/kernel/i387.c
20894+++ b/arch/x86/kernel/i387.c
20895@@ -51,7 +51,7 @@ static inline bool interrupted_kernel_fpu_idle(void)
20896 static inline bool interrupted_user_mode(void)
20897 {
20898 struct pt_regs *regs = get_irq_regs();
20899- return regs && user_mode_vm(regs);
20900+ return regs && user_mode(regs);
20901 }
20902
20903 /*
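On the user_mode_vm() to user_mode() swaps in this region (i387.c here; irq_32.c, irq_64.c, kgdb.c and kprobes below): in stock 3.10 the two helpers differ only in whether VM86 mode counts as user mode, and the patch presumably redefines the plain check so that the distinction no longer matters on PaX kernels. A stand-alone model of what the stock helpers test; the constants are the architectural ones:

#include <stdio.h>

#define X86_EFLAGS_VM	 0x00020000	/* virtual-8086 mode */
#define SEGMENT_RPL_MASK 0x3
#define USER_RPL	 0x3

struct regs_model { unsigned long cs, flags; };

/* user_mode(): CS requestor privilege level is ring 3 */
static int user_mode(const struct regs_model *r)
{
	return (r->cs & SEGMENT_RPL_MASK) == USER_RPL;
}

/* user_mode_vm(): additionally treats vm86 mode as user mode */
static int user_mode_vm(const struct regs_model *r)
{
	return ((r->cs & SEGMENT_RPL_MASK) | (r->flags & X86_EFLAGS_VM))
		>= USER_RPL;
}

int main(void)
{
	struct regs_model vm86  = { .cs = 0x0,  .flags = X86_EFLAGS_VM };
	struct regs_model ring3 = { .cs = 0x73, .flags = 0 };

	printf("vm86 : user_mode=%d user_mode_vm=%d\n",
	       user_mode(&vm86), user_mode_vm(&vm86));
	printf("ring3: user_mode=%d user_mode_vm=%d\n",
	       user_mode(&ring3), user_mode_vm(&ring3));
	return 0;
}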
20904diff --git a/arch/x86/kernel/i8259.c b/arch/x86/kernel/i8259.c
20905index 9a5c460..84868423 100644
20906--- a/arch/x86/kernel/i8259.c
20907+++ b/arch/x86/kernel/i8259.c
20908@@ -110,7 +110,7 @@ static int i8259A_irq_pending(unsigned int irq)
20909 static void make_8259A_irq(unsigned int irq)
20910 {
20911 disable_irq_nosync(irq);
20912- io_apic_irqs &= ~(1<<irq);
20913+ io_apic_irqs &= ~(1UL<<irq);
20914 irq_set_chip_and_handler_name(irq, &i8259A_chip, handle_level_irq,
20915 i8259A_chip.name);
20916 enable_irq(irq);
20917@@ -209,7 +209,7 @@ spurious_8259A_irq:
20918 "spurious 8259A interrupt: IRQ%d.\n", irq);
20919 spurious_irq_mask |= irqmask;
20920 }
20921- atomic_inc(&irq_err_count);
20922+ atomic_inc_unchecked(&irq_err_count);
20923 /*
20924 * Theoretically we do not have to handle this IRQ,
20925 * but in Linux this does not cause problems and is
20926@@ -333,14 +333,16 @@ static void init_8259A(int auto_eoi)
20927 /* (slave's support for AEOI in flat mode is to be investigated) */
20928 outb_pic(SLAVE_ICW4_DEFAULT, PIC_SLAVE_IMR);
20929
20930+ pax_open_kernel();
20931 if (auto_eoi)
20932 /*
20933 * In AEOI mode we just have to mask the interrupt
20934 * when acking.
20935 */
20936- i8259A_chip.irq_mask_ack = disable_8259A_irq;
20937+ *(void **)&i8259A_chip.irq_mask_ack = disable_8259A_irq;
20938 else
20939- i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
20940+ *(void **)&i8259A_chip.irq_mask_ack = mask_and_ack_8259A;
20941+ pax_close_kernel();
20942
20943 udelay(100); /* wait for 8259A to initialize */
20944
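The stores through `*(void **)&i8259A_chip.irq_mask_ack` above are bracketed by pax_open_kernel()/pax_close_kernel() because PaX moves ops structures such as irq_chip into read-only memory; the pair briefly re-enables kernel writes (via CR0.WP or a writable alias, depending on configuration) for the one sanctioned update. A user-space analogue of the discipline using mprotect(); this sketches the pattern, not the kernel primitive:

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct ops { void (*handler)(void); };

static void real_handler(void) { puts("handler ran"); }

int main(void)
{
	long pagesz = sysconf(_SC_PAGESIZE);

	/* Stand-in for a __read_only kernel object: one page, frozen early. */
	struct ops *chip = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
				MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (chip == MAP_FAILED)
		return 1;
	mprotect(chip, pagesz, PROT_READ);	/* "boot-time" freeze */

	/* pax_open_kernel() analogue: briefly allow the write... */
	mprotect(chip, pagesz, PROT_READ | PROT_WRITE);
	chip->handler = real_handler;		/* the sanctioned update */
	/* ...pax_close_kernel() analogue: re-freeze. */
	mprotect(chip, pagesz, PROT_READ);

	chip->handler();			/* reads are unaffected */
	return 0;
}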
20945diff --git a/arch/x86/kernel/io_delay.c b/arch/x86/kernel/io_delay.c
20946index a979b5b..1d6db75 100644
20947--- a/arch/x86/kernel/io_delay.c
20948+++ b/arch/x86/kernel/io_delay.c
20949@@ -58,7 +58,7 @@ static int __init dmi_io_delay_0xed_port(const struct dmi_system_id *id)
20950 * Quirk table for systems that misbehave (lock up, etc.) if port
20951 * 0x80 is used:
20952 */
20953-static struct dmi_system_id __initdata io_delay_0xed_port_dmi_table[] = {
20954+static const struct dmi_system_id __initconst io_delay_0xed_port_dmi_table[] = {
20955 {
20956 .callback = dmi_io_delay_0xed_port,
20957 .ident = "Compaq Presario V6000",
20958diff --git a/arch/x86/kernel/ioport.c b/arch/x86/kernel/ioport.c
20959index 4ddaf66..6292f4e 100644
20960--- a/arch/x86/kernel/ioport.c
20961+++ b/arch/x86/kernel/ioport.c
20962@@ -6,6 +6,7 @@
20963 #include <linux/sched.h>
20964 #include <linux/kernel.h>
20965 #include <linux/capability.h>
20966+#include <linux/security.h>
20967 #include <linux/errno.h>
20968 #include <linux/types.h>
20969 #include <linux/ioport.h>
20970@@ -28,6 +29,12 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
20971
20972 if ((from + num <= from) || (from + num > IO_BITMAP_BITS))
20973 return -EINVAL;
20974+#ifdef CONFIG_GRKERNSEC_IO
20975+ if (turn_on && grsec_disable_privio) {
20976+ gr_handle_ioperm();
20977+ return -EPERM;
20978+ }
20979+#endif
20980 if (turn_on && !capable(CAP_SYS_RAWIO))
20981 return -EPERM;
20982
20983@@ -54,7 +61,7 @@ asmlinkage long sys_ioperm(unsigned long from, unsigned long num, int turn_on)
20984 * because the ->io_bitmap_max value must match the bitmap
20985 * contents:
20986 */
20987- tss = &per_cpu(init_tss, get_cpu());
20988+ tss = init_tss + get_cpu();
20989
20990 if (turn_on)
20991 bitmap_clear(t->io_bitmap_ptr, from, num);
20992@@ -103,6 +110,12 @@ SYSCALL_DEFINE1(iopl, unsigned int, level)
20993 return -EINVAL;
20994 /* Trying to gain more privileges? */
20995 if (level > old) {
20996+#ifdef CONFIG_GRKERNSEC_IO
20997+ if (grsec_disable_privio) {
20998+ gr_handle_iopl();
20999+ return -EPERM;
21000+ }
21001+#endif
21002 if (!capable(CAP_SYS_RAWIO))
21003 return -EPERM;
21004 }
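With CONFIG_GRKERNSEC_IO enabled and grsec_disable_privio set, the two hunks above make ioperm(2) and iopl(2) fail with EPERM even for CAP_SYS_RAWIO holders, logging the attempt through gr_handle_ioperm()/gr_handle_iopl(). The effect is observable from user space as below (on a stock kernel the call succeeds for root; both syscalls are x86-only):

#include <stdio.h>
#include <sys/io.h>

int main(void)
{
	/* Ask for ring-3 I/O port access; on a grsec kernel with
	 * grsec_disable_privio this fails with EPERM even as root. */
	if (iopl(3))
		perror("iopl");
	else
		puts("iopl(3) granted");
	return 0;
}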
21005diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c
21006index ac0631d..ff7cb62 100644
21007--- a/arch/x86/kernel/irq.c
21008+++ b/arch/x86/kernel/irq.c
21009@@ -18,7 +18,7 @@
21010 #include <asm/mce.h>
21011 #include <asm/hw_irq.h>
21012
21013-atomic_t irq_err_count;
21014+atomic_unchecked_t irq_err_count;
21015
21016 /* Function pointer for generic interrupt vector handling */
21017 void (*x86_platform_ipi_callback)(void) = NULL;
21018@@ -122,9 +122,9 @@ int arch_show_interrupts(struct seq_file *p, int prec)
21019 seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
21020 seq_printf(p, " Machine check polls\n");
21021 #endif
21022- seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
21023+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read_unchecked(&irq_err_count));
21024 #if defined(CONFIG_X86_IO_APIC)
21025- seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
21026+ seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read_unchecked(&irq_mis_count));
21027 #endif
21028 return 0;
21029 }
21030@@ -164,7 +164,7 @@ u64 arch_irq_stat_cpu(unsigned int cpu)
21031
21032 u64 arch_irq_stat(void)
21033 {
21034- u64 sum = atomic_read(&irq_err_count);
21035+ u64 sum = atomic_read_unchecked(&irq_err_count);
21036 return sum;
21037 }
21038
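irq_err_count moves to atomic_unchecked_t because PaX's REFCOUNT hardening makes plain atomic_t increments trap on overflow, and pure statistics counters that may legitimately wrap have to opt out through the _unchecked API. A stand-alone model of the distinction; the saturation behaviour is paraphrased, not copied from the patch:

#include <stdio.h>
#include <limits.h>

typedef struct { int counter; } atomic_t;
typedef struct { int counter; } atomic_unchecked_t;

/* Model of a REFCOUNT-checked increment: refuse to wrap. The real
 * implementation detects the overflow in asm and raises a trap. */
static void atomic_inc(atomic_t *v)
{
	if (v->counter == INT_MAX) {
		fprintf(stderr, "refcount overflow caught, saturating\n");
		return;
	}
	v->counter++;
}

/* Counters that may legitimately wrap use the _unchecked variant. */
static void atomic_inc_unchecked(atomic_unchecked_t *v)
{
	/* increment modulo 2^32, avoiding signed-overflow UB in this model */
	v->counter = (int)((unsigned)v->counter + 1u);
}

int main(void)
{
	atomic_t ref = { INT_MAX };
	atomic_unchecked_t err = { INT_MAX };

	atomic_inc(&ref);		/* blocked at the boundary */
	atomic_inc_unchecked(&err);	/* wraps, by design */
	printf("ref=%d err=%d\n", ref.counter, err.counter);
	return 0;
}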
21039diff --git a/arch/x86/kernel/irq_32.c b/arch/x86/kernel/irq_32.c
21040index 344faf8..355f60d 100644
21041--- a/arch/x86/kernel/irq_32.c
21042+++ b/arch/x86/kernel/irq_32.c
21043@@ -39,7 +39,7 @@ static int check_stack_overflow(void)
21044 __asm__ __volatile__("andl %%esp,%0" :
21045 "=r" (sp) : "0" (THREAD_SIZE - 1));
21046
21047- return sp < (sizeof(struct thread_info) + STACK_WARN);
21048+ return sp < STACK_WARN;
21049 }
21050
21051 static void print_stack_overflow(void)
21052@@ -59,8 +59,8 @@ static inline void print_stack_overflow(void) { }
21053 * per-CPU IRQ handling contexts (thread information and stack)
21054 */
21055 union irq_ctx {
21056- struct thread_info tinfo;
21057- u32 stack[THREAD_SIZE/sizeof(u32)];
21058+ unsigned long previous_esp;
21059+ u32 stack[THREAD_SIZE/sizeof(u32)];
21060 } __attribute__((aligned(THREAD_SIZE)));
21061
21062 static DEFINE_PER_CPU(union irq_ctx *, hardirq_ctx);
21063@@ -80,10 +80,9 @@ static void call_on_stack(void *func, void *stack)
21064 static inline int
21065 execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
21066 {
21067- union irq_ctx *curctx, *irqctx;
21068+ union irq_ctx *irqctx;
21069 u32 *isp, arg1, arg2;
21070
21071- curctx = (union irq_ctx *) current_thread_info();
21072 irqctx = __this_cpu_read(hardirq_ctx);
21073
21074 /*
21075@@ -92,16 +91,16 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
21076 * handler) we can't do that and just have to keep using the
21077 * current stack (which is the irq stack already after all)
21078 */
21079- if (unlikely(curctx == irqctx))
21080+ if (unlikely((void *)current_stack_pointer - (void *)irqctx < THREAD_SIZE))
21081 return 0;
21082
21083 /* build the stack frame on the IRQ stack */
21084- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
21085- irqctx->tinfo.task = curctx->tinfo.task;
21086- irqctx->tinfo.previous_esp = current_stack_pointer;
21087+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
21088+ irqctx->previous_esp = current_stack_pointer;
21089
21090- /* Copy the preempt_count so that the [soft]irq checks work. */
21091- irqctx->tinfo.preempt_count = curctx->tinfo.preempt_count;
21092+#ifdef CONFIG_PAX_MEMORY_UDEREF
21093+ __set_fs(MAKE_MM_SEG(0));
21094+#endif
21095
21096 if (unlikely(overflow))
21097 call_on_stack(print_stack_overflow, isp);
21098@@ -113,6 +112,11 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
21099 : "0" (irq), "1" (desc), "2" (isp),
21100 "D" (desc->handle_irq)
21101 : "memory", "cc", "ecx");
21102+
21103+#ifdef CONFIG_PAX_MEMORY_UDEREF
21104+ __set_fs(current_thread_info()->addr_limit);
21105+#endif
21106+
21107 return 1;
21108 }
21109
21110@@ -121,29 +125,14 @@ execute_on_irq_stack(int overflow, struct irq_desc *desc, int irq)
21111 */
21112 void __cpuinit irq_ctx_init(int cpu)
21113 {
21114- union irq_ctx *irqctx;
21115-
21116 if (per_cpu(hardirq_ctx, cpu))
21117 return;
21118
21119- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
21120- THREADINFO_GFP,
21121- THREAD_SIZE_ORDER));
21122- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
21123- irqctx->tinfo.cpu = cpu;
21124- irqctx->tinfo.preempt_count = HARDIRQ_OFFSET;
21125- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
21126-
21127- per_cpu(hardirq_ctx, cpu) = irqctx;
21128-
21129- irqctx = page_address(alloc_pages_node(cpu_to_node(cpu),
21130- THREADINFO_GFP,
21131- THREAD_SIZE_ORDER));
21132- memset(&irqctx->tinfo, 0, sizeof(struct thread_info));
21133- irqctx->tinfo.cpu = cpu;
21134- irqctx->tinfo.addr_limit = MAKE_MM_SEG(0);
21135-
21136- per_cpu(softirq_ctx, cpu) = irqctx;
21137+ per_cpu(hardirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
21138+ per_cpu(softirq_ctx, cpu) = page_address(alloc_pages_node(cpu_to_node(cpu), THREADINFO_GFP, THREAD_SIZE_ORDER));
21139+
21140+ printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
21141+ cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
21142
21143 printk(KERN_DEBUG "CPU %u irqstacks, hard=%p soft=%p\n",
21144 cpu, per_cpu(hardirq_ctx, cpu), per_cpu(softirq_ctx, cpu));
21145@@ -152,7 +141,6 @@ void __cpuinit irq_ctx_init(int cpu)
21146 asmlinkage void do_softirq(void)
21147 {
21148 unsigned long flags;
21149- struct thread_info *curctx;
21150 union irq_ctx *irqctx;
21151 u32 *isp;
21152
21153@@ -162,15 +150,22 @@ asmlinkage void do_softirq(void)
21154 local_irq_save(flags);
21155
21156 if (local_softirq_pending()) {
21157- curctx = current_thread_info();
21158 irqctx = __this_cpu_read(softirq_ctx);
21159- irqctx->tinfo.task = curctx->task;
21160- irqctx->tinfo.previous_esp = current_stack_pointer;
21161+ irqctx->previous_esp = current_stack_pointer;
21162
21163 /* build the stack frame on the softirq stack */
21164- isp = (u32 *) ((char *)irqctx + sizeof(*irqctx));
21165+ isp = (u32 *) ((char *)irqctx + sizeof(*irqctx) - 8);
21166+
21167+#ifdef CONFIG_PAX_MEMORY_UDEREF
21168+ __set_fs(MAKE_MM_SEG(0));
21169+#endif
21170
21171 call_on_stack(__do_softirq, isp);
21172+
21173+#ifdef CONFIG_PAX_MEMORY_UDEREF
21174+ __set_fs(current_thread_info()->addr_limit);
21175+#endif
21176+
21177 /*
21178 * Shouldn't happen, we returned above if in_interrupt():
21179 */
21180@@ -191,7 +186,7 @@ bool handle_irq(unsigned irq, struct pt_regs *regs)
21181 if (unlikely(!desc))
21182 return false;
21183
21184- if (user_mode_vm(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
21185+ if (user_mode(regs) || !execute_on_irq_stack(overflow, desc, irq)) {
21186 if (unlikely(overflow))
21187 print_stack_overflow();
21188 desc->handle_irq(irq, desc);
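The irq_32.c rewrite drops the thread_info that used to sit at the bottom of each per-cpu IRQ stack; all that survives is the interrupted stack pointer, stored as previous_esp at the stack base, with 8 bytes held back at the top of the stack. A toy model of the resulting chain and of how a stack walker hops across it; the sizes and the two static stacks are illustrative:

#include <stdio.h>
#include <stdint.h>

#define THREAD_SIZE 8192

/* Model of the patched union irq_ctx: no embedded thread_info any more,
 * just the interrupted stack pointer recorded at the base. */
union irq_ctx {
	unsigned long previous_esp;
	uint32_t stack[THREAD_SIZE / sizeof(uint32_t)];
} __attribute__((aligned(THREAD_SIZE)));

int main(void)
{
	static union irq_ctx task_stack, irq_stack;

	/* An "interrupt" arrives while running on the task stack. */
	unsigned long task_sp = (unsigned long)&task_stack.stack[1500];
	irq_stack.previous_esp = task_sp;

	/* IRQ entry switches stacks, keeping 8 spare bytes at the top
	 * (the "- 8" in the hunks above). */
	uint32_t *isp = (uint32_t *)((char *)&irq_stack + sizeof(irq_stack) - 8);

	/* A stack walker on the irq stack masks down to the stack base
	 * and follows previous_esp back to the interrupted stack. */
	union irq_ctx *ctx = (union irq_ctx *)
		((unsigned long)isp & ~((unsigned long)THREAD_SIZE - 1));
	printf("hop: %#lx (expected %#lx)\n", ctx->previous_esp, task_sp);
	return 0;
}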
21189diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c
21190index d04d3ec..ea4b374 100644
21191--- a/arch/x86/kernel/irq_64.c
21192+++ b/arch/x86/kernel/irq_64.c
21193@@ -44,7 +44,7 @@ static inline void stack_overflow_check(struct pt_regs *regs)
21194 u64 estack_top, estack_bottom;
21195 u64 curbase = (u64)task_stack_page(current);
21196
21197- if (user_mode_vm(regs))
21198+ if (user_mode(regs))
21199 return;
21200
21201 if (regs->sp >= curbase + sizeof(struct thread_info) +
21202diff --git a/arch/x86/kernel/kdebugfs.c b/arch/x86/kernel/kdebugfs.c
21203index dc1404b..bbc43e7 100644
21204--- a/arch/x86/kernel/kdebugfs.c
21205+++ b/arch/x86/kernel/kdebugfs.c
21206@@ -27,7 +27,7 @@ struct setup_data_node {
21207 u32 len;
21208 };
21209
21210-static ssize_t setup_data_read(struct file *file, char __user *user_buf,
21211+static ssize_t __size_overflow(3) setup_data_read(struct file *file, char __user *user_buf,
21212 size_t count, loff_t *ppos)
21213 {
21214 struct setup_data_node *node = file->private_data;
21215diff --git a/arch/x86/kernel/kgdb.c b/arch/x86/kernel/kgdb.c
21216index 836f832..a8bda67 100644
21217--- a/arch/x86/kernel/kgdb.c
21218+++ b/arch/x86/kernel/kgdb.c
21219@@ -127,11 +127,11 @@ char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
21220 #ifdef CONFIG_X86_32
21221 switch (regno) {
21222 case GDB_SS:
21223- if (!user_mode_vm(regs))
21224+ if (!user_mode(regs))
21225 *(unsigned long *)mem = __KERNEL_DS;
21226 break;
21227 case GDB_SP:
21228- if (!user_mode_vm(regs))
21229+ if (!user_mode(regs))
21230 *(unsigned long *)mem = kernel_stack_pointer(regs);
21231 break;
21232 case GDB_GS:
21233@@ -229,7 +229,10 @@ static void kgdb_correct_hw_break(void)
21234 bp->attr.bp_addr = breakinfo[breakno].addr;
21235 bp->attr.bp_len = breakinfo[breakno].len;
21236 bp->attr.bp_type = breakinfo[breakno].type;
21237- info->address = breakinfo[breakno].addr;
21238+ if (breakinfo[breakno].type == X86_BREAKPOINT_EXECUTE)
21239+ info->address = ktla_ktva(breakinfo[breakno].addr);
21240+ else
21241+ info->address = breakinfo[breakno].addr;
21242 info->len = breakinfo[breakno].len;
21243 info->type = breakinfo[breakno].type;
21244 val = arch_install_hw_breakpoint(bp);
21245@@ -476,12 +479,12 @@ int kgdb_arch_handle_exception(int e_vector, int signo, int err_code,
21246 case 'k':
21247 /* clear the trace bit */
21248 linux_regs->flags &= ~X86_EFLAGS_TF;
21249- atomic_set(&kgdb_cpu_doing_single_step, -1);
21250+ atomic_set_unchecked(&kgdb_cpu_doing_single_step, -1);
21251
21252 /* set the trace bit if we're stepping */
21253 if (remcomInBuffer[0] == 's') {
21254 linux_regs->flags |= X86_EFLAGS_TF;
21255- atomic_set(&kgdb_cpu_doing_single_step,
21256+ atomic_set_unchecked(&kgdb_cpu_doing_single_step,
21257 raw_smp_processor_id());
21258 }
21259
21260@@ -546,7 +549,7 @@ static int __kgdb_notify(struct die_args *args, unsigned long cmd)
21261
21262 switch (cmd) {
21263 case DIE_DEBUG:
21264- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
21265+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
21266 if (user_mode(regs))
21267 return single_step_cont(regs, args);
21268 break;
21269@@ -751,11 +754,11 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
21270 #endif /* CONFIG_DEBUG_RODATA */
21271
21272 bpt->type = BP_BREAKPOINT;
21273- err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
21274+ err = probe_kernel_read(bpt->saved_instr, ktla_ktva((char *)bpt->bpt_addr),
21275 BREAK_INSTR_SIZE);
21276 if (err)
21277 return err;
21278- err = probe_kernel_write((char *)bpt->bpt_addr,
21279+ err = probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
21280 arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
21281 #ifdef CONFIG_DEBUG_RODATA
21282 if (!err)
21283@@ -768,7 +771,7 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
21284 return -EBUSY;
21285 text_poke((void *)bpt->bpt_addr, arch_kgdb_ops.gdb_bpt_instr,
21286 BREAK_INSTR_SIZE);
21287- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
21288+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
21289 if (err)
21290 return err;
21291 if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
21292@@ -793,13 +796,13 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
21293 if (mutex_is_locked(&text_mutex))
21294 goto knl_write;
21295 text_poke((void *)bpt->bpt_addr, bpt->saved_instr, BREAK_INSTR_SIZE);
21296- err = probe_kernel_read(opc, (char *)bpt->bpt_addr, BREAK_INSTR_SIZE);
21297+ err = probe_kernel_read(opc, ktla_ktva((char *)bpt->bpt_addr), BREAK_INSTR_SIZE);
21298 if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
21299 goto knl_write;
21300 return err;
21301 knl_write:
21302 #endif /* CONFIG_DEBUG_RODATA */
21303- return probe_kernel_write((char *)bpt->bpt_addr,
21304+ return probe_kernel_write(ktla_ktva((char *)bpt->bpt_addr),
21305 (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
21306 }
21307
21308diff --git a/arch/x86/kernel/kprobes/core.c b/arch/x86/kernel/kprobes/core.c
21309index 211bce4..6e2580a 100644
21310--- a/arch/x86/kernel/kprobes/core.c
21311+++ b/arch/x86/kernel/kprobes/core.c
21312@@ -119,9 +119,12 @@ static void __kprobes __synthesize_relative_insn(void *from, void *to, u8 op)
21313 s32 raddr;
21314 } __packed *insn;
21315
21316- insn = (struct __arch_relative_insn *)from;
21317+ insn = (struct __arch_relative_insn *)ktla_ktva(from);
21318+
21319+ pax_open_kernel();
21320 insn->raddr = (s32)((long)(to) - ((long)(from) + 5));
21321 insn->op = op;
21322+ pax_close_kernel();
21323 }
21324
21325 /* Insert a jump instruction at address 'from', which jumps to address 'to'.*/
21326@@ -164,7 +167,7 @@ int __kprobes can_boost(kprobe_opcode_t *opcodes)
21327 kprobe_opcode_t opcode;
21328 kprobe_opcode_t *orig_opcodes = opcodes;
21329
21330- if (search_exception_tables((unsigned long)opcodes))
21331+ if (search_exception_tables(ktva_ktla((unsigned long)opcodes)))
21332 return 0; /* Page fault may occur on this address. */
21333
21334 retry:
21335@@ -238,9 +241,9 @@ __recover_probed_insn(kprobe_opcode_t *buf, unsigned long addr)
21336 * for the first byte, we can recover the original instruction
21337 * from it and kp->opcode.
21338 */
21339- memcpy(buf, kp->addr, MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
21340+ memcpy(buf, ktla_ktva(kp->addr), MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
21341 buf[0] = kp->opcode;
21342- return (unsigned long)buf;
21343+ return ktva_ktla((unsigned long)buf);
21344 }
21345
21346 /*
21347@@ -332,7 +335,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
21348 /* Another subsystem puts a breakpoint, failed to recover */
21349 if (insn.opcode.bytes[0] == BREAKPOINT_INSTRUCTION)
21350 return 0;
21351+ pax_open_kernel();
21352 memcpy(dest, insn.kaddr, insn.length);
21353+ pax_close_kernel();
21354
21355 #ifdef CONFIG_X86_64
21356 if (insn_rip_relative(&insn)) {
21357@@ -359,7 +364,9 @@ int __kprobes __copy_instruction(u8 *dest, u8 *src)
21358 return 0;
21359 }
21360 disp = (u8 *) dest + insn_offset_displacement(&insn);
21361+ pax_open_kernel();
21362 *(s32 *) disp = (s32) newdisp;
21363+ pax_close_kernel();
21364 }
21365 #endif
21366 return insn.length;
21367@@ -498,7 +505,7 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
21368 * nor set current_kprobe, because it doesn't use single
21369 * stepping.
21370 */
21371- regs->ip = (unsigned long)p->ainsn.insn;
21372+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
21373 preempt_enable_no_resched();
21374 return;
21375 }
21376@@ -515,9 +522,9 @@ setup_singlestep(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *k
21377 regs->flags &= ~X86_EFLAGS_IF;
21378 /* single step inline if the instruction is an int3 */
21379 if (p->opcode == BREAKPOINT_INSTRUCTION)
21380- regs->ip = (unsigned long)p->addr;
21381+ regs->ip = ktla_ktva((unsigned long)p->addr);
21382 else
21383- regs->ip = (unsigned long)p->ainsn.insn;
21384+ regs->ip = ktva_ktla((unsigned long)p->ainsn.insn);
21385 }
21386
21387 /*
21388@@ -596,7 +603,7 @@ static int __kprobes kprobe_handler(struct pt_regs *regs)
21389 setup_singlestep(p, regs, kcb, 0);
21390 return 1;
21391 }
21392- } else if (*addr != BREAKPOINT_INSTRUCTION) {
21393+ } else if (*(kprobe_opcode_t *)ktla_ktva((unsigned long)addr) != BREAKPOINT_INSTRUCTION) {
21394 /*
21395 * The breakpoint instruction was removed right
21396 * after we hit it. Another cpu has removed
21397@@ -642,6 +649,9 @@ static void __used __kprobes kretprobe_trampoline_holder(void)
21398 " movq %rax, 152(%rsp)\n"
21399 RESTORE_REGS_STRING
21400 " popfq\n"
21401+#ifdef KERNEXEC_PLUGIN
21402+ " btsq $63,(%rsp)\n"
21403+#endif
21404 #else
21405 " pushf\n"
21406 SAVE_REGS_STRING
21407@@ -779,7 +789,7 @@ static void __kprobes
21408 resume_execution(struct kprobe *p, struct pt_regs *regs, struct kprobe_ctlblk *kcb)
21409 {
21410 unsigned long *tos = stack_addr(regs);
21411- unsigned long copy_ip = (unsigned long)p->ainsn.insn;
21412+ unsigned long copy_ip = ktva_ktla((unsigned long)p->ainsn.insn);
21413 unsigned long orig_ip = (unsigned long)p->addr;
21414 kprobe_opcode_t *insn = p->ainsn.insn;
21415
21416@@ -961,7 +971,7 @@ kprobe_exceptions_notify(struct notifier_block *self, unsigned long val, void *d
21417 struct die_args *args = data;
21418 int ret = NOTIFY_DONE;
21419
21420- if (args->regs && user_mode_vm(args->regs))
21421+ if (args->regs && user_mode(args->regs))
21422 return ret;
21423
21424 switch (val) {
21425diff --git a/arch/x86/kernel/kprobes/opt.c b/arch/x86/kernel/kprobes/opt.c
21426index 76dc6f0..66bdfc3 100644
21427--- a/arch/x86/kernel/kprobes/opt.c
21428+++ b/arch/x86/kernel/kprobes/opt.c
21429@@ -79,6 +79,7 @@ found:
21430 /* Insert a move instruction which sets a pointer to eax/rdi (1st arg). */
21431 static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long val)
21432 {
21433+ pax_open_kernel();
21434 #ifdef CONFIG_X86_64
21435 *addr++ = 0x48;
21436 *addr++ = 0xbf;
21437@@ -86,6 +87,7 @@ static void __kprobes synthesize_set_arg1(kprobe_opcode_t *addr, unsigned long v
21438 *addr++ = 0xb8;
21439 #endif
21440 *(unsigned long *)addr = val;
21441+ pax_close_kernel();
21442 }
21443
21444 static void __used __kprobes kprobes_optinsn_template_holder(void)
21445@@ -338,7 +340,7 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
21446 * Verify if the address gap is in 2GB range, because this uses
21447 * a relative jump.
21448 */
21449- rel = (long)op->optinsn.insn - (long)op->kp.addr + RELATIVEJUMP_SIZE;
21450+ rel = (long)op->optinsn.insn - ktla_ktva((long)op->kp.addr) + RELATIVEJUMP_SIZE;
21451 if (abs(rel) > 0x7fffffff)
21452 return -ERANGE;
21453
21454@@ -353,16 +355,18 @@ int __kprobes arch_prepare_optimized_kprobe(struct optimized_kprobe *op)
21455 op->optinsn.size = ret;
21456
21457 /* Copy arch-dep-instance from template */
21458- memcpy(buf, &optprobe_template_entry, TMPL_END_IDX);
21459+ pax_open_kernel();
21460+ memcpy(buf, ktla_ktva(&optprobe_template_entry), TMPL_END_IDX);
21461+ pax_close_kernel();
21462
21463 /* Set probe information */
21464 synthesize_set_arg1(buf + TMPL_MOVE_IDX, (unsigned long)op);
21465
21466 /* Set probe function call */
21467- synthesize_relcall(buf + TMPL_CALL_IDX, optimized_callback);
21468+ synthesize_relcall(ktva_ktla(buf) + TMPL_CALL_IDX, optimized_callback);
21469
21470 /* Set returning jmp instruction at the tail of out-of-line buffer */
21471- synthesize_reljump(buf + TMPL_END_IDX + op->optinsn.size,
21472+ synthesize_reljump(ktva_ktla(buf) + TMPL_END_IDX + op->optinsn.size,
21473 (u8 *)op->kp.addr + op->optinsn.size);
21474
21475 flush_icache_range((unsigned long) buf,
21476@@ -385,7 +389,7 @@ static void __kprobes setup_optimize_kprobe(struct text_poke_param *tprm,
21477 ((long)op->kp.addr + RELATIVEJUMP_SIZE));
21478
21479 /* Backup instructions which will be replaced by jump address */
21480- memcpy(op->optinsn.copied_insn, op->kp.addr + INT3_SIZE,
21481+ memcpy(op->optinsn.copied_insn, ktla_ktva(op->kp.addr) + INT3_SIZE,
21482 RELATIVE_ADDR_SIZE);
21483
21484 insn_buf[0] = RELATIVEJUMP_OPCODE;
21485@@ -483,7 +487,7 @@ setup_detour_execution(struct kprobe *p, struct pt_regs *regs, int reenter)
21486 /* This kprobe is really able to run optimized path. */
21487 op = container_of(p, struct optimized_kprobe, kp);
21488 /* Detour through copied instructions */
21489- regs->ip = (unsigned long)op->optinsn.insn + TMPL_END_IDX;
21490+ regs->ip = ktva_ktla((unsigned long)op->optinsn.insn) + TMPL_END_IDX;
21491 if (!reenter)
21492 reset_current_kprobe();
21493 preempt_enable_no_resched();
21494diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c
21495index cd6d9a5..16245a4 100644
21496--- a/arch/x86/kernel/kvm.c
21497+++ b/arch/x86/kernel/kvm.c
21498@@ -455,7 +455,7 @@ static int __cpuinit kvm_cpu_notify(struct notifier_block *self,
21499 return NOTIFY_OK;
21500 }
21501
21502-static struct notifier_block __cpuinitdata kvm_cpu_notifier = {
21503+static struct notifier_block kvm_cpu_notifier = {
21504 .notifier_call = kvm_cpu_notify,
21505 };
21506 #endif
21507diff --git a/arch/x86/kernel/ldt.c b/arch/x86/kernel/ldt.c
21508index ebc9873..1b9724b 100644
21509--- a/arch/x86/kernel/ldt.c
21510+++ b/arch/x86/kernel/ldt.c
21511@@ -66,13 +66,13 @@ static int alloc_ldt(mm_context_t *pc, int mincount, int reload)
21512 if (reload) {
21513 #ifdef CONFIG_SMP
21514 preempt_disable();
21515- load_LDT(pc);
21516+ load_LDT_nolock(pc);
21517 if (!cpumask_equal(mm_cpumask(current->mm),
21518 cpumask_of(smp_processor_id())))
21519 smp_call_function(flush_ldt, current->mm, 1);
21520 preempt_enable();
21521 #else
21522- load_LDT(pc);
21523+ load_LDT_nolock(pc);
21524 #endif
21525 }
21526 if (oldsize) {
21527@@ -94,7 +94,7 @@ static inline int copy_ldt(mm_context_t *new, mm_context_t *old)
21528 return err;
21529
21530 for (i = 0; i < old->size; i++)
21531- write_ldt_entry(new->ldt, i, old->ldt + i * LDT_ENTRY_SIZE);
21532+ write_ldt_entry(new->ldt, i, old->ldt + i);
21533 return 0;
21534 }
21535
21536@@ -115,6 +115,24 @@ int init_new_context(struct task_struct *tsk, struct mm_struct *mm)
21537 retval = copy_ldt(&mm->context, &old_mm->context);
21538 mutex_unlock(&old_mm->context.lock);
21539 }
21540+
21541+ if (tsk == current) {
21542+ mm->context.vdso = 0;
21543+
21544+#ifdef CONFIG_X86_32
21545+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
21546+ mm->context.user_cs_base = 0UL;
21547+ mm->context.user_cs_limit = ~0UL;
21548+
21549+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_SMP)
21550+ cpus_clear(mm->context.cpu_user_cs_mask);
21551+#endif
21552+
21553+#endif
21554+#endif
21555+
21556+ }
21557+
21558 return retval;
21559 }
21560
21561@@ -229,6 +247,13 @@ static int write_ldt(void __user *ptr, unsigned long bytecount, int oldmode)
21562 }
21563 }
21564
21565+#ifdef CONFIG_PAX_SEGMEXEC
21566+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (ldt_info.contents & MODIFY_LDT_CONTENTS_CODE)) {
21567+ error = -EINVAL;
21568+ goto out_unlock;
21569+ }
21570+#endif
21571+
21572 fill_ldt(&ldt, &ldt_info);
21573 if (oldmode)
21574 ldt.avl = 0;
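The SEGMEXEC hunk above rejects any modify_ldt(2) request whose descriptor carries MODIFY_LDT_CONTENTS_CODE, since an attacker-installed code segment would bypass the segmentation-based NX emulation. This is what such a request looks like from user space (stock kernels accept it; a PaX/SEGMEXEC task would get EINVAL):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/ldt.h>

int main(void)
{
	struct user_desc ud;

	memset(&ud, 0, sizeof(ud));
	ud.entry_number   = 0;
	ud.base_addr      = 0;
	ud.limit          = 0xfffff;
	ud.seg_32bit      = 1;
	ud.limit_in_pages = 1;
	ud.contents       = MODIFY_LDT_CONTENTS_CODE;	/* a code segment */

	/* On a PaX/SEGMEXEC kernel the hunk above makes this fail with
	 * EINVAL; stock kernels install the entry. */
	if (syscall(SYS_modify_ldt, 1, &ud, sizeof(ud)) < 0)
		perror("modify_ldt");
	else
		puts("code LDT entry installed");
	return 0;
}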
21575diff --git a/arch/x86/kernel/machine_kexec_32.c b/arch/x86/kernel/machine_kexec_32.c
21576index 5b19e4d..6476a76 100644
21577--- a/arch/x86/kernel/machine_kexec_32.c
21578+++ b/arch/x86/kernel/machine_kexec_32.c
21579@@ -26,7 +26,7 @@
21580 #include <asm/cacheflush.h>
21581 #include <asm/debugreg.h>
21582
21583-static void set_idt(void *newidt, __u16 limit)
21584+static void set_idt(struct desc_struct *newidt, __u16 limit)
21585 {
21586 struct desc_ptr curidt;
21587
21588@@ -38,7 +38,7 @@ static void set_idt(void *newidt, __u16 limit)
21589 }
21590
21591
21592-static void set_gdt(void *newgdt, __u16 limit)
21593+static void set_gdt(struct desc_struct *newgdt, __u16 limit)
21594 {
21595 struct desc_ptr curgdt;
21596
21597@@ -216,7 +216,7 @@ void machine_kexec(struct kimage *image)
21598 }
21599
21600 control_page = page_address(image->control_code_page);
21601- memcpy(control_page, relocate_kernel, KEXEC_CONTROL_CODE_MAX_SIZE);
21602+ memcpy(control_page, (void *)ktla_ktva((unsigned long)relocate_kernel), KEXEC_CONTROL_CODE_MAX_SIZE);
21603
21604 relocate_kernel_ptr = control_page;
21605 page_list[PA_CONTROL_PAGE] = __pa(control_page);
21606diff --git a/arch/x86/kernel/microcode_core.c b/arch/x86/kernel/microcode_core.c
21607index 22db92b..d546bec 100644
21608--- a/arch/x86/kernel/microcode_core.c
21609+++ b/arch/x86/kernel/microcode_core.c
21610@@ -513,7 +513,7 @@ mc_cpu_callback(struct notifier_block *nb, unsigned long action, void *hcpu)
21611 return NOTIFY_OK;
21612 }
21613
21614-static struct notifier_block __refdata mc_cpu_notifier = {
21615+static struct notifier_block mc_cpu_notifier = {
21616 .notifier_call = mc_cpu_callback,
21617 };
21618
21619diff --git a/arch/x86/kernel/microcode_intel.c b/arch/x86/kernel/microcode_intel.c
21620index 5fb2ceb..3ae90bb 100644
21621--- a/arch/x86/kernel/microcode_intel.c
21622+++ b/arch/x86/kernel/microcode_intel.c
21623@@ -293,13 +293,13 @@ static enum ucode_state request_microcode_fw(int cpu, struct device *device,
21624
21625 static int get_ucode_user(void *to, const void *from, size_t n)
21626 {
21627- return copy_from_user(to, from, n);
21628+ return copy_from_user(to, (const void __force_user *)from, n);
21629 }
21630
21631 static enum ucode_state
21632 request_microcode_user(int cpu, const void __user *buf, size_t size)
21633 {
21634- return generic_load_microcode(cpu, (void *)buf, size, &get_ucode_user);
21635+ return generic_load_microcode(cpu, (__force_kernel void *)buf, size, &get_ucode_user);
21636 }
21637
21638 static void microcode_fini_cpu(int cpu)
21639diff --git a/arch/x86/kernel/module.c b/arch/x86/kernel/module.c
21640index 216a4d7..228255a 100644
21641--- a/arch/x86/kernel/module.c
21642+++ b/arch/x86/kernel/module.c
21643@@ -43,15 +43,60 @@ do { \
21644 } while (0)
21645 #endif
21646
21647-void *module_alloc(unsigned long size)
21648+static inline void *__module_alloc(unsigned long size, pgprot_t prot)
21649 {
21650- if (PAGE_ALIGN(size) > MODULES_LEN)
21651+ if (!size || PAGE_ALIGN(size) > MODULES_LEN)
21652 return NULL;
21653 return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
21654- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
21655+ GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, prot,
21656 -1, __builtin_return_address(0));
21657 }
21658
21659+void *module_alloc(unsigned long size)
21660+{
21661+
21662+#ifdef CONFIG_PAX_KERNEXEC
21663+ return __module_alloc(size, PAGE_KERNEL);
21664+#else
21665+ return __module_alloc(size, PAGE_KERNEL_EXEC);
21666+#endif
21667+
21668+}
21669+
21670+#ifdef CONFIG_PAX_KERNEXEC
21671+#ifdef CONFIG_X86_32
21672+void *module_alloc_exec(unsigned long size)
21673+{
21674+ struct vm_struct *area;
21675+
21676+ if (size == 0)
21677+ return NULL;
21678+
21679+ area = __get_vm_area(size, VM_ALLOC, (unsigned long)&MODULES_EXEC_VADDR, (unsigned long)&MODULES_EXEC_END);
21680+ return area ? area->addr : NULL;
21681+}
21682+EXPORT_SYMBOL(module_alloc_exec);
21683+
21684+void module_free_exec(struct module *mod, void *module_region)
21685+{
21686+ vunmap(module_region);
21687+}
21688+EXPORT_SYMBOL(module_free_exec);
21689+#else
21690+void module_free_exec(struct module *mod, void *module_region)
21691+{
21692+ module_free(mod, module_region);
21693+}
21694+EXPORT_SYMBOL(module_free_exec);
21695+
21696+void *module_alloc_exec(unsigned long size)
21697+{
21698+ return __module_alloc(size, PAGE_KERNEL_RX);
21699+}
21700+EXPORT_SYMBOL(module_alloc_exec);
21701+#endif
21702+#endif
21703+
21704 #ifdef CONFIG_X86_32
21705 int apply_relocate(Elf32_Shdr *sechdrs,
21706 const char *strtab,
21707@@ -62,14 +107,16 @@ int apply_relocate(Elf32_Shdr *sechdrs,
21708 unsigned int i;
21709 Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
21710 Elf32_Sym *sym;
21711- uint32_t *location;
21712+ uint32_t *plocation, location;
21713
21714 DEBUGP("Applying relocate section %u to %u\n",
21715 relsec, sechdrs[relsec].sh_info);
21716 for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
21717 /* This is where to make the change */
21718- location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
21719- + rel[i].r_offset;
21720+ plocation = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
21721+ location = (uint32_t)plocation;
21722+ if (sechdrs[sechdrs[relsec].sh_info].sh_flags & SHF_EXECINSTR)
21723+ plocation = ktla_ktva((void *)plocation);
21724 /* This is the symbol it is referring to. Note that all
21725 undefined symbols have been resolved. */
21726 sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
21727@@ -78,11 +125,15 @@ int apply_relocate(Elf32_Shdr *sechdrs,
21728 switch (ELF32_R_TYPE(rel[i].r_info)) {
21729 case R_386_32:
21730 /* We add the value into the location given */
21731- *location += sym->st_value;
21732+ pax_open_kernel();
21733+ *plocation += sym->st_value;
21734+ pax_close_kernel();
21735 break;
21736 case R_386_PC32:
21737 /* Add the value, subtract its position */
21738- *location += sym->st_value - (uint32_t)location;
21739+ pax_open_kernel();
21740+ *plocation += sym->st_value - location;
21741+ pax_close_kernel();
21742 break;
21743 default:
21744 pr_err("%s: Unknown relocation: %u\n",
21745@@ -127,21 +178,30 @@ int apply_relocate_add(Elf64_Shdr *sechdrs,
21746 case R_X86_64_NONE:
21747 break;
21748 case R_X86_64_64:
21749+ pax_open_kernel();
21750 *(u64 *)loc = val;
21751+ pax_close_kernel();
21752 break;
21753 case R_X86_64_32:
21754+ pax_open_kernel();
21755 *(u32 *)loc = val;
21756+ pax_close_kernel();
21757 if (val != *(u32 *)loc)
21758 goto overflow;
21759 break;
21760 case R_X86_64_32S:
21761+ pax_open_kernel();
21762 *(s32 *)loc = val;
21763+ pax_close_kernel();
21764 if ((s64)val != *(s32 *)loc)
21765 goto overflow;
21766 break;
21767 case R_X86_64_PC32:
21768 val -= (u64)loc;
21769+ pax_open_kernel();
21770 *(u32 *)loc = val;
21771+ pax_close_kernel();
21772+
21773 #if 0
21774 if ((s64)val != *(s32 *)loc)
21775 goto overflow;
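The module_alloc()/module_alloc_exec() split above is KERNEXEC's W^X policy applied to modules: data lands in writable non-executable pages, code in pages that end up read-only and executable, and relocations into code go through pax_open_kernel(). The same discipline in user-space miniature: stage the code in a writable mapping, then flip it to read+execute before calling it (the machine code is x86_64-only and illustrative):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	/* x86_64: mov eax, 42; ret */
	static const unsigned char code[] = { 0xb8, 0x2a, 0x00, 0x00, 0x00, 0xc3 };

	unsigned char *buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
				  MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;
	memcpy(buf, code, sizeof(code));	/* "relocation/copy" phase, RW */
	if (mprotect(buf, 4096, PROT_READ | PROT_EXEC))	/* never RWX */
		return 1;

	int (*fn)(void) = (int (*)(void))buf;
	printf("fn() = %d\n", fn());
	return 0;
}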
21776diff --git a/arch/x86/kernel/msr.c b/arch/x86/kernel/msr.c
21777index ce13049..e2e9c3c 100644
21778--- a/arch/x86/kernel/msr.c
21779+++ b/arch/x86/kernel/msr.c
21780@@ -233,7 +233,7 @@ static int __cpuinit msr_class_cpu_callback(struct notifier_block *nfb,
21781 return notifier_from_errno(err);
21782 }
21783
21784-static struct notifier_block __refdata msr_class_cpu_notifier = {
21785+static struct notifier_block msr_class_cpu_notifier = {
21786 .notifier_call = msr_class_cpu_callback,
21787 };
21788
21789diff --git a/arch/x86/kernel/nmi.c b/arch/x86/kernel/nmi.c
21790index 6030805..2d33f21 100644
21791--- a/arch/x86/kernel/nmi.c
21792+++ b/arch/x86/kernel/nmi.c
21793@@ -105,7 +105,7 @@ static int __kprobes nmi_handle(unsigned int type, struct pt_regs *regs, bool b2
21794 return handled;
21795 }
21796
21797-int __register_nmi_handler(unsigned int type, struct nmiaction *action)
21798+int __register_nmi_handler(unsigned int type, const struct nmiaction *action)
21799 {
21800 struct nmi_desc *desc = nmi_to_desc(type);
21801 unsigned long flags;
21802@@ -129,9 +129,9 @@ int __register_nmi_handler(unsigned int type, struct nmiaction *action)
21803 * event confuses some handlers (kdump uses this flag)
21804 */
21805 if (action->flags & NMI_FLAG_FIRST)
21806- list_add_rcu(&action->list, &desc->head);
21807+ pax_list_add_rcu((struct list_head *)&action->list, &desc->head);
21808 else
21809- list_add_tail_rcu(&action->list, &desc->head);
21810+ pax_list_add_tail_rcu((struct list_head *)&action->list, &desc->head);
21811
21812 spin_unlock_irqrestore(&desc->lock, flags);
21813 return 0;
21814@@ -154,7 +154,7 @@ void unregister_nmi_handler(unsigned int type, const char *name)
21815 if (!strcmp(n->name, name)) {
21816 WARN(in_nmi(),
21817 "Trying to free NMI (%s) from NMI context!\n", n->name);
21818- list_del_rcu(&n->list);
21819+ pax_list_del_rcu((struct list_head *)&n->list);
21820 break;
21821 }
21822 }
21823@@ -479,6 +479,17 @@ static inline void nmi_nesting_postprocess(void)
21824 dotraplinkage notrace __kprobes void
21825 do_nmi(struct pt_regs *regs, long error_code)
21826 {
21827+
21828+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
21829+ if (!user_mode(regs)) {
21830+ unsigned long cs = regs->cs & 0xFFFF;
21831+ unsigned long ip = ktva_ktla(regs->ip);
21832+
21833+ if ((cs == __KERNEL_CS || cs == __KERNEXEC_KERNEL_CS) && ip <= (unsigned long)_etext)
21834+ regs->ip = ip;
21835+ }
21836+#endif
21837+
21838 nmi_nesting_preprocess(regs);
21839
21840 nmi_enter();
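
Two independent hardenings meet in this file. First, with nmiaction descriptors const, their embedded list_head lives in a write-protected region, so linking and unlinking go through the pax_list_* helpers, which reopen the kernel just for the pointer updates. A hedged sketch of that shape, reusing the open/close primitives sketched after the module.c hunk above (names illustrative):

#include <linux/rculist.h>

/* sketch: link a node whose storage is write-protected */
static void sketch_pax_list_add_rcu(struct list_head *new, struct list_head *head)
{
	sketch_pax_open_kernel();
	/* writes new->next/prev, head->next and the old first node's prev */
	list_add_rcu(new, head);
	sketch_pax_close_kernel();
}

Second, the do_nmi() hunk: on 32-bit KERNEXEC a kernel-text %eip may be reported in the KERNEXEC alias, so it is folded back through ktva_ktla() before the NMI machinery compares it against _etext.
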
21841diff --git a/arch/x86/kernel/nmi_selftest.c b/arch/x86/kernel/nmi_selftest.c
21842index 6d9582e..f746287 100644
21843--- a/arch/x86/kernel/nmi_selftest.c
21844+++ b/arch/x86/kernel/nmi_selftest.c
21845@@ -43,7 +43,7 @@ static void __init init_nmi_testsuite(void)
21846 {
21847 /* trap all the unknown NMIs we may generate */
21848 register_nmi_handler(NMI_UNKNOWN, nmi_unk_cb, 0, "nmi_selftest_unk",
21849- __initdata);
21850+ __initconst);
21851 }
21852
21853 static void __init cleanup_nmi_testsuite(void)
21854@@ -66,7 +66,7 @@ static void __init test_nmi_ipi(struct cpumask *mask)
21855 unsigned long timeout;
21856
21857 if (register_nmi_handler(NMI_LOCAL, test_nmi_ipi_callback,
21858- NMI_FLAG_FIRST, "nmi_selftest", __initdata)) {
21859+ NMI_FLAG_FIRST, "nmi_selftest", __initconst)) {
21860 nmi_fail = FAILURE;
21861 return;
21862 }
21863diff --git a/arch/x86/kernel/paravirt-spinlocks.c b/arch/x86/kernel/paravirt-spinlocks.c
21864index 676b8c7..870ba04 100644
21865--- a/arch/x86/kernel/paravirt-spinlocks.c
21866+++ b/arch/x86/kernel/paravirt-spinlocks.c
21867@@ -13,7 +13,7 @@ default_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
21868 arch_spin_lock(lock);
21869 }
21870
21871-struct pv_lock_ops pv_lock_ops = {
21872+struct pv_lock_ops pv_lock_ops __read_only = {
21873 #ifdef CONFIG_SMP
21874 .spin_is_locked = __ticket_spin_is_locked,
21875 .spin_is_contended = __ticket_spin_is_contended,
21876diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
21877index cd6de64..27c6af0 100644
21878--- a/arch/x86/kernel/paravirt.c
21879+++ b/arch/x86/kernel/paravirt.c
21880@@ -55,6 +55,9 @@ u64 _paravirt_ident_64(u64 x)
21881 {
21882 return x;
21883 }
21884+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
21885+PV_CALLEE_SAVE_REGS_THUNK(_paravirt_ident_64);
21886+#endif
21887
21888 void __init default_banner(void)
21889 {
21890@@ -147,15 +150,19 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
21891 if (opfunc == NULL)
21892 /* If there's no function, patch it with a ud2a (BUG) */
21893 ret = paravirt_patch_insns(insnbuf, len, ud2a, ud2a+sizeof(ud2a));
21894- else if (opfunc == _paravirt_nop)
21895+ else if (opfunc == (void *)_paravirt_nop)
21896 /* If the operation is a nop, then nop the callsite */
21897 ret = paravirt_patch_nop();
21898
21899 /* identity functions just return their single argument */
21900- else if (opfunc == _paravirt_ident_32)
21901+ else if (opfunc == (void *)_paravirt_ident_32)
21902 ret = paravirt_patch_ident_32(insnbuf, len);
21903- else if (opfunc == _paravirt_ident_64)
21904+ else if (opfunc == (void *)_paravirt_ident_64)
21905 ret = paravirt_patch_ident_64(insnbuf, len);
21906+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
21907+ else if (opfunc == (void *)__raw_callee_save__paravirt_ident_64)
21908+ ret = paravirt_patch_ident_64(insnbuf, len);
21909+#endif
21910
21911 else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
21912 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
21913@@ -180,7 +187,7 @@ unsigned paravirt_patch_insns(void *insnbuf, unsigned len,
21914 if (insn_len > len || start == NULL)
21915 insn_len = len;
21916 else
21917- memcpy(insnbuf, start, insn_len);
21918+ memcpy(insnbuf, ktla_ktva(start), insn_len);
21919
21920 return insn_len;
21921 }
21922@@ -304,7 +311,7 @@ enum paravirt_lazy_mode paravirt_get_lazy_mode(void)
21923 return this_cpu_read(paravirt_lazy_mode);
21924 }
21925
21926-struct pv_info pv_info = {
21927+struct pv_info pv_info __read_only = {
21928 .name = "bare hardware",
21929 .paravirt_enabled = 0,
21930 .kernel_rpl = 0,
21931@@ -315,16 +322,16 @@ struct pv_info pv_info = {
21932 #endif
21933 };
21934
21935-struct pv_init_ops pv_init_ops = {
21936+struct pv_init_ops pv_init_ops __read_only = {
21937 .patch = native_patch,
21938 };
21939
21940-struct pv_time_ops pv_time_ops = {
21941+struct pv_time_ops pv_time_ops __read_only = {
21942 .sched_clock = native_sched_clock,
21943 .steal_clock = native_steal_clock,
21944 };
21945
21946-struct pv_irq_ops pv_irq_ops = {
21947+struct pv_irq_ops pv_irq_ops __read_only = {
21948 .save_fl = __PV_IS_CALLEE_SAVE(native_save_fl),
21949 .restore_fl = __PV_IS_CALLEE_SAVE(native_restore_fl),
21950 .irq_disable = __PV_IS_CALLEE_SAVE(native_irq_disable),
21951@@ -336,7 +343,7 @@ struct pv_irq_ops pv_irq_ops = {
21952 #endif
21953 };
21954
21955-struct pv_cpu_ops pv_cpu_ops = {
21956+struct pv_cpu_ops pv_cpu_ops __read_only = {
21957 .cpuid = native_cpuid,
21958 .get_debugreg = native_get_debugreg,
21959 .set_debugreg = native_set_debugreg,
21960@@ -394,21 +401,26 @@ struct pv_cpu_ops pv_cpu_ops = {
21961 .end_context_switch = paravirt_nop,
21962 };
21963
21964-struct pv_apic_ops pv_apic_ops = {
21965+struct pv_apic_ops pv_apic_ops __read_only = {
21966 #ifdef CONFIG_X86_LOCAL_APIC
21967 .startup_ipi_hook = paravirt_nop,
21968 #endif
21969 };
21970
21971-#if defined(CONFIG_X86_32) && !defined(CONFIG_X86_PAE)
21972+#ifdef CONFIG_X86_32
21973+#ifdef CONFIG_X86_PAE
21974+/* 64-bit pagetable entries */
21975+#define PTE_IDENT PV_CALLEE_SAVE(_paravirt_ident_64)
21976+#else
21977 /* 32-bit pagetable entries */
21978 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_32)
21979+#endif
21980 #else
21981 /* 64-bit pagetable entries */
21982 #define PTE_IDENT __PV_IS_CALLEE_SAVE(_paravirt_ident_64)
21983 #endif
21984
21985-struct pv_mmu_ops pv_mmu_ops = {
21986+struct pv_mmu_ops pv_mmu_ops __read_only = {
21987
21988 .read_cr2 = native_read_cr2,
21989 .write_cr2 = native_write_cr2,
21990@@ -458,6 +470,7 @@ struct pv_mmu_ops pv_mmu_ops = {
21991 .make_pud = PTE_IDENT,
21992
21993 .set_pgd = native_set_pgd,
21994+ .set_pgd_batched = native_set_pgd_batched,
21995 #endif
21996 #endif /* PAGETABLE_LEVELS >= 3 */
21997
21998@@ -478,6 +491,12 @@ struct pv_mmu_ops pv_mmu_ops = {
21999 },
22000
22001 .set_fixmap = native_set_fixmap,
22002+
22003+#ifdef CONFIG_PAX_KERNEXEC
22004+ .pax_open_kernel = native_pax_open_kernel,
22005+ .pax_close_kernel = native_pax_close_kernel,
22006+#endif
22007+
22008 };
22009
22010 EXPORT_SYMBOL_GPL(pv_time_ops);
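
Marking the pv_*_ops tables __read_only takes the paravirt dispatch pointers, a classic overwrite target, out of the writable kernel image. Code that legitimately overrides an op after boot then has to cast away the const through a data pointer inside the same open/close bracket. A hedged sketch of that idiom (the override is a placeholder; pv_cpu_ops.io_delay is just a conveniently simple hook):

#include <linux/init.h>
#include <asm/paravirt.h>

static void sketch_io_delay(void)
{
	/* placeholder: a real caller would install a hypervisor hook */
}

static void __init sketch_override_pv_op(void)
{
	sketch_pax_open_kernel();
	/* the cast through void ** sidesteps the read-only qualification */
	*(void **)&pv_cpu_ops.io_delay = (void *)sketch_io_delay;
	sketch_pax_close_kernel();
}
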
22011diff --git a/arch/x86/kernel/pci-calgary_64.c b/arch/x86/kernel/pci-calgary_64.c
22012index 299d493..2ccb0ee 100644
22013--- a/arch/x86/kernel/pci-calgary_64.c
22014+++ b/arch/x86/kernel/pci-calgary_64.c
22015@@ -1339,7 +1339,7 @@ static void __init get_tce_space_from_tar(void)
22016 tce_space = be64_to_cpu(readq(target));
22017 tce_space = tce_space & TAR_SW_BITS;
22018
22019- tce_space = tce_space & (~specified_table_size);
22020+ tce_space = tce_space & (~(unsigned long)specified_table_size);
22021 info->tce_space = (u64 *)__va(tce_space);
22022 }
22023 }
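
The calgary change is an integer-promotion fix: specified_table_size is a 32-bit variable, so ~specified_table_size is computed at 32 bits and only then widened for the AND against the 64-bit tce_space, and whether the upper 32 mask bits come out as ones depends on sign extension. Casting before the complement makes the mask full-width by construction. A standalone demo of the failure mode with an unsigned operand (values hypothetical; the kernel variable is a signed int, where sign extension happens to preserve the upper bits, so the cast mainly removes the reliance on that accident):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t tce_space = 0x00000183f2000000ULL;	/* hypothetical TAR value */
	unsigned int size  = 0x02000000;		/* hypothetical table size */

	uint64_t bad  = tce_space & (~size);		/* 32-bit mask, zero-extended */
	uint64_t good = tce_space & (~(uint64_t)size);	/* full-width mask */

	printf("bad  = %#018llx\n", (unsigned long long)bad);	/* 0x00000000f0000000 */
	printf("good = %#018llx\n", (unsigned long long)good);	/* 0x00000183f0000000 */
	return 0;
}
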
22024diff --git a/arch/x86/kernel/pci-iommu_table.c b/arch/x86/kernel/pci-iommu_table.c
22025index 35ccf75..7a15747 100644
22026--- a/arch/x86/kernel/pci-iommu_table.c
22027+++ b/arch/x86/kernel/pci-iommu_table.c
22028@@ -2,7 +2,7 @@
22029 #include <asm/iommu_table.h>
22030 #include <linux/string.h>
22031 #include <linux/kallsyms.h>
22032-
22033+#include <linux/sched.h>
22034
22035 #define DEBUG 1
22036
22037diff --git a/arch/x86/kernel/pci-swiotlb.c b/arch/x86/kernel/pci-swiotlb.c
22038index 6c483ba..d10ce2f 100644
22039--- a/arch/x86/kernel/pci-swiotlb.c
22040+++ b/arch/x86/kernel/pci-swiotlb.c
22041@@ -32,7 +32,7 @@ static void x86_swiotlb_free_coherent(struct device *dev, size_t size,
22042 void *vaddr, dma_addr_t dma_addr,
22043 struct dma_attrs *attrs)
22044 {
22045- swiotlb_free_coherent(dev, size, vaddr, dma_addr);
22046+ swiotlb_free_coherent(dev, size, vaddr, dma_addr, attrs);
22047 }
22048
22049 static struct dma_map_ops swiotlb_dma_ops = {
22050diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
22051index 81a5f5e..20f8b58 100644
22052--- a/arch/x86/kernel/process.c
22053+++ b/arch/x86/kernel/process.c
22054@@ -36,7 +36,8 @@
22055 * section. Since TSS's are completely CPU-local, we want them
22056 * on exact cacheline boundaries, to eliminate cacheline ping-pong.
22057 */
22058-DEFINE_PER_CPU_SHARED_ALIGNED(struct tss_struct, init_tss) = INIT_TSS;
22059+struct tss_struct init_tss[NR_CPUS] ____cacheline_internodealigned_in_smp = { [0 ... NR_CPUS-1] = INIT_TSS };
22060+EXPORT_SYMBOL(init_tss);
22061
22062 #ifdef CONFIG_X86_64
22063 static DEFINE_PER_CPU(unsigned char, is_idle);
22064@@ -92,7 +93,7 @@ void arch_task_cache_init(void)
22065 task_xstate_cachep =
22066 kmem_cache_create("task_xstate", xstate_size,
22067 __alignof__(union thread_xstate),
22068- SLAB_PANIC | SLAB_NOTRACK, NULL);
22069+ SLAB_PANIC | SLAB_NOTRACK | SLAB_USERCOPY, NULL);
22070 }
22071
22072 /*
22073@@ -105,7 +106,7 @@ void exit_thread(void)
22074 unsigned long *bp = t->io_bitmap_ptr;
22075
22076 if (bp) {
22077- struct tss_struct *tss = &per_cpu(init_tss, get_cpu());
22078+ struct tss_struct *tss = init_tss + get_cpu();
22079
22080 t->io_bitmap_ptr = NULL;
22081 clear_thread_flag(TIF_IO_BITMAP);
22082@@ -125,6 +126,9 @@ void flush_thread(void)
22083 {
22084 struct task_struct *tsk = current;
22085
22086+#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
22087+ loadsegment(gs, 0);
22088+#endif
22089 flush_ptrace_hw_breakpoint(tsk);
22090 memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
22091 drop_init_fpu(tsk);
22092@@ -271,7 +275,7 @@ static void __exit_idle(void)
22093 void exit_idle(void)
22094 {
22095 /* idle loop has pid 0 */
22096- if (current->pid)
22097+ if (task_pid_nr(current))
22098 return;
22099 __exit_idle();
22100 }
22101@@ -327,7 +331,7 @@ bool xen_set_default_idle(void)
22102 return ret;
22103 }
22104 #endif
22105-void stop_this_cpu(void *dummy)
22106+__noreturn void stop_this_cpu(void *dummy)
22107 {
22108 local_irq_disable();
22109 /*
22110@@ -456,16 +460,37 @@ static int __init idle_setup(char *str)
22111 }
22112 early_param("idle", idle_setup);
22113
22114-unsigned long arch_align_stack(unsigned long sp)
22115+#ifdef CONFIG_PAX_RANDKSTACK
22116+void pax_randomize_kstack(struct pt_regs *regs)
22117 {
22118- if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
22119- sp -= get_random_int() % 8192;
22120- return sp & ~0xf;
22121-}
22122+ struct thread_struct *thread = &current->thread;
22123+ unsigned long time;
22124
22125-unsigned long arch_randomize_brk(struct mm_struct *mm)
22126-{
22127- unsigned long range_end = mm->brk + 0x02000000;
22128- return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
22129-}
22130+ if (!randomize_va_space)
22131+ return;
22132+
22133+ if (v8086_mode(regs))
22134+ return;
22135
22136+ rdtscl(time);
22137+
22138+ /* P4 seems to return a 0 LSB, ignore it */
22139+#ifdef CONFIG_MPENTIUM4
22140+ time &= 0x3EUL;
22141+ time <<= 2;
22142+#elif defined(CONFIG_X86_64)
22143+ time &= 0xFUL;
22144+ time <<= 4;
22145+#else
22146+ time &= 0x1FUL;
22147+ time <<= 3;
22148+#endif
22149+
22150+ thread->sp0 ^= time;
22151+ load_sp0(init_tss + smp_processor_id(), thread);
22152+
22153+#ifdef CONFIG_X86_64
22154+ this_cpu_write(kernel_stack, thread->sp0);
22155+#endif
22156+}
22157+#endif
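
pax_randomize_kstack() XORs a few low rdtsc bits into thread.sp0 at syscall exit (the call sites are added elsewhere in this patch), so the kernel stack top moves within a small aligned window on every entry. A quick user-space check of the mask/shift pairs above, counting how many distinct offsets each variant can produce and over what span:

#include <stdio.h>

static void report(const char *name, unsigned long mask, int shift)
{
	unsigned char seen[256] = { 0 };
	unsigned int t, n = 0;
	unsigned long max = 0;

	for (t = 0; t < 256; t++) {
		unsigned long off = ((unsigned long)t & mask) << shift;

		if (!seen[off]) {
			seen[off] = 1;
			n++;
		}
		if (off > max)
			max = off;
	}
	printf("%-7s %2u offsets, max %#lx\n", name, n, max);
}

int main(void)
{
	report("P4:", 0x3EUL, 2);	/* rdtsc LSB ignored on P4 */
	report("x86-64:", 0xFUL, 4);	/* 16 slots, 16-byte step */
	report("i386:", 0x1FUL, 3);	/* 32 slots, 8-byte step */
	return 0;
}
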
22158diff --git a/arch/x86/kernel/process_32.c b/arch/x86/kernel/process_32.c
22159index 7305f7d..22f73d6 100644
22160--- a/arch/x86/kernel/process_32.c
22161+++ b/arch/x86/kernel/process_32.c
22162@@ -65,6 +65,7 @@ asmlinkage void ret_from_kernel_thread(void) __asm__("ret_from_kernel_thread");
22163 unsigned long thread_saved_pc(struct task_struct *tsk)
22164 {
22165 return ((unsigned long *)tsk->thread.sp)[3];
22166+//XXX return tsk->thread.eip;
22167 }
22168
22169 void __show_regs(struct pt_regs *regs, int all)
22170@@ -74,19 +75,18 @@ void __show_regs(struct pt_regs *regs, int all)
22171 unsigned long sp;
22172 unsigned short ss, gs;
22173
22174- if (user_mode_vm(regs)) {
22175+ if (user_mode(regs)) {
22176 sp = regs->sp;
22177 ss = regs->ss & 0xffff;
22178- gs = get_user_gs(regs);
22179 } else {
22180 sp = kernel_stack_pointer(regs);
22181 savesegment(ss, ss);
22182- savesegment(gs, gs);
22183 }
22184+ gs = get_user_gs(regs);
22185
22186 printk(KERN_DEFAULT "EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
22187 (u16)regs->cs, regs->ip, regs->flags,
22188- smp_processor_id());
22189+ raw_smp_processor_id());
22190 print_symbol("EIP is at %s\n", regs->ip);
22191
22192 printk(KERN_DEFAULT "EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
22193@@ -128,20 +128,21 @@ void release_thread(struct task_struct *dead_task)
22194 int copy_thread(unsigned long clone_flags, unsigned long sp,
22195 unsigned long arg, struct task_struct *p)
22196 {
22197- struct pt_regs *childregs = task_pt_regs(p);
22198+ struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
22199 struct task_struct *tsk;
22200 int err;
22201
22202 p->thread.sp = (unsigned long) childregs;
22203 p->thread.sp0 = (unsigned long) (childregs+1);
22204+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
22205
22206 if (unlikely(p->flags & PF_KTHREAD)) {
22207 /* kernel thread */
22208 memset(childregs, 0, sizeof(struct pt_regs));
22209 p->thread.ip = (unsigned long) ret_from_kernel_thread;
22210- task_user_gs(p) = __KERNEL_STACK_CANARY;
22211- childregs->ds = __USER_DS;
22212- childregs->es = __USER_DS;
22213+ savesegment(gs, childregs->gs);
22214+ childregs->ds = __KERNEL_DS;
22215+ childregs->es = __KERNEL_DS;
22216 childregs->fs = __KERNEL_PERCPU;
22217 childregs->bx = sp; /* function */
22218 childregs->bp = arg;
22219@@ -248,7 +249,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22220 struct thread_struct *prev = &prev_p->thread,
22221 *next = &next_p->thread;
22222 int cpu = smp_processor_id();
22223- struct tss_struct *tss = &per_cpu(init_tss, cpu);
22224+ struct tss_struct *tss = init_tss + cpu;
22225 fpu_switch_t fpu;
22226
22227 /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
22228@@ -272,6 +273,10 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22229 */
22230 lazy_save_gs(prev->gs);
22231
22232+#ifdef CONFIG_PAX_MEMORY_UDEREF
22233+ __set_fs(task_thread_info(next_p)->addr_limit);
22234+#endif
22235+
22236 /*
22237 * Load the per-thread Thread-Local Storage descriptor.
22238 */
22239@@ -302,6 +307,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22240 */
22241 arch_end_context_switch(next_p);
22242
22243+ this_cpu_write(current_task, next_p);
22244+ this_cpu_write(current_tinfo, &next_p->tinfo);
22245+
22246 /*
22247 * Restore %gs if needed (which is common)
22248 */
22249@@ -310,8 +318,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22250
22251 switch_fpu_finish(next_p, fpu);
22252
22253- this_cpu_write(current_task, next_p);
22254-
22255 return prev_p;
22256 }
22257
22258@@ -341,4 +347,3 @@ unsigned long get_wchan(struct task_struct *p)
22259 } while (count++ < 16);
22260 return 0;
22261 }
22262-
22263diff --git a/arch/x86/kernel/process_64.c b/arch/x86/kernel/process_64.c
22264index 355ae06..4530766 100644
22265--- a/arch/x86/kernel/process_64.c
22266+++ b/arch/x86/kernel/process_64.c
22267@@ -151,10 +151,11 @@ int copy_thread(unsigned long clone_flags, unsigned long sp,
22268 struct pt_regs *childregs;
22269 struct task_struct *me = current;
22270
22271- p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE;
22272+ p->thread.sp0 = (unsigned long)task_stack_page(p) + THREAD_SIZE - 16;
22273 childregs = task_pt_regs(p);
22274 p->thread.sp = (unsigned long) childregs;
22275 p->thread.usersp = me->thread.usersp;
22276+ p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);
22277 set_tsk_thread_flag(p, TIF_FORK);
22278 p->fpu_counter = 0;
22279 p->thread.io_bitmap_ptr = NULL;
22280@@ -273,7 +274,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22281 struct thread_struct *prev = &prev_p->thread;
22282 struct thread_struct *next = &next_p->thread;
22283 int cpu = smp_processor_id();
22284- struct tss_struct *tss = &per_cpu(init_tss, cpu);
22285+ struct tss_struct *tss = init_tss + cpu;
22286 unsigned fsindex, gsindex;
22287 fpu_switch_t fpu;
22288
22289@@ -355,10 +356,9 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
22290 prev->usersp = this_cpu_read(old_rsp);
22291 this_cpu_write(old_rsp, next->usersp);
22292 this_cpu_write(current_task, next_p);
22293+ this_cpu_write(current_tinfo, &next_p->tinfo);
22294
22295- this_cpu_write(kernel_stack,
22296- (unsigned long)task_stack_page(next_p) +
22297- THREAD_SIZE - KERNEL_STACK_OFFSET);
22298+ this_cpu_write(kernel_stack, next->sp0);
22299
22300 /*
22301 * Now maybe reload the debug registers and handle I/O bitmaps
22302@@ -427,12 +427,11 @@ unsigned long get_wchan(struct task_struct *p)
22303 if (!p || p == current || p->state == TASK_RUNNING)
22304 return 0;
22305 stack = (unsigned long)task_stack_page(p);
22306- if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
22307+ if (p->thread.sp < stack || p->thread.sp > stack+THREAD_SIZE-16-sizeof(u64))
22308 return 0;
22309 fp = *(u64 *)(p->thread.sp);
22310 do {
22311- if (fp < (unsigned long)stack ||
22312- fp >= (unsigned long)stack+THREAD_SIZE)
22313+ if (fp < stack || fp > stack+THREAD_SIZE-16-sizeof(u64))
22314 return 0;
22315 ip = *(u64 *)(fp+8);
22316 if (!in_sched_functions(ip))
22317diff --git a/arch/x86/kernel/ptrace.c b/arch/x86/kernel/ptrace.c
22318index 29a8120..a50b5ee 100644
22319--- a/arch/x86/kernel/ptrace.c
22320+++ b/arch/x86/kernel/ptrace.c
22321@@ -184,14 +184,13 @@ unsigned long kernel_stack_pointer(struct pt_regs *regs)
22322 {
22323 unsigned long context = (unsigned long)regs & ~(THREAD_SIZE - 1);
22324 unsigned long sp = (unsigned long)&regs->sp;
22325- struct thread_info *tinfo;
22326
22327- if (context == (sp & ~(THREAD_SIZE - 1)))
22328+ if (context == ((sp + 8) & ~(THREAD_SIZE - 1)))
22329 return sp;
22330
22331- tinfo = (struct thread_info *)context;
22332- if (tinfo->previous_esp)
22333- return tinfo->previous_esp;
22334+ sp = *(unsigned long *)context;
22335+ if (sp)
22336+ return sp;
22337
22338 return (unsigned long)regs;
22339 }
22340@@ -588,7 +587,7 @@ static void ptrace_triggered(struct perf_event *bp,
22341 static unsigned long ptrace_get_dr7(struct perf_event *bp[])
22342 {
22343 int i;
22344- int dr7 = 0;
22345+ unsigned long dr7 = 0;
22346 struct arch_hw_breakpoint *info;
22347
22348 for (i = 0; i < HBP_NUM; i++) {
22349@@ -856,7 +855,7 @@ long arch_ptrace(struct task_struct *child, long request,
22350 unsigned long addr, unsigned long data)
22351 {
22352 int ret;
22353- unsigned long __user *datap = (unsigned long __user *)data;
22354+ unsigned long __user *datap = (__force unsigned long __user *)data;
22355
22356 switch (request) {
22357 /* read the word at location addr in the USER area. */
22358@@ -941,14 +940,14 @@ long arch_ptrace(struct task_struct *child, long request,
22359 if ((int) addr < 0)
22360 return -EIO;
22361 ret = do_get_thread_area(child, addr,
22362- (struct user_desc __user *)data);
22363+ (__force struct user_desc __user *) data);
22364 break;
22365
22366 case PTRACE_SET_THREAD_AREA:
22367 if ((int) addr < 0)
22368 return -EIO;
22369 ret = do_set_thread_area(child, addr,
22370- (struct user_desc __user *)data, 0);
22371+ (__force struct user_desc __user *) data, 0);
22372 break;
22373 #endif
22374
22375@@ -1326,7 +1325,7 @@ long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
22376
22377 #ifdef CONFIG_X86_64
22378
22379-static struct user_regset x86_64_regsets[] __read_mostly = {
22380+static user_regset_no_const x86_64_regsets[] __read_only = {
22381 [REGSET_GENERAL] = {
22382 .core_note_type = NT_PRSTATUS,
22383 .n = sizeof(struct user_regs_struct) / sizeof(long),
22384@@ -1367,7 +1366,7 @@ static const struct user_regset_view user_x86_64_view = {
22385 #endif /* CONFIG_X86_64 */
22386
22387 #if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
22388-static struct user_regset x86_32_regsets[] __read_mostly = {
22389+static user_regset_no_const x86_32_regsets[] __read_only = {
22390 [REGSET_GENERAL] = {
22391 .core_note_type = NT_PRSTATUS,
22392 .n = sizeof(struct user_regs_struct32) / sizeof(u32),
22393@@ -1420,7 +1419,7 @@ static const struct user_regset_view user_x86_32_view = {
22394 */
22395 u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
22396
22397-void update_regset_xstate_info(unsigned int size, u64 xstate_mask)
22398+void __init update_regset_xstate_info(unsigned int size, u64 xstate_mask)
22399 {
22400 #ifdef CONFIG_X86_64
22401 x86_64_regsets[REGSET_XSTATE].n = size / sizeof(u64);
22402@@ -1455,7 +1454,7 @@ static void fill_sigtrap_info(struct task_struct *tsk,
22403 memset(info, 0, sizeof(*info));
22404 info->si_signo = SIGTRAP;
22405 info->si_code = si_code;
22406- info->si_addr = user_mode_vm(regs) ? (void __user *)regs->ip : NULL;
22407+ info->si_addr = user_mode(regs) ? (__force void __user *)regs->ip : NULL;
22408 }
22409
22410 void user_single_step_siginfo(struct task_struct *tsk,
22411@@ -1484,6 +1483,10 @@ void send_sigtrap(struct task_struct *tsk, struct pt_regs *regs,
22412 # define IS_IA32 0
22413 #endif
22414
22415+#ifdef CONFIG_GRKERNSEC_SETXID
22416+extern void gr_delayed_cred_worker(void);
22417+#endif
22418+
22419 /*
22420 * We must return the syscall number to actually look up in the table.
22421 * This can be -1L to skip running any syscall at all.
22422@@ -1494,6 +1497,11 @@ long syscall_trace_enter(struct pt_regs *regs)
22423
22424 user_exit();
22425
22426+#ifdef CONFIG_GRKERNSEC_SETXID
22427+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
22428+ gr_delayed_cred_worker();
22429+#endif
22430+
22431 /*
22432 * If we stepped into a sysenter/syscall insn, it trapped in
22433 * kernel mode; do_debug() cleared TF and set TIF_SINGLESTEP.
22434@@ -1549,6 +1557,11 @@ void syscall_trace_leave(struct pt_regs *regs)
22435 */
22436 user_exit();
22437
22438+#ifdef CONFIG_GRKERNSEC_SETXID
22439+ if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
22440+ gr_delayed_cred_worker();
22441+#endif
22442+
22443 audit_syscall_exit(regs);
22444
22445 if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
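
The TIF_GRSEC_SETXID hooks are grsecurity's deferred-credential mechanism: a uid/gid change that cannot be applied to a sibling thread immediately is parked, the flag is raised, and both syscall_trace_enter() and syscall_trace_leave() drain it at the next well-defined boundary. gr_delayed_cred_worker() lives in the grsecurity core, outside this excerpt; a minimal sketch of the drain pattern it plugs into (worker body is a placeholder):

#include <linux/compiler.h>
#include <linux/thread_info.h>

/* placeholder for the grsecurity-internal worker */
static void sketch_delayed_cred_worker(void)
{
	/* apply the parked uid/gid change under the proper locks */
}

/* called at syscall entry and exit, as in the two hunks above */
static inline void sketch_drain_delayed_creds(void)
{
	if (unlikely(test_and_clear_thread_flag(TIF_GRSEC_SETXID)))
		sketch_delayed_cred_worker();
}
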
22446diff --git a/arch/x86/kernel/pvclock.c b/arch/x86/kernel/pvclock.c
22447index 2cb9470..ff1fd80 100644
22448--- a/arch/x86/kernel/pvclock.c
22449+++ b/arch/x86/kernel/pvclock.c
22450@@ -43,11 +43,11 @@ unsigned long pvclock_tsc_khz(struct pvclock_vcpu_time_info *src)
22451 return pv_tsc_khz;
22452 }
22453
22454-static atomic64_t last_value = ATOMIC64_INIT(0);
22455+static atomic64_unchecked_t last_value = ATOMIC64_INIT(0);
22456
22457 void pvclock_resume(void)
22458 {
22459- atomic64_set(&last_value, 0);
22460+ atomic64_set_unchecked(&last_value, 0);
22461 }
22462
22463 u8 pvclock_read_flags(struct pvclock_vcpu_time_info *src)
22464@@ -92,11 +92,11 @@ cycle_t pvclock_clocksource_read(struct pvclock_vcpu_time_info *src)
22465 * updating at the same time, and one of them could be slightly behind,
22466 * making the assumption that last_value always go forward fail to hold.
22467 */
22468- last = atomic64_read(&last_value);
22469+ last = atomic64_read_unchecked(&last_value);
22470 do {
22471 if (ret < last)
22472 return last;
22473- last = atomic64_cmpxchg(&last_value, last, ret);
22474+ last = atomic64_cmpxchg_unchecked(&last_value, last, ret);
22475 } while (unlikely(last != ret));
22476
22477 return ret;
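
last_value is a clock watermark, not an object count, so under PAX_REFCOUNT, which builds signed-overflow traps into the ordinary atomic ops, it is moved to the parallel *_unchecked API that keeps plain wrapping semantics. A hedged sketch of the type split, assuming the PaX convention of an identically laid out unchecked twin:

/* checked flavour: the instrumented ops trap on signed overflow */
typedef struct {
	long counter;
} sketch_atomic64_t;

/* unchecked flavour: same layout, ops compiled without the trap,
 * for counters that may wrap or carry arbitrary values */
typedef struct {
	long counter;
} sketch_atomic64_unchecked_t;

static inline long sketch_atomic64_read_unchecked(const sketch_atomic64_unchecked_t *v)
{
	return *(volatile const long *)&v->counter;
}

static inline void sketch_atomic64_set_unchecked(sketch_atomic64_unchecked_t *v, long i)
{
	v->counter = i;
}
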
22478diff --git a/arch/x86/kernel/reboot.c b/arch/x86/kernel/reboot.c
22479index 76fa1e9..abf09ea 100644
22480--- a/arch/x86/kernel/reboot.c
22481+++ b/arch/x86/kernel/reboot.c
22482@@ -36,7 +36,7 @@ void (*pm_power_off)(void);
22483 EXPORT_SYMBOL(pm_power_off);
22484
22485 static const struct desc_ptr no_idt = {};
22486-static int reboot_mode;
22487+static unsigned short reboot_mode;
22488 enum reboot_type reboot_type = BOOT_ACPI;
22489 int reboot_force;
22490
22491@@ -157,6 +157,11 @@ static int __init set_bios_reboot(const struct dmi_system_id *d)
22492
22493 void __noreturn machine_real_restart(unsigned int type)
22494 {
22495+
22496+#if defined(CONFIG_X86_32) && (defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF))
22497+ struct desc_struct *gdt;
22498+#endif
22499+
22500 local_irq_disable();
22501
22502 /*
22503@@ -184,7 +189,29 @@ void __noreturn machine_real_restart(unsigned int type)
22504
22505 /* Jump to the identity-mapped low memory code */
22506 #ifdef CONFIG_X86_32
22507- asm volatile("jmpl *%0" : :
22508+
22509+#if defined(CONFIG_PAX_KERNEXEC) || defined(CONFIG_PAX_MEMORY_UDEREF)
22510+ gdt = get_cpu_gdt_table(smp_processor_id());
22511+ pax_open_kernel();
22512+#ifdef CONFIG_PAX_MEMORY_UDEREF
22513+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
22514+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
22515+ loadsegment(ds, __KERNEL_DS);
22516+ loadsegment(es, __KERNEL_DS);
22517+ loadsegment(ss, __KERNEL_DS);
22518+#endif
22519+#ifdef CONFIG_PAX_KERNEXEC
22520+ gdt[GDT_ENTRY_KERNEL_CS].base0 = 0;
22521+ gdt[GDT_ENTRY_KERNEL_CS].base1 = 0;
22522+ gdt[GDT_ENTRY_KERNEL_CS].base2 = 0;
22523+ gdt[GDT_ENTRY_KERNEL_CS].limit0 = 0xffff;
22524+ gdt[GDT_ENTRY_KERNEL_CS].limit = 0xf;
22525+ gdt[GDT_ENTRY_KERNEL_CS].g = 1;
22526+#endif
22527+ pax_close_kernel();
22528+#endif
22529+
22530+ asm volatile("ljmpl *%0" : :
22531 "rm" (real_mode_header->machine_real_restart_asm),
22532 "a" (type));
22533 #else
22534@@ -531,7 +558,7 @@ void __attribute__((weak)) mach_reboot_fixups(void)
22535 * try to force a triple fault and then cycle between hitting the keyboard
22536 * controller and doing that
22537 */
22538-static void native_machine_emergency_restart(void)
22539+static void __noreturn native_machine_emergency_restart(void)
22540 {
22541 int i;
22542 int attempt = 0;
22543@@ -654,13 +681,13 @@ void native_machine_shutdown(void)
22544 #endif
22545 }
22546
22547-static void __machine_emergency_restart(int emergency)
22548+static void __noreturn __machine_emergency_restart(int emergency)
22549 {
22550 reboot_emergency = emergency;
22551 machine_ops.emergency_restart();
22552 }
22553
22554-static void native_machine_restart(char *__unused)
22555+static void __noreturn native_machine_restart(char *__unused)
22556 {
22557 pr_notice("machine restart\n");
22558
22559@@ -669,7 +696,7 @@ static void native_machine_restart(char *__unused)
22560 __machine_emergency_restart(0);
22561 }
22562
22563-static void native_machine_halt(void)
22564+static void __noreturn native_machine_halt(void)
22565 {
22566 /* Stop other cpus and apics */
22567 machine_shutdown();
22568@@ -679,7 +706,7 @@ static void native_machine_halt(void)
22569 stop_this_cpu(NULL);
22570 }
22571
22572-static void native_machine_power_off(void)
22573+static void __noreturn native_machine_power_off(void)
22574 {
22575 if (pm_power_off) {
22576 if (!reboot_force)
22577@@ -688,9 +715,10 @@ static void native_machine_power_off(void)
22578 }
22579 /* A fallback in case there is no PM info available */
22580 tboot_shutdown(TB_SHUTDOWN_HALT);
22581+ unreachable();
22582 }
22583
22584-struct machine_ops machine_ops = {
22585+struct machine_ops machine_ops __read_only = {
22586 .power_off = native_machine_power_off,
22587 .shutdown = native_machine_shutdown,
22588 .emergency_restart = native_machine_emergency_restart,
22589diff --git a/arch/x86/kernel/relocate_kernel_64.S b/arch/x86/kernel/relocate_kernel_64.S
22590index f2bb9c9..bed145d7 100644
22591--- a/arch/x86/kernel/relocate_kernel_64.S
22592+++ b/arch/x86/kernel/relocate_kernel_64.S
22593@@ -11,6 +11,7 @@
22594 #include <asm/kexec.h>
22595 #include <asm/processor-flags.h>
22596 #include <asm/pgtable_types.h>
22597+#include <asm/alternative-asm.h>
22598
22599 /*
22600 * Must be relocatable PIC code callable as a C function
22601@@ -167,6 +168,7 @@ identity_mapped:
22602 xorq %r14, %r14
22603 xorq %r15, %r15
22604
22605+ pax_force_retaddr 0, 1
22606 ret
22607
22608 1:
22609diff --git a/arch/x86/kernel/setup.c b/arch/x86/kernel/setup.c
22610index 56f7fcf..fa229f4 100644
22611--- a/arch/x86/kernel/setup.c
22612+++ b/arch/x86/kernel/setup.c
22613@@ -110,6 +110,7 @@
22614 #include <asm/mce.h>
22615 #include <asm/alternative.h>
22616 #include <asm/prom.h>
22617+#include <asm/boot.h>
22618
22619 /*
22620 * max_low_pfn_mapped: highest direct mapped pfn under 4GB
22621@@ -444,7 +445,7 @@ static void __init parse_setup_data(void)
22622
22623 switch (data->type) {
22624 case SETUP_E820_EXT:
22625- parse_e820_ext(data);
22626+ parse_e820_ext((struct setup_data __force_kernel *)data);
22627 break;
22628 case SETUP_DTB:
22629 add_dtb(pa_data);
22630@@ -771,7 +772,7 @@ static void __init trim_bios_range(void)
22631 * area (640->1Mb) as ram even though it is not.
22632 * take them out.
22633 */
22634- e820_remove_range(BIOS_BEGIN, BIOS_END - BIOS_BEGIN, E820_RAM, 1);
22635+ e820_remove_range(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_RAM, 1);
22636
22637 sanitize_e820_map(e820.map, ARRAY_SIZE(e820.map), &e820.nr_map);
22638 }
22639@@ -779,7 +780,7 @@ static void __init trim_bios_range(void)
22640 /* called before trim_bios_range() to spare extra sanitize */
22641 static void __init e820_add_kernel_range(void)
22642 {
22643- u64 start = __pa_symbol(_text);
22644+ u64 start = __pa_symbol(ktla_ktva(_text));
22645 u64 size = __pa_symbol(_end) - start;
22646
22647 /*
22648@@ -841,8 +842,12 @@ static void __init trim_low_memory_range(void)
22649
22650 void __init setup_arch(char **cmdline_p)
22651 {
22652+#ifdef CONFIG_X86_32
22653+ memblock_reserve(LOAD_PHYSICAL_ADDR, __pa_symbol(__bss_stop) - LOAD_PHYSICAL_ADDR);
22654+#else
22655 memblock_reserve(__pa_symbol(_text),
22656 (unsigned long)__bss_stop - (unsigned long)_text);
22657+#endif
22658
22659 early_reserve_initrd();
22660
22661@@ -934,14 +939,14 @@ void __init setup_arch(char **cmdline_p)
22662
22663 if (!boot_params.hdr.root_flags)
22664 root_mountflags &= ~MS_RDONLY;
22665- init_mm.start_code = (unsigned long) _text;
22666- init_mm.end_code = (unsigned long) _etext;
22667+ init_mm.start_code = ktla_ktva((unsigned long) _text);
22668+ init_mm.end_code = ktla_ktva((unsigned long) _etext);
22669 init_mm.end_data = (unsigned long) _edata;
22670 init_mm.brk = _brk_end;
22671
22672- code_resource.start = __pa_symbol(_text);
22673- code_resource.end = __pa_symbol(_etext)-1;
22674- data_resource.start = __pa_symbol(_etext);
22675+ code_resource.start = __pa_symbol(ktla_ktva(_text));
22676+ code_resource.end = __pa_symbol(ktla_ktva(_etext))-1;
22677+ data_resource.start = __pa_symbol(_sdata);
22678 data_resource.end = __pa_symbol(_edata)-1;
22679 bss_resource.start = __pa_symbol(__bss_start);
22680 bss_resource.end = __pa_symbol(__bss_stop)-1;
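
The ktla_ktva() translations in this file follow from the 32-bit KERNEXEC layout: kernel text executes at a linear address distinct from the virtual address the rest of the kernel uses to refer to it, so _text/_etext must be converted before being handed to __pa_symbol() or recorded in init_mm. A sketch of the conversion, assuming the usual PaX x86-32 arrangement in which the two views differ by a constant (macro names and the exact offset are illustrative):

/* sketch: text alias and canonical mapping offset by a constant */
#define SKETCH_KTLA_KTVA(addr)	((addr) + LOAD_PHYSICAL_ADDR + PAGE_OFFSET)
#define SKETCH_KTVA_KTLA(addr)	((addr) - LOAD_PHYSICAL_ADDR - PAGE_OFFSET)

On configurations without KERNEXEC both collapse to the identity, so the call sites above stay correct everywhere.
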
22681diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
22682index 5cdff03..80fa283 100644
22683--- a/arch/x86/kernel/setup_percpu.c
22684+++ b/arch/x86/kernel/setup_percpu.c
22685@@ -21,19 +21,17 @@
22686 #include <asm/cpu.h>
22687 #include <asm/stackprotector.h>
22688
22689-DEFINE_PER_CPU_READ_MOSTLY(int, cpu_number);
22690+#ifdef CONFIG_SMP
22691+DEFINE_PER_CPU_READ_MOSTLY(unsigned int, cpu_number);
22692 EXPORT_PER_CPU_SYMBOL(cpu_number);
22693+#endif
22694
22695-#ifdef CONFIG_X86_64
22696 #define BOOT_PERCPU_OFFSET ((unsigned long)__per_cpu_load)
22697-#else
22698-#define BOOT_PERCPU_OFFSET 0
22699-#endif
22700
22701 DEFINE_PER_CPU(unsigned long, this_cpu_off) = BOOT_PERCPU_OFFSET;
22702 EXPORT_PER_CPU_SYMBOL(this_cpu_off);
22703
22704-unsigned long __per_cpu_offset[NR_CPUS] __read_mostly = {
22705+unsigned long __per_cpu_offset[NR_CPUS] __read_only = {
22706 [0 ... NR_CPUS-1] = BOOT_PERCPU_OFFSET,
22707 };
22708 EXPORT_SYMBOL(__per_cpu_offset);
22709@@ -66,7 +64,7 @@ static bool __init pcpu_need_numa(void)
22710 {
22711 #ifdef CONFIG_NEED_MULTIPLE_NODES
22712 pg_data_t *last = NULL;
22713- unsigned int cpu;
22714+ int cpu;
22715
22716 for_each_possible_cpu(cpu) {
22717 int node = early_cpu_to_node(cpu);
22718@@ -155,10 +153,10 @@ static inline void setup_percpu_segment(int cpu)
22719 {
22720 #ifdef CONFIG_X86_32
22721 struct desc_struct gdt;
22722+ unsigned long base = per_cpu_offset(cpu);
22723
22724- pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
22725- 0x2 | DESCTYPE_S, 0x8);
22726- gdt.s = 1;
22727+ pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
22728+ 0x83 | DESCTYPE_S, 0xC);
22729 write_gdt_entry(get_cpu_gdt_table(cpu),
22730 GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
22731 #endif
22732@@ -219,6 +217,11 @@ void __init setup_per_cpu_areas(void)
22733 /* alrighty, percpu areas up and running */
22734 delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
22735 for_each_possible_cpu(cpu) {
22736+#ifdef CONFIG_CC_STACKPROTECTOR
22737+#ifdef CONFIG_X86_32
22738+ unsigned long canary = per_cpu(stack_canary.canary, cpu);
22739+#endif
22740+#endif
22741 per_cpu_offset(cpu) = delta + pcpu_unit_offsets[cpu];
22742 per_cpu(this_cpu_off, cpu) = per_cpu_offset(cpu);
22743 per_cpu(cpu_number, cpu) = cpu;
22744@@ -259,6 +262,12 @@ void __init setup_per_cpu_areas(void)
22745 */
22746 set_cpu_numa_node(cpu, early_cpu_to_node(cpu));
22747 #endif
22748+#ifdef CONFIG_CC_STACKPROTECTOR
22749+#ifdef CONFIG_X86_32
22750+ if (!cpu)
22751+ per_cpu(stack_canary.canary, cpu) = canary;
22752+#endif
22753+#endif
22754 /*
22755 * Up to this point, the boot CPU has been using .init.data
22756 * area. Reload any changed state for the boot CPU.
22757diff --git a/arch/x86/kernel/signal.c b/arch/x86/kernel/signal.c
22758index 6956299..f20beae 100644
22759--- a/arch/x86/kernel/signal.c
22760+++ b/arch/x86/kernel/signal.c
22761@@ -196,7 +196,7 @@ static unsigned long align_sigframe(unsigned long sp)
22762 * Align the stack pointer according to the i386 ABI,
22763 * i.e. so that on function entry ((sp + 4) & 15) == 0.
22764 */
22765- sp = ((sp + 4) & -16ul) - 4;
22766+ sp = ((sp - 12) & -16ul) - 4;
22767 #else /* !CONFIG_X86_32 */
22768 sp = round_down(sp, 16) - 8;
22769 #endif
22770@@ -304,9 +304,9 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
22771 }
22772
22773 if (current->mm->context.vdso)
22774- restorer = VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
22775+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, sigreturn);
22776 else
22777- restorer = &frame->retcode;
22778+ restorer = (void __user *)&frame->retcode;
22779 if (ksig->ka.sa.sa_flags & SA_RESTORER)
22780 restorer = ksig->ka.sa.sa_restorer;
22781
22782@@ -320,7 +320,7 @@ __setup_frame(int sig, struct ksignal *ksig, sigset_t *set,
22783 * reasons and because gdb uses it as a signature to notice
22784 * signal handler stack frames.
22785 */
22786- err |= __put_user(*((u64 *)&retcode), (u64 *)frame->retcode);
22787+ err |= __put_user(*((u64 *)&retcode), (u64 __user *)frame->retcode);
22788
22789 if (err)
22790 return -EFAULT;
22791@@ -367,7 +367,10 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
22792 err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
22793
22794 /* Set up to return from userspace. */
22795- restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
22796+ if (current->mm->context.vdso)
22797+ restorer = (__force void __user *)VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
22798+ else
22799+ restorer = (void __user *)&frame->retcode;
22800 if (ksig->ka.sa.sa_flags & SA_RESTORER)
22801 restorer = ksig->ka.sa.sa_restorer;
22802 put_user_ex(restorer, &frame->pretcode);
22803@@ -379,7 +382,7 @@ static int __setup_rt_frame(int sig, struct ksignal *ksig,
22804 * reasons and because gdb uses it as a signature to notice
22805 * signal handler stack frames.
22806 */
22807- put_user_ex(*((u64 *)&rt_retcode), (u64 *)frame->retcode);
22808+ put_user_ex(*((u64 *)&rt_retcode), (u64 __user *)frame->retcode);
22809 } put_user_catch(err);
22810
22811 err |= copy_siginfo_to_user(&frame->info, &ksig->info);
22812@@ -615,7 +618,12 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
22813 {
22814 int usig = signr_convert(ksig->sig);
22815 sigset_t *set = sigmask_to_save();
22816- compat_sigset_t *cset = (compat_sigset_t *) set;
22817+ sigset_t sigcopy;
22818+ compat_sigset_t *cset;
22819+
22820+ sigcopy = *set;
22821+
22822+ cset = (compat_sigset_t *) &sigcopy;
22823
22824 /* Set up the stack frame */
22825 if (is_ia32_frame()) {
22826@@ -626,7 +634,7 @@ setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
22827 } else if (is_x32_frame()) {
22828 return x32_setup_rt_frame(ksig, cset, regs);
22829 } else {
22830- return __setup_rt_frame(ksig->sig, ksig, set, regs);
22831+ return __setup_rt_frame(ksig->sig, ksig, &sigcopy, regs);
22832 }
22833 }
22834
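
The align_sigframe() change keeps the i386 ABI invariant ((sp + 4) & 15) == 0 but rounds down from sp - 12 rather than sp + 4, so the signal frame is guaranteed to start strictly below the interrupted stack pointer instead of occasionally landing exactly on it. A standalone check over one 16-byte window:

#include <stdio.h>

int main(void)
{
	unsigned long sp;

	for (sp = 0xbffff000UL; sp < 0xbffff010UL; sp++) {
		unsigned long old = ((sp + 4) & -16UL) - 4;
		unsigned long new = ((sp - 12) & -16UL) - 4;

		/* both satisfy ((x + 4) & 15) == 0; only `new` is
		 * always strictly below sp (see sp = ...f00c) */
		printf("sp=%#lx old=%#lx new=%#lx\n", sp, old, new);
	}
	return 0;
}
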
22835diff --git a/arch/x86/kernel/smp.c b/arch/x86/kernel/smp.c
22836index 48d2b7d..90d328a 100644
22837--- a/arch/x86/kernel/smp.c
22838+++ b/arch/x86/kernel/smp.c
22839@@ -285,7 +285,7 @@ static int __init nonmi_ipi_setup(char *str)
22840
22841 __setup("nonmi_ipi", nonmi_ipi_setup);
22842
22843-struct smp_ops smp_ops = {
22844+struct smp_ops smp_ops __read_only = {
22845 .smp_prepare_boot_cpu = native_smp_prepare_boot_cpu,
22846 .smp_prepare_cpus = native_smp_prepare_cpus,
22847 .smp_cpus_done = native_smp_cpus_done,
22848diff --git a/arch/x86/kernel/smpboot.c b/arch/x86/kernel/smpboot.c
22849index bfd348e..4816ad8 100644
22850--- a/arch/x86/kernel/smpboot.c
22851+++ b/arch/x86/kernel/smpboot.c
22852@@ -748,6 +748,7 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
22853 idle->thread.sp = (unsigned long) (((struct pt_regs *)
22854 (THREAD_SIZE + task_stack_page(idle))) - 1);
22855 per_cpu(current_task, cpu) = idle;
22856+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
22857
22858 #ifdef CONFIG_X86_32
22859 /* Stack for startup_32 can be just as for start_secondary onwards */
22860@@ -755,11 +756,13 @@ static int __cpuinit do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
22861 #else
22862 clear_tsk_thread_flag(idle, TIF_FORK);
22863 initial_gs = per_cpu_offset(cpu);
22864- per_cpu(kernel_stack, cpu) =
22865- (unsigned long)task_stack_page(idle) -
22866- KERNEL_STACK_OFFSET + THREAD_SIZE;
22867+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
22868 #endif
22869+
22870+ pax_open_kernel();
22871 early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
22872+ pax_close_kernel();
22873+
22874 initial_code = (unsigned long)start_secondary;
22875 stack_start = idle->thread.sp;
22876
22877@@ -908,6 +911,12 @@ int __cpuinit native_cpu_up(unsigned int cpu, struct task_struct *tidle)
22878 /* the FPU context is blank, nobody can own it */
22879 __cpu_disable_lazy_restore(cpu);
22880
22881+#ifdef CONFIG_PAX_PER_CPU_PGD
22882+ clone_pgd_range(get_cpu_pgd(cpu) + KERNEL_PGD_BOUNDARY,
22883+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
22884+ KERNEL_PGD_PTRS);
22885+#endif
22889+
22890 err = do_boot_cpu(apicid, cpu, tidle);
22891 if (err) {
22892 pr_debug("do_boot_cpu failed %d\n", err);
22893diff --git a/arch/x86/kernel/step.c b/arch/x86/kernel/step.c
22894index 9b4d51d..5d28b58 100644
22895--- a/arch/x86/kernel/step.c
22896+++ b/arch/x86/kernel/step.c
22897@@ -27,10 +27,10 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
22898 struct desc_struct *desc;
22899 unsigned long base;
22900
22901- seg &= ~7UL;
22902+ seg >>= 3;
22903
22904 mutex_lock(&child->mm->context.lock);
22905- if (unlikely((seg >> 3) >= child->mm->context.size))
22906+ if (unlikely(seg >= child->mm->context.size))
22907 addr = -1L; /* bogus selector, access would fault */
22908 else {
22909 desc = child->mm->context.ldt + seg;
22910@@ -42,7 +42,8 @@ unsigned long convert_ip_to_linear(struct task_struct *child, struct pt_regs *re
22911 addr += base;
22912 }
22913 mutex_unlock(&child->mm->context.lock);
22914- }
22915+ } else if (seg == __KERNEL_CS || seg == __KERNEXEC_KERNEL_CS)
22916+ addr = ktla_ktva(addr);
22917
22918 return addr;
22919 }
22920@@ -53,6 +54,9 @@ static int is_setting_trap_flag(struct task_struct *child, struct pt_regs *regs)
22921 unsigned char opcode[15];
22922 unsigned long addr = convert_ip_to_linear(child, regs);
22923
22924+ if (addr == -1L)
22925+ return 0;
22926+
22927 copied = access_process_vm(child, addr, opcode, sizeof(opcode), 0);
22928 for (i = 0; i < copied; i++) {
22929 switch (opcode[i]) {
22930diff --git a/arch/x86/kernel/sys_i386_32.c b/arch/x86/kernel/sys_i386_32.c
22931new file mode 100644
22932index 0000000..5877189
22933--- /dev/null
22934+++ b/arch/x86/kernel/sys_i386_32.c
22935@@ -0,0 +1,189 @@
22936+/*
22937+ * This file contains various random system calls that
22938+ * have a non-standard calling sequence on the Linux/i386
22939+ * platform.
22940+ */
22941+
22942+#include <linux/errno.h>
22943+#include <linux/sched.h>
22944+#include <linux/mm.h>
22945+#include <linux/fs.h>
22946+#include <linux/smp.h>
22947+#include <linux/sem.h>
22948+#include <linux/msg.h>
22949+#include <linux/shm.h>
22950+#include <linux/stat.h>
22951+#include <linux/syscalls.h>
22952+#include <linux/mman.h>
22953+#include <linux/file.h>
22954+#include <linux/utsname.h>
22955+#include <linux/ipc.h>
22956+#include <linux/elf.h>
22957+
22958+#include <linux/uaccess.h>
22959+#include <linux/unistd.h>
22960+
22961+#include <asm/syscalls.h>
22962+
22963+int i386_mmap_check(unsigned long addr, unsigned long len, unsigned long flags)
22964+{
22965+ unsigned long pax_task_size = TASK_SIZE;
22966+
22967+#ifdef CONFIG_PAX_SEGMEXEC
22968+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
22969+ pax_task_size = SEGMEXEC_TASK_SIZE;
22970+#endif
22971+
22972+ if (flags & MAP_FIXED)
22973+ if (len > pax_task_size || addr > pax_task_size - len)
22974+ return -EINVAL;
22975+
22976+ return 0;
22977+}
22978+
22979+/*
22980+ * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
22981+ */
22982+static unsigned long get_align_mask(void)
22983+{
22984+ if (va_align.flags < 0 || !(va_align.flags & ALIGN_VA_32))
22985+ return 0;
22986+
22987+ if (!(current->flags & PF_RANDOMIZE))
22988+ return 0;
22989+
22990+ return va_align.mask;
22991+}
22992+
22993+unsigned long
22994+arch_get_unmapped_area(struct file *filp, unsigned long addr,
22995+ unsigned long len, unsigned long pgoff, unsigned long flags)
22996+{
22997+ struct mm_struct *mm = current->mm;
22998+ struct vm_area_struct *vma;
22999+ unsigned long pax_task_size = TASK_SIZE;
23000+ struct vm_unmapped_area_info info;
23001+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
23002+
23003+#ifdef CONFIG_PAX_SEGMEXEC
23004+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
23005+ pax_task_size = SEGMEXEC_TASK_SIZE;
23006+#endif
23007+
23008+ pax_task_size -= PAGE_SIZE;
23009+
23010+ if (len > pax_task_size)
23011+ return -ENOMEM;
23012+
23013+ if (flags & MAP_FIXED)
23014+ return addr;
23015+
23016+#ifdef CONFIG_PAX_RANDMMAP
23017+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
23018+#endif
23019+
23020+ if (addr) {
23021+ addr = PAGE_ALIGN(addr);
23022+ if (pax_task_size - len >= addr) {
23023+ vma = find_vma(mm, addr);
23024+ if (check_heap_stack_gap(vma, addr, len, offset))
23025+ return addr;
23026+ }
23027+ }
23028+
23029+ info.flags = 0;
23030+ info.length = len;
23031+ info.align_mask = filp ? get_align_mask() : 0;
23032+ info.align_offset = pgoff << PAGE_SHIFT;
23033+ info.threadstack_offset = offset;
23034+
23035+#ifdef CONFIG_PAX_PAGEEXEC
23036+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE)) {
23037+ info.low_limit = 0x00110000UL;
23038+ info.high_limit = mm->start_code;
23039+
23040+#ifdef CONFIG_PAX_RANDMMAP
23041+ if (mm->pax_flags & MF_PAX_RANDMMAP)
23042+ info.low_limit += mm->delta_mmap & 0x03FFF000UL;
23043+#endif
23044+
23045+ if (info.low_limit < info.high_limit) {
23046+ addr = vm_unmapped_area(&info);
23047+ if (!IS_ERR_VALUE(addr))
23048+ return addr;
23049+ }
23050+ } else
23051+#endif
23052+
23053+ info.low_limit = mm->mmap_base;
23054+ info.high_limit = pax_task_size;
23055+
23056+ return vm_unmapped_area(&info);
23057+}
23058+
23059+unsigned long
23060+arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
23061+ const unsigned long len, const unsigned long pgoff,
23062+ const unsigned long flags)
23063+{
23064+ struct vm_area_struct *vma;
23065+ struct mm_struct *mm = current->mm;
23066+ unsigned long addr = addr0, pax_task_size = TASK_SIZE;
23067+ struct vm_unmapped_area_info info;
23068+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
23069+
23070+#ifdef CONFIG_PAX_SEGMEXEC
23071+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
23072+ pax_task_size = SEGMEXEC_TASK_SIZE;
23073+#endif
23074+
23075+ pax_task_size -= PAGE_SIZE;
23076+
23077+ /* requested length too big for entire address space */
23078+ if (len > pax_task_size)
23079+ return -ENOMEM;
23080+
23081+ if (flags & MAP_FIXED)
23082+ return addr;
23083+
23084+#ifdef CONFIG_PAX_PAGEEXEC
23085+ if (!(__supported_pte_mask & _PAGE_NX) && (mm->pax_flags & MF_PAX_PAGEEXEC) && (flags & MAP_EXECUTABLE))
23086+ goto bottomup;
23087+#endif
23088+
23089+#ifdef CONFIG_PAX_RANDMMAP
23090+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
23091+#endif
23092+
23093+ /* requesting a specific address */
23094+ if (addr) {
23095+ addr = PAGE_ALIGN(addr);
23096+ if (pax_task_size - len >= addr) {
23097+ vma = find_vma(mm, addr);
23098+ if (check_heap_stack_gap(vma, addr, len, offset))
23099+ return addr;
23100+ }
23101+ }
23102+
23103+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
23104+ info.length = len;
23105+ info.low_limit = PAGE_SIZE;
23106+ info.high_limit = mm->mmap_base;
23107+ info.align_mask = filp ? get_align_mask() : 0;
23108+ info.align_offset = pgoff << PAGE_SHIFT;
23109+ info.threadstack_offset = offset;
23110+
23111+ addr = vm_unmapped_area(&info);
23112+ if (!(addr & ~PAGE_MASK))
23113+ return addr;
23114+ VM_BUG_ON(addr != -ENOMEM);
23115+
23116+bottomup:
23117+ /*
23118+ * A failed mmap() very likely causes application failure,
23119+ * so fall back to the bottom-up function here. This scenario
23120+ * can happen with large stack limits and large mmap()
23121+ * allocations.
23122+ */
23123+ return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
23124+}
23125diff --git a/arch/x86/kernel/sys_x86_64.c b/arch/x86/kernel/sys_x86_64.c
23126index dbded5a..ace2781 100644
23127--- a/arch/x86/kernel/sys_x86_64.c
23128+++ b/arch/x86/kernel/sys_x86_64.c
23129@@ -81,8 +81,8 @@ out:
23130 return error;
23131 }
23132
23133-static void find_start_end(unsigned long flags, unsigned long *begin,
23134- unsigned long *end)
23135+static void find_start_end(struct mm_struct *mm, unsigned long flags,
23136+ unsigned long *begin, unsigned long *end)
23137 {
23138 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT)) {
23139 unsigned long new_begin;
23140@@ -101,7 +101,7 @@ static void find_start_end(unsigned long flags, unsigned long *begin,
23141 *begin = new_begin;
23142 }
23143 } else {
23144- *begin = TASK_UNMAPPED_BASE;
23145+ *begin = mm->mmap_base;
23146 *end = TASK_SIZE;
23147 }
23148 }
23149@@ -114,20 +114,24 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
23150 struct vm_area_struct *vma;
23151 struct vm_unmapped_area_info info;
23152 unsigned long begin, end;
23153+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
23154
23155 if (flags & MAP_FIXED)
23156 return addr;
23157
23158- find_start_end(flags, &begin, &end);
23159+ find_start_end(mm, flags, &begin, &end);
23160
23161 if (len > end)
23162 return -ENOMEM;
23163
23164+#ifdef CONFIG_PAX_RANDMMAP
23165+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
23166+#endif
23167+
23168 if (addr) {
23169 addr = PAGE_ALIGN(addr);
23170 vma = find_vma(mm, addr);
23171- if (end - len >= addr &&
23172- (!vma || addr + len <= vma->vm_start))
23173+ if (end - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
23174 return addr;
23175 }
23176
23177@@ -137,6 +141,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
23178 info.high_limit = end;
23179 info.align_mask = filp ? get_align_mask() : 0;
23180 info.align_offset = pgoff << PAGE_SHIFT;
23181+ info.threadstack_offset = offset;
23182 return vm_unmapped_area(&info);
23183 }
23184
23185@@ -149,6 +154,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
23186 struct mm_struct *mm = current->mm;
23187 unsigned long addr = addr0;
23188 struct vm_unmapped_area_info info;
23189+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
23190
23191 /* requested length too big for entire address space */
23192 if (len > TASK_SIZE)
23193@@ -161,12 +167,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
23194 if (!test_thread_flag(TIF_ADDR32) && (flags & MAP_32BIT))
23195 goto bottomup;
23196
23197+#ifdef CONFIG_PAX_RANDMMAP
23198+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
23199+#endif
23200+
23201 /* requesting a specific address */
23202 if (addr) {
23203 addr = PAGE_ALIGN(addr);
23204 vma = find_vma(mm, addr);
23205- if (TASK_SIZE - len >= addr &&
23206- (!vma || addr + len <= vma->vm_start))
23207+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
23208 return addr;
23209 }
23210
23211@@ -176,6 +185,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
23212 info.high_limit = mm->mmap_base;
23213 info.align_mask = filp ? get_align_mask() : 0;
23214 info.align_offset = pgoff << PAGE_SHIFT;
23215+ info.threadstack_offset = offset;
23216 addr = vm_unmapped_area(&info);
23217 if (!(addr & ~PAGE_MASK))
23218 return addr;
23219diff --git a/arch/x86/kernel/tboot.c b/arch/x86/kernel/tboot.c
23220index f84fe00..f41d9f1 100644
23221--- a/arch/x86/kernel/tboot.c
23222+++ b/arch/x86/kernel/tboot.c
23223@@ -220,7 +220,7 @@ static int tboot_setup_sleep(void)
23224
23225 void tboot_shutdown(u32 shutdown_type)
23226 {
23227- void (*shutdown)(void);
23228+ void (* __noreturn shutdown)(void);
23229
23230 if (!tboot_enabled())
23231 return;
23232@@ -242,7 +242,7 @@ void tboot_shutdown(u32 shutdown_type)
23233
23234 switch_to_tboot_pt();
23235
23236- shutdown = (void(*)(void))(unsigned long)tboot->shutdown_entry;
23237+ shutdown = (void *)tboot->shutdown_entry;
23238 shutdown();
23239
23240 /* should not reach here */
23241@@ -300,7 +300,7 @@ static int tboot_sleep(u8 sleep_state, u32 pm1a_control, u32 pm1b_control)
23242 return 0;
23243 }
23244
23245-static atomic_t ap_wfs_count;
23246+static atomic_unchecked_t ap_wfs_count;
23247
23248 static int tboot_wait_for_aps(int num_aps)
23249 {
23250@@ -324,16 +324,16 @@ static int __cpuinit tboot_cpu_callback(struct notifier_block *nfb,
23251 {
23252 switch (action) {
23253 case CPU_DYING:
23254- atomic_inc(&ap_wfs_count);
23255+ atomic_inc_unchecked(&ap_wfs_count);
23256 if (num_online_cpus() == 1)
23257- if (tboot_wait_for_aps(atomic_read(&ap_wfs_count)))
23258+ if (tboot_wait_for_aps(atomic_read_unchecked(&ap_wfs_count)))
23259 return NOTIFY_BAD;
23260 break;
23261 }
23262 return NOTIFY_OK;
23263 }
23264
23265-static struct notifier_block tboot_cpu_notifier __cpuinitdata =
23266+static struct notifier_block tboot_cpu_notifier =
23267 {
23268 .notifier_call = tboot_cpu_callback,
23269 };
23270@@ -345,7 +345,7 @@ static __init int tboot_late_init(void)
23271
23272 tboot_create_trampoline();
23273
23274- atomic_set(&ap_wfs_count, 0);
23275+ atomic_set_unchecked(&ap_wfs_count, 0);
23276 register_hotcpu_notifier(&tboot_cpu_notifier);
23277
23278 acpi_os_set_prepare_sleep(&tboot_sleep);
23279diff --git a/arch/x86/kernel/time.c b/arch/x86/kernel/time.c
23280index 24d3c91..d06b473 100644
23281--- a/arch/x86/kernel/time.c
23282+++ b/arch/x86/kernel/time.c
23283@@ -30,9 +30,9 @@ unsigned long profile_pc(struct pt_regs *regs)
23284 {
23285 unsigned long pc = instruction_pointer(regs);
23286
23287- if (!user_mode_vm(regs) && in_lock_functions(pc)) {
23288+ if (!user_mode(regs) && in_lock_functions(pc)) {
23289 #ifdef CONFIG_FRAME_POINTER
23290- return *(unsigned long *)(regs->bp + sizeof(long));
23291+ return ktla_ktva(*(unsigned long *)(regs->bp + sizeof(long)));
23292 #else
23293 unsigned long *sp =
23294 (unsigned long *)kernel_stack_pointer(regs);
23295@@ -41,11 +41,17 @@ unsigned long profile_pc(struct pt_regs *regs)
23296 * or above a saved flags. Eflags has bits 22-31 zero,
23297 * kernel addresses don't.
23298 */
23299+
23300+#ifdef CONFIG_PAX_KERNEXEC
23301+ return ktla_ktva(sp[0]);
23302+#else
23303 if (sp[0] >> 22)
23304 return sp[0];
23305 if (sp[1] >> 22)
23306 return sp[1];
23307 #endif
23308+
23309+#endif
23310 }
23311 return pc;
23312 }
23313diff --git a/arch/x86/kernel/tls.c b/arch/x86/kernel/tls.c
23314index f7fec09..9991981 100644
23315--- a/arch/x86/kernel/tls.c
23316+++ b/arch/x86/kernel/tls.c
23317@@ -84,6 +84,11 @@ int do_set_thread_area(struct task_struct *p, int idx,
23318 if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
23319 return -EINVAL;
23320
23321+#ifdef CONFIG_PAX_SEGMEXEC
23322+ if ((p->mm->pax_flags & MF_PAX_SEGMEXEC) && (info.contents & MODIFY_LDT_CONTENTS_CODE))
23323+ return -EINVAL;
23324+#endif
23325+
23326 set_tls_desc(p, idx, &info, 1);
23327
23328 return 0;
23329@@ -200,7 +205,7 @@ int regset_tls_set(struct task_struct *target, const struct user_regset *regset,
23330
23331 if (kbuf)
23332 info = kbuf;
23333- else if (__copy_from_user(infobuf, ubuf, count))
23334+ else if (count > sizeof infobuf || __copy_from_user(infobuf, ubuf, count))
23335 return -EFAULT;
23336 else
23337 info = infobuf;
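
The regset_tls_set() change adds a missing bounds check: count is supplied by the regset caller, infobuf is a fixed on-stack array, and __copy_from_user() with an unchecked count would write past it. The same shape in isolation (user-space stand-in, names hypothetical):

#include <errno.h>
#include <stdio.h>
#include <string.h>

static int sketch_copy_in(void *dst, size_t dst_size,
			  const void *src, size_t count)
{
	if (count > dst_size)		/* the added bound */
		return -EFAULT;
	memcpy(dst, src, count);	/* stand-in for __copy_from_user() */
	return 0;
}

int main(void)
{
	char infobuf[32], payload[64] = { 0 };

	printf("ok:  %d\n", sketch_copy_in(infobuf, sizeof(infobuf), payload, 16));
	printf("bad: %d\n", sketch_copy_in(infobuf, sizeof(infobuf), payload, 64));
	return 0;
}
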
23338diff --git a/arch/x86/kernel/traps.c b/arch/x86/kernel/traps.c
23339index 772e2a8..bad5bf6 100644
23340--- a/arch/x86/kernel/traps.c
23341+++ b/arch/x86/kernel/traps.c
23342@@ -68,12 +68,6 @@
23343 #include <asm/setup.h>
23344
23345 asmlinkage int system_call(void);
23346-
23347-/*
23348- * The IDT has to be page-aligned to simplify the Pentium
23349- * F0 0F bug workaround.
23350- */
23351-gate_desc idt_table[NR_VECTORS] __page_aligned_data = { { { { 0, 0 } } }, };
23352 #endif
23353
23354 DECLARE_BITMAP(used_vectors, NR_VECTORS);
23355@@ -106,11 +100,11 @@ static inline void preempt_conditional_cli(struct pt_regs *regs)
23356 }
23357
23358 static int __kprobes
23359-do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
23360+do_trap_no_signal(struct task_struct *tsk, int trapnr, const char *str,
23361 struct pt_regs *regs, long error_code)
23362 {
23363 #ifdef CONFIG_X86_32
23364- if (regs->flags & X86_VM_MASK) {
23365+ if (v8086_mode(regs)) {
23366 /*
23367 * Traps 0, 1, 3, 4, and 5 should be forwarded to vm86.
23368 * On nmi (interrupt 2), do_trap should not be called.
23369@@ -123,12 +117,24 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
23370 return -1;
23371 }
23372 #endif
23373- if (!user_mode(regs)) {
23374+ if (!user_mode_novm(regs)) {
23375 if (!fixup_exception(regs)) {
23376 tsk->thread.error_code = error_code;
23377 tsk->thread.trap_nr = trapnr;
23378+
23379+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23380+ if (trapnr == 12 && ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS))
23381+ str = "PAX: suspicious stack segment fault";
23382+#endif
23383+
23384 die(str, regs, error_code);
23385 }
23386+
23387+#ifdef CONFIG_PAX_REFCOUNT
23388+ if (trapnr == 4)
23389+ pax_report_refcount_overflow(regs);
23390+#endif
23391+
23392 return 0;
23393 }
23394
23395@@ -136,7 +142,7 @@ do_trap_no_signal(struct task_struct *tsk, int trapnr, char *str,
23396 }
23397
23398 static void __kprobes
23399-do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
23400+do_trap(int trapnr, int signr, const char *str, struct pt_regs *regs,
23401 long error_code, siginfo_t *info)
23402 {
23403 struct task_struct *tsk = current;
23404@@ -160,7 +166,7 @@ do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
23405 if (show_unhandled_signals && unhandled_signal(tsk, signr) &&
23406 printk_ratelimit()) {
23407 pr_info("%s[%d] trap %s ip:%lx sp:%lx error:%lx",
23408- tsk->comm, tsk->pid, str,
23409+ tsk->comm, task_pid_nr(tsk), str,
23410 regs->ip, regs->sp, error_code);
23411 print_vma_addr(" in ", regs->ip);
23412 pr_cont("\n");
23413@@ -273,7 +279,7 @@ do_general_protection(struct pt_regs *regs, long error_code)
23414 conditional_sti(regs);
23415
23416 #ifdef CONFIG_X86_32
23417- if (regs->flags & X86_VM_MASK) {
23418+ if (v8086_mode(regs)) {
23419 local_irq_enable();
23420 handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
23421 goto exit;
23422@@ -281,18 +287,42 @@ do_general_protection(struct pt_regs *regs, long error_code)
23423 #endif
23424
23425 tsk = current;
23426- if (!user_mode(regs)) {
23427+ if (!user_mode_novm(regs)) {
23428 if (fixup_exception(regs))
23429 goto exit;
23430
23431 tsk->thread.error_code = error_code;
23432 tsk->thread.trap_nr = X86_TRAP_GP;
23433 if (notify_die(DIE_GPF, "general protection fault", regs, error_code,
23434- X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP)
23435+ X86_TRAP_GP, SIGSEGV) != NOTIFY_STOP) {
23436+
23437+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23438+ if ((regs->cs & 0xFFFF) == __KERNEL_CS || (regs->cs & 0xFFFF) == __KERNEXEC_KERNEL_CS)
23439+ die("PAX: suspicious general protection fault", regs, error_code);
23440+ else
23441+#endif
23442+
23443 die("general protection fault", regs, error_code);
23444+ }
23445 goto exit;
23446 }
23447
23448+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
23449+ if (!(__supported_pte_mask & _PAGE_NX) && tsk->mm && (tsk->mm->pax_flags & MF_PAX_PAGEEXEC)) {
23450+ struct mm_struct *mm = tsk->mm;
23451+ unsigned long limit;
23452+
23453+ down_write(&mm->mmap_sem);
23454+ limit = mm->context.user_cs_limit;
23455+ if (limit < TASK_SIZE) {
23456+ track_exec_limit(mm, limit, TASK_SIZE, VM_EXEC);
23457+ up_write(&mm->mmap_sem);
23458+ return;
23459+ }
23460+ up_write(&mm->mmap_sem);
23461+ }
23462+#endif
23463+
23464 tsk->thread.error_code = error_code;
23465 tsk->thread.trap_nr = X86_TRAP_GP;
23466
23467@@ -450,7 +480,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
23468 /* It's safe to allow irq's after DR6 has been saved */
23469 preempt_conditional_sti(regs);
23470
23471- if (regs->flags & X86_VM_MASK) {
23472+ if (v8086_mode(regs)) {
23473 handle_vm86_trap((struct kernel_vm86_regs *) regs, error_code,
23474 X86_TRAP_DB);
23475 preempt_conditional_cli(regs);
23476@@ -465,7 +495,7 @@ dotraplinkage void __kprobes do_debug(struct pt_regs *regs, long error_code)
23477 * We already checked v86 mode above, so we can check for kernel mode
23478 * by just checking the CPL of CS.
23479 */
23480- if ((dr6 & DR_STEP) && !user_mode(regs)) {
23481+ if ((dr6 & DR_STEP) && !user_mode_novm(regs)) {
23482 tsk->thread.debugreg6 &= ~DR_STEP;
23483 set_tsk_thread_flag(tsk, TIF_SINGLESTEP);
23484 regs->flags &= ~X86_EFLAGS_TF;
23485@@ -497,7 +527,7 @@ void math_error(struct pt_regs *regs, int error_code, int trapnr)
23486 return;
23487 conditional_sti(regs);
23488
23489- if (!user_mode_vm(regs))
23490+ if (!user_mode(regs))
23491 {
23492 if (!fixup_exception(regs)) {
23493 task->thread.error_code = error_code;
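
Three themes run through the traps.c changes: open-coded X86_VM_MASK tests become v8086_mode(); kernel-mode checks switch to PaX's renamed helpers (user_mode()/user_mode_novm(), corresponding to upstream's user_mode_vm()/user_mode()); and trap 4 (#OF) gains PAX_REFCOUNT reporting, since the refcount instrumentation raises that trap on overflow. A C-level sketch of the detect-and-report policy only; the real mechanism is an `into` instruction in inline asm and the #OF handler above, not a compiler builtin:

    #include <stdint.h>
    #include <stdio.h>

    /* Sketch of the PAX_REFCOUNT policy the trap-4 hook reports on: an
     * increment that would overflow is detected and reported rather than
     * silently wrapping.
     */
    static int refcount_inc_checked(int32_t *count)
    {
            int32_t next;

            if (__builtin_add_overflow(*count, 1, &next)) {
                    fprintf(stderr, "refcount overflow detected\n");
                    return -1;      /* count left saturated at INT32_MAX */
            }
            *count = next;
            return 0;
    }
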
23494diff --git a/arch/x86/kernel/uprobes.c b/arch/x86/kernel/uprobes.c
23495index 2ed8459..7cf329f 100644
23496--- a/arch/x86/kernel/uprobes.c
23497+++ b/arch/x86/kernel/uprobes.c
23498@@ -629,7 +629,7 @@ int arch_uprobe_exception_notify(struct notifier_block *self, unsigned long val,
23499 int ret = NOTIFY_DONE;
23500
23501 /* We are only interested in userspace traps */
23502- if (regs && !user_mode_vm(regs))
23503+ if (regs && !user_mode(regs))
23504 return NOTIFY_DONE;
23505
23506 switch (val) {
23507@@ -719,7 +719,7 @@ arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr, struct pt_regs
23508
23509 if (ncopied != rasize) {
23510 pr_err("uprobe: return address clobbered: pid=%d, %%sp=%#lx, "
23511- "%%ip=%#lx\n", current->pid, regs->sp, regs->ip);
23512+ "%%ip=%#lx\n", task_pid_nr(current), regs->sp, regs->ip);
23513
23514 force_sig_info(SIGSEGV, SEND_SIG_FORCED, current);
23515 }
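
The uprobes hunks continue the user_mode_vm() to user_mode() rename and swap a raw current->pid read for task_pid_nr() in the error message, so logged PIDs go through the accessor rather than the namespace-relative field. A self-contained sketch of why the accessor matters once PIDs can be namespace-relative (the struct here is a hypothetical stand-in for task_struct and its pid machinery):

    #include <stdio.h>

    /* Sketch: report PIDs through an accessor, not the raw field. With
     * PID namespaces a task has several numbers; logging code should pick
     * a well-defined one.
     */
    struct task {
            int pid;            /* namespace-relative view */
            int global_pid;     /* what task_pid_nr() reports */
    };

    static int task_pid_nr_sketch(const struct task *t)
    {
            return t->global_pid;
    }

    int main(void)
    {
            struct task t = { .pid = 1, .global_pid = 4242 };
            printf("uprobe: pid=%d\n", task_pid_nr_sketch(&t));
            return 0;
    }
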
23516diff --git a/arch/x86/kernel/verify_cpu.S b/arch/x86/kernel/verify_cpu.S
23517index b9242ba..50c5edd 100644
23518--- a/arch/x86/kernel/verify_cpu.S
23519+++ b/arch/x86/kernel/verify_cpu.S
23520@@ -20,6 +20,7 @@
23521 * arch/x86/boot/compressed/head_64.S: Boot cpu verification
23522 * arch/x86/kernel/trampoline_64.S: secondary processor verification
23523 * arch/x86/kernel/head_32.S: processor startup
23524+ * arch/x86/kernel/acpi/realmode/wakeup.S: 32bit processor resume
23525 *
23526 * verify_cpu, returns the status of longmode and SSE in register %eax.
23527 * 0: Success 1: Failure
23528diff --git a/arch/x86/kernel/vm86_32.c b/arch/x86/kernel/vm86_32.c
23529index e8edcf5..27f9344 100644
23530--- a/arch/x86/kernel/vm86_32.c
23531+++ b/arch/x86/kernel/vm86_32.c
23532@@ -44,6 +44,7 @@
23533 #include <linux/ptrace.h>
23534 #include <linux/audit.h>
23535 #include <linux/stddef.h>
23536+#include <linux/grsecurity.h>
23537
23538 #include <asm/uaccess.h>
23539 #include <asm/io.h>
23540@@ -150,7 +151,7 @@ struct pt_regs *save_v86_state(struct kernel_vm86_regs *regs)
23541 do_exit(SIGSEGV);
23542 }
23543
23544- tss = &per_cpu(init_tss, get_cpu());
23545+ tss = init_tss + get_cpu();
23546 current->thread.sp0 = current->thread.saved_sp0;
23547 current->thread.sysenter_cs = __KERNEL_CS;
23548 load_sp0(tss, &current->thread);
23549@@ -214,6 +215,14 @@ SYSCALL_DEFINE1(vm86old, struct vm86_struct __user *, v86)
23550
23551 if (tsk->thread.saved_sp0)
23552 return -EPERM;
23553+
23554+#ifdef CONFIG_GRKERNSEC_VM86
23555+ if (!capable(CAP_SYS_RAWIO)) {
23556+ gr_handle_vm86();
23557+ return -EPERM;
23558+ }
23559+#endif
23560+
23561 tmp = copy_vm86_regs_from_user(&info.regs, &v86->regs,
23562 offsetof(struct kernel_vm86_struct, vm86plus) -
23563 sizeof(info.regs));
23564@@ -238,6 +247,13 @@ SYSCALL_DEFINE2(vm86, unsigned long, cmd, unsigned long, arg)
23565 int tmp;
23566 struct vm86plus_struct __user *v86;
23567
23568+#ifdef CONFIG_GRKERNSEC_VM86
23569+ if (!capable(CAP_SYS_RAWIO)) {
23570+ gr_handle_vm86();
23571+ return -EPERM;
23572+ }
23573+#endif
23574+
23575 tsk = current;
23576 switch (cmd) {
23577 case VM86_REQUEST_IRQ:
23578@@ -318,7 +334,7 @@ static void do_sys_vm86(struct kernel_vm86_struct *info, struct task_struct *tsk
23579 tsk->thread.saved_fs = info->regs32->fs;
23580 tsk->thread.saved_gs = get_user_gs(info->regs32);
23581
23582- tss = &per_cpu(init_tss, get_cpu());
23583+ tss = init_tss + get_cpu();
23584 tsk->thread.sp0 = (unsigned long) &info->VM86_TSS_ESP0;
23585 if (cpu_has_sep)
23586 tsk->thread.sysenter_cs = 0;
23587@@ -525,7 +541,7 @@ static void do_int(struct kernel_vm86_regs *regs, int i,
23588 goto cannot_handle;
23589 if (i == 0x21 && is_revectored(AH(regs), &KVM86->int21_revectored))
23590 goto cannot_handle;
23591- intr_ptr = (unsigned long __user *) (i << 2);
23592+ intr_ptr = (__force unsigned long __user *) (i << 2);
23593 if (get_user(segoffs, intr_ptr))
23594 goto cannot_handle;
23595 if ((segoffs >> 16) == BIOSSEG)
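
Beyond the init_tss addressing change and the __force annotation, both vm86 syscalls above gain an identical GRKERNSEC_VM86 gate: callers without CAP_SYS_RAWIO are logged via gr_handle_vm86() and refused. A userspace-flavoured sketch of the gate shape; the capability probe is a hypothetical stand-in for capable(CAP_SYS_RAWIO):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool caller_has_rawio(void) { return false; }    /* stand-in */

    /* Sketch: deny-and-audit gate on a legacy entry point. */
    static int vm86_gate(void)
    {
            if (!caller_has_rawio()) {
                    fprintf(stderr, "vm86 denied for unprivileged caller\n");
                    return -EPERM;
            }
            return 0;   /* caller may proceed with the real syscall work */
    }
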
23596diff --git a/arch/x86/kernel/vmlinux.lds.S b/arch/x86/kernel/vmlinux.lds.S
23597index 10c4f30..57377c2 100644
23598--- a/arch/x86/kernel/vmlinux.lds.S
23599+++ b/arch/x86/kernel/vmlinux.lds.S
23600@@ -26,6 +26,13 @@
23601 #include <asm/page_types.h>
23602 #include <asm/cache.h>
23603 #include <asm/boot.h>
23604+#include <asm/segment.h>
23605+
23606+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
23607+#define __KERNEL_TEXT_OFFSET (LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR)
23608+#else
23609+#define __KERNEL_TEXT_OFFSET 0
23610+#endif
23611
23612 #undef i386 /* in case the preprocessor is a 32bit one */
23613
23614@@ -69,30 +76,43 @@ jiffies_64 = jiffies;
23615
23616 PHDRS {
23617 text PT_LOAD FLAGS(5); /* R_E */
23618+#ifdef CONFIG_X86_32
23619+ module PT_LOAD FLAGS(5); /* R_E */
23620+#endif
23621+#ifdef CONFIG_XEN
23622+ rodata PT_LOAD FLAGS(5); /* R_E */
23623+#else
23624+ rodata PT_LOAD FLAGS(4); /* R__ */
23625+#endif
23626 data PT_LOAD FLAGS(6); /* RW_ */
23627-#ifdef CONFIG_X86_64
23628+ init.begin PT_LOAD FLAGS(6); /* RW_ */
23629 #ifdef CONFIG_SMP
23630 percpu PT_LOAD FLAGS(6); /* RW_ */
23631 #endif
23632+ text.init PT_LOAD FLAGS(5); /* R_E */
23633+ text.exit PT_LOAD FLAGS(5); /* R_E */
23634 init PT_LOAD FLAGS(7); /* RWE */
23635-#endif
23636 note PT_NOTE FLAGS(0); /* ___ */
23637 }
23638
23639 SECTIONS
23640 {
23641 #ifdef CONFIG_X86_32
23642- . = LOAD_OFFSET + LOAD_PHYSICAL_ADDR;
23643- phys_startup_32 = startup_32 - LOAD_OFFSET;
23644+ . = LOAD_OFFSET + ____LOAD_PHYSICAL_ADDR;
23645 #else
23646- . = __START_KERNEL;
23647- phys_startup_64 = startup_64 - LOAD_OFFSET;
23648+ . = __START_KERNEL;
23649 #endif
23650
23651 /* Text and read-only data */
23652- .text : AT(ADDR(.text) - LOAD_OFFSET) {
23653- _text = .;
23654+ .text (. - __KERNEL_TEXT_OFFSET): AT(ADDR(.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
23655 /* bootstrapping code */
23656+#ifdef CONFIG_X86_32
23657+ phys_startup_32 = startup_32 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
23658+#else
23659+ phys_startup_64 = startup_64 - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
23660+#endif
23661+ __LOAD_PHYSICAL_ADDR = . - LOAD_OFFSET + __KERNEL_TEXT_OFFSET;
23662+ _text = .;
23663 HEAD_TEXT
23664 . = ALIGN(8);
23665 _stext = .;
23666@@ -104,13 +124,48 @@ SECTIONS
23667 IRQENTRY_TEXT
23668 *(.fixup)
23669 *(.gnu.warning)
23670- /* End of text section */
23671- _etext = .;
23672 } :text = 0x9090
23673
23674- NOTES :text :note
23675+ . += __KERNEL_TEXT_OFFSET;
23676
23677- EXCEPTION_TABLE(16) :text = 0x9090
23678+#ifdef CONFIG_X86_32
23679+ . = ALIGN(PAGE_SIZE);
23680+ .module.text : AT(ADDR(.module.text) - LOAD_OFFSET) {
23681+
23682+#ifdef CONFIG_PAX_KERNEXEC
23683+ MODULES_EXEC_VADDR = .;
23684+ BYTE(0)
23685+ . += (CONFIG_PAX_KERNEXEC_MODULE_TEXT * 1024 * 1024);
23686+ . = ALIGN(HPAGE_SIZE) - 1;
23687+ MODULES_EXEC_END = .;
23688+#endif
23689+
23690+ } :module
23691+#endif
23692+
23693+ .text.end : AT(ADDR(.text.end) - LOAD_OFFSET) {
23694+ /* End of text section */
23695+ BYTE(0)
23696+ _etext = . - __KERNEL_TEXT_OFFSET;
23697+ }
23698+
23699+#ifdef CONFIG_X86_32
23700+ . = ALIGN(PAGE_SIZE);
23701+ .rodata.page_aligned : AT(ADDR(.rodata.page_aligned) - LOAD_OFFSET) {
23702+ *(.idt)
23703+ . = ALIGN(PAGE_SIZE);
23704+ *(.empty_zero_page)
23705+ *(.initial_pg_fixmap)
23706+ *(.initial_pg_pmd)
23707+ *(.initial_page_table)
23708+ *(.swapper_pg_dir)
23709+ } :rodata
23710+#endif
23711+
23712+ . = ALIGN(PAGE_SIZE);
23713+ NOTES :rodata :note
23714+
23715+ EXCEPTION_TABLE(16) :rodata
23716
23717 #if defined(CONFIG_DEBUG_RODATA)
23718 /* .text should occupy whole number of pages */
23719@@ -122,16 +177,20 @@ SECTIONS
23720
23721 /* Data */
23722 .data : AT(ADDR(.data) - LOAD_OFFSET) {
23723+
23724+#ifdef CONFIG_PAX_KERNEXEC
23725+ . = ALIGN(HPAGE_SIZE);
23726+#else
23727+ . = ALIGN(PAGE_SIZE);
23728+#endif
23729+
23730 /* Start of data section */
23731 _sdata = .;
23732
23733 /* init_task */
23734 INIT_TASK_DATA(THREAD_SIZE)
23735
23736-#ifdef CONFIG_X86_32
23737- /* 32 bit has nosave before _edata */
23738 NOSAVE_DATA
23739-#endif
23740
23741 PAGE_ALIGNED_DATA(PAGE_SIZE)
23742
23743@@ -172,12 +231,19 @@ SECTIONS
23744 #endif /* CONFIG_X86_64 */
23745
23746 /* Init code and data - will be freed after init */
23747- . = ALIGN(PAGE_SIZE);
23748 .init.begin : AT(ADDR(.init.begin) - LOAD_OFFSET) {
23749+ BYTE(0)
23750+
23751+#ifdef CONFIG_PAX_KERNEXEC
23752+ . = ALIGN(HPAGE_SIZE);
23753+#else
23754+ . = ALIGN(PAGE_SIZE);
23755+#endif
23756+
23757 __init_begin = .; /* paired with __init_end */
23758- }
23759+ } :init.begin
23760
23761-#if defined(CONFIG_X86_64) && defined(CONFIG_SMP)
23762+#ifdef CONFIG_SMP
23763 /*
23764 * percpu offsets are zero-based on SMP. PERCPU_VADDR() changes the
23765 * output PHDR, so the next output section - .init.text - should
23766@@ -186,12 +252,27 @@ SECTIONS
23767 PERCPU_VADDR(INTERNODE_CACHE_BYTES, 0, :percpu)
23768 #endif
23769
23770- INIT_TEXT_SECTION(PAGE_SIZE)
23771-#ifdef CONFIG_X86_64
23772- :init
23773-#endif
23774+ . = ALIGN(PAGE_SIZE);
23775+ init_begin = .;
23776+ .init.text (. - __KERNEL_TEXT_OFFSET): AT(init_begin - LOAD_OFFSET) {
23777+ VMLINUX_SYMBOL(_sinittext) = .;
23778+ INIT_TEXT
23779+ VMLINUX_SYMBOL(_einittext) = .;
23780+ . = ALIGN(PAGE_SIZE);
23781+ } :text.init
23782
23783- INIT_DATA_SECTION(16)
23784+ /*
23785+ * .exit.text is discard at runtime, not link time, to deal with
23786+ * references from .altinstructions and .eh_frame
23787+ */
23788+ .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET + __KERNEL_TEXT_OFFSET) {
23789+ EXIT_TEXT
23790+ . = ALIGN(16);
23791+ } :text.exit
23792+ . = init_begin + SIZEOF(.init.text) + SIZEOF(.exit.text);
23793+
23794+ . = ALIGN(PAGE_SIZE);
23795+ INIT_DATA_SECTION(16) :init
23796
23797 .x86_cpu_dev.init : AT(ADDR(.x86_cpu_dev.init) - LOAD_OFFSET) {
23798 __x86_cpu_dev_start = .;
23799@@ -253,19 +334,12 @@ SECTIONS
23800 }
23801
23802 . = ALIGN(8);
23803- /*
23804- * .exit.text is discard at runtime, not link time, to deal with
23805- * references from .altinstructions and .eh_frame
23806- */
23807- .exit.text : AT(ADDR(.exit.text) - LOAD_OFFSET) {
23808- EXIT_TEXT
23809- }
23810
23811 .exit.data : AT(ADDR(.exit.data) - LOAD_OFFSET) {
23812 EXIT_DATA
23813 }
23814
23815-#if !defined(CONFIG_X86_64) || !defined(CONFIG_SMP)
23816+#ifndef CONFIG_SMP
23817 PERCPU_SECTION(INTERNODE_CACHE_BYTES)
23818 #endif
23819
23820@@ -284,16 +358,10 @@ SECTIONS
23821 .smp_locks : AT(ADDR(.smp_locks) - LOAD_OFFSET) {
23822 __smp_locks = .;
23823 *(.smp_locks)
23824- . = ALIGN(PAGE_SIZE);
23825 __smp_locks_end = .;
23826+ . = ALIGN(PAGE_SIZE);
23827 }
23828
23829-#ifdef CONFIG_X86_64
23830- .data_nosave : AT(ADDR(.data_nosave) - LOAD_OFFSET) {
23831- NOSAVE_DATA
23832- }
23833-#endif
23834-
23835 /* BSS */
23836 . = ALIGN(PAGE_SIZE);
23837 .bss : AT(ADDR(.bss) - LOAD_OFFSET) {
23838@@ -309,6 +377,7 @@ SECTIONS
23839 __brk_base = .;
23840 . += 64 * 1024; /* 64k alignment slop space */
23841 *(.brk_reservation) /* areas brk users have reserved */
23842+ . = ALIGN(HPAGE_SIZE);
23843 __brk_limit = .;
23844 }
23845
23846@@ -335,13 +404,12 @@ SECTIONS
23847 * for the boot processor.
23848 */
23849 #define INIT_PER_CPU(x) init_per_cpu__##x = x + __per_cpu_load
23850-INIT_PER_CPU(gdt_page);
23851 INIT_PER_CPU(irq_stack_union);
23852
23853 /*
23854 * Build-time check on the image size:
23855 */
23856-. = ASSERT((_end - _text <= KERNEL_IMAGE_SIZE),
23857+. = ASSERT((_end - _text - __KERNEL_TEXT_OFFSET <= KERNEL_IMAGE_SIZE),
23858 "kernel image bigger than KERNEL_IMAGE_SIZE");
23859
23860 #ifdef CONFIG_SMP
23861diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
23862index 9a907a6..f83f921 100644
23863--- a/arch/x86/kernel/vsyscall_64.c
23864+++ b/arch/x86/kernel/vsyscall_64.c
23865@@ -56,15 +56,13 @@
23866 DEFINE_VVAR(int, vgetcpu_mode);
23867 DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
23868
23869-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
23870+static enum { EMULATE, NONE } vsyscall_mode = EMULATE;
23871
23872 static int __init vsyscall_setup(char *str)
23873 {
23874 if (str) {
23875 if (!strcmp("emulate", str))
23876 vsyscall_mode = EMULATE;
23877- else if (!strcmp("native", str))
23878- vsyscall_mode = NATIVE;
23879 else if (!strcmp("none", str))
23880 vsyscall_mode = NONE;
23881 else
23882@@ -323,8 +321,7 @@ do_ret:
23883 return true;
23884
23885 sigsegv:
23886- force_sig(SIGSEGV, current);
23887- return true;
23888+ do_group_exit(SIGKILL);
23889 }
23890
23891 /*
23892@@ -377,10 +374,7 @@ void __init map_vsyscall(void)
23893 extern char __vvar_page;
23894 unsigned long physaddr_vvar_page = __pa_symbol(&__vvar_page);
23895
23896- __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall,
23897- vsyscall_mode == NATIVE
23898- ? PAGE_KERNEL_VSYSCALL
23899- : PAGE_KERNEL_VVAR);
23900+ __set_fixmap(VSYSCALL_FIRST_PAGE, physaddr_vsyscall, PAGE_KERNEL_VVAR);
23901 BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_FIRST_PAGE) !=
23902 (unsigned long)VSYSCALL_START);
23903
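
The vsyscall changes remove the NATIVE mode entirely (EMULATE and NONE remain, and the page is always mapped PAGE_KERNEL_VVAR, i.e. non-executable) and escalate a failed emulated vsyscall from SIGSEGV to do_group_exit(SIGKILL). The reduced parser, restated as a self-contained sketch mirroring vsyscall_setup() above:

    #include <string.h>

    enum vsyscall_mode { EMULATE, NONE };

    /* Sketch: parse the vsyscall= boot parameter after "native" is gone. */
    static int parse_vsyscall(const char *str, enum vsyscall_mode *mode)
    {
            if (!str)
                    return -1;
            if (!strcmp(str, "emulate"))
                    *mode = EMULATE;
            else if (!strcmp(str, "none"))
                    *mode = NONE;
            else
                    return -1;
            return 0;
    }
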
23904diff --git a/arch/x86/kernel/x8664_ksyms_64.c b/arch/x86/kernel/x8664_ksyms_64.c
23905index b014d94..6d6ca7b 100644
23906--- a/arch/x86/kernel/x8664_ksyms_64.c
23907+++ b/arch/x86/kernel/x8664_ksyms_64.c
23908@@ -34,8 +34,6 @@ EXPORT_SYMBOL(copy_user_generic_string);
23909 EXPORT_SYMBOL(copy_user_generic_unrolled);
23910 EXPORT_SYMBOL(copy_user_enhanced_fast_string);
23911 EXPORT_SYMBOL(__copy_user_nocache);
23912-EXPORT_SYMBOL(_copy_from_user);
23913-EXPORT_SYMBOL(_copy_to_user);
23914
23915 EXPORT_SYMBOL(copy_page);
23916 EXPORT_SYMBOL(clear_page);
23917diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c
23918index 45a14db..075bb9b 100644
23919--- a/arch/x86/kernel/x86_init.c
23920+++ b/arch/x86/kernel/x86_init.c
23921@@ -85,7 +85,7 @@ struct x86_init_ops x86_init __initdata = {
23922 },
23923 };
23924
23925-struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
23926+struct x86_cpuinit_ops x86_cpuinit __cpuinitconst = {
23927 .early_percpu_clock_init = x86_init_noop,
23928 .setup_percpu_clockev = setup_secondary_APIC_clock,
23929 };
23930@@ -93,7 +93,7 @@ struct x86_cpuinit_ops x86_cpuinit __cpuinitdata = {
23931 static void default_nmi_init(void) { };
23932 static int default_i8042_detect(void) { return 1; };
23933
23934-struct x86_platform_ops x86_platform = {
23935+struct x86_platform_ops x86_platform __read_only = {
23936 .calibrate_tsc = native_calibrate_tsc,
23937 .get_wallclock = mach_get_cmos_time,
23938 .set_wallclock = mach_set_rtc_mmss,
23939@@ -107,7 +107,7 @@ struct x86_platform_ops x86_platform = {
23940 };
23941
23942 EXPORT_SYMBOL_GPL(x86_platform);
23943-struct x86_msi_ops x86_msi = {
23944+struct x86_msi_ops x86_msi __read_only = {
23945 .setup_msi_irqs = native_setup_msi_irqs,
23946 .compose_msi_msg = native_compose_msi_msg,
23947 .teardown_msi_irq = native_teardown_msi_irq,
23948@@ -116,7 +116,7 @@ struct x86_msi_ops x86_msi = {
23949 .setup_hpet_msi = default_setup_hpet_msi,
23950 };
23951
23952-struct x86_io_apic_ops x86_io_apic_ops = {
23953+struct x86_io_apic_ops x86_io_apic_ops __read_only = {
23954 .init = native_io_apic_init_mappings,
23955 .read = native_io_apic_read,
23956 .write = native_io_apic_write,
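
Marking x86_platform, x86_msi and x86_io_apic_ops __read_only moves these function-pointer tables out of writable data, so a single arbitrary write can no longer redirect them; any legitimate late update has to go through an explicit pax_open_kernel()/pax_close_kernel() window, as the kvm hunks further down do. A userspace analog of that open/write/close pattern using mprotect(); the page alignment, single-page layout, and omitted error handling are simplifications:

    #include <sys/mman.h>

    struct platform_ops { void (*nmi_init)(void); };

    /* The table gets its own page so it can be protected as a unit. */
    static struct platform_ops ops __attribute__((aligned(4096)));

    /* Sketch: briefly reopen a read-only table for one sanctioned write. */
    static void update_nmi_init(void (*fn)(void))
    {
            mprotect(&ops, sizeof(ops), PROT_READ | PROT_WRITE); /* open  */
            ops.nmi_init = fn;
            mprotect(&ops, sizeof(ops), PROT_READ);              /* close */
    }
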
23957diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
23958index ada87a3..afea76d 100644
23959--- a/arch/x86/kernel/xsave.c
23960+++ b/arch/x86/kernel/xsave.c
23961@@ -199,6 +199,7 @@ static inline int save_user_xstate(struct xsave_struct __user *buf)
23962 {
23963 int err;
23964
23965+ buf = (struct xsave_struct __user *)____m(buf);
23966 if (use_xsave())
23967 err = xsave_user(buf);
23968 else if (use_fxsr())
23969@@ -311,6 +312,7 @@ sanitize_restored_xstate(struct task_struct *tsk,
23970 */
23971 static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
23972 {
23973+ buf = (void __user *)____m(buf);
23974 if (use_xsave()) {
23975 if ((unsigned long)buf % 64 || fx_only) {
23976 u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
23977diff --git a/arch/x86/kvm/cpuid.c b/arch/x86/kvm/cpuid.c
23978index a20ecb5..d0e2194 100644
23979--- a/arch/x86/kvm/cpuid.c
23980+++ b/arch/x86/kvm/cpuid.c
23981@@ -124,15 +124,20 @@ int kvm_vcpu_ioctl_set_cpuid2(struct kvm_vcpu *vcpu,
23982 struct kvm_cpuid2 *cpuid,
23983 struct kvm_cpuid_entry2 __user *entries)
23984 {
23985- int r;
23986+ int r, i;
23987
23988 r = -E2BIG;
23989 if (cpuid->nent > KVM_MAX_CPUID_ENTRIES)
23990 goto out;
23991 r = -EFAULT;
23992- if (copy_from_user(&vcpu->arch.cpuid_entries, entries,
23993- cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
23994+ if (!access_ok(VERIFY_READ, entries, cpuid->nent * sizeof(struct kvm_cpuid_entry2)))
23995 goto out;
23996+ for (i = 0; i < cpuid->nent; ++i) {
23997+ struct kvm_cpuid_entry2 cpuid_entry;
23998+ if (__copy_from_user(&cpuid_entry, entries + i, sizeof(cpuid_entry)))
23999+ goto out;
24000+ vcpu->arch.cpuid_entries[i] = cpuid_entry;
24001+ }
24002 vcpu->arch.cpuid_nent = cpuid->nent;
24003 kvm_apic_set_version(vcpu);
24004 kvm_x86_ops->cpuid_update(vcpu);
24005@@ -147,15 +152,19 @@ int kvm_vcpu_ioctl_get_cpuid2(struct kvm_vcpu *vcpu,
24006 struct kvm_cpuid2 *cpuid,
24007 struct kvm_cpuid_entry2 __user *entries)
24008 {
24009- int r;
24010+ int r, i;
24011
24012 r = -E2BIG;
24013 if (cpuid->nent < vcpu->arch.cpuid_nent)
24014 goto out;
24015 r = -EFAULT;
24016- if (copy_to_user(entries, &vcpu->arch.cpuid_entries,
24017- vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
24018+ if (!access_ok(VERIFY_WRITE, entries, vcpu->arch.cpuid_nent * sizeof(struct kvm_cpuid_entry2)))
24019 goto out;
24020+ for (i = 0; i < vcpu->arch.cpuid_nent; ++i) {
24021+ struct kvm_cpuid_entry2 cpuid_entry = vcpu->arch.cpuid_entries[i];
24022+ if (__copy_to_user(entries + i, &cpuid_entry, sizeof(cpuid_entry)))
24023+ goto out;
24024+ }
24025 return 0;
24026
24027 out:
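
Both cpuid ioctl paths stop doing one variable-length copy_{from,to}_user() over the whole cpuid_entries array: they check access_ok() once, then move entries one at a time through a fixed-size stack temporary, so each copy has a compile-time constant size that hardened usercopy checking can verify. The loop shape as a standalone sketch (memcpy stands in for __copy_from_user()):

    #include <errno.h>
    #include <string.h>

    struct cpuid_entry { unsigned int function, index, flags; };

    #define MAX_ENTRIES 80

    /* Sketch: N constant-size copies instead of one variable-size copy. */
    static int copy_entries_in(struct cpuid_entry *dst,
                               const struct cpuid_entry *user_src,
                               unsigned int nent)
    {
            unsigned int i;

            if (nent > MAX_ENTRIES)
                    return -E2BIG;
            for (i = 0; i < nent; i++) {
                    struct cpuid_entry tmp;

                    memcpy(&tmp, user_src + i, sizeof(tmp)); /* fixed size */
                    dst[i] = tmp;
            }
            return 0;
    }
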
24028diff --git a/arch/x86/kvm/emulate.c b/arch/x86/kvm/emulate.c
24029index 5953dce..f11a7d2 100644
24030--- a/arch/x86/kvm/emulate.c
24031+++ b/arch/x86/kvm/emulate.c
24032@@ -329,6 +329,7 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
24033
24034 #define ____emulate_2op(ctxt, _op, _x, _y, _suffix, _dsttype) \
24035 do { \
24036+ unsigned long _tmp; \
24037 __asm__ __volatile__ ( \
24038 _PRE_EFLAGS("0", "4", "2") \
24039 _op _suffix " %"_x"3,%1; " \
24040@@ -343,8 +344,6 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
24041 /* Raw emulation: instruction has two explicit operands. */
24042 #define __emulate_2op_nobyte(ctxt,_op,_wx,_wy,_lx,_ly,_qx,_qy) \
24043 do { \
24044- unsigned long _tmp; \
24045- \
24046 switch ((ctxt)->dst.bytes) { \
24047 case 2: \
24048 ____emulate_2op(ctxt,_op,_wx,_wy,"w",u16); \
24049@@ -360,7 +359,6 @@ static void invalidate_registers(struct x86_emulate_ctxt *ctxt)
24050
24051 #define __emulate_2op(ctxt,_op,_bx,_by,_wx,_wy,_lx,_ly,_qx,_qy) \
24052 do { \
24053- unsigned long _tmp; \
24054 switch ((ctxt)->dst.bytes) { \
24055 case 1: \
24056 ____emulate_2op(ctxt,_op,_bx,_by,"b",u8); \
24057diff --git a/arch/x86/kvm/lapic.c b/arch/x86/kvm/lapic.c
24058index 0eee2c8..94a32c3 100644
24059--- a/arch/x86/kvm/lapic.c
24060+++ b/arch/x86/kvm/lapic.c
24061@@ -55,7 +55,7 @@
24062 #define APIC_BUS_CYCLE_NS 1
24063
24064 /* #define apic_debug(fmt,arg...) printk(KERN_WARNING fmt,##arg) */
24065-#define apic_debug(fmt, arg...)
24066+#define apic_debug(fmt, arg...) do {} while (0)
24067
24068 #define APIC_LVT_NUM 6
24069 /* 14 is the version for Xeon and Pentium 8.4.8*/
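
The apic_debug() change is a one-liner but a classic: a disabled statement-like macro should expand to `do {} while (0)`, not to nothing, so every use site remains a genuine statement. A sketch of the difference, using the same GNU named-variadic form as the patch:

    #define LOG_OFF(fmt, arg...)                    /* expands to nothing */
    #define LOG_STMT(fmt, arg...) do {} while (0)   /* a real statement */

    void handle(int err)
    {
            if (err)
                    LOG_OFF("boom\n");  /* leaves 'if (err) ;' - compilers
                                           warn about the empty body */
            if (err)
                    LOG_STMT("boom\n"); /* no warning, and it behaves like
                                           a function call everywhere one
                                           is legal */
    }
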
24070diff --git a/arch/x86/kvm/paging_tmpl.h b/arch/x86/kvm/paging_tmpl.h
24071index da20860..d19fdf5 100644
24072--- a/arch/x86/kvm/paging_tmpl.h
24073+++ b/arch/x86/kvm/paging_tmpl.h
24074@@ -208,7 +208,7 @@ retry_walk:
24075 if (unlikely(kvm_is_error_hva(host_addr)))
24076 goto error;
24077
24078- ptep_user = (pt_element_t __user *)((void *)host_addr + offset);
24079+ ptep_user = (pt_element_t __force_user *)((void *)host_addr + offset);
24080 if (unlikely(__copy_from_user(&pte, ptep_user, sizeof(pte))))
24081 goto error;
24082 walker->ptep_user[walker->level - 1] = ptep_user;
24083diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
24084index a14a6ea..dc86cf0 100644
24085--- a/arch/x86/kvm/svm.c
24086+++ b/arch/x86/kvm/svm.c
24087@@ -3493,7 +3493,11 @@ static void reload_tss(struct kvm_vcpu *vcpu)
24088 int cpu = raw_smp_processor_id();
24089
24090 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
24091+
24092+ pax_open_kernel();
24093 sd->tss_desc->type = 9; /* available 32/64-bit TSS */
24094+ pax_close_kernel();
24095+
24096 load_TR_desc();
24097 }
24098
24099@@ -3894,6 +3898,10 @@ static void svm_vcpu_run(struct kvm_vcpu *vcpu)
24100 #endif
24101 #endif
24102
24103+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
24104+ __set_fs(current_thread_info()->addr_limit);
24105+#endif
24106+
24107 reload_tss(vcpu);
24108
24109 local_irq_disable();
24110diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
24111index 5402c94..c3bdeee 100644
24112--- a/arch/x86/kvm/vmx.c
24113+++ b/arch/x86/kvm/vmx.c
24114@@ -1311,12 +1311,12 @@ static void vmcs_write64(unsigned long field, u64 value)
24115 #endif
24116 }
24117
24118-static void vmcs_clear_bits(unsigned long field, u32 mask)
24119+static void vmcs_clear_bits(unsigned long field, unsigned long mask)
24120 {
24121 vmcs_writel(field, vmcs_readl(field) & ~mask);
24122 }
24123
24124-static void vmcs_set_bits(unsigned long field, u32 mask)
24125+static void vmcs_set_bits(unsigned long field, unsigned long mask)
24126 {
24127 vmcs_writel(field, vmcs_readl(field) | mask);
24128 }
24129@@ -1517,7 +1517,11 @@ static void reload_tss(void)
24130 struct desc_struct *descs;
24131
24132 descs = (void *)gdt->address;
24133+
24134+ pax_open_kernel();
24135 descs[GDT_ENTRY_TSS].type = 9; /* available TSS */
24136+ pax_close_kernel();
24137+
24138 load_TR_desc();
24139 }
24140
24141@@ -1741,6 +1745,10 @@ static void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
24142 vmcs_writel(HOST_TR_BASE, kvm_read_tr_base()); /* 22.2.4 */
24143 vmcs_writel(HOST_GDTR_BASE, gdt->address); /* 22.2.4 */
24144
24145+#ifdef CONFIG_PAX_PER_CPU_PGD
24146+ vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
24147+#endif
24148+
24149 rdmsrl(MSR_IA32_SYSENTER_ESP, sysenter_esp);
24150 vmcs_writel(HOST_IA32_SYSENTER_ESP, sysenter_esp); /* 22.2.3 */
24151 vmx->loaded_vmcs->cpu = cpu;
24152@@ -2935,8 +2943,11 @@ static __init int hardware_setup(void)
24153 if (!cpu_has_vmx_flexpriority())
24154 flexpriority_enabled = 0;
24155
24156- if (!cpu_has_vmx_tpr_shadow())
24157- kvm_x86_ops->update_cr8_intercept = NULL;
24158+ if (!cpu_has_vmx_tpr_shadow()) {
24159+ pax_open_kernel();
24160+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
24161+ pax_close_kernel();
24162+ }
24163
24164 if (enable_ept && !cpu_has_vmx_ept_2m_page())
24165 kvm_disable_largepages();
24166@@ -2947,13 +2958,15 @@ static __init int hardware_setup(void)
24167 if (!cpu_has_vmx_apicv())
24168 enable_apicv = 0;
24169
24170+ pax_open_kernel();
24171 if (enable_apicv)
24172- kvm_x86_ops->update_cr8_intercept = NULL;
24173+ *(void **)&kvm_x86_ops->update_cr8_intercept = NULL;
24174 else {
24175- kvm_x86_ops->hwapic_irr_update = NULL;
24176- kvm_x86_ops->deliver_posted_interrupt = NULL;
24177- kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
24178+ *(void **)&kvm_x86_ops->hwapic_irr_update = NULL;
24179+ *(void **)&kvm_x86_ops->deliver_posted_interrupt = NULL;
24180+ *(void **)&kvm_x86_ops->sync_pir_to_irr = vmx_sync_pir_to_irr_dummy;
24181 }
24182+ pax_close_kernel();
24183
24184 if (nested)
24185 nested_vmx_setup_ctls_msrs();
24186@@ -4076,7 +4089,10 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
24187
24188 vmcs_writel(HOST_CR0, read_cr0() & ~X86_CR0_TS); /* 22.2.3 */
24189 vmcs_writel(HOST_CR4, read_cr4()); /* 22.2.3, 22.2.5 */
24190+
24191+#ifndef CONFIG_PAX_PER_CPU_PGD
24192 vmcs_writel(HOST_CR3, read_cr3()); /* 22.2.3 FIXME: shadow tables */
24193+#endif
24194
24195 vmcs_write16(HOST_CS_SELECTOR, __KERNEL_CS); /* 22.2.4 */
24196 #ifdef CONFIG_X86_64
24197@@ -4098,7 +4114,7 @@ static void vmx_set_constant_host_state(struct vcpu_vmx *vmx)
24198 vmcs_writel(HOST_IDTR_BASE, dt.address); /* 22.2.4 */
24199 vmx->host_idt_base = dt.address;
24200
24201- vmcs_writel(HOST_RIP, vmx_return); /* 22.2.5 */
24202+ vmcs_writel(HOST_RIP, ktla_ktva(vmx_return)); /* 22.2.5 */
24203
24204 rdmsr(MSR_IA32_SYSENTER_CS, low32, high32);
24205 vmcs_write32(HOST_IA32_SYSENTER_CS, low32);
24206@@ -7030,6 +7046,12 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
24207 "jmp 2f \n\t"
24208 "1: " __ex(ASM_VMX_VMRESUME) "\n\t"
24209 "2: "
24210+
24211+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
24212+ "ljmp %[cs],$3f\n\t"
24213+ "3: "
24214+#endif
24215+
24216 /* Save guest registers, load host registers, keep flags */
24217 "mov %0, %c[wordsize](%%" _ASM_SP ") \n\t"
24218 "pop %0 \n\t"
24219@@ -7082,6 +7104,11 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
24220 #endif
24221 [cr2]"i"(offsetof(struct vcpu_vmx, vcpu.arch.cr2)),
24222 [wordsize]"i"(sizeof(ulong))
24223+
24224+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
24225+ ,[cs]"i"(__KERNEL_CS)
24226+#endif
24227+
24228 : "cc", "memory"
24229 #ifdef CONFIG_X86_64
24230 , "rax", "rbx", "rdi", "rsi"
24231@@ -7095,7 +7122,7 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
24232 if (debugctlmsr)
24233 update_debugctlmsr(debugctlmsr);
24234
24235-#ifndef CONFIG_X86_64
24236+#ifdef CONFIG_X86_32
24237 /*
24238 * The sysexit path does not restore ds/es, so we must set them to
24239 * a reasonable value ourselves.
24240@@ -7104,8 +7131,18 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
24241 * may be executed in interrupt context, which saves and restore segments
24242 * around it, nullifying its effect.
24243 */
24244- loadsegment(ds, __USER_DS);
24245- loadsegment(es, __USER_DS);
24246+ loadsegment(ds, __KERNEL_DS);
24247+ loadsegment(es, __KERNEL_DS);
24248+ loadsegment(ss, __KERNEL_DS);
24249+
24250+#ifdef CONFIG_PAX_KERNEXEC
24251+ loadsegment(fs, __KERNEL_PERCPU);
24252+#endif
24253+
24254+#ifdef CONFIG_PAX_MEMORY_UDEREF
24255+ __set_fs(current_thread_info()->addr_limit);
24256+#endif
24257+
24258 #endif
24259
24260 vcpu->arch.regs_avail = ~((1 << VCPU_REGS_RIP) | (1 << VCPU_REGS_RSP)
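
Among the vmx changes, the vmcs_clear_bits()/vmcs_set_bits() widening is easy to miss: the helpers operate on natural-width (unsigned long) VMCS fields, so a u32 mask parameter silently truncates any mask above bit 31 on 64-bit builds. A self-contained demonstration of that truncation (assumes an LP64 target where unsigned long is 64 bits):

    #include <stdint.h>
    #include <stdio.h>

    static unsigned long field = ~0UL;

    static void clear_bits_u32(uint32_t mask)
    {
            field &= ~(unsigned long)mask;
    }

    static void clear_bits_ulong(unsigned long mask)
    {
            field &= ~mask;
    }

    int main(void)
    {
            clear_bits_u32(1UL << 40);   /* mask truncates to 0: no effect */
            printf("bit 40 after u32 mask:   %lu\n", (field >> 40) & 1);
            clear_bits_ulong(1UL << 40); /* clears the intended bit */
            printf("bit 40 after ulong mask: %lu\n", (field >> 40) & 1);
            return 0;
    }
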
24261diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
24262index e8ba99c..ee9d7d9 100644
24263--- a/arch/x86/kvm/x86.c
24264+++ b/arch/x86/kvm/x86.c
24265@@ -1725,8 +1725,8 @@ static int xen_hvm_config(struct kvm_vcpu *vcpu, u64 data)
24266 {
24267 struct kvm *kvm = vcpu->kvm;
24268 int lm = is_long_mode(vcpu);
24269- u8 *blob_addr = lm ? (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_64
24270- : (u8 *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
24271+ u8 __user *blob_addr = lm ? (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_64
24272+ : (u8 __user *)(long)kvm->arch.xen_hvm_config.blob_addr_32;
24273 u8 blob_size = lm ? kvm->arch.xen_hvm_config.blob_size_64
24274 : kvm->arch.xen_hvm_config.blob_size_32;
24275 u32 page_num = data & ~PAGE_MASK;
24276@@ -2609,6 +2609,8 @@ long kvm_arch_dev_ioctl(struct file *filp,
24277 if (n < msr_list.nmsrs)
24278 goto out;
24279 r = -EFAULT;
24280+ if (num_msrs_to_save > ARRAY_SIZE(msrs_to_save))
24281+ goto out;
24282 if (copy_to_user(user_msr_list->indices, &msrs_to_save,
24283 num_msrs_to_save * sizeof(u32)))
24284 goto out;
24285@@ -5297,7 +5299,7 @@ static struct notifier_block pvclock_gtod_notifier = {
24286 };
24287 #endif
24288
24289-int kvm_arch_init(void *opaque)
24290+int kvm_arch_init(const void *opaque)
24291 {
24292 int r;
24293 struct kvm_x86_ops *ops = (struct kvm_x86_ops *)opaque;
24294diff --git a/arch/x86/lguest/boot.c b/arch/x86/lguest/boot.c
24295index 7114c63..a1018fc 100644
24296--- a/arch/x86/lguest/boot.c
24297+++ b/arch/x86/lguest/boot.c
24298@@ -1201,9 +1201,10 @@ static __init int early_put_chars(u32 vtermno, const char *buf, int count)
24299 * Rebooting also tells the Host we're finished, but the RESTART flag tells the
24300 * Launcher to reboot us.
24301 */
24302-static void lguest_restart(char *reason)
24303+static __noreturn void lguest_restart(char *reason)
24304 {
24305 hcall(LHCALL_SHUTDOWN, __pa(reason), LGUEST_SHUTDOWN_RESTART, 0, 0);
24306+ BUG();
24307 }
24308
24309 /*G:050
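
The lguest hunk pairs two things that belong together: the function is annotated __noreturn, and a BUG() follows the shutdown hypercall so that if the call ever did return, execution traps instead of falling off the end of a function the compiler now assumes cannot return. The same pattern in a standalone sketch:

    #include <stdlib.h>

    static void shutdown_hypercall(void) { /* stand-in for hcall(...) */ }

    /* Sketch: back a noreturn promise with a trap. */
    __attribute__((noreturn))
    static void restart_machine(void)
    {
            shutdown_hypercall();
            abort();        /* plays the role of BUG() */
    }

    int main(void)
    {
            restart_machine();
    }
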
24310diff --git a/arch/x86/lib/atomic64_386_32.S b/arch/x86/lib/atomic64_386_32.S
24311index 00933d5..3a64af9 100644
24312--- a/arch/x86/lib/atomic64_386_32.S
24313+++ b/arch/x86/lib/atomic64_386_32.S
24314@@ -48,6 +48,10 @@ BEGIN(read)
24315 movl (v), %eax
24316 movl 4(v), %edx
24317 RET_ENDP
24318+BEGIN(read_unchecked)
24319+ movl (v), %eax
24320+ movl 4(v), %edx
24321+RET_ENDP
24322 #undef v
24323
24324 #define v %esi
24325@@ -55,6 +59,10 @@ BEGIN(set)
24326 movl %ebx, (v)
24327 movl %ecx, 4(v)
24328 RET_ENDP
24329+BEGIN(set_unchecked)
24330+ movl %ebx, (v)
24331+ movl %ecx, 4(v)
24332+RET_ENDP
24333 #undef v
24334
24335 #define v %esi
24336@@ -70,6 +78,20 @@ RET_ENDP
24337 BEGIN(add)
24338 addl %eax, (v)
24339 adcl %edx, 4(v)
24340+
24341+#ifdef CONFIG_PAX_REFCOUNT
24342+ jno 0f
24343+ subl %eax, (v)
24344+ sbbl %edx, 4(v)
24345+ int $4
24346+0:
24347+ _ASM_EXTABLE(0b, 0b)
24348+#endif
24349+
24350+RET_ENDP
24351+BEGIN(add_unchecked)
24352+ addl %eax, (v)
24353+ adcl %edx, 4(v)
24354 RET_ENDP
24355 #undef v
24356
24357@@ -77,6 +99,24 @@ RET_ENDP
24358 BEGIN(add_return)
24359 addl (v), %eax
24360 adcl 4(v), %edx
24361+
24362+#ifdef CONFIG_PAX_REFCOUNT
24363+ into
24364+1234:
24365+ _ASM_EXTABLE(1234b, 2f)
24366+#endif
24367+
24368+ movl %eax, (v)
24369+ movl %edx, 4(v)
24370+
24371+#ifdef CONFIG_PAX_REFCOUNT
24372+2:
24373+#endif
24374+
24375+RET_ENDP
24376+BEGIN(add_return_unchecked)
24377+ addl (v), %eax
24378+ adcl 4(v), %edx
24379 movl %eax, (v)
24380 movl %edx, 4(v)
24381 RET_ENDP
24382@@ -86,6 +126,20 @@ RET_ENDP
24383 BEGIN(sub)
24384 subl %eax, (v)
24385 sbbl %edx, 4(v)
24386+
24387+#ifdef CONFIG_PAX_REFCOUNT
24388+ jno 0f
24389+ addl %eax, (v)
24390+ adcl %edx, 4(v)
24391+ int $4
24392+0:
24393+ _ASM_EXTABLE(0b, 0b)
24394+#endif
24395+
24396+RET_ENDP
24397+BEGIN(sub_unchecked)
24398+ subl %eax, (v)
24399+ sbbl %edx, 4(v)
24400 RET_ENDP
24401 #undef v
24402
24403@@ -96,6 +150,27 @@ BEGIN(sub_return)
24404 sbbl $0, %edx
24405 addl (v), %eax
24406 adcl 4(v), %edx
24407+
24408+#ifdef CONFIG_PAX_REFCOUNT
24409+ into
24410+1234:
24411+ _ASM_EXTABLE(1234b, 2f)
24412+#endif
24413+
24414+ movl %eax, (v)
24415+ movl %edx, 4(v)
24416+
24417+#ifdef CONFIG_PAX_REFCOUNT
24418+2:
24419+#endif
24420+
24421+RET_ENDP
24422+BEGIN(sub_return_unchecked)
24423+ negl %edx
24424+ negl %eax
24425+ sbbl $0, %edx
24426+ addl (v), %eax
24427+ adcl 4(v), %edx
24428 movl %eax, (v)
24429 movl %edx, 4(v)
24430 RET_ENDP
24431@@ -105,6 +180,20 @@ RET_ENDP
24432 BEGIN(inc)
24433 addl $1, (v)
24434 adcl $0, 4(v)
24435+
24436+#ifdef CONFIG_PAX_REFCOUNT
24437+ jno 0f
24438+ subl $1, (v)
24439+ sbbl $0, 4(v)
24440+ int $4
24441+0:
24442+ _ASM_EXTABLE(0b, 0b)
24443+#endif
24444+
24445+RET_ENDP
24446+BEGIN(inc_unchecked)
24447+ addl $1, (v)
24448+ adcl $0, 4(v)
24449 RET_ENDP
24450 #undef v
24451
24452@@ -114,6 +203,26 @@ BEGIN(inc_return)
24453 movl 4(v), %edx
24454 addl $1, %eax
24455 adcl $0, %edx
24456+
24457+#ifdef CONFIG_PAX_REFCOUNT
24458+ into
24459+1234:
24460+ _ASM_EXTABLE(1234b, 2f)
24461+#endif
24462+
24463+ movl %eax, (v)
24464+ movl %edx, 4(v)
24465+
24466+#ifdef CONFIG_PAX_REFCOUNT
24467+2:
24468+#endif
24469+
24470+RET_ENDP
24471+BEGIN(inc_return_unchecked)
24472+ movl (v), %eax
24473+ movl 4(v), %edx
24474+ addl $1, %eax
24475+ adcl $0, %edx
24476 movl %eax, (v)
24477 movl %edx, 4(v)
24478 RET_ENDP
24479@@ -123,6 +232,20 @@ RET_ENDP
24480 BEGIN(dec)
24481 subl $1, (v)
24482 sbbl $0, 4(v)
24483+
24484+#ifdef CONFIG_PAX_REFCOUNT
24485+ jno 0f
24486+ addl $1, (v)
24487+ adcl $0, 4(v)
24488+ int $4
24489+0:
24490+ _ASM_EXTABLE(0b, 0b)
24491+#endif
24492+
24493+RET_ENDP
24494+BEGIN(dec_unchecked)
24495+ subl $1, (v)
24496+ sbbl $0, 4(v)
24497 RET_ENDP
24498 #undef v
24499
24500@@ -132,6 +255,26 @@ BEGIN(dec_return)
24501 movl 4(v), %edx
24502 subl $1, %eax
24503 sbbl $0, %edx
24504+
24505+#ifdef CONFIG_PAX_REFCOUNT
24506+ into
24507+1234:
24508+ _ASM_EXTABLE(1234b, 2f)
24509+#endif
24510+
24511+ movl %eax, (v)
24512+ movl %edx, 4(v)
24513+
24514+#ifdef CONFIG_PAX_REFCOUNT
24515+2:
24516+#endif
24517+
24518+RET_ENDP
24519+BEGIN(dec_return_unchecked)
24520+ movl (v), %eax
24521+ movl 4(v), %edx
24522+ subl $1, %eax
24523+ sbbl $0, %edx
24524 movl %eax, (v)
24525 movl %edx, 4(v)
24526 RET_ENDP
24527@@ -143,6 +286,13 @@ BEGIN(add_unless)
24528 adcl %edx, %edi
24529 addl (v), %eax
24530 adcl 4(v), %edx
24531+
24532+#ifdef CONFIG_PAX_REFCOUNT
24533+ into
24534+1234:
24535+ _ASM_EXTABLE(1234b, 2f)
24536+#endif
24537+
24538 cmpl %eax, %ecx
24539 je 3f
24540 1:
24541@@ -168,6 +318,13 @@ BEGIN(inc_not_zero)
24542 1:
24543 addl $1, %eax
24544 adcl $0, %edx
24545+
24546+#ifdef CONFIG_PAX_REFCOUNT
24547+ into
24548+1234:
24549+ _ASM_EXTABLE(1234b, 2f)
24550+#endif
24551+
24552 movl %eax, (v)
24553 movl %edx, 4(v)
24554 movl $1, %eax
24555@@ -186,6 +343,13 @@ BEGIN(dec_if_positive)
24556 movl 4(v), %edx
24557 subl $1, %eax
24558 sbbl $0, %edx
24559+
24560+#ifdef CONFIG_PAX_REFCOUNT
24561+ into
24562+1234:
24563+ _ASM_EXTABLE(1234b, 1f)
24564+#endif
24565+
24566 js 1f
24567 movl %eax, (v)
24568 movl %edx, 4(v)
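
Every checked atomic64 primitive above follows the same scheme: perform the 64-bit add/sub/inc/dec, test the overflow flag (jno), and on overflow undo the memory update and raise #OF with int $4, while a parallel *_unchecked variant keeps the original wrapping behaviour for counters that legitimately wrap. The observable policy restated in C; the builtin detects before committing, which is equivalent to the asm's add-then-undo, and the reporting hook is a hypothetical stand-in for the #OF path:

    #include <stdint.h>

    static void report_refcount_overflow(void) { /* hypothetical hook */ }

    /* Sketch: the checked variant refuses to let the value wrap... */
    static void atomic64_add_checked(int64_t delta, int64_t *v)
    {
            int64_t next;

            if (__builtin_add_overflow(*v, delta, &next)) {
                    report_refcount_overflow();  /* *v left unchanged */
                    return;
            }
            *v = next;
    }

    /* ...while the unchecked variant wraps, for counters that may. */
    static void atomic64_add_unchecked(int64_t delta, int64_t *v)
    {
            *v = (int64_t)((uint64_t)*v + (uint64_t)delta);
    }
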
24569diff --git a/arch/x86/lib/atomic64_cx8_32.S b/arch/x86/lib/atomic64_cx8_32.S
24570index f5cc9eb..51fa319 100644
24571--- a/arch/x86/lib/atomic64_cx8_32.S
24572+++ b/arch/x86/lib/atomic64_cx8_32.S
24573@@ -35,10 +35,20 @@ ENTRY(atomic64_read_cx8)
24574 CFI_STARTPROC
24575
24576 read64 %ecx
24577+ pax_force_retaddr
24578 ret
24579 CFI_ENDPROC
24580 ENDPROC(atomic64_read_cx8)
24581
24582+ENTRY(atomic64_read_unchecked_cx8)
24583+ CFI_STARTPROC
24584+
24585+ read64 %ecx
24586+ pax_force_retaddr
24587+ ret
24588+ CFI_ENDPROC
24589+ENDPROC(atomic64_read_unchecked_cx8)
24590+
24591 ENTRY(atomic64_set_cx8)
24592 CFI_STARTPROC
24593
24594@@ -48,10 +58,25 @@ ENTRY(atomic64_set_cx8)
24595 cmpxchg8b (%esi)
24596 jne 1b
24597
24598+ pax_force_retaddr
24599 ret
24600 CFI_ENDPROC
24601 ENDPROC(atomic64_set_cx8)
24602
24603+ENTRY(atomic64_set_unchecked_cx8)
24604+ CFI_STARTPROC
24605+
24606+1:
24607+/* we don't need LOCK_PREFIX since aligned 64-bit writes
24608+ * are atomic on 586 and newer */
24609+ cmpxchg8b (%esi)
24610+ jne 1b
24611+
24612+ pax_force_retaddr
24613+ ret
24614+ CFI_ENDPROC
24615+ENDPROC(atomic64_set_unchecked_cx8)
24616+
24617 ENTRY(atomic64_xchg_cx8)
24618 CFI_STARTPROC
24619
24620@@ -60,12 +85,13 @@ ENTRY(atomic64_xchg_cx8)
24621 cmpxchg8b (%esi)
24622 jne 1b
24623
24624+ pax_force_retaddr
24625 ret
24626 CFI_ENDPROC
24627 ENDPROC(atomic64_xchg_cx8)
24628
24629-.macro addsub_return func ins insc
24630-ENTRY(atomic64_\func\()_return_cx8)
24631+.macro addsub_return func ins insc unchecked=""
24632+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
24633 CFI_STARTPROC
24634 SAVE ebp
24635 SAVE ebx
24636@@ -82,27 +108,44 @@ ENTRY(atomic64_\func\()_return_cx8)
24637 movl %edx, %ecx
24638 \ins\()l %esi, %ebx
24639 \insc\()l %edi, %ecx
24640+
24641+.ifb \unchecked
24642+#ifdef CONFIG_PAX_REFCOUNT
24643+ into
24644+2:
24645+ _ASM_EXTABLE(2b, 3f)
24646+#endif
24647+.endif
24648+
24649 LOCK_PREFIX
24650 cmpxchg8b (%ebp)
24651 jne 1b
24652-
24653-10:
24654 movl %ebx, %eax
24655 movl %ecx, %edx
24656+
24657+.ifb \unchecked
24658+#ifdef CONFIG_PAX_REFCOUNT
24659+3:
24660+#endif
24661+.endif
24662+
24663 RESTORE edi
24664 RESTORE esi
24665 RESTORE ebx
24666 RESTORE ebp
24667+ pax_force_retaddr
24668 ret
24669 CFI_ENDPROC
24670-ENDPROC(atomic64_\func\()_return_cx8)
24671+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
24672 .endm
24673
24674 addsub_return add add adc
24675 addsub_return sub sub sbb
24676+addsub_return add add adc _unchecked
24677+addsub_return sub sub sbb _unchecked
24678
24679-.macro incdec_return func ins insc
24680-ENTRY(atomic64_\func\()_return_cx8)
24681+.macro incdec_return func ins insc unchecked=""
24682+ENTRY(atomic64_\func\()_return\unchecked\()_cx8)
24683 CFI_STARTPROC
24684 SAVE ebx
24685
24686@@ -112,21 +155,39 @@ ENTRY(atomic64_\func\()_return_cx8)
24687 movl %edx, %ecx
24688 \ins\()l $1, %ebx
24689 \insc\()l $0, %ecx
24690+
24691+.ifb \unchecked
24692+#ifdef CONFIG_PAX_REFCOUNT
24693+ into
24694+2:
24695+ _ASM_EXTABLE(2b, 3f)
24696+#endif
24697+.endif
24698+
24699 LOCK_PREFIX
24700 cmpxchg8b (%esi)
24701 jne 1b
24702
24703-10:
24704 movl %ebx, %eax
24705 movl %ecx, %edx
24706+
24707+.ifb \unchecked
24708+#ifdef CONFIG_PAX_REFCOUNT
24709+3:
24710+#endif
24711+.endif
24712+
24713 RESTORE ebx
24714+ pax_force_retaddr
24715 ret
24716 CFI_ENDPROC
24717-ENDPROC(atomic64_\func\()_return_cx8)
24718+ENDPROC(atomic64_\func\()_return\unchecked\()_cx8)
24719 .endm
24720
24721 incdec_return inc add adc
24722 incdec_return dec sub sbb
24723+incdec_return inc add adc _unchecked
24724+incdec_return dec sub sbb _unchecked
24725
24726 ENTRY(atomic64_dec_if_positive_cx8)
24727 CFI_STARTPROC
24728@@ -138,6 +199,13 @@ ENTRY(atomic64_dec_if_positive_cx8)
24729 movl %edx, %ecx
24730 subl $1, %ebx
24731 sbb $0, %ecx
24732+
24733+#ifdef CONFIG_PAX_REFCOUNT
24734+ into
24735+1234:
24736+ _ASM_EXTABLE(1234b, 2f)
24737+#endif
24738+
24739 js 2f
24740 LOCK_PREFIX
24741 cmpxchg8b (%esi)
24742@@ -147,6 +215,7 @@ ENTRY(atomic64_dec_if_positive_cx8)
24743 movl %ebx, %eax
24744 movl %ecx, %edx
24745 RESTORE ebx
24746+ pax_force_retaddr
24747 ret
24748 CFI_ENDPROC
24749 ENDPROC(atomic64_dec_if_positive_cx8)
24750@@ -171,6 +240,13 @@ ENTRY(atomic64_add_unless_cx8)
24751 movl %edx, %ecx
24752 addl %ebp, %ebx
24753 adcl %edi, %ecx
24754+
24755+#ifdef CONFIG_PAX_REFCOUNT
24756+ into
24757+1234:
24758+ _ASM_EXTABLE(1234b, 3f)
24759+#endif
24760+
24761 LOCK_PREFIX
24762 cmpxchg8b (%esi)
24763 jne 1b
24764@@ -181,6 +257,7 @@ ENTRY(atomic64_add_unless_cx8)
24765 CFI_ADJUST_CFA_OFFSET -8
24766 RESTORE ebx
24767 RESTORE ebp
24768+ pax_force_retaddr
24769 ret
24770 4:
24771 cmpl %edx, 4(%esp)
24772@@ -203,6 +280,13 @@ ENTRY(atomic64_inc_not_zero_cx8)
24773 xorl %ecx, %ecx
24774 addl $1, %ebx
24775 adcl %edx, %ecx
24776+
24777+#ifdef CONFIG_PAX_REFCOUNT
24778+ into
24779+1234:
24780+ _ASM_EXTABLE(1234b, 3f)
24781+#endif
24782+
24783 LOCK_PREFIX
24784 cmpxchg8b (%esi)
24785 jne 1b
24786@@ -210,6 +294,7 @@ ENTRY(atomic64_inc_not_zero_cx8)
24787 movl $1, %eax
24788 3:
24789 RESTORE ebx
24790+ pax_force_retaddr
24791 ret
24792 CFI_ENDPROC
24793 ENDPROC(atomic64_inc_not_zero_cx8)
24794diff --git a/arch/x86/lib/checksum_32.S b/arch/x86/lib/checksum_32.S
24795index e78b8ee..7e173a8 100644
24796--- a/arch/x86/lib/checksum_32.S
24797+++ b/arch/x86/lib/checksum_32.S
24798@@ -29,7 +29,8 @@
24799 #include <asm/dwarf2.h>
24800 #include <asm/errno.h>
24801 #include <asm/asm.h>
24802-
24803+#include <asm/segment.h>
24804+
24805 /*
24806 * computes a partial checksum, e.g. for TCP/UDP fragments
24807 */
24808@@ -293,9 +294,24 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
24809
24810 #define ARGBASE 16
24811 #define FP 12
24812-
24813-ENTRY(csum_partial_copy_generic)
24814+
24815+ENTRY(csum_partial_copy_generic_to_user)
24816 CFI_STARTPROC
24817+
24818+#ifdef CONFIG_PAX_MEMORY_UDEREF
24819+ pushl_cfi %gs
24820+ popl_cfi %es
24821+ jmp csum_partial_copy_generic
24822+#endif
24823+
24824+ENTRY(csum_partial_copy_generic_from_user)
24825+
24826+#ifdef CONFIG_PAX_MEMORY_UDEREF
24827+ pushl_cfi %gs
24828+ popl_cfi %ds
24829+#endif
24830+
24831+ENTRY(csum_partial_copy_generic)
24832 subl $4,%esp
24833 CFI_ADJUST_CFA_OFFSET 4
24834 pushl_cfi %edi
24835@@ -317,7 +333,7 @@ ENTRY(csum_partial_copy_generic)
24836 jmp 4f
24837 SRC(1: movw (%esi), %bx )
24838 addl $2, %esi
24839-DST( movw %bx, (%edi) )
24840+DST( movw %bx, %es:(%edi) )
24841 addl $2, %edi
24842 addw %bx, %ax
24843 adcl $0, %eax
24844@@ -329,30 +345,30 @@ DST( movw %bx, (%edi) )
24845 SRC(1: movl (%esi), %ebx )
24846 SRC( movl 4(%esi), %edx )
24847 adcl %ebx, %eax
24848-DST( movl %ebx, (%edi) )
24849+DST( movl %ebx, %es:(%edi) )
24850 adcl %edx, %eax
24851-DST( movl %edx, 4(%edi) )
24852+DST( movl %edx, %es:4(%edi) )
24853
24854 SRC( movl 8(%esi), %ebx )
24855 SRC( movl 12(%esi), %edx )
24856 adcl %ebx, %eax
24857-DST( movl %ebx, 8(%edi) )
24858+DST( movl %ebx, %es:8(%edi) )
24859 adcl %edx, %eax
24860-DST( movl %edx, 12(%edi) )
24861+DST( movl %edx, %es:12(%edi) )
24862
24863 SRC( movl 16(%esi), %ebx )
24864 SRC( movl 20(%esi), %edx )
24865 adcl %ebx, %eax
24866-DST( movl %ebx, 16(%edi) )
24867+DST( movl %ebx, %es:16(%edi) )
24868 adcl %edx, %eax
24869-DST( movl %edx, 20(%edi) )
24870+DST( movl %edx, %es:20(%edi) )
24871
24872 SRC( movl 24(%esi), %ebx )
24873 SRC( movl 28(%esi), %edx )
24874 adcl %ebx, %eax
24875-DST( movl %ebx, 24(%edi) )
24876+DST( movl %ebx, %es:24(%edi) )
24877 adcl %edx, %eax
24878-DST( movl %edx, 28(%edi) )
24879+DST( movl %edx, %es:28(%edi) )
24880
24881 lea 32(%esi), %esi
24882 lea 32(%edi), %edi
24883@@ -366,7 +382,7 @@ DST( movl %edx, 28(%edi) )
24884 shrl $2, %edx # This clears CF
24885 SRC(3: movl (%esi), %ebx )
24886 adcl %ebx, %eax
24887-DST( movl %ebx, (%edi) )
24888+DST( movl %ebx, %es:(%edi) )
24889 lea 4(%esi), %esi
24890 lea 4(%edi), %edi
24891 dec %edx
24892@@ -378,12 +394,12 @@ DST( movl %ebx, (%edi) )
24893 jb 5f
24894 SRC( movw (%esi), %cx )
24895 leal 2(%esi), %esi
24896-DST( movw %cx, (%edi) )
24897+DST( movw %cx, %es:(%edi) )
24898 leal 2(%edi), %edi
24899 je 6f
24900 shll $16,%ecx
24901 SRC(5: movb (%esi), %cl )
24902-DST( movb %cl, (%edi) )
24903+DST( movb %cl, %es:(%edi) )
24904 6: addl %ecx, %eax
24905 adcl $0, %eax
24906 7:
24907@@ -394,7 +410,7 @@ DST( movb %cl, (%edi) )
24908
24909 6001:
24910 movl ARGBASE+20(%esp), %ebx # src_err_ptr
24911- movl $-EFAULT, (%ebx)
24912+ movl $-EFAULT, %ss:(%ebx)
24913
24914 # zero the complete destination - computing the rest
24915 # is too much work
24916@@ -407,11 +423,15 @@ DST( movb %cl, (%edi) )
24917
24918 6002:
24919 movl ARGBASE+24(%esp), %ebx # dst_err_ptr
24920- movl $-EFAULT,(%ebx)
24921+ movl $-EFAULT,%ss:(%ebx)
24922 jmp 5000b
24923
24924 .previous
24925
24926+ pushl_cfi %ss
24927+ popl_cfi %ds
24928+ pushl_cfi %ss
24929+ popl_cfi %es
24930 popl_cfi %ebx
24931 CFI_RESTORE ebx
24932 popl_cfi %esi
24933@@ -421,26 +441,43 @@ DST( movb %cl, (%edi) )
24934 popl_cfi %ecx # equivalent to addl $4,%esp
24935 ret
24936 CFI_ENDPROC
24937-ENDPROC(csum_partial_copy_generic)
24938+ENDPROC(csum_partial_copy_generic_to_user)
24939
24940 #else
24941
24942 /* Version for PentiumII/PPro */
24943
24944 #define ROUND1(x) \
24945+ nop; nop; nop; \
24946 SRC(movl x(%esi), %ebx ) ; \
24947 addl %ebx, %eax ; \
24948- DST(movl %ebx, x(%edi) ) ;
24949+ DST(movl %ebx, %es:x(%edi)) ;
24950
24951 #define ROUND(x) \
24952+ nop; nop; nop; \
24953 SRC(movl x(%esi), %ebx ) ; \
24954 adcl %ebx, %eax ; \
24955- DST(movl %ebx, x(%edi) ) ;
24956+ DST(movl %ebx, %es:x(%edi)) ;
24957
24958 #define ARGBASE 12
24959-
24960-ENTRY(csum_partial_copy_generic)
24961+
24962+ENTRY(csum_partial_copy_generic_to_user)
24963 CFI_STARTPROC
24964+
24965+#ifdef CONFIG_PAX_MEMORY_UDEREF
24966+ pushl_cfi %gs
24967+ popl_cfi %es
24968+ jmp csum_partial_copy_generic
24969+#endif
24970+
24971+ENTRY(csum_partial_copy_generic_from_user)
24972+
24973+#ifdef CONFIG_PAX_MEMORY_UDEREF
24974+ pushl_cfi %gs
24975+ popl_cfi %ds
24976+#endif
24977+
24978+ENTRY(csum_partial_copy_generic)
24979 pushl_cfi %ebx
24980 CFI_REL_OFFSET ebx, 0
24981 pushl_cfi %edi
24982@@ -461,7 +498,7 @@ ENTRY(csum_partial_copy_generic)
24983 subl %ebx, %edi
24984 lea -1(%esi),%edx
24985 andl $-32,%edx
24986- lea 3f(%ebx,%ebx), %ebx
24987+ lea 3f(%ebx,%ebx,2), %ebx
24988 testl %esi, %esi
24989 jmp *%ebx
24990 1: addl $64,%esi
24991@@ -482,19 +519,19 @@ ENTRY(csum_partial_copy_generic)
24992 jb 5f
24993 SRC( movw (%esi), %dx )
24994 leal 2(%esi), %esi
24995-DST( movw %dx, (%edi) )
24996+DST( movw %dx, %es:(%edi) )
24997 leal 2(%edi), %edi
24998 je 6f
24999 shll $16,%edx
25000 5:
25001 SRC( movb (%esi), %dl )
25002-DST( movb %dl, (%edi) )
25003+DST( movb %dl, %es:(%edi) )
25004 6: addl %edx, %eax
25005 adcl $0, %eax
25006 7:
25007 .section .fixup, "ax"
25008 6001: movl ARGBASE+20(%esp), %ebx # src_err_ptr
25009- movl $-EFAULT, (%ebx)
25010+ movl $-EFAULT, %ss:(%ebx)
25011 # zero the complete destination (computing the rest is too much work)
25012 movl ARGBASE+8(%esp),%edi # dst
25013 movl ARGBASE+12(%esp),%ecx # len
25014@@ -502,10 +539,17 @@ DST( movb %dl, (%edi) )
25015 rep; stosb
25016 jmp 7b
25017 6002: movl ARGBASE+24(%esp), %ebx # dst_err_ptr
25018- movl $-EFAULT, (%ebx)
25019+ movl $-EFAULT, %ss:(%ebx)
25020 jmp 7b
25021 .previous
25022
25023+#ifdef CONFIG_PAX_MEMORY_UDEREF
25024+ pushl_cfi %ss
25025+ popl_cfi %ds
25026+ pushl_cfi %ss
25027+ popl_cfi %es
25028+#endif
25029+
25030 popl_cfi %esi
25031 CFI_RESTORE esi
25032 popl_cfi %edi
25033@@ -514,7 +558,7 @@ DST( movb %dl, (%edi) )
25034 CFI_RESTORE ebx
25035 ret
25036 CFI_ENDPROC
25037-ENDPROC(csum_partial_copy_generic)
25038+ENDPROC(csum_partial_copy_generic_to_user)
25039
25040 #undef ROUND
25041 #undef ROUND1
25042diff --git a/arch/x86/lib/clear_page_64.S b/arch/x86/lib/clear_page_64.S
25043index f2145cf..cea889d 100644
25044--- a/arch/x86/lib/clear_page_64.S
25045+++ b/arch/x86/lib/clear_page_64.S
25046@@ -11,6 +11,7 @@ ENTRY(clear_page_c)
25047 movl $4096/8,%ecx
25048 xorl %eax,%eax
25049 rep stosq
25050+ pax_force_retaddr
25051 ret
25052 CFI_ENDPROC
25053 ENDPROC(clear_page_c)
25054@@ -20,6 +21,7 @@ ENTRY(clear_page_c_e)
25055 movl $4096,%ecx
25056 xorl %eax,%eax
25057 rep stosb
25058+ pax_force_retaddr
25059 ret
25060 CFI_ENDPROC
25061 ENDPROC(clear_page_c_e)
25062@@ -43,6 +45,7 @@ ENTRY(clear_page)
25063 leaq 64(%rdi),%rdi
25064 jnz .Lloop
25065 nop
25066+ pax_force_retaddr
25067 ret
25068 CFI_ENDPROC
25069 .Lclear_page_end:
25070@@ -58,7 +61,7 @@ ENDPROC(clear_page)
25071
25072 #include <asm/cpufeature.h>
25073
25074- .section .altinstr_replacement,"ax"
25075+ .section .altinstr_replacement,"a"
25076 1: .byte 0xeb /* jmp <disp8> */
25077 .byte (clear_page_c - clear_page) - (2f - 1b) /* offset */
25078 2: .byte 0xeb /* jmp <disp8> */
25079diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
25080index 1e572c5..2a162cd 100644
25081--- a/arch/x86/lib/cmpxchg16b_emu.S
25082+++ b/arch/x86/lib/cmpxchg16b_emu.S
25083@@ -53,11 +53,13 @@ this_cpu_cmpxchg16b_emu:
25084
25085 popf
25086 mov $1, %al
25087+ pax_force_retaddr
25088 ret
25089
25090 not_same:
25091 popf
25092 xor %al,%al
25093+ pax_force_retaddr
25094 ret
25095
25096 CFI_ENDPROC
25097diff --git a/arch/x86/lib/copy_page_64.S b/arch/x86/lib/copy_page_64.S
25098index 176cca6..1166c50 100644
25099--- a/arch/x86/lib/copy_page_64.S
25100+++ b/arch/x86/lib/copy_page_64.S
25101@@ -9,6 +9,7 @@ copy_page_rep:
25102 CFI_STARTPROC
25103 movl $4096/8, %ecx
25104 rep movsq
25105+ pax_force_retaddr
25106 ret
25107 CFI_ENDPROC
25108 ENDPROC(copy_page_rep)
25109@@ -20,12 +21,14 @@ ENDPROC(copy_page_rep)
25110
25111 ENTRY(copy_page)
25112 CFI_STARTPROC
25113- subq $2*8, %rsp
25114- CFI_ADJUST_CFA_OFFSET 2*8
25115+ subq $3*8, %rsp
25116+ CFI_ADJUST_CFA_OFFSET 3*8
25117 movq %rbx, (%rsp)
25118 CFI_REL_OFFSET rbx, 0
25119 movq %r12, 1*8(%rsp)
25120 CFI_REL_OFFSET r12, 1*8
25121+ movq %r13, 2*8(%rsp)
25122+ CFI_REL_OFFSET r13, 2*8
25123
25124 movl $(4096/64)-5, %ecx
25125 .p2align 4
25126@@ -36,7 +39,7 @@ ENTRY(copy_page)
25127 movq 0x8*2(%rsi), %rdx
25128 movq 0x8*3(%rsi), %r8
25129 movq 0x8*4(%rsi), %r9
25130- movq 0x8*5(%rsi), %r10
25131+ movq 0x8*5(%rsi), %r13
25132 movq 0x8*6(%rsi), %r11
25133 movq 0x8*7(%rsi), %r12
25134
25135@@ -47,7 +50,7 @@ ENTRY(copy_page)
25136 movq %rdx, 0x8*2(%rdi)
25137 movq %r8, 0x8*3(%rdi)
25138 movq %r9, 0x8*4(%rdi)
25139- movq %r10, 0x8*5(%rdi)
25140+ movq %r13, 0x8*5(%rdi)
25141 movq %r11, 0x8*6(%rdi)
25142 movq %r12, 0x8*7(%rdi)
25143
25144@@ -66,7 +69,7 @@ ENTRY(copy_page)
25145 movq 0x8*2(%rsi), %rdx
25146 movq 0x8*3(%rsi), %r8
25147 movq 0x8*4(%rsi), %r9
25148- movq 0x8*5(%rsi), %r10
25149+ movq 0x8*5(%rsi), %r13
25150 movq 0x8*6(%rsi), %r11
25151 movq 0x8*7(%rsi), %r12
25152
25153@@ -75,7 +78,7 @@ ENTRY(copy_page)
25154 movq %rdx, 0x8*2(%rdi)
25155 movq %r8, 0x8*3(%rdi)
25156 movq %r9, 0x8*4(%rdi)
25157- movq %r10, 0x8*5(%rdi)
25158+ movq %r13, 0x8*5(%rdi)
25159 movq %r11, 0x8*6(%rdi)
25160 movq %r12, 0x8*7(%rdi)
25161
25162@@ -87,8 +90,11 @@ ENTRY(copy_page)
25163 CFI_RESTORE rbx
25164 movq 1*8(%rsp), %r12
25165 CFI_RESTORE r12
25166- addq $2*8, %rsp
25167- CFI_ADJUST_CFA_OFFSET -2*8
25168+ movq 2*8(%rsp), %r13
25169+ CFI_RESTORE r13
25170+ addq $3*8, %rsp
25171+ CFI_ADJUST_CFA_OFFSET -3*8
25172+ pax_force_retaddr
25173 ret
25174 .Lcopy_page_end:
25175 CFI_ENDPROC
25176@@ -99,7 +105,7 @@ ENDPROC(copy_page)
25177
25178 #include <asm/cpufeature.h>
25179
25180- .section .altinstr_replacement,"ax"
25181+ .section .altinstr_replacement,"a"
25182 1: .byte 0xeb /* jmp <disp8> */
25183 .byte (copy_page_rep - copy_page) - (2f - 1b) /* offset */
25184 2:
25185diff --git a/arch/x86/lib/copy_user_64.S b/arch/x86/lib/copy_user_64.S
25186index a30ca15..d25fab6 100644
25187--- a/arch/x86/lib/copy_user_64.S
25188+++ b/arch/x86/lib/copy_user_64.S
25189@@ -18,6 +18,7 @@
25190 #include <asm/alternative-asm.h>
25191 #include <asm/asm.h>
25192 #include <asm/smap.h>
25193+#include <asm/pgtable.h>
25194
25195 /*
25196 * By placing feature2 after feature1 in altinstructions section, we logically
25197@@ -31,7 +32,7 @@
25198 .byte 0xe9 /* 32bit jump */
25199 .long \orig-1f /* by default jump to orig */
25200 1:
25201- .section .altinstr_replacement,"ax"
25202+ .section .altinstr_replacement,"a"
25203 2: .byte 0xe9 /* near jump with 32bit immediate */
25204 .long \alt1-1b /* offset */ /* or alternatively to alt1 */
25205 3: .byte 0xe9 /* near jump with 32bit immediate */
25206@@ -70,47 +71,20 @@
25207 #endif
25208 .endm
25209
25210-/* Standard copy_to_user with segment limit checking */
25211-ENTRY(_copy_to_user)
25212- CFI_STARTPROC
25213- GET_THREAD_INFO(%rax)
25214- movq %rdi,%rcx
25215- addq %rdx,%rcx
25216- jc bad_to_user
25217- cmpq TI_addr_limit(%rax),%rcx
25218- ja bad_to_user
25219- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
25220- copy_user_generic_unrolled,copy_user_generic_string, \
25221- copy_user_enhanced_fast_string
25222- CFI_ENDPROC
25223-ENDPROC(_copy_to_user)
25224-
25225-/* Standard copy_from_user with segment limit checking */
25226-ENTRY(_copy_from_user)
25227- CFI_STARTPROC
25228- GET_THREAD_INFO(%rax)
25229- movq %rsi,%rcx
25230- addq %rdx,%rcx
25231- jc bad_from_user
25232- cmpq TI_addr_limit(%rax),%rcx
25233- ja bad_from_user
25234- ALTERNATIVE_JUMP X86_FEATURE_REP_GOOD,X86_FEATURE_ERMS, \
25235- copy_user_generic_unrolled,copy_user_generic_string, \
25236- copy_user_enhanced_fast_string
25237- CFI_ENDPROC
25238-ENDPROC(_copy_from_user)
25239-
25240 .section .fixup,"ax"
25241 /* must zero dest */
25242 ENTRY(bad_from_user)
25243 bad_from_user:
25244 CFI_STARTPROC
25245+ testl %edx,%edx
25246+ js bad_to_user
25247 movl %edx,%ecx
25248 xorl %eax,%eax
25249 rep
25250 stosb
25251 bad_to_user:
25252 movl %edx,%eax
25253+ pax_force_retaddr
25254 ret
25255 CFI_ENDPROC
25256 ENDPROC(bad_from_user)
25257@@ -141,19 +115,19 @@ ENTRY(copy_user_generic_unrolled)
25258 jz 17f
25259 1: movq (%rsi),%r8
25260 2: movq 1*8(%rsi),%r9
25261-3: movq 2*8(%rsi),%r10
25262+3: movq 2*8(%rsi),%rax
25263 4: movq 3*8(%rsi),%r11
25264 5: movq %r8,(%rdi)
25265 6: movq %r9,1*8(%rdi)
25266-7: movq %r10,2*8(%rdi)
25267+7: movq %rax,2*8(%rdi)
25268 8: movq %r11,3*8(%rdi)
25269 9: movq 4*8(%rsi),%r8
25270 10: movq 5*8(%rsi),%r9
25271-11: movq 6*8(%rsi),%r10
25272+11: movq 6*8(%rsi),%rax
25273 12: movq 7*8(%rsi),%r11
25274 13: movq %r8,4*8(%rdi)
25275 14: movq %r9,5*8(%rdi)
25276-15: movq %r10,6*8(%rdi)
25277+15: movq %rax,6*8(%rdi)
25278 16: movq %r11,7*8(%rdi)
25279 leaq 64(%rsi),%rsi
25280 leaq 64(%rdi),%rdi
25281@@ -180,6 +154,7 @@ ENTRY(copy_user_generic_unrolled)
25282 jnz 21b
25283 23: xor %eax,%eax
25284 ASM_CLAC
25285+ pax_force_retaddr
25286 ret
25287
25288 .section .fixup,"ax"
25289@@ -251,6 +226,7 @@ ENTRY(copy_user_generic_string)
25290 movsb
25291 4: xorl %eax,%eax
25292 ASM_CLAC
25293+ pax_force_retaddr
25294 ret
25295
25296 .section .fixup,"ax"
25297@@ -286,6 +262,7 @@ ENTRY(copy_user_enhanced_fast_string)
25298 movsb
25299 2: xorl %eax,%eax
25300 ASM_CLAC
25301+ pax_force_retaddr
25302 ret
25303
25304 .section .fixup,"ax"
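
The deleted _copy_to_user/_copy_from_user entry points performed the classic user-range check in asm (compute src/dst plus len, treat carry as wraparound, compare against TI_addr_limit) before dispatching on REP_GOOD/ERMS; grsecurity drops the asm stubs, and their exports go away in the x8664_ksyms_64.c hunk above, presumably so the checks live in C where the size-overflow instrumentation can see them. The range check itself, as a sketch:

    #include <stdbool.h>

    /* Sketch of the removed stubs' range check: reject a user buffer
     * that wraps or that ends past the thread's address limit.
     */
    static bool range_ok(unsigned long addr, unsigned long len,
                         unsigned long addr_limit)
    {
            unsigned long end = addr + len;

            if (end < addr)             /* the asm's "jc bad_*_user" */
                    return false;
            return end <= addr_limit;   /* the asm's "cmp ... / ja" pair */
    }
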
25305diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
25306index 6a4f43c..f08b4a2 100644
25307--- a/arch/x86/lib/copy_user_nocache_64.S
25308+++ b/arch/x86/lib/copy_user_nocache_64.S
25309@@ -8,6 +8,7 @@
25310
25311 #include <linux/linkage.h>
25312 #include <asm/dwarf2.h>
25313+#include <asm/alternative-asm.h>
25314
25315 #define FIX_ALIGNMENT 1
25316
25317@@ -16,6 +17,7 @@
25318 #include <asm/thread_info.h>
25319 #include <asm/asm.h>
25320 #include <asm/smap.h>
25321+#include <asm/pgtable.h>
25322
25323 .macro ALIGN_DESTINATION
25324 #ifdef FIX_ALIGNMENT
25325@@ -49,6 +51,15 @@
25326 */
25327 ENTRY(__copy_user_nocache)
25328 CFI_STARTPROC
25329+
25330+#ifdef CONFIG_PAX_MEMORY_UDEREF
25331+ mov pax_user_shadow_base,%rcx
25332+ cmp %rcx,%rsi
25333+ jae 1f
25334+ add %rcx,%rsi
25335+1:
25336+#endif
25337+
25338 ASM_STAC
25339 cmpl $8,%edx
25340 jb 20f /* less then 8 bytes, go to byte copy loop */
25341@@ -59,19 +70,19 @@ ENTRY(__copy_user_nocache)
25342 jz 17f
25343 1: movq (%rsi),%r8
25344 2: movq 1*8(%rsi),%r9
25345-3: movq 2*8(%rsi),%r10
25346+3: movq 2*8(%rsi),%rax
25347 4: movq 3*8(%rsi),%r11
25348 5: movnti %r8,(%rdi)
25349 6: movnti %r9,1*8(%rdi)
25350-7: movnti %r10,2*8(%rdi)
25351+7: movnti %rax,2*8(%rdi)
25352 8: movnti %r11,3*8(%rdi)
25353 9: movq 4*8(%rsi),%r8
25354 10: movq 5*8(%rsi),%r9
25355-11: movq 6*8(%rsi),%r10
25356+11: movq 6*8(%rsi),%rax
25357 12: movq 7*8(%rsi),%r11
25358 13: movnti %r8,4*8(%rdi)
25359 14: movnti %r9,5*8(%rdi)
25360-15: movnti %r10,6*8(%rdi)
25361+15: movnti %rax,6*8(%rdi)
25362 16: movnti %r11,7*8(%rdi)
25363 leaq 64(%rsi),%rsi
25364 leaq 64(%rdi),%rdi
25365@@ -99,6 +110,7 @@ ENTRY(__copy_user_nocache)
25366 23: xorl %eax,%eax
25367 ASM_CLAC
25368 sfence
25369+ pax_force_retaddr
25370 ret
25371
25372 .section .fixup,"ax"
25373diff --git a/arch/x86/lib/csum-copy_64.S b/arch/x86/lib/csum-copy_64.S
25374index 2419d5f..953ee51 100644
25375--- a/arch/x86/lib/csum-copy_64.S
25376+++ b/arch/x86/lib/csum-copy_64.S
25377@@ -9,6 +9,7 @@
25378 #include <asm/dwarf2.h>
25379 #include <asm/errno.h>
25380 #include <asm/asm.h>
25381+#include <asm/alternative-asm.h>
25382
25383 /*
25384 * Checksum copy with exception handling.
25385@@ -220,6 +221,7 @@ ENTRY(csum_partial_copy_generic)
25386 CFI_RESTORE rbp
25387 addq $7*8, %rsp
25388 CFI_ADJUST_CFA_OFFSET -7*8
25389+ pax_force_retaddr 0, 1
25390 ret
25391 CFI_RESTORE_STATE
25392
25393diff --git a/arch/x86/lib/csum-wrappers_64.c b/arch/x86/lib/csum-wrappers_64.c
25394index 25b7ae8..169fafc 100644
25395--- a/arch/x86/lib/csum-wrappers_64.c
25396+++ b/arch/x86/lib/csum-wrappers_64.c
25397@@ -52,7 +52,7 @@ csum_partial_copy_from_user(const void __user *src, void *dst,
25398 len -= 2;
25399 }
25400 }
25401- isum = csum_partial_copy_generic((__force const void *)src,
25402+ isum = csum_partial_copy_generic((const void __force_kernel *)____m(src),
25403 dst, len, isum, errp, NULL);
25404 if (unlikely(*errp))
25405 goto out_err;
25406@@ -105,7 +105,7 @@ csum_partial_copy_to_user(const void *src, void __user *dst,
25407 }
25408
25409 *errp = 0;
25410- return csum_partial_copy_generic(src, (void __force *)dst,
25411+ return csum_partial_copy_generic(src, (void __force_kernel *)____m(dst),
25412 len, isum, NULL, errp);
25413 }
25414 EXPORT_SYMBOL(csum_partial_copy_to_user);
25415diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
25416index a451235..79fb5cf 100644
25417--- a/arch/x86/lib/getuser.S
25418+++ b/arch/x86/lib/getuser.S
25419@@ -33,17 +33,40 @@
25420 #include <asm/thread_info.h>
25421 #include <asm/asm.h>
25422 #include <asm/smap.h>
25423+#include <asm/segment.h>
25424+#include <asm/pgtable.h>
25425+#include <asm/alternative-asm.h>
25426+
25427+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
25428+#define __copyuser_seg gs;
25429+#else
25430+#define __copyuser_seg
25431+#endif
25432
25433 .text
25434 ENTRY(__get_user_1)
25435 CFI_STARTPROC
25436+
25437+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
25438 GET_THREAD_INFO(%_ASM_DX)
25439 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
25440 jae bad_get_user
25441 ASM_STAC
25442-1: movzbl (%_ASM_AX),%edx
25443+
25444+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25445+ mov pax_user_shadow_base,%_ASM_DX
25446+ cmp %_ASM_DX,%_ASM_AX
25447+ jae 1234f
25448+ add %_ASM_DX,%_ASM_AX
25449+1234:
25450+#endif
25451+
25452+#endif
25453+
25454+1: __copyuser_seg movzbl (%_ASM_AX),%edx
25455 xor %eax,%eax
25456 ASM_CLAC
25457+ pax_force_retaddr
25458 ret
25459 CFI_ENDPROC
25460 ENDPROC(__get_user_1)
25461@@ -51,14 +74,28 @@ ENDPROC(__get_user_1)
25462 ENTRY(__get_user_2)
25463 CFI_STARTPROC
25464 add $1,%_ASM_AX
25465+
25466+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
25467 jc bad_get_user
25468 GET_THREAD_INFO(%_ASM_DX)
25469 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
25470 jae bad_get_user
25471 ASM_STAC
25472-2: movzwl -1(%_ASM_AX),%edx
25473+
25474+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25475+ mov pax_user_shadow_base,%_ASM_DX
25476+ cmp %_ASM_DX,%_ASM_AX
25477+ jae 1234f
25478+ add %_ASM_DX,%_ASM_AX
25479+1234:
25480+#endif
25481+
25482+#endif
25483+
25484+2: __copyuser_seg movzwl -1(%_ASM_AX),%edx
25485 xor %eax,%eax
25486 ASM_CLAC
25487+ pax_force_retaddr
25488 ret
25489 CFI_ENDPROC
25490 ENDPROC(__get_user_2)
25491@@ -66,14 +103,28 @@ ENDPROC(__get_user_2)
25492 ENTRY(__get_user_4)
25493 CFI_STARTPROC
25494 add $3,%_ASM_AX
25495+
25496+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
25497 jc bad_get_user
25498 GET_THREAD_INFO(%_ASM_DX)
25499 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
25500 jae bad_get_user
25501 ASM_STAC
25502-3: movl -3(%_ASM_AX),%edx
25503+
25504+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
25505+ mov pax_user_shadow_base,%_ASM_DX
25506+ cmp %_ASM_DX,%_ASM_AX
25507+ jae 1234f
25508+ add %_ASM_DX,%_ASM_AX
25509+1234:
25510+#endif
25511+
25512+#endif
25513+
25514+3: __copyuser_seg movl -3(%_ASM_AX),%edx
25515 xor %eax,%eax
25516 ASM_CLAC
25517+ pax_force_retaddr
25518 ret
25519 CFI_ENDPROC
25520 ENDPROC(__get_user_4)
25521@@ -86,10 +137,20 @@ ENTRY(__get_user_8)
25522 GET_THREAD_INFO(%_ASM_DX)
25523 cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
25524 jae bad_get_user
25525+
25526+#ifdef CONFIG_PAX_MEMORY_UDEREF
25527+ mov pax_user_shadow_base,%_ASM_DX
25528+ cmp %_ASM_DX,%_ASM_AX
25529+ jae 1234f
25530+ add %_ASM_DX,%_ASM_AX
25531+1234:
25532+#endif
25533+
25534 ASM_STAC
25535 4: movq -7(%_ASM_AX),%rdx
25536 xor %eax,%eax
25537 ASM_CLAC
25538+ pax_force_retaddr
25539 ret
25540 #else
25541 add $7,%_ASM_AX
25542@@ -102,6 +163,7 @@ ENTRY(__get_user_8)
25543 5: movl -3(%_ASM_AX),%ecx
25544 xor %eax,%eax
25545 ASM_CLAC
25546+ pax_force_retaddr
25547 ret
25548 #endif
25549 CFI_ENDPROC
25550@@ -113,6 +175,7 @@ bad_get_user:
25551 xor %edx,%edx
25552 mov $(-EFAULT),%_ASM_AX
25553 ASM_CLAC
25554+ pax_force_retaddr
25555 ret
25556 CFI_ENDPROC
25557 END(bad_get_user)
25558@@ -124,6 +187,7 @@ bad_get_user_8:
25559 xor %ecx,%ecx
25560 mov $(-EFAULT),%_ASM_AX
25561 ASM_CLAC
25562+ pax_force_retaddr
25563 ret
25564 CFI_ENDPROC
25565 END(bad_get_user_8)
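
The CONFIG_PAX_MEMORY_UDEREF blocks added above are the amd64 half of
UDEREF: userland is normally unreachable from kernel mode, and
legitimate accesses are steered into a shadow mapping of the user
address space that starts at pax_user_shadow_base. The inserted
cmp/jae/add sequence is equivalent to this sketch (the variable is the
one referenced above; the helper name is invented):

        extern unsigned long pax_user_shadow_base;      /* set at boot */

        static inline unsigned long pax_rebase_user_ptr(unsigned long p)
        {
                if (p < pax_user_shadow_base)
                        p += pax_user_shadow_base;      /* shadow alias */
                return p;
        }

The 1234 labels are just collision-proof local labels. Note also that
the TI_addr_limit comparison is compiled out entirely on i386 UDEREF
builds, where the gs prefix hidden behind __copyuser_seg lets the
segment limit perform the range check in hardware; putuser.S later in
this section applies the same rebase but keeps the offset in %_ASM_BX
and addresses the store as (_DEST), i.e. (%_ASM_CX,%_ASM_BX).
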
25566diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
25567index 54fcffe..7be149e 100644
25568--- a/arch/x86/lib/insn.c
25569+++ b/arch/x86/lib/insn.c
25570@@ -20,8 +20,10 @@
25571
25572 #ifdef __KERNEL__
25573 #include <linux/string.h>
25574+#include <asm/pgtable_types.h>
25575 #else
25576 #include <string.h>
25577+#define ktla_ktva(addr) addr
25578 #endif
25579 #include <asm/inat.h>
25580 #include <asm/insn.h>
25581@@ -53,8 +55,8 @@
25582 void insn_init(struct insn *insn, const void *kaddr, int x86_64)
25583 {
25584 memset(insn, 0, sizeof(*insn));
25585- insn->kaddr = kaddr;
25586- insn->next_byte = kaddr;
25587+ insn->kaddr = ktla_ktva(kaddr);
25588+ insn->next_byte = ktla_ktva(kaddr);
25589 insn->x86_64 = x86_64 ? 1 : 0;
25590 insn->opnd_bytes = 4;
25591 if (x86_64)
25592diff --git a/arch/x86/lib/iomap_copy_64.S b/arch/x86/lib/iomap_copy_64.S
25593index 05a95e7..326f2fa 100644
25594--- a/arch/x86/lib/iomap_copy_64.S
25595+++ b/arch/x86/lib/iomap_copy_64.S
25596@@ -17,6 +17,7 @@
25597
25598 #include <linux/linkage.h>
25599 #include <asm/dwarf2.h>
25600+#include <asm/alternative-asm.h>
25601
25602 /*
25603 * override generic version in lib/iomap_copy.c
25604@@ -25,6 +26,7 @@ ENTRY(__iowrite32_copy)
25605 CFI_STARTPROC
25606 movl %edx,%ecx
25607 rep movsd
25608+ pax_force_retaddr
25609 ret
25610 CFI_ENDPROC
25611 ENDPROC(__iowrite32_copy)
25612diff --git a/arch/x86/lib/memcpy_64.S b/arch/x86/lib/memcpy_64.S
25613index 56313a3..aa84a79 100644
25614--- a/arch/x86/lib/memcpy_64.S
25615+++ b/arch/x86/lib/memcpy_64.S
25616@@ -33,6 +33,7 @@
25617 rep movsq
25618 movl %edx, %ecx
25619 rep movsb
25620+ pax_force_retaddr
25621 ret
25622 .Lmemcpy_e:
25623 .previous
25624@@ -49,6 +50,7 @@
25625 movq %rdi, %rax
25626 movq %rdx, %rcx
25627 rep movsb
25628+ pax_force_retaddr
25629 ret
25630 .Lmemcpy_e_e:
25631 .previous
25632@@ -76,13 +78,13 @@ ENTRY(memcpy)
25633 */
25634 movq 0*8(%rsi), %r8
25635 movq 1*8(%rsi), %r9
25636- movq 2*8(%rsi), %r10
25637+ movq 2*8(%rsi), %rcx
25638 movq 3*8(%rsi), %r11
25639 leaq 4*8(%rsi), %rsi
25640
25641 movq %r8, 0*8(%rdi)
25642 movq %r9, 1*8(%rdi)
25643- movq %r10, 2*8(%rdi)
25644+ movq %rcx, 2*8(%rdi)
25645 movq %r11, 3*8(%rdi)
25646 leaq 4*8(%rdi), %rdi
25647 jae .Lcopy_forward_loop
25648@@ -105,12 +107,12 @@ ENTRY(memcpy)
25649 subq $0x20, %rdx
25650 movq -1*8(%rsi), %r8
25651 movq -2*8(%rsi), %r9
25652- movq -3*8(%rsi), %r10
25653+ movq -3*8(%rsi), %rcx
25654 movq -4*8(%rsi), %r11
25655 leaq -4*8(%rsi), %rsi
25656 movq %r8, -1*8(%rdi)
25657 movq %r9, -2*8(%rdi)
25658- movq %r10, -3*8(%rdi)
25659+ movq %rcx, -3*8(%rdi)
25660 movq %r11, -4*8(%rdi)
25661 leaq -4*8(%rdi), %rdi
25662 jae .Lcopy_backward_loop
25663@@ -130,12 +132,13 @@ ENTRY(memcpy)
25664 */
25665 movq 0*8(%rsi), %r8
25666 movq 1*8(%rsi), %r9
25667- movq -2*8(%rsi, %rdx), %r10
25668+ movq -2*8(%rsi, %rdx), %rcx
25669 movq -1*8(%rsi, %rdx), %r11
25670 movq %r8, 0*8(%rdi)
25671 movq %r9, 1*8(%rdi)
25672- movq %r10, -2*8(%rdi, %rdx)
25673+ movq %rcx, -2*8(%rdi, %rdx)
25674 movq %r11, -1*8(%rdi, %rdx)
25675+ pax_force_retaddr
25676 retq
25677 .p2align 4
25678 .Lless_16bytes:
25679@@ -148,6 +151,7 @@ ENTRY(memcpy)
25680 movq -1*8(%rsi, %rdx), %r9
25681 movq %r8, 0*8(%rdi)
25682 movq %r9, -1*8(%rdi, %rdx)
25683+ pax_force_retaddr
25684 retq
25685 .p2align 4
25686 .Lless_8bytes:
25687@@ -161,6 +165,7 @@ ENTRY(memcpy)
25688 movl -4(%rsi, %rdx), %r8d
25689 movl %ecx, (%rdi)
25690 movl %r8d, -4(%rdi, %rdx)
25691+ pax_force_retaddr
25692 retq
25693 .p2align 4
25694 .Lless_3bytes:
25695@@ -179,6 +184,7 @@ ENTRY(memcpy)
25696 movb %cl, (%rdi)
25697
25698 .Lend:
25699+ pax_force_retaddr
25700 retq
25701 CFI_ENDPROC
25702 ENDPROC(memcpy)
25703diff --git a/arch/x86/lib/memmove_64.S b/arch/x86/lib/memmove_64.S
25704index 65268a6..c9518d1 100644
25705--- a/arch/x86/lib/memmove_64.S
25706+++ b/arch/x86/lib/memmove_64.S
25707@@ -61,13 +61,13 @@ ENTRY(memmove)
25708 5:
25709 sub $0x20, %rdx
25710 movq 0*8(%rsi), %r11
25711- movq 1*8(%rsi), %r10
25712+ movq 1*8(%rsi), %rcx
25713 movq 2*8(%rsi), %r9
25714 movq 3*8(%rsi), %r8
25715 leaq 4*8(%rsi), %rsi
25716
25717 movq %r11, 0*8(%rdi)
25718- movq %r10, 1*8(%rdi)
25719+ movq %rcx, 1*8(%rdi)
25720 movq %r9, 2*8(%rdi)
25721 movq %r8, 3*8(%rdi)
25722 leaq 4*8(%rdi), %rdi
25723@@ -81,10 +81,10 @@ ENTRY(memmove)
25724 4:
25725 movq %rdx, %rcx
25726 movq -8(%rsi, %rdx), %r11
25727- lea -8(%rdi, %rdx), %r10
25728+ lea -8(%rdi, %rdx), %r9
25729 shrq $3, %rcx
25730 rep movsq
25731- movq %r11, (%r10)
25732+ movq %r11, (%r9)
25733 jmp 13f
25734 .Lmemmove_end_forward:
25735
25736@@ -95,14 +95,14 @@ ENTRY(memmove)
25737 7:
25738 movq %rdx, %rcx
25739 movq (%rsi), %r11
25740- movq %rdi, %r10
25741+ movq %rdi, %r9
25742 leaq -8(%rsi, %rdx), %rsi
25743 leaq -8(%rdi, %rdx), %rdi
25744 shrq $3, %rcx
25745 std
25746 rep movsq
25747 cld
25748- movq %r11, (%r10)
25749+ movq %r11, (%r9)
25750 jmp 13f
25751
25752 /*
25753@@ -127,13 +127,13 @@ ENTRY(memmove)
25754 8:
25755 subq $0x20, %rdx
25756 movq -1*8(%rsi), %r11
25757- movq -2*8(%rsi), %r10
25758+ movq -2*8(%rsi), %rcx
25759 movq -3*8(%rsi), %r9
25760 movq -4*8(%rsi), %r8
25761 leaq -4*8(%rsi), %rsi
25762
25763 movq %r11, -1*8(%rdi)
25764- movq %r10, -2*8(%rdi)
25765+ movq %rcx, -2*8(%rdi)
25766 movq %r9, -3*8(%rdi)
25767 movq %r8, -4*8(%rdi)
25768 leaq -4*8(%rdi), %rdi
25769@@ -151,11 +151,11 @@ ENTRY(memmove)
25770 * Move data from 16 bytes to 31 bytes.
25771 */
25772 movq 0*8(%rsi), %r11
25773- movq 1*8(%rsi), %r10
25774+ movq 1*8(%rsi), %rcx
25775 movq -2*8(%rsi, %rdx), %r9
25776 movq -1*8(%rsi, %rdx), %r8
25777 movq %r11, 0*8(%rdi)
25778- movq %r10, 1*8(%rdi)
25779+ movq %rcx, 1*8(%rdi)
25780 movq %r9, -2*8(%rdi, %rdx)
25781 movq %r8, -1*8(%rdi, %rdx)
25782 jmp 13f
25783@@ -167,9 +167,9 @@ ENTRY(memmove)
25784 * Move data from 8 bytes to 15 bytes.
25785 */
25786 movq 0*8(%rsi), %r11
25787- movq -1*8(%rsi, %rdx), %r10
25788+ movq -1*8(%rsi, %rdx), %r9
25789 movq %r11, 0*8(%rdi)
25790- movq %r10, -1*8(%rdi, %rdx)
25791+ movq %r9, -1*8(%rdi, %rdx)
25792 jmp 13f
25793 10:
25794 cmpq $4, %rdx
25795@@ -178,9 +178,9 @@ ENTRY(memmove)
25796 * Move data from 4 bytes to 7 bytes.
25797 */
25798 movl (%rsi), %r11d
25799- movl -4(%rsi, %rdx), %r10d
25800+ movl -4(%rsi, %rdx), %r9d
25801 movl %r11d, (%rdi)
25802- movl %r10d, -4(%rdi, %rdx)
25803+ movl %r9d, -4(%rdi, %rdx)
25804 jmp 13f
25805 11:
25806 cmp $2, %rdx
25807@@ -189,9 +189,9 @@ ENTRY(memmove)
25808 * Move data from 2 bytes to 3 bytes.
25809 */
25810 movw (%rsi), %r11w
25811- movw -2(%rsi, %rdx), %r10w
25812+ movw -2(%rsi, %rdx), %r9w
25813 movw %r11w, (%rdi)
25814- movw %r10w, -2(%rdi, %rdx)
25815+ movw %r9w, -2(%rdi, %rdx)
25816 jmp 13f
25817 12:
25818 cmp $1, %rdx
25819@@ -202,6 +202,7 @@ ENTRY(memmove)
25820 movb (%rsi), %r11b
25821 movb %r11b, (%rdi)
25822 13:
25823+ pax_force_retaddr
25824 retq
25825 CFI_ENDPROC
25826
25827@@ -210,6 +211,7 @@ ENTRY(memmove)
25828 /* Forward moving data. */
25829 movq %rdx, %rcx
25830 rep movsb
25831+ pax_force_retaddr
25832 retq
25833 .Lmemmove_end_forward_efs:
25834 .previous
25835diff --git a/arch/x86/lib/memset_64.S b/arch/x86/lib/memset_64.S
25836index 2dcb380..963660a 100644
25837--- a/arch/x86/lib/memset_64.S
25838+++ b/arch/x86/lib/memset_64.S
25839@@ -30,6 +30,7 @@
25840 movl %edx,%ecx
25841 rep stosb
25842 movq %r9,%rax
25843+ pax_force_retaddr
25844 ret
25845 .Lmemset_e:
25846 .previous
25847@@ -52,6 +53,7 @@
25848 movq %rdx,%rcx
25849 rep stosb
25850 movq %r9,%rax
25851+ pax_force_retaddr
25852 ret
25853 .Lmemset_e_e:
25854 .previous
25855@@ -59,7 +61,7 @@
25856 ENTRY(memset)
25857 ENTRY(__memset)
25858 CFI_STARTPROC
25859- movq %rdi,%r10
25860+ movq %rdi,%r11
25861
25862 /* expand byte value */
25863 movzbl %sil,%ecx
25864@@ -117,7 +119,8 @@ ENTRY(__memset)
25865 jnz .Lloop_1
25866
25867 .Lende:
25868- movq %r10,%rax
25869+ movq %r11,%rax
25870+ pax_force_retaddr
25871 ret
25872
25873 CFI_RESTORE_STATE
25874diff --git a/arch/x86/lib/mmx_32.c b/arch/x86/lib/mmx_32.c
25875index c9f2d9b..e7fd2c0 100644
25876--- a/arch/x86/lib/mmx_32.c
25877+++ b/arch/x86/lib/mmx_32.c
25878@@ -29,6 +29,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
25879 {
25880 void *p;
25881 int i;
25882+ unsigned long cr0;
25883
25884 if (unlikely(in_interrupt()))
25885 return __memcpy(to, from, len);
25886@@ -39,44 +40,72 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
25887 kernel_fpu_begin();
25888
25889 __asm__ __volatile__ (
25890- "1: prefetch (%0)\n" /* This set is 28 bytes */
25891- " prefetch 64(%0)\n"
25892- " prefetch 128(%0)\n"
25893- " prefetch 192(%0)\n"
25894- " prefetch 256(%0)\n"
25895+ "1: prefetch (%1)\n" /* This set is 28 bytes */
25896+ " prefetch 64(%1)\n"
25897+ " prefetch 128(%1)\n"
25898+ " prefetch 192(%1)\n"
25899+ " prefetch 256(%1)\n"
25900 "2: \n"
25901 ".section .fixup, \"ax\"\n"
25902- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
25903+ "3: \n"
25904+
25905+#ifdef CONFIG_PAX_KERNEXEC
25906+ " movl %%cr0, %0\n"
25907+ " movl %0, %%eax\n"
25908+ " andl $0xFFFEFFFF, %%eax\n"
25909+ " movl %%eax, %%cr0\n"
25910+#endif
25911+
25912+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
25913+
25914+#ifdef CONFIG_PAX_KERNEXEC
25915+ " movl %0, %%cr0\n"
25916+#endif
25917+
25918 " jmp 2b\n"
25919 ".previous\n"
25920 _ASM_EXTABLE(1b, 3b)
25921- : : "r" (from));
25922+ : "=&r" (cr0) : "r" (from) : "ax");
25923
25924 for ( ; i > 5; i--) {
25925 __asm__ __volatile__ (
25926- "1: prefetch 320(%0)\n"
25927- "2: movq (%0), %%mm0\n"
25928- " movq 8(%0), %%mm1\n"
25929- " movq 16(%0), %%mm2\n"
25930- " movq 24(%0), %%mm3\n"
25931- " movq %%mm0, (%1)\n"
25932- " movq %%mm1, 8(%1)\n"
25933- " movq %%mm2, 16(%1)\n"
25934- " movq %%mm3, 24(%1)\n"
25935- " movq 32(%0), %%mm0\n"
25936- " movq 40(%0), %%mm1\n"
25937- " movq 48(%0), %%mm2\n"
25938- " movq 56(%0), %%mm3\n"
25939- " movq %%mm0, 32(%1)\n"
25940- " movq %%mm1, 40(%1)\n"
25941- " movq %%mm2, 48(%1)\n"
25942- " movq %%mm3, 56(%1)\n"
25943+ "1: prefetch 320(%1)\n"
25944+ "2: movq (%1), %%mm0\n"
25945+ " movq 8(%1), %%mm1\n"
25946+ " movq 16(%1), %%mm2\n"
25947+ " movq 24(%1), %%mm3\n"
25948+ " movq %%mm0, (%2)\n"
25949+ " movq %%mm1, 8(%2)\n"
25950+ " movq %%mm2, 16(%2)\n"
25951+ " movq %%mm3, 24(%2)\n"
25952+ " movq 32(%1), %%mm0\n"
25953+ " movq 40(%1), %%mm1\n"
25954+ " movq 48(%1), %%mm2\n"
25955+ " movq 56(%1), %%mm3\n"
25956+ " movq %%mm0, 32(%2)\n"
25957+ " movq %%mm1, 40(%2)\n"
25958+ " movq %%mm2, 48(%2)\n"
25959+ " movq %%mm3, 56(%2)\n"
25960 ".section .fixup, \"ax\"\n"
25961- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
25962+ "3:\n"
25963+
25964+#ifdef CONFIG_PAX_KERNEXEC
25965+ " movl %%cr0, %0\n"
25966+ " movl %0, %%eax\n"
25967+ " andl $0xFFFEFFFF, %%eax\n"
25968+ " movl %%eax, %%cr0\n"
25969+#endif
25970+
25971+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
25972+
25973+#ifdef CONFIG_PAX_KERNEXEC
25974+ " movl %0, %%cr0\n"
25975+#endif
25976+
25977 " jmp 2b\n"
25978 ".previous\n"
25979 _ASM_EXTABLE(1b, 3b)
25980- : : "r" (from), "r" (to) : "memory");
25981+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
25982
25983 from += 64;
25984 to += 64;
25985@@ -158,6 +187,7 @@ static void fast_clear_page(void *page)
25986 static void fast_copy_page(void *to, void *from)
25987 {
25988 int i;
25989+ unsigned long cr0;
25990
25991 kernel_fpu_begin();
25992
25993@@ -166,42 +196,70 @@ static void fast_copy_page(void *to, void *from)
25994 * but that is for later. -AV
25995 */
25996 __asm__ __volatile__(
25997- "1: prefetch (%0)\n"
25998- " prefetch 64(%0)\n"
25999- " prefetch 128(%0)\n"
26000- " prefetch 192(%0)\n"
26001- " prefetch 256(%0)\n"
26002+ "1: prefetch (%1)\n"
26003+ " prefetch 64(%1)\n"
26004+ " prefetch 128(%1)\n"
26005+ " prefetch 192(%1)\n"
26006+ " prefetch 256(%1)\n"
26007 "2: \n"
26008 ".section .fixup, \"ax\"\n"
26009- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
26010+ "3: \n"
26011+
26012+#ifdef CONFIG_PAX_KERNEXEC
26013+ " movl %%cr0, %0\n"
26014+ " movl %0, %%eax\n"
26015+ " andl $0xFFFEFFFF, %%eax\n"
26016+ " movl %%eax, %%cr0\n"
26017+#endif
26018+
26019+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
26020+
26021+#ifdef CONFIG_PAX_KERNEXEC
26022+ " movl %0, %%cr0\n"
26023+#endif
26024+
26025 " jmp 2b\n"
26026 ".previous\n"
26027- _ASM_EXTABLE(1b, 3b) : : "r" (from));
26028+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
26029
26030 for (i = 0; i < (4096-320)/64; i++) {
26031 __asm__ __volatile__ (
26032- "1: prefetch 320(%0)\n"
26033- "2: movq (%0), %%mm0\n"
26034- " movntq %%mm0, (%1)\n"
26035- " movq 8(%0), %%mm1\n"
26036- " movntq %%mm1, 8(%1)\n"
26037- " movq 16(%0), %%mm2\n"
26038- " movntq %%mm2, 16(%1)\n"
26039- " movq 24(%0), %%mm3\n"
26040- " movntq %%mm3, 24(%1)\n"
26041- " movq 32(%0), %%mm4\n"
26042- " movntq %%mm4, 32(%1)\n"
26043- " movq 40(%0), %%mm5\n"
26044- " movntq %%mm5, 40(%1)\n"
26045- " movq 48(%0), %%mm6\n"
26046- " movntq %%mm6, 48(%1)\n"
26047- " movq 56(%0), %%mm7\n"
26048- " movntq %%mm7, 56(%1)\n"
26049+ "1: prefetch 320(%1)\n"
26050+ "2: movq (%1), %%mm0\n"
26051+ " movntq %%mm0, (%2)\n"
26052+ " movq 8(%1), %%mm1\n"
26053+ " movntq %%mm1, 8(%2)\n"
26054+ " movq 16(%1), %%mm2\n"
26055+ " movntq %%mm2, 16(%2)\n"
26056+ " movq 24(%1), %%mm3\n"
26057+ " movntq %%mm3, 24(%2)\n"
26058+ " movq 32(%1), %%mm4\n"
26059+ " movntq %%mm4, 32(%2)\n"
26060+ " movq 40(%1), %%mm5\n"
26061+ " movntq %%mm5, 40(%2)\n"
26062+ " movq 48(%1), %%mm6\n"
26063+ " movntq %%mm6, 48(%2)\n"
26064+ " movq 56(%1), %%mm7\n"
26065+ " movntq %%mm7, 56(%2)\n"
26066 ".section .fixup, \"ax\"\n"
26067- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
26068+ "3:\n"
26069+
26070+#ifdef CONFIG_PAX_KERNEXEC
26071+ " movl %%cr0, %0\n"
26072+ " movl %0, %%eax\n"
26073+ " andl $0xFFFEFFFF, %%eax\n"
26074+ " movl %%eax, %%cr0\n"
26075+#endif
26076+
26077+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
26078+
26079+#ifdef CONFIG_PAX_KERNEXEC
26080+ " movl %0, %%cr0\n"
26081+#endif
26082+
26083 " jmp 2b\n"
26084 ".previous\n"
26085- _ASM_EXTABLE(1b, 3b) : : "r" (from), "r" (to) : "memory");
26086+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
26087
26088 from += 64;
26089 to += 64;
26090@@ -280,47 +338,76 @@ static void fast_clear_page(void *page)
26091 static void fast_copy_page(void *to, void *from)
26092 {
26093 int i;
26094+ unsigned long cr0;
26095
26096 kernel_fpu_begin();
26097
26098 __asm__ __volatile__ (
26099- "1: prefetch (%0)\n"
26100- " prefetch 64(%0)\n"
26101- " prefetch 128(%0)\n"
26102- " prefetch 192(%0)\n"
26103- " prefetch 256(%0)\n"
26104+ "1: prefetch (%1)\n"
26105+ " prefetch 64(%1)\n"
26106+ " prefetch 128(%1)\n"
26107+ " prefetch 192(%1)\n"
26108+ " prefetch 256(%1)\n"
26109 "2: \n"
26110 ".section .fixup, \"ax\"\n"
26111- "3: movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
26112+ "3: \n"
26113+
26114+#ifdef CONFIG_PAX_KERNEXEC
26115+ " movl %%cr0, %0\n"
26116+ " movl %0, %%eax\n"
26117+ " andl $0xFFFEFFFF, %%eax\n"
26118+ " movl %%eax, %%cr0\n"
26119+#endif
26120+
26121+ " movw $0x1AEB, 1b\n" /* jmp on 26 bytes */
26122+
26123+#ifdef CONFIG_PAX_KERNEXEC
26124+ " movl %0, %%cr0\n"
26125+#endif
26126+
26127 " jmp 2b\n"
26128 ".previous\n"
26129- _ASM_EXTABLE(1b, 3b) : : "r" (from));
26130+ _ASM_EXTABLE(1b, 3b) : "=&r" (cr0) : "r" (from) : "ax");
26131
26132 for (i = 0; i < 4096/64; i++) {
26133 __asm__ __volatile__ (
26134- "1: prefetch 320(%0)\n"
26135- "2: movq (%0), %%mm0\n"
26136- " movq 8(%0), %%mm1\n"
26137- " movq 16(%0), %%mm2\n"
26138- " movq 24(%0), %%mm3\n"
26139- " movq %%mm0, (%1)\n"
26140- " movq %%mm1, 8(%1)\n"
26141- " movq %%mm2, 16(%1)\n"
26142- " movq %%mm3, 24(%1)\n"
26143- " movq 32(%0), %%mm0\n"
26144- " movq 40(%0), %%mm1\n"
26145- " movq 48(%0), %%mm2\n"
26146- " movq 56(%0), %%mm3\n"
26147- " movq %%mm0, 32(%1)\n"
26148- " movq %%mm1, 40(%1)\n"
26149- " movq %%mm2, 48(%1)\n"
26150- " movq %%mm3, 56(%1)\n"
26151+ "1: prefetch 320(%1)\n"
26152+ "2: movq (%1), %%mm0\n"
26153+ " movq 8(%1), %%mm1\n"
26154+ " movq 16(%1), %%mm2\n"
26155+ " movq 24(%1), %%mm3\n"
26156+ " movq %%mm0, (%2)\n"
26157+ " movq %%mm1, 8(%2)\n"
26158+ " movq %%mm2, 16(%2)\n"
26159+ " movq %%mm3, 24(%2)\n"
26160+ " movq 32(%1), %%mm0\n"
26161+ " movq 40(%1), %%mm1\n"
26162+ " movq 48(%1), %%mm2\n"
26163+ " movq 56(%1), %%mm3\n"
26164+ " movq %%mm0, 32(%2)\n"
26165+ " movq %%mm1, 40(%2)\n"
26166+ " movq %%mm2, 48(%2)\n"
26167+ " movq %%mm3, 56(%2)\n"
26168 ".section .fixup, \"ax\"\n"
26169- "3: movw $0x05EB, 1b\n" /* jmp on 5 bytes */
26170+ "3:\n"
26171+
26172+#ifdef CONFIG_PAX_KERNEXEC
26173+ " movl %%cr0, %0\n"
26174+ " movl %0, %%eax\n"
26175+ " andl $0xFFFEFFFF, %%eax\n"
26176+ " movl %%eax, %%cr0\n"
26177+#endif
26178+
26179+ " movw $0x05EB, 1b\n" /* jmp on 5 bytes */
26180+
26181+#ifdef CONFIG_PAX_KERNEXEC
26182+ " movl %0, %%cr0\n"
26183+#endif
26184+
26185 " jmp 2b\n"
26186 ".previous\n"
26187 _ASM_EXTABLE(1b, 3b)
26188- : : "r" (from), "r" (to) : "memory");
26189+ : "=&r" (cr0) : "r" (from), "r" (to) : "memory", "ax");
26190
26191 from += 64;
26192 to += 64;
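
The fixup paths in these MMX routines self-patch the kernel text: label
3 writes a two-byte short jump over the faulting prefetch at label 1
(0x1AEB is "EB 1A", jmp +26, stored little-endian; 0x05EB is jmp +5) so
the prefetch is skipped from then on. Under CONFIG_PAX_KERNEXEC that
text is read-only, so the patch saves %cr0, clears its WP bit (bit 16,
hence the 0xFFFEFFFF mask) around the store and then restores it, which
is also why each asm gains a "=&r" (cr0) output and an "ax" clobber.
The same open/close idea as C helpers (names invented, a sketch only):

        #include <asm/special_insns.h>          /* read_cr0/write_cr0 */
        #include <asm/processor-flags.h>        /* X86_CR0_WP */

        static inline unsigned long kernexec_open_text(void)
        {
                unsigned long cr0 = read_cr0();

                write_cr0(cr0 & ~X86_CR0_WP);   /* allow RO-text write */
                return cr0;
        }

        static inline void kernexec_close_text(unsigned long cr0)
        {
                write_cr0(cr0);                 /* restore protection */
        }
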
26193diff --git a/arch/x86/lib/msr-reg.S b/arch/x86/lib/msr-reg.S
26194index f6d13ee..aca5f0b 100644
26195--- a/arch/x86/lib/msr-reg.S
26196+++ b/arch/x86/lib/msr-reg.S
26197@@ -3,6 +3,7 @@
26198 #include <asm/dwarf2.h>
26199 #include <asm/asm.h>
26200 #include <asm/msr.h>
26201+#include <asm/alternative-asm.h>
26202
26203 #ifdef CONFIG_X86_64
26204 /*
26205@@ -16,7 +17,7 @@ ENTRY(\op\()_safe_regs)
26206 CFI_STARTPROC
26207 pushq_cfi %rbx
26208 pushq_cfi %rbp
26209- movq %rdi, %r10 /* Save pointer */
26210+ movq %rdi, %r9 /* Save pointer */
26211 xorl %r11d, %r11d /* Return value */
26212 movl (%rdi), %eax
26213 movl 4(%rdi), %ecx
26214@@ -27,16 +28,17 @@ ENTRY(\op\()_safe_regs)
26215 movl 28(%rdi), %edi
26216 CFI_REMEMBER_STATE
26217 1: \op
26218-2: movl %eax, (%r10)
26219+2: movl %eax, (%r9)
26220 movl %r11d, %eax /* Return value */
26221- movl %ecx, 4(%r10)
26222- movl %edx, 8(%r10)
26223- movl %ebx, 12(%r10)
26224- movl %ebp, 20(%r10)
26225- movl %esi, 24(%r10)
26226- movl %edi, 28(%r10)
26227+ movl %ecx, 4(%r9)
26228+ movl %edx, 8(%r9)
26229+ movl %ebx, 12(%r9)
26230+ movl %ebp, 20(%r9)
26231+ movl %esi, 24(%r9)
26232+ movl %edi, 28(%r9)
26233 popq_cfi %rbp
26234 popq_cfi %rbx
26235+ pax_force_retaddr
26236 ret
26237 3:
26238 CFI_RESTORE_STATE
26239diff --git a/arch/x86/lib/putuser.S b/arch/x86/lib/putuser.S
26240index fc6ba17..d4d989d 100644
26241--- a/arch/x86/lib/putuser.S
26242+++ b/arch/x86/lib/putuser.S
26243@@ -16,7 +16,9 @@
26244 #include <asm/errno.h>
26245 #include <asm/asm.h>
26246 #include <asm/smap.h>
26247-
26248+#include <asm/segment.h>
26249+#include <asm/pgtable.h>
26250+#include <asm/alternative-asm.h>
26251
26252 /*
26253 * __put_user_X
26254@@ -30,57 +32,125 @@
26255 * as they get called from within inline assembly.
26256 */
26257
26258-#define ENTER CFI_STARTPROC ; \
26259- GET_THREAD_INFO(%_ASM_BX)
26260-#define EXIT ASM_CLAC ; \
26261- ret ; \
26262+#define ENTER CFI_STARTPROC
26263+#define EXIT ASM_CLAC ; \
26264+ pax_force_retaddr ; \
26265+ ret ; \
26266 CFI_ENDPROC
26267
26268+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26269+#define _DEST %_ASM_CX,%_ASM_BX
26270+#else
26271+#define _DEST %_ASM_CX
26272+#endif
26273+
26274+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_MEMORY_UDEREF)
26275+#define __copyuser_seg gs;
26276+#else
26277+#define __copyuser_seg
26278+#endif
26279+
26280 .text
26281 ENTRY(__put_user_1)
26282 ENTER
26283+
26284+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
26285+ GET_THREAD_INFO(%_ASM_BX)
26286 cmp TI_addr_limit(%_ASM_BX),%_ASM_CX
26287 jae bad_put_user
26288 ASM_STAC
26289-1: movb %al,(%_ASM_CX)
26290+
26291+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26292+ mov pax_user_shadow_base,%_ASM_BX
26293+ cmp %_ASM_BX,%_ASM_CX
26294+ jb 1234f
26295+ xor %ebx,%ebx
26296+1234:
26297+#endif
26298+
26299+#endif
26300+
26301+1: __copyuser_seg movb %al,(_DEST)
26302 xor %eax,%eax
26303 EXIT
26304 ENDPROC(__put_user_1)
26305
26306 ENTRY(__put_user_2)
26307 ENTER
26308+
26309+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
26310+ GET_THREAD_INFO(%_ASM_BX)
26311 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
26312 sub $1,%_ASM_BX
26313 cmp %_ASM_BX,%_ASM_CX
26314 jae bad_put_user
26315 ASM_STAC
26316-2: movw %ax,(%_ASM_CX)
26317+
26318+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26319+ mov pax_user_shadow_base,%_ASM_BX
26320+ cmp %_ASM_BX,%_ASM_CX
26321+ jb 1234f
26322+ xor %ebx,%ebx
26323+1234:
26324+#endif
26325+
26326+#endif
26327+
26328+2: __copyuser_seg movw %ax,(_DEST)
26329 xor %eax,%eax
26330 EXIT
26331 ENDPROC(__put_user_2)
26332
26333 ENTRY(__put_user_4)
26334 ENTER
26335+
26336+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
26337+ GET_THREAD_INFO(%_ASM_BX)
26338 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
26339 sub $3,%_ASM_BX
26340 cmp %_ASM_BX,%_ASM_CX
26341 jae bad_put_user
26342 ASM_STAC
26343-3: movl %eax,(%_ASM_CX)
26344+
26345+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26346+ mov pax_user_shadow_base,%_ASM_BX
26347+ cmp %_ASM_BX,%_ASM_CX
26348+ jb 1234f
26349+ xor %ebx,%ebx
26350+1234:
26351+#endif
26352+
26353+#endif
26354+
26355+3: __copyuser_seg movl %eax,(_DEST)
26356 xor %eax,%eax
26357 EXIT
26358 ENDPROC(__put_user_4)
26359
26360 ENTRY(__put_user_8)
26361 ENTER
26362+
26363+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_MEMORY_UDEREF)
26364+ GET_THREAD_INFO(%_ASM_BX)
26365 mov TI_addr_limit(%_ASM_BX),%_ASM_BX
26366 sub $7,%_ASM_BX
26367 cmp %_ASM_BX,%_ASM_CX
26368 jae bad_put_user
26369 ASM_STAC
26370-4: mov %_ASM_AX,(%_ASM_CX)
26371+
26372+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
26373+ mov pax_user_shadow_base,%_ASM_BX
26374+ cmp %_ASM_BX,%_ASM_CX
26375+ jb 1234f
26376+ xor %ebx,%ebx
26377+1234:
26378+#endif
26379+
26380+#endif
26381+
26382+4: __copyuser_seg mov %_ASM_AX,(_DEST)
26383 #ifdef CONFIG_X86_32
26384-5: movl %edx,4(%_ASM_CX)
26385+5: __copyuser_seg movl %edx,4(_DEST)
26386 #endif
26387 xor %eax,%eax
26388 EXIT
26389diff --git a/arch/x86/lib/rwlock.S b/arch/x86/lib/rwlock.S
26390index 1cad221..de671ee 100644
26391--- a/arch/x86/lib/rwlock.S
26392+++ b/arch/x86/lib/rwlock.S
26393@@ -16,13 +16,34 @@ ENTRY(__write_lock_failed)
26394 FRAME
26395 0: LOCK_PREFIX
26396 WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
26397+
26398+#ifdef CONFIG_PAX_REFCOUNT
26399+ jno 1234f
26400+ LOCK_PREFIX
26401+ WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
26402+ int $4
26403+1234:
26404+ _ASM_EXTABLE(1234b, 1234b)
26405+#endif
26406+
26407 1: rep; nop
26408 cmpl $WRITE_LOCK_CMP, (%__lock_ptr)
26409 jne 1b
26410 LOCK_PREFIX
26411 WRITE_LOCK_SUB($RW_LOCK_BIAS) (%__lock_ptr)
26412+
26413+#ifdef CONFIG_PAX_REFCOUNT
26414+ jno 1234f
26415+ LOCK_PREFIX
26416+ WRITE_LOCK_ADD($RW_LOCK_BIAS) (%__lock_ptr)
26417+ int $4
26418+1234:
26419+ _ASM_EXTABLE(1234b, 1234b)
26420+#endif
26421+
26422 jnz 0b
26423 ENDFRAME
26424+ pax_force_retaddr
26425 ret
26426 CFI_ENDPROC
26427 END(__write_lock_failed)
26428@@ -32,13 +53,34 @@ ENTRY(__read_lock_failed)
26429 FRAME
26430 0: LOCK_PREFIX
26431 READ_LOCK_SIZE(inc) (%__lock_ptr)
26432+
26433+#ifdef CONFIG_PAX_REFCOUNT
26434+ jno 1234f
26435+ LOCK_PREFIX
26436+ READ_LOCK_SIZE(dec) (%__lock_ptr)
26437+ int $4
26438+1234:
26439+ _ASM_EXTABLE(1234b, 1234b)
26440+#endif
26441+
26442 1: rep; nop
26443 READ_LOCK_SIZE(cmp) $1, (%__lock_ptr)
26444 js 1b
26445 LOCK_PREFIX
26446 READ_LOCK_SIZE(dec) (%__lock_ptr)
26447+
26448+#ifdef CONFIG_PAX_REFCOUNT
26449+ jno 1234f
26450+ LOCK_PREFIX
26451+ READ_LOCK_SIZE(inc) (%__lock_ptr)
26452+ int $4
26453+1234:
26454+ _ASM_EXTABLE(1234b, 1234b)
26455+#endif
26456+
26457 js 0b
26458 ENDFRAME
26459+ pax_force_retaddr
26460 ret
26461 CFI_ENDPROC
26462 END(__read_lock_failed)
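
The CONFIG_PAX_REFCOUNT blocks wrap each lock-prefixed add/inc in an
overflow check: jno falls through when the signed result did not
overflow; otherwise the operation is undone and int $4 raises the x86
overflow exception, which the patched trap handler turns into a report,
while the _ASM_EXTABLE(1234b, 1234b) entry resumes execution at the
label right after the trap. A sketch of the same undo-then-trap shape
on a plain counter (not the patch's own atomic.h code):

        #include <asm/alternative.h>    /* LOCK_PREFIX */
        #include <asm/asm.h>            /* _ASM_EXTABLE */

        static inline void atomic_inc_checked(int *v)
        {
                asm volatile(LOCK_PREFIX "incl %0\n\t"
                             "jno 0f\n\t"
                             LOCK_PREFIX "decl %0\n\t"  /* undo */
                             "int $4\n"                 /* #OF */
                             "0:\n\t"
                             _ASM_EXTABLE(0b, 0b)
                             : "+m" (*v) : : "cc");
        }
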
26463diff --git a/arch/x86/lib/rwsem.S b/arch/x86/lib/rwsem.S
26464index 5dff5f0..cadebf4 100644
26465--- a/arch/x86/lib/rwsem.S
26466+++ b/arch/x86/lib/rwsem.S
26467@@ -94,6 +94,7 @@ ENTRY(call_rwsem_down_read_failed)
26468 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
26469 CFI_RESTORE __ASM_REG(dx)
26470 restore_common_regs
26471+ pax_force_retaddr
26472 ret
26473 CFI_ENDPROC
26474 ENDPROC(call_rwsem_down_read_failed)
26475@@ -104,6 +105,7 @@ ENTRY(call_rwsem_down_write_failed)
26476 movq %rax,%rdi
26477 call rwsem_down_write_failed
26478 restore_common_regs
26479+ pax_force_retaddr
26480 ret
26481 CFI_ENDPROC
26482 ENDPROC(call_rwsem_down_write_failed)
26483@@ -117,7 +119,8 @@ ENTRY(call_rwsem_wake)
26484 movq %rax,%rdi
26485 call rwsem_wake
26486 restore_common_regs
26487-1: ret
26488+1: pax_force_retaddr
26489+ ret
26490 CFI_ENDPROC
26491 ENDPROC(call_rwsem_wake)
26492
26493@@ -131,6 +134,7 @@ ENTRY(call_rwsem_downgrade_wake)
26494 __ASM_SIZE(pop,_cfi) %__ASM_REG(dx)
26495 CFI_RESTORE __ASM_REG(dx)
26496 restore_common_regs
26497+ pax_force_retaddr
26498 ret
26499 CFI_ENDPROC
26500 ENDPROC(call_rwsem_downgrade_wake)
26501diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
26502index a63efd6..ccecad8 100644
26503--- a/arch/x86/lib/thunk_64.S
26504+++ b/arch/x86/lib/thunk_64.S
26505@@ -8,6 +8,7 @@
26506 #include <linux/linkage.h>
26507 #include <asm/dwarf2.h>
26508 #include <asm/calling.h>
26509+#include <asm/alternative-asm.h>
26510
26511 /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
26512 .macro THUNK name, func, put_ret_addr_in_rdi=0
26513@@ -41,5 +42,6 @@
26514 SAVE_ARGS
26515 restore:
26516 RESTORE_ARGS
26517+ pax_force_retaddr
26518 ret
26519 CFI_ENDPROC
26520diff --git a/arch/x86/lib/usercopy_32.c b/arch/x86/lib/usercopy_32.c
26521index 3eb18ac..6890bc3 100644
26522--- a/arch/x86/lib/usercopy_32.c
26523+++ b/arch/x86/lib/usercopy_32.c
26524@@ -42,11 +42,13 @@ do { \
26525 int __d0; \
26526 might_fault(); \
26527 __asm__ __volatile__( \
26528+ __COPYUSER_SET_ES \
26529 ASM_STAC "\n" \
26530 "0: rep; stosl\n" \
26531 " movl %2,%0\n" \
26532 "1: rep; stosb\n" \
26533 "2: " ASM_CLAC "\n" \
26534+ __COPYUSER_RESTORE_ES \
26535 ".section .fixup,\"ax\"\n" \
26536 "3: lea 0(%2,%0,4),%0\n" \
26537 " jmp 2b\n" \
26538@@ -98,7 +100,7 @@ EXPORT_SYMBOL(__clear_user);
26539
26540 #ifdef CONFIG_X86_INTEL_USERCOPY
26541 static unsigned long
26542-__copy_user_intel(void __user *to, const void *from, unsigned long size)
26543+__generic_copy_to_user_intel(void __user *to, const void *from, unsigned long size)
26544 {
26545 int d0, d1;
26546 __asm__ __volatile__(
26547@@ -110,36 +112,36 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
26548 " .align 2,0x90\n"
26549 "3: movl 0(%4), %%eax\n"
26550 "4: movl 4(%4), %%edx\n"
26551- "5: movl %%eax, 0(%3)\n"
26552- "6: movl %%edx, 4(%3)\n"
26553+ "5: "__copyuser_seg" movl %%eax, 0(%3)\n"
26554+ "6: "__copyuser_seg" movl %%edx, 4(%3)\n"
26555 "7: movl 8(%4), %%eax\n"
26556 "8: movl 12(%4),%%edx\n"
26557- "9: movl %%eax, 8(%3)\n"
26558- "10: movl %%edx, 12(%3)\n"
26559+ "9: "__copyuser_seg" movl %%eax, 8(%3)\n"
26560+ "10: "__copyuser_seg" movl %%edx, 12(%3)\n"
26561 "11: movl 16(%4), %%eax\n"
26562 "12: movl 20(%4), %%edx\n"
26563- "13: movl %%eax, 16(%3)\n"
26564- "14: movl %%edx, 20(%3)\n"
26565+ "13: "__copyuser_seg" movl %%eax, 16(%3)\n"
26566+ "14: "__copyuser_seg" movl %%edx, 20(%3)\n"
26567 "15: movl 24(%4), %%eax\n"
26568 "16: movl 28(%4), %%edx\n"
26569- "17: movl %%eax, 24(%3)\n"
26570- "18: movl %%edx, 28(%3)\n"
26571+ "17: "__copyuser_seg" movl %%eax, 24(%3)\n"
26572+ "18: "__copyuser_seg" movl %%edx, 28(%3)\n"
26573 "19: movl 32(%4), %%eax\n"
26574 "20: movl 36(%4), %%edx\n"
26575- "21: movl %%eax, 32(%3)\n"
26576- "22: movl %%edx, 36(%3)\n"
26577+ "21: "__copyuser_seg" movl %%eax, 32(%3)\n"
26578+ "22: "__copyuser_seg" movl %%edx, 36(%3)\n"
26579 "23: movl 40(%4), %%eax\n"
26580 "24: movl 44(%4), %%edx\n"
26581- "25: movl %%eax, 40(%3)\n"
26582- "26: movl %%edx, 44(%3)\n"
26583+ "25: "__copyuser_seg" movl %%eax, 40(%3)\n"
26584+ "26: "__copyuser_seg" movl %%edx, 44(%3)\n"
26585 "27: movl 48(%4), %%eax\n"
26586 "28: movl 52(%4), %%edx\n"
26587- "29: movl %%eax, 48(%3)\n"
26588- "30: movl %%edx, 52(%3)\n"
26589+ "29: "__copyuser_seg" movl %%eax, 48(%3)\n"
26590+ "30: "__copyuser_seg" movl %%edx, 52(%3)\n"
26591 "31: movl 56(%4), %%eax\n"
26592 "32: movl 60(%4), %%edx\n"
26593- "33: movl %%eax, 56(%3)\n"
26594- "34: movl %%edx, 60(%3)\n"
26595+ "33: "__copyuser_seg" movl %%eax, 56(%3)\n"
26596+ "34: "__copyuser_seg" movl %%edx, 60(%3)\n"
26597 " addl $-64, %0\n"
26598 " addl $64, %4\n"
26599 " addl $64, %3\n"
26600@@ -149,10 +151,12 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
26601 " shrl $2, %0\n"
26602 " andl $3, %%eax\n"
26603 " cld\n"
26604+ __COPYUSER_SET_ES
26605 "99: rep; movsl\n"
26606 "36: movl %%eax, %0\n"
26607 "37: rep; movsb\n"
26608 "100:\n"
26609+ __COPYUSER_RESTORE_ES
26610 ".section .fixup,\"ax\"\n"
26611 "101: lea 0(%%eax,%0,4),%0\n"
26612 " jmp 100b\n"
26613@@ -202,46 +206,150 @@ __copy_user_intel(void __user *to, const void *from, unsigned long size)
26614 }
26615
26616 static unsigned long
26617+__generic_copy_from_user_intel(void *to, const void __user *from, unsigned long size)
26618+{
26619+ int d0, d1;
26620+ __asm__ __volatile__(
26621+ " .align 2,0x90\n"
26622+ "1: "__copyuser_seg" movl 32(%4), %%eax\n"
26623+ " cmpl $67, %0\n"
26624+ " jbe 3f\n"
26625+ "2: "__copyuser_seg" movl 64(%4), %%eax\n"
26626+ " .align 2,0x90\n"
26627+ "3: "__copyuser_seg" movl 0(%4), %%eax\n"
26628+ "4: "__copyuser_seg" movl 4(%4), %%edx\n"
26629+ "5: movl %%eax, 0(%3)\n"
26630+ "6: movl %%edx, 4(%3)\n"
26631+ "7: "__copyuser_seg" movl 8(%4), %%eax\n"
26632+ "8: "__copyuser_seg" movl 12(%4),%%edx\n"
26633+ "9: movl %%eax, 8(%3)\n"
26634+ "10: movl %%edx, 12(%3)\n"
26635+ "11: "__copyuser_seg" movl 16(%4), %%eax\n"
26636+ "12: "__copyuser_seg" movl 20(%4), %%edx\n"
26637+ "13: movl %%eax, 16(%3)\n"
26638+ "14: movl %%edx, 20(%3)\n"
26639+ "15: "__copyuser_seg" movl 24(%4), %%eax\n"
26640+ "16: "__copyuser_seg" movl 28(%4), %%edx\n"
26641+ "17: movl %%eax, 24(%3)\n"
26642+ "18: movl %%edx, 28(%3)\n"
26643+ "19: "__copyuser_seg" movl 32(%4), %%eax\n"
26644+ "20: "__copyuser_seg" movl 36(%4), %%edx\n"
26645+ "21: movl %%eax, 32(%3)\n"
26646+ "22: movl %%edx, 36(%3)\n"
26647+ "23: "__copyuser_seg" movl 40(%4), %%eax\n"
26648+ "24: "__copyuser_seg" movl 44(%4), %%edx\n"
26649+ "25: movl %%eax, 40(%3)\n"
26650+ "26: movl %%edx, 44(%3)\n"
26651+ "27: "__copyuser_seg" movl 48(%4), %%eax\n"
26652+ "28: "__copyuser_seg" movl 52(%4), %%edx\n"
26653+ "29: movl %%eax, 48(%3)\n"
26654+ "30: movl %%edx, 52(%3)\n"
26655+ "31: "__copyuser_seg" movl 56(%4), %%eax\n"
26656+ "32: "__copyuser_seg" movl 60(%4), %%edx\n"
26657+ "33: movl %%eax, 56(%3)\n"
26658+ "34: movl %%edx, 60(%3)\n"
26659+ " addl $-64, %0\n"
26660+ " addl $64, %4\n"
26661+ " addl $64, %3\n"
26662+ " cmpl $63, %0\n"
26663+ " ja 1b\n"
26664+ "35: movl %0, %%eax\n"
26665+ " shrl $2, %0\n"
26666+ " andl $3, %%eax\n"
26667+ " cld\n"
26668+ "99: rep; "__copyuser_seg" movsl\n"
26669+ "36: movl %%eax, %0\n"
26670+ "37: rep; "__copyuser_seg" movsb\n"
26671+ "100:\n"
26672+ ".section .fixup,\"ax\"\n"
26673+ "101: lea 0(%%eax,%0,4),%0\n"
26674+ " jmp 100b\n"
26675+ ".previous\n"
26676+ _ASM_EXTABLE(1b,100b)
26677+ _ASM_EXTABLE(2b,100b)
26678+ _ASM_EXTABLE(3b,100b)
26679+ _ASM_EXTABLE(4b,100b)
26680+ _ASM_EXTABLE(5b,100b)
26681+ _ASM_EXTABLE(6b,100b)
26682+ _ASM_EXTABLE(7b,100b)
26683+ _ASM_EXTABLE(8b,100b)
26684+ _ASM_EXTABLE(9b,100b)
26685+ _ASM_EXTABLE(10b,100b)
26686+ _ASM_EXTABLE(11b,100b)
26687+ _ASM_EXTABLE(12b,100b)
26688+ _ASM_EXTABLE(13b,100b)
26689+ _ASM_EXTABLE(14b,100b)
26690+ _ASM_EXTABLE(15b,100b)
26691+ _ASM_EXTABLE(16b,100b)
26692+ _ASM_EXTABLE(17b,100b)
26693+ _ASM_EXTABLE(18b,100b)
26694+ _ASM_EXTABLE(19b,100b)
26695+ _ASM_EXTABLE(20b,100b)
26696+ _ASM_EXTABLE(21b,100b)
26697+ _ASM_EXTABLE(22b,100b)
26698+ _ASM_EXTABLE(23b,100b)
26699+ _ASM_EXTABLE(24b,100b)
26700+ _ASM_EXTABLE(25b,100b)
26701+ _ASM_EXTABLE(26b,100b)
26702+ _ASM_EXTABLE(27b,100b)
26703+ _ASM_EXTABLE(28b,100b)
26704+ _ASM_EXTABLE(29b,100b)
26705+ _ASM_EXTABLE(30b,100b)
26706+ _ASM_EXTABLE(31b,100b)
26707+ _ASM_EXTABLE(32b,100b)
26708+ _ASM_EXTABLE(33b,100b)
26709+ _ASM_EXTABLE(34b,100b)
26710+ _ASM_EXTABLE(35b,100b)
26711+ _ASM_EXTABLE(36b,100b)
26712+ _ASM_EXTABLE(37b,100b)
26713+ _ASM_EXTABLE(99b,101b)
26714+ : "=&c"(size), "=&D" (d0), "=&S" (d1)
26715+ : "1"(to), "2"(from), "0"(size)
26716+ : "eax", "edx", "memory");
26717+ return size;
26718+}
26719+
26720+static unsigned long __size_overflow(3)
26721 __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
26722 {
26723 int d0, d1;
26724 __asm__ __volatile__(
26725 " .align 2,0x90\n"
26726- "0: movl 32(%4), %%eax\n"
26727+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
26728 " cmpl $67, %0\n"
26729 " jbe 2f\n"
26730- "1: movl 64(%4), %%eax\n"
26731+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
26732 " .align 2,0x90\n"
26733- "2: movl 0(%4), %%eax\n"
26734- "21: movl 4(%4), %%edx\n"
26735+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
26736+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
26737 " movl %%eax, 0(%3)\n"
26738 " movl %%edx, 4(%3)\n"
26739- "3: movl 8(%4), %%eax\n"
26740- "31: movl 12(%4),%%edx\n"
26741+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
26742+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
26743 " movl %%eax, 8(%3)\n"
26744 " movl %%edx, 12(%3)\n"
26745- "4: movl 16(%4), %%eax\n"
26746- "41: movl 20(%4), %%edx\n"
26747+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
26748+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
26749 " movl %%eax, 16(%3)\n"
26750 " movl %%edx, 20(%3)\n"
26751- "10: movl 24(%4), %%eax\n"
26752- "51: movl 28(%4), %%edx\n"
26753+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
26754+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
26755 " movl %%eax, 24(%3)\n"
26756 " movl %%edx, 28(%3)\n"
26757- "11: movl 32(%4), %%eax\n"
26758- "61: movl 36(%4), %%edx\n"
26759+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
26760+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
26761 " movl %%eax, 32(%3)\n"
26762 " movl %%edx, 36(%3)\n"
26763- "12: movl 40(%4), %%eax\n"
26764- "71: movl 44(%4), %%edx\n"
26765+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
26766+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
26767 " movl %%eax, 40(%3)\n"
26768 " movl %%edx, 44(%3)\n"
26769- "13: movl 48(%4), %%eax\n"
26770- "81: movl 52(%4), %%edx\n"
26771+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
26772+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
26773 " movl %%eax, 48(%3)\n"
26774 " movl %%edx, 52(%3)\n"
26775- "14: movl 56(%4), %%eax\n"
26776- "91: movl 60(%4), %%edx\n"
26777+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
26778+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
26779 " movl %%eax, 56(%3)\n"
26780 " movl %%edx, 60(%3)\n"
26781 " addl $-64, %0\n"
26782@@ -253,9 +361,9 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
26783 " shrl $2, %0\n"
26784 " andl $3, %%eax\n"
26785 " cld\n"
26786- "6: rep; movsl\n"
26787+ "6: rep; "__copyuser_seg" movsl\n"
26788 " movl %%eax,%0\n"
26789- "7: rep; movsb\n"
26790+ "7: rep; "__copyuser_seg" movsb\n"
26791 "8:\n"
26792 ".section .fixup,\"ax\"\n"
26793 "9: lea 0(%%eax,%0,4),%0\n"
26794@@ -298,48 +406,48 @@ __copy_user_zeroing_intel(void *to, const void __user *from, unsigned long size)
26795 * hyoshiok@miraclelinux.com
26796 */
26797
26798-static unsigned long __copy_user_zeroing_intel_nocache(void *to,
26799+static unsigned long __size_overflow(3) __copy_user_zeroing_intel_nocache(void *to,
26800 const void __user *from, unsigned long size)
26801 {
26802 int d0, d1;
26803
26804 __asm__ __volatile__(
26805 " .align 2,0x90\n"
26806- "0: movl 32(%4), %%eax\n"
26807+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
26808 " cmpl $67, %0\n"
26809 " jbe 2f\n"
26810- "1: movl 64(%4), %%eax\n"
26811+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
26812 " .align 2,0x90\n"
26813- "2: movl 0(%4), %%eax\n"
26814- "21: movl 4(%4), %%edx\n"
26815+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
26816+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
26817 " movnti %%eax, 0(%3)\n"
26818 " movnti %%edx, 4(%3)\n"
26819- "3: movl 8(%4), %%eax\n"
26820- "31: movl 12(%4),%%edx\n"
26821+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
26822+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
26823 " movnti %%eax, 8(%3)\n"
26824 " movnti %%edx, 12(%3)\n"
26825- "4: movl 16(%4), %%eax\n"
26826- "41: movl 20(%4), %%edx\n"
26827+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
26828+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
26829 " movnti %%eax, 16(%3)\n"
26830 " movnti %%edx, 20(%3)\n"
26831- "10: movl 24(%4), %%eax\n"
26832- "51: movl 28(%4), %%edx\n"
26833+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
26834+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
26835 " movnti %%eax, 24(%3)\n"
26836 " movnti %%edx, 28(%3)\n"
26837- "11: movl 32(%4), %%eax\n"
26838- "61: movl 36(%4), %%edx\n"
26839+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
26840+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
26841 " movnti %%eax, 32(%3)\n"
26842 " movnti %%edx, 36(%3)\n"
26843- "12: movl 40(%4), %%eax\n"
26844- "71: movl 44(%4), %%edx\n"
26845+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
26846+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
26847 " movnti %%eax, 40(%3)\n"
26848 " movnti %%edx, 44(%3)\n"
26849- "13: movl 48(%4), %%eax\n"
26850- "81: movl 52(%4), %%edx\n"
26851+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
26852+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
26853 " movnti %%eax, 48(%3)\n"
26854 " movnti %%edx, 52(%3)\n"
26855- "14: movl 56(%4), %%eax\n"
26856- "91: movl 60(%4), %%edx\n"
26857+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
26858+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
26859 " movnti %%eax, 56(%3)\n"
26860 " movnti %%edx, 60(%3)\n"
26861 " addl $-64, %0\n"
26862@@ -352,9 +460,9 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
26863 " shrl $2, %0\n"
26864 " andl $3, %%eax\n"
26865 " cld\n"
26866- "6: rep; movsl\n"
26867+ "6: rep; "__copyuser_seg" movsl\n"
26868 " movl %%eax,%0\n"
26869- "7: rep; movsb\n"
26870+ "7: rep; "__copyuser_seg" movsb\n"
26871 "8:\n"
26872 ".section .fixup,\"ax\"\n"
26873 "9: lea 0(%%eax,%0,4),%0\n"
26874@@ -392,48 +500,48 @@ static unsigned long __copy_user_zeroing_intel_nocache(void *to,
26875 return size;
26876 }
26877
26878-static unsigned long __copy_user_intel_nocache(void *to,
26879+static unsigned long __size_overflow(3) __copy_user_intel_nocache(void *to,
26880 const void __user *from, unsigned long size)
26881 {
26882 int d0, d1;
26883
26884 __asm__ __volatile__(
26885 " .align 2,0x90\n"
26886- "0: movl 32(%4), %%eax\n"
26887+ "0: "__copyuser_seg" movl 32(%4), %%eax\n"
26888 " cmpl $67, %0\n"
26889 " jbe 2f\n"
26890- "1: movl 64(%4), %%eax\n"
26891+ "1: "__copyuser_seg" movl 64(%4), %%eax\n"
26892 " .align 2,0x90\n"
26893- "2: movl 0(%4), %%eax\n"
26894- "21: movl 4(%4), %%edx\n"
26895+ "2: "__copyuser_seg" movl 0(%4), %%eax\n"
26896+ "21: "__copyuser_seg" movl 4(%4), %%edx\n"
26897 " movnti %%eax, 0(%3)\n"
26898 " movnti %%edx, 4(%3)\n"
26899- "3: movl 8(%4), %%eax\n"
26900- "31: movl 12(%4),%%edx\n"
26901+ "3: "__copyuser_seg" movl 8(%4), %%eax\n"
26902+ "31: "__copyuser_seg" movl 12(%4),%%edx\n"
26903 " movnti %%eax, 8(%3)\n"
26904 " movnti %%edx, 12(%3)\n"
26905- "4: movl 16(%4), %%eax\n"
26906- "41: movl 20(%4), %%edx\n"
26907+ "4: "__copyuser_seg" movl 16(%4), %%eax\n"
26908+ "41: "__copyuser_seg" movl 20(%4), %%edx\n"
26909 " movnti %%eax, 16(%3)\n"
26910 " movnti %%edx, 20(%3)\n"
26911- "10: movl 24(%4), %%eax\n"
26912- "51: movl 28(%4), %%edx\n"
26913+ "10: "__copyuser_seg" movl 24(%4), %%eax\n"
26914+ "51: "__copyuser_seg" movl 28(%4), %%edx\n"
26915 " movnti %%eax, 24(%3)\n"
26916 " movnti %%edx, 28(%3)\n"
26917- "11: movl 32(%4), %%eax\n"
26918- "61: movl 36(%4), %%edx\n"
26919+ "11: "__copyuser_seg" movl 32(%4), %%eax\n"
26920+ "61: "__copyuser_seg" movl 36(%4), %%edx\n"
26921 " movnti %%eax, 32(%3)\n"
26922 " movnti %%edx, 36(%3)\n"
26923- "12: movl 40(%4), %%eax\n"
26924- "71: movl 44(%4), %%edx\n"
26925+ "12: "__copyuser_seg" movl 40(%4), %%eax\n"
26926+ "71: "__copyuser_seg" movl 44(%4), %%edx\n"
26927 " movnti %%eax, 40(%3)\n"
26928 " movnti %%edx, 44(%3)\n"
26929- "13: movl 48(%4), %%eax\n"
26930- "81: movl 52(%4), %%edx\n"
26931+ "13: "__copyuser_seg" movl 48(%4), %%eax\n"
26932+ "81: "__copyuser_seg" movl 52(%4), %%edx\n"
26933 " movnti %%eax, 48(%3)\n"
26934 " movnti %%edx, 52(%3)\n"
26935- "14: movl 56(%4), %%eax\n"
26936- "91: movl 60(%4), %%edx\n"
26937+ "14: "__copyuser_seg" movl 56(%4), %%eax\n"
26938+ "91: "__copyuser_seg" movl 60(%4), %%edx\n"
26939 " movnti %%eax, 56(%3)\n"
26940 " movnti %%edx, 60(%3)\n"
26941 " addl $-64, %0\n"
26942@@ -446,9 +554,9 @@ static unsigned long __copy_user_intel_nocache(void *to,
26943 " shrl $2, %0\n"
26944 " andl $3, %%eax\n"
26945 " cld\n"
26946- "6: rep; movsl\n"
26947+ "6: rep; "__copyuser_seg" movsl\n"
26948 " movl %%eax,%0\n"
26949- "7: rep; movsb\n"
26950+ "7: rep; "__copyuser_seg" movsb\n"
26951 "8:\n"
26952 ".section .fixup,\"ax\"\n"
26953 "9: lea 0(%%eax,%0,4),%0\n"
26954@@ -488,32 +596,36 @@ static unsigned long __copy_user_intel_nocache(void *to,
26955 */
26956 unsigned long __copy_user_zeroing_intel(void *to, const void __user *from,
26957 unsigned long size);
26958-unsigned long __copy_user_intel(void __user *to, const void *from,
26959+unsigned long __generic_copy_to_user_intel(void __user *to, const void *from,
26960+ unsigned long size);
26961+unsigned long __generic_copy_from_user_intel(void *to, const void __user *from,
26962 unsigned long size);
26963 unsigned long __copy_user_zeroing_intel_nocache(void *to,
26964 const void __user *from, unsigned long size);
26965 #endif /* CONFIG_X86_INTEL_USERCOPY */
26966
26967 /* Generic arbitrary sized copy. */
26968-#define __copy_user(to, from, size) \
26969+#define __copy_user(to, from, size, prefix, set, restore) \
26970 do { \
26971 int __d0, __d1, __d2; \
26972 __asm__ __volatile__( \
26973+ set \
26974 " cmp $7,%0\n" \
26975 " jbe 1f\n" \
26976 " movl %1,%0\n" \
26977 " negl %0\n" \
26978 " andl $7,%0\n" \
26979 " subl %0,%3\n" \
26980- "4: rep; movsb\n" \
26981+ "4: rep; "prefix"movsb\n" \
26982 " movl %3,%0\n" \
26983 " shrl $2,%0\n" \
26984 " andl $3,%3\n" \
26985 " .align 2,0x90\n" \
26986- "0: rep; movsl\n" \
26987+ "0: rep; "prefix"movsl\n" \
26988 " movl %3,%0\n" \
26989- "1: rep; movsb\n" \
26990+ "1: rep; "prefix"movsb\n" \
26991 "2:\n" \
26992+ restore \
26993 ".section .fixup,\"ax\"\n" \
26994 "5: addl %3,%0\n" \
26995 " jmp 2b\n" \
26996@@ -538,14 +650,14 @@ do { \
26997 " negl %0\n" \
26998 " andl $7,%0\n" \
26999 " subl %0,%3\n" \
27000- "4: rep; movsb\n" \
27001+ "4: rep; "__copyuser_seg"movsb\n" \
27002 " movl %3,%0\n" \
27003 " shrl $2,%0\n" \
27004 " andl $3,%3\n" \
27005 " .align 2,0x90\n" \
27006- "0: rep; movsl\n" \
27007+ "0: rep; "__copyuser_seg"movsl\n" \
27008 " movl %3,%0\n" \
27009- "1: rep; movsb\n" \
27010+ "1: rep; "__copyuser_seg"movsb\n" \
27011 "2:\n" \
27012 ".section .fixup,\"ax\"\n" \
27013 "5: addl %3,%0\n" \
27014@@ -572,9 +684,9 @@ unsigned long __copy_to_user_ll(void __user *to, const void *from,
27015 {
27016 stac();
27017 if (movsl_is_ok(to, from, n))
27018- __copy_user(to, from, n);
27019+ __copy_user(to, from, n, "", __COPYUSER_SET_ES, __COPYUSER_RESTORE_ES);
27020 else
27021- n = __copy_user_intel(to, from, n);
27022+ n = __generic_copy_to_user_intel(to, from, n);
27023 clac();
27024 return n;
27025 }
27026@@ -598,10 +710,9 @@ unsigned long __copy_from_user_ll_nozero(void *to, const void __user *from,
27027 {
27028 stac();
27029 if (movsl_is_ok(to, from, n))
27030- __copy_user(to, from, n);
27031+ __copy_user(to, from, n, __copyuser_seg, "", "");
27032 else
27033- n = __copy_user_intel((void __user *)to,
27034- (const void *)from, n);
27035+ n = __generic_copy_from_user_intel(to, from, n);
27036 clac();
27037 return n;
27038 }
27039@@ -632,60 +743,38 @@ unsigned long __copy_from_user_ll_nocache_nozero(void *to, const void __user *fr
27040 if (n > 64 && cpu_has_xmm2)
27041 n = __copy_user_intel_nocache(to, from, n);
27042 else
27043- __copy_user(to, from, n);
27044+ __copy_user(to, from, n, __copyuser_seg, "", "");
27045 #else
27046- __copy_user(to, from, n);
27047+ __copy_user(to, from, n, __copyuser_seg, "", "");
27048 #endif
27049 clac();
27050 return n;
27051 }
27052 EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
27053
27054-/**
27055- * copy_to_user: - Copy a block of data into user space.
27056- * @to: Destination address, in user space.
27057- * @from: Source address, in kernel space.
27058- * @n: Number of bytes to copy.
27059- *
27060- * Context: User context only. This function may sleep.
27061- *
27062- * Copy data from kernel space to user space.
27063- *
27064- * Returns number of bytes that could not be copied.
27065- * On success, this will be zero.
27066- */
27067-unsigned long
27068-copy_to_user(void __user *to, const void *from, unsigned long n)
27069+#ifdef CONFIG_PAX_MEMORY_UDEREF
27070+void __set_fs(mm_segment_t x)
27071 {
27072- if (access_ok(VERIFY_WRITE, to, n))
27073- n = __copy_to_user(to, from, n);
27074- return n;
27075+ switch (x.seg) {
27076+ case 0:
27077+ loadsegment(gs, 0);
27078+ break;
27079+ case TASK_SIZE_MAX:
27080+ loadsegment(gs, __USER_DS);
27081+ break;
27082+ case -1UL:
27083+ loadsegment(gs, __KERNEL_DS);
27084+ break;
27085+ default:
27086+ BUG();
27087+ }
27088 }
27089-EXPORT_SYMBOL(copy_to_user);
27090+EXPORT_SYMBOL(__set_fs);
27091
27092-/**
27093- * copy_from_user: - Copy a block of data from user space.
27094- * @to: Destination address, in kernel space.
27095- * @from: Source address, in user space.
27096- * @n: Number of bytes to copy.
27097- *
27098- * Context: User context only. This function may sleep.
27099- *
27100- * Copy data from user space to kernel space.
27101- *
27102- * Returns number of bytes that could not be copied.
27103- * On success, this will be zero.
27104- *
27105- * If some data could not be copied, this function will pad the copied
27106- * data to the requested size using zero bytes.
27107- */
27108-unsigned long
27109-_copy_from_user(void *to, const void __user *from, unsigned long n)
27110+void set_fs(mm_segment_t x)
27111 {
27112- if (access_ok(VERIFY_READ, from, n))
27113- n = __copy_from_user(to, from, n);
27114- else
27115- memset(to, 0, n);
27116- return n;
27117+ current_thread_info()->addr_limit = x;
27118+ __set_fs(x);
27119 }
27120-EXPORT_SYMBOL(_copy_from_user);
27121+EXPORT_SYMBOL(set_fs);
27122+#endif
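
On i386, UDEREF replaces the software limit check with a hardware one:
set_fs() now also calls __set_fs(), which loads %gs with the null
selector for a zero limit, __USER_DS for the user range, or __KERNEL_DS
for the full address space (the GDT changes that shrink the user
descriptors accordingly sit elsewhere in the series), and each user
access is issued through that segment via the gs prefix that
__copyuser_seg splices into the asm, so an out-of-range pointer faults
in the segmentation unit with no explicit compare. The
__COPYUSER_SET_ES/__COPYUSER_RESTORE_ES pairs exist because rep movs
and rep stos write through %es, which no prefix can override, so the
to-user paths must swap %es itself while the from-user paths only
override the source segment. A sketch of what one __copyuser_seg load
boils down to (fixup and exception-table handling elided):

        static inline unsigned char gs_load_byte(const void *uptr)
        {
                unsigned char v;

                /* the "gs;" here is what __copyuser_seg expands to on
                 * i386 UDEREF builds; the access is bounded by the
                 * %gs descriptor limit set up in __set_fs() */
                asm volatile("gs; movb (%1), %0"
                             : "=q" (v) : "r" (uptr) : "memory");
                return v;
        }

The removed copy_to_user()/_copy_from_user() bodies are not dropped
outright; the series apparently reintroduces them as inline wrappers in
the patched uaccess headers, outside this section.
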
27123diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c
27124index 906fea3..5646695 100644
27125--- a/arch/x86/lib/usercopy_64.c
27126+++ b/arch/x86/lib/usercopy_64.c
27127@@ -39,7 +39,7 @@ unsigned long __clear_user(void __user *addr, unsigned long size)
27128 _ASM_EXTABLE(0b,3b)
27129 _ASM_EXTABLE(1b,2b)
27130 : [size8] "=&c"(size), [dst] "=&D" (__d0)
27131- : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(addr),
27132+ : [size1] "r"(size & 7), "[size8]" (size / 8), "[dst]"(____m(addr)),
27133 [zero] "r" (0UL), [eight] "r" (8UL));
27134 clac();
27135 return size;
27136@@ -54,12 +54,11 @@ unsigned long clear_user(void __user *to, unsigned long n)
27137 }
27138 EXPORT_SYMBOL(clear_user);
27139
27140-unsigned long copy_in_user(void __user *to, const void __user *from, unsigned len)
27141+unsigned long copy_in_user(void __user *to, const void __user *from, unsigned long len)
27142 {
27143- if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len)) {
27144- return copy_user_generic((__force void *)to, (__force void *)from, len);
27145- }
27146- return len;
27147+ if (access_ok(VERIFY_WRITE, to, len) && access_ok(VERIFY_READ, from, len))
27148+ return copy_user_generic((void __force_kernel *)____m(to), (void __force_kernel *)____m(from), len);
27149+ return len;
27150 }
27151 EXPORT_SYMBOL(copy_in_user);
27152
27153@@ -69,7 +68,7 @@ EXPORT_SYMBOL(copy_in_user);
27154 * it is not necessary to optimize tail handling.
27155 */
27156 unsigned long
27157-copy_user_handle_tail(char *to, char *from, unsigned len, unsigned zerorest)
27158+copy_user_handle_tail(char __user *to, char __user *from, unsigned long len, unsigned zerorest)
27159 {
27160 char c;
27161 unsigned zero_len;
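
____m() is the C-side counterpart of the rebase done in getuser.S
above: on amd64 UDEREF it moves a userland pointer into the shadow
mapping before the raw copy routine dereferences it, and it is the
identity otherwise. Its real definition sits in the patched uaccess
headers outside this section; an assumed shape, reusing the invented
helper from the getuser.S note:

        #ifdef CONFIG_PAX_MEMORY_UDEREF
        #define ____m(p) ((typeof(p))pax_rebase_user_ptr((unsigned long)(p)))
        #else
        #define ____m(p) (p)
        #endif

The widening of the copy_in_user() and copy_user_handle_tail() length
parameters from unsigned to unsigned long removes a truncation on
64-bit sizes, in line with the size_overflow instrumentation the rest
of the series applies.
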
27162diff --git a/arch/x86/mm/extable.c b/arch/x86/mm/extable.c
27163index 903ec1e..c4166b2 100644
27164--- a/arch/x86/mm/extable.c
27165+++ b/arch/x86/mm/extable.c
27166@@ -6,12 +6,24 @@
27167 static inline unsigned long
27168 ex_insn_addr(const struct exception_table_entry *x)
27169 {
27170- return (unsigned long)&x->insn + x->insn;
27171+ unsigned long reloc = 0;
27172+
27173+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27174+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
27175+#endif
27176+
27177+ return (unsigned long)&x->insn + x->insn + reloc;
27178 }
27179 static inline unsigned long
27180 ex_fixup_addr(const struct exception_table_entry *x)
27181 {
27182- return (unsigned long)&x->fixup + x->fixup;
27183+ unsigned long reloc = 0;
27184+
27185+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27186+ reloc = ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
27187+#endif
27188+
27189+ return (unsigned long)&x->fixup + x->fixup + reloc;
27190 }
27191
27192 int fixup_exception(struct pt_regs *regs)
27193@@ -20,7 +32,7 @@ int fixup_exception(struct pt_regs *regs)
27194 unsigned long new_ip;
27195
27196 #ifdef CONFIG_PNPBIOS
27197- if (unlikely(SEGMENT_IS_PNP_CODE(regs->cs))) {
27198+ if (unlikely(!v8086_mode(regs) && SEGMENT_IS_PNP_CODE(regs->cs))) {
27199 extern u32 pnp_bios_fault_eip, pnp_bios_fault_esp;
27200 extern u32 pnp_bios_is_utter_crap;
27201 pnp_bios_is_utter_crap = 1;
27202@@ -145,6 +157,13 @@ void sort_extable(struct exception_table_entry *start,
27203 i += 4;
27204 p->fixup -= i;
27205 i += 4;
27206+
27207+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
27208+ BUILD_BUG_ON(!IS_ENABLED(CONFIG_BUILDTIME_EXTABLE_SORT));
27209+ p->insn -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
27210+ p->fixup -= ____LOAD_PHYSICAL_ADDR - LOAD_PHYSICAL_ADDR;
27211+#endif
27212+
27213 }
27214 }
27215
27216diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
27217index 654be4a..d36985f 100644
27218--- a/arch/x86/mm/fault.c
27219+++ b/arch/x86/mm/fault.c
27220@@ -14,11 +14,18 @@
27221 #include <linux/hugetlb.h> /* hstate_index_to_shift */
27222 #include <linux/prefetch.h> /* prefetchw */
27223 #include <linux/context_tracking.h> /* exception_enter(), ... */
27224+#include <linux/unistd.h>
27225+#include <linux/compiler.h>
27226
27227 #include <asm/traps.h> /* dotraplinkage, ... */
27228 #include <asm/pgalloc.h> /* pgd_*(), ... */
27229 #include <asm/kmemcheck.h> /* kmemcheck_*(), ... */
27230 #include <asm/fixmap.h> /* VSYSCALL_START */
27231+#include <asm/tlbflush.h>
27232+
27233+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
27234+#include <asm/stacktrace.h>
27235+#endif
27236
27237 /*
27238 * Page fault error code bits:
27239@@ -56,7 +63,7 @@ static inline int __kprobes notify_page_fault(struct pt_regs *regs)
27240 int ret = 0;
27241
27242 /* kprobe_running() needs smp_processor_id() */
27243- if (kprobes_built_in() && !user_mode_vm(regs)) {
27244+ if (kprobes_built_in() && !user_mode(regs)) {
27245 preempt_disable();
27246 if (kprobe_running() && kprobe_fault_handler(regs, 14))
27247 ret = 1;
27248@@ -117,7 +124,10 @@ check_prefetch_opcode(struct pt_regs *regs, unsigned char *instr,
27249 return !instr_lo || (instr_lo>>1) == 1;
27250 case 0x00:
27251 /* Prefetch instruction is 0x0F0D or 0x0F18 */
27252- if (probe_kernel_address(instr, opcode))
27253+ if (user_mode(regs)) {
27254+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
27255+ return 0;
27256+ } else if (probe_kernel_address(instr, opcode))
27257 return 0;
27258
27259 *prefetch = (instr_lo == 0xF) &&
27260@@ -151,7 +161,10 @@ is_prefetch(struct pt_regs *regs, unsigned long error_code, unsigned long addr)
27261 while (instr < max_instr) {
27262 unsigned char opcode;
27263
27264- if (probe_kernel_address(instr, opcode))
27265+ if (user_mode(regs)) {
27266+ if (__copy_from_user_inatomic(&opcode, (unsigned char __force_user *)(instr), 1))
27267+ break;
27268+ } else if (probe_kernel_address(instr, opcode))
27269 break;
27270
27271 instr++;
27272@@ -182,6 +195,34 @@ force_sig_info_fault(int si_signo, int si_code, unsigned long address,
27273 force_sig_info(si_signo, &info, tsk);
27274 }
27275
27276+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
27277+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address);
27278+#endif
27279+
27280+#ifdef CONFIG_PAX_EMUTRAMP
27281+static int pax_handle_fetch_fault(struct pt_regs *regs);
27282+#endif
27283+
27284+#ifdef CONFIG_PAX_PAGEEXEC
27285+static inline pmd_t * pax_get_pmd(struct mm_struct *mm, unsigned long address)
27286+{
27287+ pgd_t *pgd;
27288+ pud_t *pud;
27289+ pmd_t *pmd;
27290+
27291+ pgd = pgd_offset(mm, address);
27292+ if (!pgd_present(*pgd))
27293+ return NULL;
27294+ pud = pud_offset(pgd, address);
27295+ if (!pud_present(*pud))
27296+ return NULL;
27297+ pmd = pmd_offset(pud, address);
27298+ if (!pmd_present(*pmd))
27299+ return NULL;
27300+ return pmd;
27301+}
27302+#endif
27303+
27304 DEFINE_SPINLOCK(pgd_lock);
27305 LIST_HEAD(pgd_list);
27306
27307@@ -232,10 +273,22 @@ void vmalloc_sync_all(void)
27308 for (address = VMALLOC_START & PMD_MASK;
27309 address >= TASK_SIZE && address < FIXADDR_TOP;
27310 address += PMD_SIZE) {
27311+
27312+#ifdef CONFIG_PAX_PER_CPU_PGD
27313+ unsigned long cpu;
27314+#else
27315 struct page *page;
27316+#endif
27317
27318 spin_lock(&pgd_lock);
27319+
27320+#ifdef CONFIG_PAX_PER_CPU_PGD
27321+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
27322+ pgd_t *pgd = get_cpu_pgd(cpu);
27323+ pmd_t *ret;
27324+#else
27325 list_for_each_entry(page, &pgd_list, lru) {
27326+ pgd_t *pgd;
27327 spinlock_t *pgt_lock;
27328 pmd_t *ret;
27329
27330@@ -243,8 +296,14 @@ void vmalloc_sync_all(void)
27331 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
27332
27333 spin_lock(pgt_lock);
27334- ret = vmalloc_sync_one(page_address(page), address);
27335+ pgd = page_address(page);
27336+#endif
27337+
27338+ ret = vmalloc_sync_one(pgd, address);
27339+
27340+#ifndef CONFIG_PAX_PER_CPU_PGD
27341 spin_unlock(pgt_lock);
27342+#endif
27343
27344 if (!ret)
27345 break;
27346@@ -278,6 +337,11 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
27347 * an interrupt in the middle of a task switch..
27348 */
27349 pgd_paddr = read_cr3();
27350+
27351+#ifdef CONFIG_PAX_PER_CPU_PGD
27352+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (pgd_paddr & PHYSICAL_PAGE_MASK));
27353+#endif
27354+
27355 pmd_k = vmalloc_sync_one(__va(pgd_paddr), address);
27356 if (!pmd_k)
27357 return -1;
27358@@ -373,7 +437,14 @@ static noinline __kprobes int vmalloc_fault(unsigned long address)
27359 * happen within a race in page table update. In the later
27360 * case just flush:
27361 */
27362+
27363+#ifdef CONFIG_PAX_PER_CPU_PGD
27364+ BUG_ON(__pa(get_cpu_pgd(smp_processor_id())) != (read_cr3() & PHYSICAL_PAGE_MASK));
27365+ pgd = pgd_offset_cpu(smp_processor_id(), address);
27366+#else
27367 pgd = pgd_offset(current->active_mm, address);
27368+#endif
27369+
27370 pgd_ref = pgd_offset_k(address);
27371 if (pgd_none(*pgd_ref))
27372 return -1;
27373@@ -543,7 +614,7 @@ static int is_errata93(struct pt_regs *regs, unsigned long address)
27374 static int is_errata100(struct pt_regs *regs, unsigned long address)
27375 {
27376 #ifdef CONFIG_X86_64
27377- if ((regs->cs == __USER32_CS || (regs->cs & (1<<2))) && (address >> 32))
27378+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)) && (address >> 32))
27379 return 1;
27380 #endif
27381 return 0;
27382@@ -570,7 +641,7 @@ static int is_f00f_bug(struct pt_regs *regs, unsigned long address)
27383 }
27384
27385 static const char nx_warning[] = KERN_CRIT
27386-"kernel tried to execute NX-protected page - exploit attempt? (uid: %d)\n";
27387+"kernel tried to execute NX-protected page - exploit attempt? (uid: %d, task: %s, pid: %d)\n";
27388
27389 static void
27390 show_fault_oops(struct pt_regs *regs, unsigned long error_code,
27391@@ -579,15 +650,27 @@ show_fault_oops(struct pt_regs *regs, unsigned long error_code,
27392 if (!oops_may_print())
27393 return;
27394
27395- if (error_code & PF_INSTR) {
27396+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR)) {
27397 unsigned int level;
27398
27399 pte_t *pte = lookup_address(address, &level);
27400
27401 if (pte && pte_present(*pte) && !pte_exec(*pte))
27402- printk(nx_warning, from_kuid(&init_user_ns, current_uid()));
27403+ printk(nx_warning, from_kuid_munged(&init_user_ns, current_uid()), current->comm, task_pid_nr(current));
27404 }
27405
27406+#ifdef CONFIG_PAX_KERNEXEC
27407+ if (init_mm.start_code <= address && address < init_mm.end_code) {
27408+ if (current->signal->curr_ip)
27409+ printk(KERN_ERR "PAX: From %pI4: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n",
27410+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
27411+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
27412+ else
27413+ printk(KERN_ERR "PAX: %s:%d, uid/euid: %u/%u, attempted to modify kernel code\n", current->comm, task_pid_nr(current),
27414+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
27415+ }
27416+#endif
27417+
27418 printk(KERN_ALERT "BUG: unable to handle kernel ");
27419 if (address < PAGE_SIZE)
27420 printk(KERN_CONT "NULL pointer dereference");
27421@@ -750,6 +833,22 @@ __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
27422 return;
27423 }
27424 #endif
27425+
27426+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
27427+ if (pax_is_fetch_fault(regs, error_code, address)) {
27428+
27429+#ifdef CONFIG_PAX_EMUTRAMP
27430+ switch (pax_handle_fetch_fault(regs)) {
27431+ case 2:
27432+ return;
27433+ }
27434+#endif
27435+
27436+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
27437+ do_group_exit(SIGKILL);
27438+ }
27439+#endif
27440+
27441 /* Kernel addresses are always protection faults: */
27442 if (address >= TASK_SIZE)
27443 error_code |= PF_PROT;
27444@@ -835,7 +934,7 @@ do_sigbus(struct pt_regs *regs, unsigned long error_code, unsigned long address,
27445 if (fault & (VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
27446 printk(KERN_ERR
27447 "MCE: Killing %s:%d due to hardware memory corruption fault at %lx\n",
27448- tsk->comm, tsk->pid, address);
27449+ tsk->comm, task_pid_nr(tsk), address);
27450 code = BUS_MCEERR_AR;
27451 }
27452 #endif
27453@@ -898,6 +997,99 @@ static int spurious_fault_check(unsigned long error_code, pte_t *pte)
27454 return 1;
27455 }
27456
27457+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
27458+static int pax_handle_pageexec_fault(struct pt_regs *regs, struct mm_struct *mm, unsigned long address, unsigned long error_code)
27459+{
27460+ pte_t *pte;
27461+ pmd_t *pmd;
27462+ spinlock_t *ptl;
27463+ unsigned char pte_mask;
27464+
27465+ if ((__supported_pte_mask & _PAGE_NX) || (error_code & (PF_PROT|PF_USER)) != (PF_PROT|PF_USER) || v8086_mode(regs) ||
27466+ !(mm->pax_flags & MF_PAX_PAGEEXEC))
27467+ return 0;
27468+
27469+ /* PaX: it's our fault, let's handle it if we can */
27470+
27471+ /* PaX: take a look at read faults before acquiring any locks */
27472+ if (unlikely(!(error_code & PF_WRITE) && (regs->ip == address))) {
27473+ /* instruction fetch attempt from a protected page in user mode */
27474+ up_read(&mm->mmap_sem);
27475+
27476+#ifdef CONFIG_PAX_EMUTRAMP
27477+ switch (pax_handle_fetch_fault(regs)) {
27478+ case 2:
27479+ return 1;
27480+ }
27481+#endif
27482+
27483+ pax_report_fault(regs, (void *)regs->ip, (void *)regs->sp);
27484+ do_group_exit(SIGKILL);
27485+ }
27486+
27487+ pmd = pax_get_pmd(mm, address);
27488+ if (unlikely(!pmd))
27489+ return 0;
27490+
27491+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
27492+ if (unlikely(!(pte_val(*pte) & _PAGE_PRESENT) || pte_user(*pte))) {
27493+ pte_unmap_unlock(pte, ptl);
27494+ return 0;
27495+ }
27496+
27497+ if (unlikely((error_code & PF_WRITE) && !pte_write(*pte))) {
27498+ /* write attempt to a protected page in user mode */
27499+ pte_unmap_unlock(pte, ptl);
27500+ return 0;
27501+ }
27502+
27503+#ifdef CONFIG_SMP
27504+ if (likely(address > get_limit(regs->cs) && cpu_isset(smp_processor_id(), mm->context.cpu_user_cs_mask)))
27505+#else
27506+ if (likely(address > get_limit(regs->cs)))
27507+#endif
27508+ {
27509+ set_pte(pte, pte_mkread(*pte));
27510+ __flush_tlb_one(address);
27511+ pte_unmap_unlock(pte, ptl);
27512+ up_read(&mm->mmap_sem);
27513+ return 1;
27514+ }
27515+
27516+ pte_mask = _PAGE_ACCESSED | _PAGE_USER | ((error_code & PF_WRITE) << (_PAGE_BIT_DIRTY-1));
27517+
27518+ /*
27519+ * PaX: fill DTLB with user rights and retry
27520+ */
27521+ __asm__ __volatile__ (
27522+ "orb %2,(%1)\n"
27523+#if defined(CONFIG_M586) || defined(CONFIG_M586TSC)
27524+/*
27525+ * PaX: let this uncommented 'invlpg' remind us on the behaviour of Intel's
27526+ * (and AMD's) TLBs. namely, they do not cache PTEs that would raise *any*
27527+ * page fault when examined during a TLB load attempt. this is true not only
27528+ * for PTEs holding a non-present entry but also present entries that will
27529+ * raise a page fault (such as those set up by PaX, or the copy-on-write
27530+ * mechanism). in effect it means that we do *not* need to flush the TLBs
27531+ * for our target pages since their PTEs are simply not in the TLBs at all.
27532+
27533+ * the best thing in omitting it is that we gain around 15-20% speed in the
27534+ * fast path of the page fault handler and can get rid of tracing since we
27535+ * can no longer flush unintended entries.
27536+ */
27537+ "invlpg (%0)\n"
27538+#endif
27539+ __copyuser_seg"testb $0,(%0)\n"
27540+ "xorb %3,(%1)\n"
27541+ :
27542+ : "r" (address), "r" (pte), "q" (pte_mask), "i" (_PAGE_USER)
27543+ : "memory", "cc");
27544+ pte_unmap_unlock(pte, ptl);
27545+ up_read(&mm->mmap_sem);
27546+ return 1;
27547+}
27548+#endif
27549+
27550 /*
27551 * Handle a spurious fault caused by a stale TLB entry.
27552 *
27553@@ -964,6 +1156,9 @@ int show_unhandled_signals = 1;
27554 static inline int
27555 access_error(unsigned long error_code, struct vm_area_struct *vma)
27556 {
27557+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR) && !(vma->vm_flags & VM_EXEC))
27558+ return 1;
27559+
27560 if (error_code & PF_WRITE) {
27561 /* write, present and write, not present: */
27562 if (unlikely(!(vma->vm_flags & VM_WRITE)))
27563@@ -992,7 +1187,7 @@ static inline bool smap_violation(int error_code, struct pt_regs *regs)
27564 if (error_code & PF_USER)
27565 return false;
27566
27567- if (!user_mode_vm(regs) && (regs->flags & X86_EFLAGS_AC))
27568+ if (!user_mode(regs) && (regs->flags & X86_EFLAGS_AC))
27569 return false;
27570
27571 return true;
27572@@ -1008,18 +1203,33 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
27573 {
27574 struct vm_area_struct *vma;
27575 struct task_struct *tsk;
27576- unsigned long address;
27577 struct mm_struct *mm;
27578 int fault;
27579 int write = error_code & PF_WRITE;
27580 unsigned int flags = FAULT_FLAG_ALLOW_RETRY | FAULT_FLAG_KILLABLE |
27581 (write ? FAULT_FLAG_WRITE : 0);
27582
27583- tsk = current;
27584- mm = tsk->mm;
27585-
27586 /* Get the faulting address: */
27587- address = read_cr2();
27588+ unsigned long address = read_cr2();
27589+
27590+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
27591+ if (!user_mode(regs) && address < 2 * pax_user_shadow_base) {
27592+ if (!search_exception_tables(regs->ip)) {
27593+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
27594+ bad_area_nosemaphore(regs, error_code, address);
27595+ return;
27596+ }
27597+ if (address < pax_user_shadow_base) {
27598+ printk(KERN_ERR "PAX: please report this to pageexec@freemail.hu\n");
27599+ printk(KERN_ERR "PAX: faulting IP: %pS\n", (void *)regs->ip);
27600+ show_trace_log_lvl(NULL, NULL, (void *)regs->sp, regs->bp, KERN_ERR);
27601+ } else
27602+ address -= pax_user_shadow_base;
27603+ }
27604+#endif
27605+
27606+ tsk = current;
27607+ mm = tsk->mm;
27608
27609 /*
27610 * Detect and handle instructions that would cause a page fault for
27611@@ -1080,7 +1290,7 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code)
27612 * User-mode registers count as a user access even for any
27613 * potential system fault or CPU buglet:
27614 */
27615- if (user_mode_vm(regs)) {
27616+ if (user_mode(regs)) {
27617 local_irq_enable();
27618 error_code |= PF_USER;
27619 } else {
27620@@ -1142,6 +1352,11 @@ retry:
27621 might_sleep();
27622 }
27623
27624+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_PAGEEXEC)
27625+ if (pax_handle_pageexec_fault(regs, mm, address, error_code))
27626+ return;
27627+#endif
27628+
27629 vma = find_vma(mm, address);
27630 if (unlikely(!vma)) {
27631 bad_area(regs, error_code, address);
27632@@ -1153,18 +1368,24 @@ retry:
27633 bad_area(regs, error_code, address);
27634 return;
27635 }
27636- if (error_code & PF_USER) {
27637- /*
27638- * Accessing the stack below %sp is always a bug.
27639- * The large cushion allows instructions like enter
27640- * and pusha to work. ("enter $65535, $31" pushes
27641- * 32 pointers and then decrements %sp by 65535.)
27642- */
27643- if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < regs->sp)) {
27644- bad_area(regs, error_code, address);
27645- return;
27646- }
27647+ /*
27648+ * Accessing the stack below %sp is always a bug.
27649+ * The large cushion allows instructions like enter
27650+ * and pusha to work. ("enter $65535, $31" pushes
27651+ * 32 pointers and then decrements %sp by 65535.)
27652+ */
27653+ if (unlikely(address + 65536 + 32 * sizeof(unsigned long) < task_pt_regs(tsk)->sp)) {
27654+ bad_area(regs, error_code, address);
27655+ return;
27656 }
27657+
27658+#ifdef CONFIG_PAX_SEGMEXEC
27659+ if (unlikely((mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end - SEGMEXEC_TASK_SIZE - 1 < address - SEGMEXEC_TASK_SIZE - 1)) {
27660+ bad_area(regs, error_code, address);
27661+ return;
27662+ }
27663+#endif
27664+
27665 if (unlikely(expand_stack(vma, address))) {
27666 bad_area(regs, error_code, address);
27667 return;
27668@@ -1230,3 +1451,292 @@ do_page_fault(struct pt_regs *regs, unsigned long error_code)
27669 __do_page_fault(regs, error_code);
27670 exception_exit(prev_state);
27671 }
27672+
27673+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
27674+static bool pax_is_fetch_fault(struct pt_regs *regs, unsigned long error_code, unsigned long address)
27675+{
27676+ struct mm_struct *mm = current->mm;
27677+ unsigned long ip = regs->ip;
27678+
27679+ if (v8086_mode(regs))
27680+ ip = ((regs->cs & 0xffff) << 4) + (ip & 0xffff);
27681+
27682+#ifdef CONFIG_PAX_PAGEEXEC
27683+ if (mm->pax_flags & MF_PAX_PAGEEXEC) {
27684+ if ((__supported_pte_mask & _PAGE_NX) && (error_code & PF_INSTR))
27685+ return true;
27686+ if (!(error_code & (PF_PROT | PF_WRITE)) && ip == address)
27687+ return true;
27688+ return false;
27689+ }
27690+#endif
27691+
27692+#ifdef CONFIG_PAX_SEGMEXEC
27693+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
27694+ if (!(error_code & (PF_PROT | PF_WRITE)) && (ip + SEGMEXEC_TASK_SIZE == address))
27695+ return true;
27696+ return false;
27697+ }
27698+#endif
27699+
27700+ return false;
27701+}
27702+#endif
27703+
27704+#ifdef CONFIG_PAX_EMUTRAMP
27705+static int pax_handle_fetch_fault_32(struct pt_regs *regs)
27706+{
27707+ int err;
27708+
27709+ do { /* PaX: libffi trampoline emulation */
27710+ unsigned char mov, jmp;
27711+ unsigned int addr1, addr2;
27712+
27713+#ifdef CONFIG_X86_64
27714+ if ((regs->ip + 9) >> 32)
27715+ break;
27716+#endif
27717+
27718+ err = get_user(mov, (unsigned char __user *)regs->ip);
27719+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
27720+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
27721+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
27722+
27723+ if (err)
27724+ break;
27725+
27726+ if (mov == 0xB8 && jmp == 0xE9) {
27727+ regs->ax = addr1;
27728+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
27729+ return 2;
27730+ }
27731+ } while (0);
27732+
27733+ do { /* PaX: gcc trampoline emulation #1 */
27734+ unsigned char mov1, mov2;
27735+ unsigned short jmp;
27736+ unsigned int addr1, addr2;
27737+
27738+#ifdef CONFIG_X86_64
27739+ if ((regs->ip + 11) >> 32)
27740+ break;
27741+#endif
27742+
27743+ err = get_user(mov1, (unsigned char __user *)regs->ip);
27744+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
27745+ err |= get_user(mov2, (unsigned char __user *)(regs->ip + 5));
27746+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
27747+ err |= get_user(jmp, (unsigned short __user *)(regs->ip + 10));
27748+
27749+ if (err)
27750+ break;
27751+
27752+ if (mov1 == 0xB9 && mov2 == 0xB8 && jmp == 0xE0FF) {
27753+ regs->cx = addr1;
27754+ regs->ax = addr2;
27755+ regs->ip = addr2;
27756+ return 2;
27757+ }
27758+ } while (0);
27759+
27760+ do { /* PaX: gcc trampoline emulation #2 */
27761+ unsigned char mov, jmp;
27762+ unsigned int addr1, addr2;
27763+
27764+#ifdef CONFIG_X86_64
27765+ if ((regs->ip + 9) >> 32)
27766+ break;
27767+#endif
27768+
27769+ err = get_user(mov, (unsigned char __user *)regs->ip);
27770+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 1));
27771+ err |= get_user(jmp, (unsigned char __user *)(regs->ip + 5));
27772+ err |= get_user(addr2, (unsigned int __user *)(regs->ip + 6));
27773+
27774+ if (err)
27775+ break;
27776+
27777+ if (mov == 0xB9 && jmp == 0xE9) {
27778+ regs->cx = addr1;
27779+ regs->ip = (unsigned int)(regs->ip + addr2 + 10);
27780+ return 2;
27781+ }
27782+ } while (0);
27783+
27784+ return 1; /* PaX in action */
27785+}
27786+
27787+#ifdef CONFIG_X86_64
27788+static int pax_handle_fetch_fault_64(struct pt_regs *regs)
27789+{
27790+ int err;
27791+
27792+ do { /* PaX: libffi trampoline emulation */
27793+ unsigned short mov1, mov2, jmp1;
27794+ unsigned char stcclc, jmp2;
27795+ unsigned long addr1, addr2;
27796+
27797+ err = get_user(mov1, (unsigned short __user *)regs->ip);
27798+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
27799+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
27800+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
27801+ err |= get_user(stcclc, (unsigned char __user *)(regs->ip + 20));
27802+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 21));
27803+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 23));
27804+
27805+ if (err)
27806+ break;
27807+
27808+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && (stcclc == 0xF8 || stcclc == 0xF9) && jmp1 == 0xFF49 && jmp2 == 0xE3) {
27809+ regs->r11 = addr1;
27810+ regs->r10 = addr2;
27811+ if (stcclc == 0xF8)
27812+ regs->flags &= ~X86_EFLAGS_CF;
27813+ else
27814+ regs->flags |= X86_EFLAGS_CF;
27815+ regs->ip = addr1;
27816+ return 2;
27817+ }
27818+ } while (0);
27819+
27820+ do { /* PaX: gcc trampoline emulation #1 */
27821+ unsigned short mov1, mov2, jmp1;
27822+ unsigned char jmp2;
27823+ unsigned int addr1;
27824+ unsigned long addr2;
27825+
27826+ err = get_user(mov1, (unsigned short __user *)regs->ip);
27827+ err |= get_user(addr1, (unsigned int __user *)(regs->ip + 2));
27828+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 6));
27829+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 8));
27830+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 16));
27831+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 18));
27832+
27833+ if (err)
27834+ break;
27835+
27836+ if (mov1 == 0xBB41 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
27837+ regs->r11 = addr1;
27838+ regs->r10 = addr2;
27839+ regs->ip = addr1;
27840+ return 2;
27841+ }
27842+ } while (0);
27843+
27844+ do { /* PaX: gcc trampoline emulation #2 */
27845+ unsigned short mov1, mov2, jmp1;
27846+ unsigned char jmp2;
27847+ unsigned long addr1, addr2;
27848+
27849+ err = get_user(mov1, (unsigned short __user *)regs->ip);
27850+ err |= get_user(addr1, (unsigned long __user *)(regs->ip + 2));
27851+ err |= get_user(mov2, (unsigned short __user *)(regs->ip + 10));
27852+ err |= get_user(addr2, (unsigned long __user *)(regs->ip + 12));
27853+ err |= get_user(jmp1, (unsigned short __user *)(regs->ip + 20));
27854+ err |= get_user(jmp2, (unsigned char __user *)(regs->ip + 22));
27855+
27856+ if (err)
27857+ break;
27858+
27859+ if (mov1 == 0xBB49 && mov2 == 0xBA49 && jmp1 == 0xFF49 && jmp2 == 0xE3) {
27860+ regs->r11 = addr1;
27861+ regs->r10 = addr2;
27862+ regs->ip = addr1;
27863+ return 2;
27864+ }
27865+ } while (0);
27866+
27867+ return 1; /* PaX in action */
27868+}
27869+#endif
27870+
27871+/*
27872+ * PaX: decide what to do with offenders (regs->ip = fault address)
27873+ *
27874+ * returns 1 when task should be killed
27875+ * 2 when gcc trampoline was detected
27876+ */
27877+static int pax_handle_fetch_fault(struct pt_regs *regs)
27878+{
27879+ if (v8086_mode(regs))
27880+ return 1;
27881+
27882+ if (!(current->mm->pax_flags & MF_PAX_EMUTRAMP))
27883+ return 1;
27884+
27885+#ifdef CONFIG_X86_32
27886+ return pax_handle_fetch_fault_32(regs);
27887+#else
27888+ if (regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))
27889+ return pax_handle_fetch_fault_32(regs);
27890+ else
27891+ return pax_handle_fetch_fault_64(regs);
27892+#endif
27893+}
27894+#endif
27895+
27896+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
27897+void pax_report_insns(struct pt_regs *regs, void *pc, void *sp)
27898+{
27899+ long i;
27900+
27901+ printk(KERN_ERR "PAX: bytes at PC: ");
27902+ for (i = 0; i < 20; i++) {
27903+ unsigned char c;
27904+ if (get_user(c, (unsigned char __force_user *)pc+i))
27905+ printk(KERN_CONT "?? ");
27906+ else
27907+ printk(KERN_CONT "%02x ", c);
27908+ }
27909+ printk("\n");
27910+
27911+ printk(KERN_ERR "PAX: bytes at SP-%lu: ", (unsigned long)sizeof(long));
27912+ for (i = -1; i < 80 / (long)sizeof(long); i++) {
27913+ unsigned long c;
27914+ if (get_user(c, (unsigned long __force_user *)sp+i)) {
27915+#ifdef CONFIG_X86_32
27916+ printk(KERN_CONT "???????? ");
27917+#else
27918+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT)))
27919+ printk(KERN_CONT "???????? ???????? ");
27920+ else
27921+ printk(KERN_CONT "???????????????? ");
27922+#endif
27923+ } else {
27924+#ifdef CONFIG_X86_64
27925+ if ((regs->cs == __USER32_CS || (regs->cs & SEGMENT_LDT))) {
27926+ printk(KERN_CONT "%08x ", (unsigned int)c);
27927+ printk(KERN_CONT "%08x ", (unsigned int)(c >> 32));
27928+ } else
27929+#endif
27930+ printk(KERN_CONT "%0*lx ", 2 * (int)sizeof(long), c);
27931+ }
27932+ }
27933+ printk("\n");
27934+}
27935+#endif
27936+
27937+/**
27938+ * probe_kernel_write(): safely attempt to write to a location
27939+ * @dst: address to write to
27940+ * @src: pointer to the data that shall be written
27941+ * @size: size of the data chunk
27942+ *
27943+ * Safely write to address @dst from the buffer at @src. If a kernel fault
27944+ * happens, handle that and return -EFAULT.
27945+ */
27946+long notrace probe_kernel_write(void *dst, const void *src, size_t size)
27947+{
27948+ long ret;
27949+ mm_segment_t old_fs = get_fs();
27950+
27951+ set_fs(KERNEL_DS);
27952+ pagefault_disable();
27953+ pax_open_kernel();
27954+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
27955+ pax_close_kernel();
27956+ pagefault_enable();
27957+ set_fs(old_fs);
27958+
27959+ return ret ? -EFAULT : 0;
27960+}
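
The EMUTRAMP handlers above match a few fixed instruction byte patterns at the
faulting ip and emulate them instead of killing the task. A standalone decoder
for the 10-byte i386 "mov $imm32,%ecx ; jmp rel32" variant (opcodes 0xB9/0xE9,
the gcc trampoline emulation #2 case), assuming a little-endian host; the
struct and names are illustrative, and the 1/2 return codes mirror the patch:

#include <stdio.h>
#include <string.h>

struct fake_regs { unsigned int ip, cx; };

static int emulate_tramp(struct fake_regs *regs, const unsigned char *insn)
{
        unsigned int addr1, addr2;

        if (insn[0] != 0xB9 || insn[5] != 0xE9)
                return 1;               /* unknown pattern: kill */
        memcpy(&addr1, insn + 1, 4);    /* imm32 of the mov into %ecx */
        memcpy(&addr2, insn + 6, 4);    /* rel32 of the jmp */
        regs->cx = addr1;
        regs->ip = regs->ip + addr2 + 10; /* rel32 counts from insn end */
        return 2;                       /* emulated */
}

int main(void)
{
        /* mov $0x12345678,%ecx ; jmp .+0x10 */
        unsigned char tramp[10] = { 0xB9, 0x78, 0x56, 0x34, 0x12,
                                    0xE9, 0x10, 0x00, 0x00, 0x00 };
        struct fake_regs regs = { .ip = 0x1000 };

        if (emulate_tramp(&regs, tramp) == 2)
                printf("cx=%#x ip=%#x\n", regs.cx, regs.ip);
        return 0;
}
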
27961diff --git a/arch/x86/mm/gup.c b/arch/x86/mm/gup.c
27962index dd74e46..7d26398 100644
27963--- a/arch/x86/mm/gup.c
27964+++ b/arch/x86/mm/gup.c
27965@@ -255,7 +255,7 @@ int __get_user_pages_fast(unsigned long start, int nr_pages, int write,
27966 addr = start;
27967 len = (unsigned long) nr_pages << PAGE_SHIFT;
27968 end = start + len;
27969- if (unlikely(!access_ok(write ? VERIFY_WRITE : VERIFY_READ,
27970+ if (unlikely(!__access_ok(write ? VERIFY_WRITE : VERIFY_READ,
27971 (void __user *)start, len)))
27972 return 0;
27973
27974diff --git a/arch/x86/mm/highmem_32.c b/arch/x86/mm/highmem_32.c
27975index 252b8f5..4dcfdc1 100644
27976--- a/arch/x86/mm/highmem_32.c
27977+++ b/arch/x86/mm/highmem_32.c
27978@@ -44,7 +44,11 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
27979 idx = type + KM_TYPE_NR*smp_processor_id();
27980 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
27981 BUG_ON(!pte_none(*(kmap_pte-idx)));
27982+
27983+ pax_open_kernel();
27984 set_pte(kmap_pte-idx, mk_pte(page, prot));
27985+ pax_close_kernel();
27986+
27987 arch_flush_lazy_mmu_mode();
27988
27989 return (void *)vaddr;
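
The pax_open_kernel()/pax_close_kernel() pair above brackets a single PTE write
so otherwise write-protected kernel page tables can be updated (in PaX, roughly
a CR0.WP toggle). A rough userspace analogue, illustration only, not the kernel
mechanism: flip a read-only page writable just long enough to patch it.

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
        long pagesz = sysconf(_SC_PAGESIZE);
        char *p = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (p == MAP_FAILED)
                return 1;
        strcpy(p, "initial");
        mprotect(p, pagesz, PROT_READ);                 /* normally read-only */

        mprotect(p, pagesz, PROT_READ | PROT_WRITE);    /* "open" */
        strcpy(p, "patched");                           /* the one update */
        mprotect(p, pagesz, PROT_READ);                 /* "close" */

        puts(p);
        return 0;
}
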
27990diff --git a/arch/x86/mm/hugetlbpage.c b/arch/x86/mm/hugetlbpage.c
27991index ae1aa71..d9bea75 100644
27992--- a/arch/x86/mm/hugetlbpage.c
27993+++ b/arch/x86/mm/hugetlbpage.c
27994@@ -271,23 +271,30 @@ follow_huge_pud(struct mm_struct *mm, unsigned long address,
27995 #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA
27996 static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
27997 unsigned long addr, unsigned long len,
27998- unsigned long pgoff, unsigned long flags)
27999+ unsigned long pgoff, unsigned long flags, unsigned long offset)
28000 {
28001 struct hstate *h = hstate_file(file);
28002 struct vm_unmapped_area_info info;
28003-
28004+
28005 info.flags = 0;
28006 info.length = len;
28007 info.low_limit = TASK_UNMAPPED_BASE;
28008+
28009+#ifdef CONFIG_PAX_RANDMMAP
28010+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
28011+ info.low_limit += current->mm->delta_mmap;
28012+#endif
28013+
28014 info.high_limit = TASK_SIZE;
28015 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
28016 info.align_offset = 0;
28017+ info.threadstack_offset = offset;
28018 return vm_unmapped_area(&info);
28019 }
28020
28021 static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
28022 unsigned long addr0, unsigned long len,
28023- unsigned long pgoff, unsigned long flags)
28024+ unsigned long pgoff, unsigned long flags, unsigned long offset)
28025 {
28026 struct hstate *h = hstate_file(file);
28027 struct vm_unmapped_area_info info;
28028@@ -299,6 +306,7 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
28029 info.high_limit = current->mm->mmap_base;
28030 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
28031 info.align_offset = 0;
28032+ info.threadstack_offset = offset;
28033 addr = vm_unmapped_area(&info);
28034
28035 /*
28036@@ -311,6 +319,12 @@ static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
28037 VM_BUG_ON(addr != -ENOMEM);
28038 info.flags = 0;
28039 info.low_limit = TASK_UNMAPPED_BASE;
28040+
28041+#ifdef CONFIG_PAX_RANDMMAP
28042+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
28043+ info.low_limit += current->mm->delta_mmap;
28044+#endif
28045+
28046 info.high_limit = TASK_SIZE;
28047 addr = vm_unmapped_area(&info);
28048 }
28049@@ -325,10 +339,20 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
28050 struct hstate *h = hstate_file(file);
28051 struct mm_struct *mm = current->mm;
28052 struct vm_area_struct *vma;
28053+ unsigned long pax_task_size = TASK_SIZE;
28054+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
28055
28056 if (len & ~huge_page_mask(h))
28057 return -EINVAL;
28058- if (len > TASK_SIZE)
28059+
28060+#ifdef CONFIG_PAX_SEGMEXEC
28061+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
28062+ pax_task_size = SEGMEXEC_TASK_SIZE;
28063+#endif
28064+
28065+ pax_task_size -= PAGE_SIZE;
28066+
28067+ if (len > pax_task_size)
28068 return -ENOMEM;
28069
28070 if (flags & MAP_FIXED) {
28071@@ -337,19 +361,22 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
28072 return addr;
28073 }
28074
28075+#ifdef CONFIG_PAX_RANDMMAP
28076+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
28077+#endif
28078+
28079 if (addr) {
28080 addr = ALIGN(addr, huge_page_size(h));
28081 vma = find_vma(mm, addr);
28082- if (TASK_SIZE - len >= addr &&
28083- (!vma || addr + len <= vma->vm_start))
28084+ if (pax_task_size - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
28085 return addr;
28086 }
28087 if (mm->get_unmapped_area == arch_get_unmapped_area)
28088 return hugetlb_get_unmapped_area_bottomup(file, addr, len,
28089- pgoff, flags);
28090+ pgoff, flags, offset);
28091 else
28092 return hugetlb_get_unmapped_area_topdown(file, addr, len,
28093- pgoff, flags);
28094+ pgoff, flags, offset);
28095 }
28096
28097 #endif /*HAVE_ARCH_HUGETLB_UNMAPPED_AREA*/
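
Two small knobs recur in the hugetlb changes above: the address hint is rounded
up to a huge-page boundary before the gap check, and under RANDMMAP the
bottom-up search floor is shifted by delta_mmap. Both are plain arithmetic; a
sketch with assumed constants (2 MiB huge pages, made-up base and delta):

#include <stdio.h>

#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
        unsigned long huge = 2UL << 20;          /* assumed 2 MiB huge pages */
        unsigned long hint = 0x12345678UL;
        unsigned long low_limit = 0x40000000UL;  /* stand-in TASK_UNMAPPED_BASE */
        unsigned long delta_mmap = 0x01230000UL; /* stand-in randomization delta */

        printf("hint  %#lx -> %#lx\n", hint, ALIGN_UP(hint, huge));
        printf("floor %#lx -> %#lx\n", low_limit, low_limit + delta_mmap);
        return 0;
}
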
28098diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
28099index 1f34e92..d252637 100644
28100--- a/arch/x86/mm/init.c
28101+++ b/arch/x86/mm/init.c
28102@@ -4,6 +4,7 @@
28103 #include <linux/swap.h>
28104 #include <linux/memblock.h>
28105 #include <linux/bootmem.h> /* for max_low_pfn */
28106+#include <linux/tboot.h>
28107
28108 #include <asm/cacheflush.h>
28109 #include <asm/e820.h>
28110@@ -17,6 +18,8 @@
28111 #include <asm/proto.h>
28112 #include <asm/dma.h> /* for MAX_DMA_PFN */
28113 #include <asm/microcode.h>
28114+#include <asm/desc.h>
28115+#include <asm/bios_ebda.h>
28116
28117 #include "mm_internal.h"
28118
28119@@ -465,7 +468,15 @@ void __init init_mem_mapping(void)
28120 early_ioremap_page_table_range_init();
28121 #endif
28122
28123+#ifdef CONFIG_PAX_PER_CPU_PGD
28124+ clone_pgd_range(get_cpu_pgd(0) + KERNEL_PGD_BOUNDARY,
28125+ swapper_pg_dir + KERNEL_PGD_BOUNDARY,
28126+ KERNEL_PGD_PTRS);
28127+ load_cr3(get_cpu_pgd(0));
28128+#else
28129 load_cr3(swapper_pg_dir);
28130+#endif
28131+
28132 __flush_tlb_all();
28133
28134 early_memtest(0, max_pfn_mapped << PAGE_SHIFT);
28135@@ -481,10 +492,40 @@ void __init init_mem_mapping(void)
28136 * Access has to be given to non-kernel-ram areas as well, these contain the PCI
28137 * mmio resources as well as potential bios/acpi data regions.
28138 */
28139+
28140+#ifdef CONFIG_GRKERNSEC_KMEM
28141+static unsigned int ebda_start __read_only;
28142+static unsigned int ebda_end __read_only;
28143+#endif
28144+
28145 int devmem_is_allowed(unsigned long pagenr)
28146 {
28147- if (pagenr < 256)
28148+#ifdef CONFIG_GRKERNSEC_KMEM
28149+ /* allow BDA */
28150+ if (!pagenr)
28151 return 1;
28152+ /* allow EBDA */
28153+ if (pagenr >= ebda_start && pagenr < ebda_end)
28154+ return 1;
28155+ /* if tboot is in use, allow access to its hardcoded serial log range */
28156+ if (tboot_enabled() && ((0x60000 >> PAGE_SHIFT) <= pagenr) && (pagenr < (0x68000 >> PAGE_SHIFT)))
28157+ return 1;
28158+#else
28159+ if (!pagenr)
28160+ return 1;
28161+#ifdef CONFIG_VM86
28162+ if (pagenr < (ISA_START_ADDRESS >> PAGE_SHIFT))
28163+ return 1;
28164+#endif
28165+#endif
28166+
28167+ if ((ISA_START_ADDRESS >> PAGE_SHIFT) <= pagenr && pagenr < (ISA_END_ADDRESS >> PAGE_SHIFT))
28168+ return 1;
28169+#ifdef CONFIG_GRKERNSEC_KMEM
28170+ /* throw out everything else below 1MB */
28171+ if (pagenr <= 256)
28172+ return 0;
28173+#endif
28174 if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
28175 return 0;
28176 if (!page_is_ram(pagenr))
28177@@ -538,8 +579,117 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
28178 #endif
28179 }
28180
28181+#ifdef CONFIG_GRKERNSEC_KMEM
28182+static inline void gr_init_ebda(void)
28183+{
28184+ unsigned int ebda_addr;
28185+ unsigned int ebda_size = 0;
28186+
28187+ ebda_addr = get_bios_ebda();
28188+ if (ebda_addr) {
28189+ ebda_size = *(unsigned char *)phys_to_virt(ebda_addr);
28190+ ebda_size <<= 10;
28191+ }
28192+ if (ebda_addr && ebda_size) {
28193+ ebda_start = ebda_addr >> PAGE_SHIFT;
28194+ ebda_end = min((unsigned int)PAGE_ALIGN(ebda_addr + ebda_size), (unsigned int)0xa0000) >> PAGE_SHIFT;
28195+ } else {
28196+ ebda_start = 0x9f000 >> PAGE_SHIFT;
28197+ ebda_end = 0xa0000 >> PAGE_SHIFT;
28198+ }
28199+}
28200+#else
28201+static inline void gr_init_ebda(void) { }
28202+#endif
28203+
28204 void free_initmem(void)
28205 {
28206+#ifdef CONFIG_PAX_KERNEXEC
28207+#ifdef CONFIG_X86_32
28208+ /* PaX: limit KERNEL_CS to actual size */
28209+ unsigned long addr, limit;
28210+ struct desc_struct d;
28211+ int cpu;
28212+#else
28213+ pgd_t *pgd;
28214+ pud_t *pud;
28215+ pmd_t *pmd;
28216+ unsigned long addr, end;
28217+#endif
28218+#endif
28219+
28220+ gr_init_ebda();
28221+
28222+#ifdef CONFIG_PAX_KERNEXEC
28223+#ifdef CONFIG_X86_32
28224+ limit = paravirt_enabled() ? ktva_ktla(0xffffffff) : (unsigned long)&_etext;
28225+ limit = (limit - 1UL) >> PAGE_SHIFT;
28226+
28227+ memset(__LOAD_PHYSICAL_ADDR + PAGE_OFFSET, POISON_FREE_INITMEM, PAGE_SIZE);
28228+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
28229+ pack_descriptor(&d, get_desc_base(&get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_CS]), limit, 0x9B, 0xC);
28230+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEL_CS, &d, DESCTYPE_S);
28231+ write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_KERNEXEC_KERNEL_CS, &d, DESCTYPE_S);
28232+ }
28233+
28234+ /* PaX: make KERNEL_CS read-only */
28235+ addr = PFN_ALIGN(ktla_ktva((unsigned long)&_text));
28236+ if (!paravirt_enabled())
28237+ set_memory_ro(addr, (PFN_ALIGN(_sdata) - addr) >> PAGE_SHIFT);
28238+/*
28239+ for (addr = ktla_ktva((unsigned long)&_text); addr < (unsigned long)&_sdata; addr += PMD_SIZE) {
28240+ pgd = pgd_offset_k(addr);
28241+ pud = pud_offset(pgd, addr);
28242+ pmd = pmd_offset(pud, addr);
28243+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
28244+ }
28245+*/
28246+#ifdef CONFIG_X86_PAE
28247+ set_memory_nx(PFN_ALIGN(__init_begin), (PFN_ALIGN(__init_end) - PFN_ALIGN(__init_begin)) >> PAGE_SHIFT);
28248+/*
28249+ for (addr = (unsigned long)&__init_begin; addr < (unsigned long)&__init_end; addr += PMD_SIZE) {
28250+ pgd = pgd_offset_k(addr);
28251+ pud = pud_offset(pgd, addr);
28252+ pmd = pmd_offset(pud, addr);
28253+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
28254+ }
28255+*/
28256+#endif
28257+
28258+#ifdef CONFIG_MODULES
28259+ set_memory_4k((unsigned long)MODULES_EXEC_VADDR, (MODULES_EXEC_END - MODULES_EXEC_VADDR) >> PAGE_SHIFT);
28260+#endif
28261+
28262+#else
28263+ /* PaX: make kernel code/rodata read-only, rest non-executable */
28264+ for (addr = __START_KERNEL_map; addr < __START_KERNEL_map + KERNEL_IMAGE_SIZE; addr += PMD_SIZE) {
28265+ pgd = pgd_offset_k(addr);
28266+ pud = pud_offset(pgd, addr);
28267+ pmd = pmd_offset(pud, addr);
28268+ if (!pmd_present(*pmd))
28269+ continue;
28270+ if ((unsigned long)_text <= addr && addr < (unsigned long)_sdata)
28271+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
28272+ else
28273+ set_pmd(pmd, __pmd(pmd_val(*pmd) | (_PAGE_NX & __supported_pte_mask)));
28274+ }
28275+
28276+ addr = (unsigned long)__va(__pa(__START_KERNEL_map));
28277+ end = addr + KERNEL_IMAGE_SIZE;
28278+ for (; addr < end; addr += PMD_SIZE) {
28279+ pgd = pgd_offset_k(addr);
28280+ pud = pud_offset(pgd, addr);
28281+ pmd = pmd_offset(pud, addr);
28282+ if (!pmd_present(*pmd))
28283+ continue;
28284+ if ((unsigned long)__va(__pa(_text)) <= addr && addr < (unsigned long)__va(__pa(_sdata)))
28285+ set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_RW));
28286+ }
28287+#endif
28288+
28289+ flush_tlb_all();
28290+#endif
28291+
28292 free_init_pages("unused kernel memory",
28293 (unsigned long)(&__init_begin),
28294 (unsigned long)(&__init_end));
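
The GRKERNSEC_KMEM policy in devmem_is_allowed() above reduces to a page-number
filter: allow the BDA page, the detected EBDA pages and the ISA hole, and throw
out everything else below 1 MiB. A standalone model of that filter, with the
EBDA bounds hardcoded to the same 0x9f000-0xa0000 fallback gr_init_ebda() uses
when the BIOS reports nothing:

#include <stdio.h>

#define PAGE_SHIFT      12
#define ISA_START       (0xa0000UL >> PAGE_SHIFT)
#define ISA_END         (0x100000UL >> PAGE_SHIFT)

static unsigned long ebda_start = 0x9f000UL >> PAGE_SHIFT;      /* assumed */
static unsigned long ebda_end   = 0xa0000UL >> PAGE_SHIFT;

static int low_mem_allowed(unsigned long pagenr)
{
        if (!pagenr)                                    /* BDA */
                return 1;
        if (pagenr >= ebda_start && pagenr < ebda_end)  /* EBDA */
                return 1;
        if (pagenr >= ISA_START && pagenr < ISA_END)    /* ISA hole */
                return 1;
        return pagenr > 256;    /* throw out the rest below 1 MiB */
}

int main(void)
{
        printf("page 0: %d, page 1: %d, page 0x9f: %d, page 0xa0: %d\n",
               low_mem_allowed(0), low_mem_allowed(1),
               low_mem_allowed(0x9f), low_mem_allowed(0xa0));
        return 0;
}
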
28295diff --git a/arch/x86/mm/init_32.c b/arch/x86/mm/init_32.c
28296index 3ac7e31..89611b7 100644
28297--- a/arch/x86/mm/init_32.c
28298+++ b/arch/x86/mm/init_32.c
28299@@ -62,33 +62,6 @@ static noinline int do_test_wp_bit(void);
28300 bool __read_mostly __vmalloc_start_set = false;
28301
28302 /*
28303- * Creates a middle page table and puts a pointer to it in the
28304- * given global directory entry. This only returns the gd entry
28305- * in non-PAE compilation mode, since the middle layer is folded.
28306- */
28307-static pmd_t * __init one_md_table_init(pgd_t *pgd)
28308-{
28309- pud_t *pud;
28310- pmd_t *pmd_table;
28311-
28312-#ifdef CONFIG_X86_PAE
28313- if (!(pgd_val(*pgd) & _PAGE_PRESENT)) {
28314- pmd_table = (pmd_t *)alloc_low_page();
28315- paravirt_alloc_pmd(&init_mm, __pa(pmd_table) >> PAGE_SHIFT);
28316- set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT));
28317- pud = pud_offset(pgd, 0);
28318- BUG_ON(pmd_table != pmd_offset(pud, 0));
28319-
28320- return pmd_table;
28321- }
28322-#endif
28323- pud = pud_offset(pgd, 0);
28324- pmd_table = pmd_offset(pud, 0);
28325-
28326- return pmd_table;
28327-}
28328-
28329-/*
28330 * Create a page table and place a pointer to it in a middle page
28331 * directory entry:
28332 */
28333@@ -98,13 +71,28 @@ static pte_t * __init one_page_table_init(pmd_t *pmd)
28334 pte_t *page_table = (pte_t *)alloc_low_page();
28335
28336 paravirt_alloc_pte(&init_mm, __pa(page_table) >> PAGE_SHIFT);
28337+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
28338+ set_pmd(pmd, __pmd(__pa(page_table) | _KERNPG_TABLE));
28339+#else
28340 set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE));
28341+#endif
28342 BUG_ON(page_table != pte_offset_kernel(pmd, 0));
28343 }
28344
28345 return pte_offset_kernel(pmd, 0);
28346 }
28347
28348+static pmd_t * __init one_md_table_init(pgd_t *pgd)
28349+{
28350+ pud_t *pud;
28351+ pmd_t *pmd_table;
28352+
28353+ pud = pud_offset(pgd, 0);
28354+ pmd_table = pmd_offset(pud, 0);
28355+
28356+ return pmd_table;
28357+}
28358+
28359 pmd_t * __init populate_extra_pmd(unsigned long vaddr)
28360 {
28361 int pgd_idx = pgd_index(vaddr);
28362@@ -208,6 +196,7 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
28363 int pgd_idx, pmd_idx;
28364 unsigned long vaddr;
28365 pgd_t *pgd;
28366+ pud_t *pud;
28367 pmd_t *pmd;
28368 pte_t *pte = NULL;
28369 unsigned long count = page_table_range_init_count(start, end);
28370@@ -222,8 +211,13 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
28371 pgd = pgd_base + pgd_idx;
28372
28373 for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
28374- pmd = one_md_table_init(pgd);
28375- pmd = pmd + pmd_index(vaddr);
28376+ pud = pud_offset(pgd, vaddr);
28377+ pmd = pmd_offset(pud, vaddr);
28378+
28379+#ifdef CONFIG_X86_PAE
28380+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
28381+#endif
28382+
28383 for (; (pmd_idx < PTRS_PER_PMD) && (vaddr != end);
28384 pmd++, pmd_idx++) {
28385 pte = page_table_kmap_check(one_page_table_init(pmd),
28386@@ -235,11 +229,20 @@ page_table_range_init(unsigned long start, unsigned long end, pgd_t *pgd_base)
28387 }
28388 }
28389
28390-static inline int is_kernel_text(unsigned long addr)
28391+static inline int is_kernel_text(unsigned long start, unsigned long end)
28392 {
28393- if (addr >= (unsigned long)_text && addr <= (unsigned long)__init_end)
28394- return 1;
28395- return 0;
28396+ if ((start > ktla_ktva((unsigned long)_etext) ||
28397+ end <= ktla_ktva((unsigned long)_stext)) &&
28398+ (start > ktla_ktva((unsigned long)_einittext) ||
28399+ end <= ktla_ktva((unsigned long)_sinittext)) &&
28400+
28401+#ifdef CONFIG_ACPI_SLEEP
28402+ (start > (unsigned long)__va(acpi_wakeup_address) + 0x4000 || end <= (unsigned long)__va(acpi_wakeup_address)) &&
28403+#endif
28404+
28405+ (start > (unsigned long)__va(0xfffff) || end <= (unsigned long)__va(0xc0000)))
28406+ return 0;
28407+ return 1;
28408 }
28409
28410 /*
28411@@ -256,9 +259,10 @@ kernel_physical_mapping_init(unsigned long start,
28412 unsigned long last_map_addr = end;
28413 unsigned long start_pfn, end_pfn;
28414 pgd_t *pgd_base = swapper_pg_dir;
28415- int pgd_idx, pmd_idx, pte_ofs;
28416+ unsigned int pgd_idx, pmd_idx, pte_ofs;
28417 unsigned long pfn;
28418 pgd_t *pgd;
28419+ pud_t *pud;
28420 pmd_t *pmd;
28421 pte_t *pte;
28422 unsigned pages_2m, pages_4k;
28423@@ -291,8 +295,13 @@ repeat:
28424 pfn = start_pfn;
28425 pgd_idx = pgd_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
28426 pgd = pgd_base + pgd_idx;
28427- for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) {
28428- pmd = one_md_table_init(pgd);
28429+ for (; pgd_idx < PTRS_PER_PGD && pfn < max_low_pfn; pgd++, pgd_idx++) {
28430+ pud = pud_offset(pgd, 0);
28431+ pmd = pmd_offset(pud, 0);
28432+
28433+#ifdef CONFIG_X86_PAE
28434+ paravirt_alloc_pmd(&init_mm, __pa(pmd) >> PAGE_SHIFT);
28435+#endif
28436
28437 if (pfn >= end_pfn)
28438 continue;
28439@@ -304,14 +313,13 @@ repeat:
28440 #endif
28441 for (; pmd_idx < PTRS_PER_PMD && pfn < end_pfn;
28442 pmd++, pmd_idx++) {
28443- unsigned int addr = pfn * PAGE_SIZE + PAGE_OFFSET;
28444+ unsigned long address = pfn * PAGE_SIZE + PAGE_OFFSET;
28445
28446 /*
28447 * Map with big pages if possible, otherwise
28448 * create normal page tables:
28449 */
28450 if (use_pse) {
28451- unsigned int addr2;
28452 pgprot_t prot = PAGE_KERNEL_LARGE;
28453 /*
28454 * first pass will use the same initial
28455@@ -322,11 +330,7 @@ repeat:
28456 _PAGE_PSE);
28457
28458 pfn &= PMD_MASK >> PAGE_SHIFT;
28459- addr2 = (pfn + PTRS_PER_PTE-1) * PAGE_SIZE +
28460- PAGE_OFFSET + PAGE_SIZE-1;
28461-
28462- if (is_kernel_text(addr) ||
28463- is_kernel_text(addr2))
28464+ if (is_kernel_text(address, address + PMD_SIZE))
28465 prot = PAGE_KERNEL_LARGE_EXEC;
28466
28467 pages_2m++;
28468@@ -343,7 +347,7 @@ repeat:
28469 pte_ofs = pte_index((pfn<<PAGE_SHIFT) + PAGE_OFFSET);
28470 pte += pte_ofs;
28471 for (; pte_ofs < PTRS_PER_PTE && pfn < end_pfn;
28472- pte++, pfn++, pte_ofs++, addr += PAGE_SIZE) {
28473+ pte++, pfn++, pte_ofs++, address += PAGE_SIZE) {
28474 pgprot_t prot = PAGE_KERNEL;
28475 /*
28476 * first pass will use the same initial
28477@@ -351,7 +355,7 @@ repeat:
28478 */
28479 pgprot_t init_prot = __pgprot(PTE_IDENT_ATTR);
28480
28481- if (is_kernel_text(addr))
28482+ if (is_kernel_text(address, address + PAGE_SIZE))
28483 prot = PAGE_KERNEL_EXEC;
28484
28485 pages_4k++;
28486@@ -474,7 +478,7 @@ void __init native_pagetable_init(void)
28487
28488 pud = pud_offset(pgd, va);
28489 pmd = pmd_offset(pud, va);
28490- if (!pmd_present(*pmd))
28491+ if (!pmd_present(*pmd)) // PAX TODO || pmd_large(*pmd))
28492 break;
28493
28494 /* should not be large page here */
28495@@ -532,12 +536,10 @@ void __init early_ioremap_page_table_range_init(void)
28496
28497 static void __init pagetable_init(void)
28498 {
28499- pgd_t *pgd_base = swapper_pg_dir;
28500-
28501- permanent_kmaps_init(pgd_base);
28502+ permanent_kmaps_init(swapper_pg_dir);
28503 }
28504
28505-pteval_t __supported_pte_mask __read_mostly = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
28506+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_GLOBAL | _PAGE_IOMAP);
28507 EXPORT_SYMBOL_GPL(__supported_pte_mask);
28508
28509 /* user-defined highmem size */
28510@@ -772,7 +774,7 @@ void __init mem_init(void)
28511 after_bootmem = 1;
28512
28513 codesize = (unsigned long) &_etext - (unsigned long) &_text;
28514- datasize = (unsigned long) &_edata - (unsigned long) &_etext;
28515+ datasize = (unsigned long) &_edata - (unsigned long) &_sdata;
28516 initsize = (unsigned long) &__init_end - (unsigned long) &__init_begin;
28517
28518 printk(KERN_INFO "Memory: %luk/%luk available (%dk kernel code, "
28519@@ -813,10 +815,10 @@ void __init mem_init(void)
28520 ((unsigned long)&__init_end -
28521 (unsigned long)&__init_begin) >> 10,
28522
28523- (unsigned long)&_etext, (unsigned long)&_edata,
28524- ((unsigned long)&_edata - (unsigned long)&_etext) >> 10,
28525+ (unsigned long)&_sdata, (unsigned long)&_edata,
28526+ ((unsigned long)&_edata - (unsigned long)&_sdata) >> 10,
28527
28528- (unsigned long)&_text, (unsigned long)&_etext,
28529+ ktla_ktva((unsigned long)&_text), ktla_ktva((unsigned long)&_etext),
28530 ((unsigned long)&_etext - (unsigned long)&_text) >> 10);
28531
28532 /*
28533@@ -906,6 +908,7 @@ void set_kernel_text_rw(void)
28534 if (!kernel_set_to_readonly)
28535 return;
28536
28537+ start = ktla_ktva(start);
28538 pr_debug("Set kernel text: %lx - %lx for read write\n",
28539 start, start+size);
28540
28541@@ -920,6 +923,7 @@ void set_kernel_text_ro(void)
28542 if (!kernel_set_to_readonly)
28543 return;
28544
28545+ start = ktla_ktva(start);
28546 pr_debug("Set kernel text: %lx - %lx for read only\n",
28547 start, start+size);
28548
28549@@ -948,6 +952,7 @@ void mark_rodata_ro(void)
28550 unsigned long start = PFN_ALIGN(_text);
28551 unsigned long size = PFN_ALIGN(_etext) - start;
28552
28553+ start = ktla_ktva(start);
28554 set_pages_ro(virt_to_page(start), size >> PAGE_SHIFT);
28555 printk(KERN_INFO "Write protecting the kernel text: %luk\n",
28556 size >> 10);
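
The reworked is_kernel_text() above is a chain of interval-overlap tests
against the text, inittext, ACPI wakeup and low-BIOS ranges (with slightly
asymmetric boundary handling in the patch's comparisons). The core predicate:
two half-open ranges [a0,a1) and [b0,b1) overlap exactly when a0 < b1 && b0 <
a1. A minimal check with illustrative addresses:

#include <stdio.h>

static int overlaps(unsigned long a0, unsigned long a1,
                    unsigned long b0, unsigned long b1)
{
        return a0 < b1 && b0 < a1;
}

int main(void)
{
        /* illustrative stand-ins for _stext/_etext */
        unsigned long text_s = 0xc1000000UL, text_e = 0xc1400000UL;

        printf("%d %d\n",
               overlaps(0xc13ff000UL, 0xc1400000UL, text_s, text_e),   /* 1 */
               overlaps(0xc1400000UL, 0xc1401000UL, text_s, text_e));  /* 0 */
        return 0;
}
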
28557diff --git a/arch/x86/mm/init_64.c b/arch/x86/mm/init_64.c
28558index bb00c46..f31d2f0 100644
28559--- a/arch/x86/mm/init_64.c
28560+++ b/arch/x86/mm/init_64.c
28561@@ -151,7 +151,7 @@ early_param("gbpages", parse_direct_gbpages_on);
28562 * around without checking the pgd every time.
28563 */
28564
28565-pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
28566+pteval_t __supported_pte_mask __read_only = ~(_PAGE_NX | _PAGE_IOMAP);
28567 EXPORT_SYMBOL_GPL(__supported_pte_mask);
28568
28569 int force_personality32;
28570@@ -184,12 +184,22 @@ void sync_global_pgds(unsigned long start, unsigned long end)
28571
28572 for (address = start; address <= end; address += PGDIR_SIZE) {
28573 const pgd_t *pgd_ref = pgd_offset_k(address);
28574+
28575+#ifdef CONFIG_PAX_PER_CPU_PGD
28576+ unsigned long cpu;
28577+#else
28578 struct page *page;
28579+#endif
28580
28581 if (pgd_none(*pgd_ref))
28582 continue;
28583
28584 spin_lock(&pgd_lock);
28585+
28586+#ifdef CONFIG_PAX_PER_CPU_PGD
28587+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
28588+ pgd_t *pgd = pgd_offset_cpu(cpu, address);
28589+#else
28590 list_for_each_entry(page, &pgd_list, lru) {
28591 pgd_t *pgd;
28592 spinlock_t *pgt_lock;
28593@@ -198,6 +208,7 @@ void sync_global_pgds(unsigned long start, unsigned long end)
28594 /* the pgt_lock only for Xen */
28595 pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
28596 spin_lock(pgt_lock);
28597+#endif
28598
28599 if (pgd_none(*pgd))
28600 set_pgd(pgd, *pgd_ref);
28601@@ -205,7 +216,10 @@ void sync_global_pgds(unsigned long start, unsigned long end)
28602 BUG_ON(pgd_page_vaddr(*pgd)
28603 != pgd_page_vaddr(*pgd_ref));
28604
28605+#ifndef CONFIG_PAX_PER_CPU_PGD
28606 spin_unlock(pgt_lock);
28607+#endif
28608+
28609 }
28610 spin_unlock(&pgd_lock);
28611 }
28612@@ -238,7 +252,7 @@ static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
28613 {
28614 if (pgd_none(*pgd)) {
28615 pud_t *pud = (pud_t *)spp_getpage();
28616- pgd_populate(&init_mm, pgd, pud);
28617+ pgd_populate_kernel(&init_mm, pgd, pud);
28618 if (pud != pud_offset(pgd, 0))
28619 printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
28620 pud, pud_offset(pgd, 0));
28621@@ -250,7 +264,7 @@ static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
28622 {
28623 if (pud_none(*pud)) {
28624 pmd_t *pmd = (pmd_t *) spp_getpage();
28625- pud_populate(&init_mm, pud, pmd);
28626+ pud_populate_kernel(&init_mm, pud, pmd);
28627 if (pmd != pmd_offset(pud, 0))
28628 printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
28629 pmd, pmd_offset(pud, 0));
28630@@ -279,7 +293,9 @@ void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
28631 pmd = fill_pmd(pud, vaddr);
28632 pte = fill_pte(pmd, vaddr);
28633
28634+ pax_open_kernel();
28635 set_pte(pte, new_pte);
28636+ pax_close_kernel();
28637
28638 /*
28639 * It's enough to flush this one mapping.
28640@@ -338,14 +354,12 @@ static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
28641 pgd = pgd_offset_k((unsigned long)__va(phys));
28642 if (pgd_none(*pgd)) {
28643 pud = (pud_t *) spp_getpage();
28644- set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
28645- _PAGE_USER));
28646+ set_pgd(pgd, __pgd(__pa(pud) | _PAGE_TABLE));
28647 }
28648 pud = pud_offset(pgd, (unsigned long)__va(phys));
28649 if (pud_none(*pud)) {
28650 pmd = (pmd_t *) spp_getpage();
28651- set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
28652- _PAGE_USER));
28653+ set_pud(pud, __pud(__pa(pmd) | _PAGE_TABLE));
28654 }
28655 pmd = pmd_offset(pud, phys);
28656 BUG_ON(!pmd_none(*pmd));
28657@@ -586,7 +600,7 @@ phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
28658 prot);
28659
28660 spin_lock(&init_mm.page_table_lock);
28661- pud_populate(&init_mm, pud, pmd);
28662+ pud_populate_kernel(&init_mm, pud, pmd);
28663 spin_unlock(&init_mm.page_table_lock);
28664 }
28665 __flush_tlb_all();
28666@@ -627,7 +641,7 @@ kernel_physical_mapping_init(unsigned long start,
28667 page_size_mask);
28668
28669 spin_lock(&init_mm.page_table_lock);
28670- pgd_populate(&init_mm, pgd, pud);
28671+ pgd_populate_kernel(&init_mm, pgd, pud);
28672 spin_unlock(&init_mm.page_table_lock);
28673 pgd_changed = true;
28674 }
28675@@ -1221,8 +1235,8 @@ int kern_addr_valid(unsigned long addr)
28676 static struct vm_area_struct gate_vma = {
28677 .vm_start = VSYSCALL_START,
28678 .vm_end = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
28679- .vm_page_prot = PAGE_READONLY_EXEC,
28680- .vm_flags = VM_READ | VM_EXEC
28681+ .vm_page_prot = PAGE_READONLY,
28682+ .vm_flags = VM_READ
28683 };
28684
28685 struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
28686@@ -1256,7 +1270,7 @@ int in_gate_area_no_mm(unsigned long addr)
28687
28688 const char *arch_vma_name(struct vm_area_struct *vma)
28689 {
28690- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
28691+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
28692 return "[vdso]";
28693 if (vma == &gate_vma)
28694 return "[vsyscall]";
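
Under CONFIG_PAX_PER_CPU_PGD, sync_global_pgds() above (like vmalloc_sync_all()
on i386 earlier in this patch) must push a new kernel-half pgd entry into every
CPU's private top-level directory instead of walking the shared pgd_list. The
copy-if-empty step, modeled with plain arrays and made-up sizes:

#include <stdio.h>

#define NR_CPUS 4
#define PTRS    8

static unsigned long master[PTRS];              /* init_mm's pgd */
static unsigned long percpu[NR_CPUS][PTRS];     /* per-CPU pgds */

static void sync_entry(int idx)
{
        int cpu;

        for (cpu = 0; cpu < NR_CPUS; cpu++)
                if (!percpu[cpu][idx])
                        percpu[cpu][idx] = master[idx];
}

int main(void)
{
        master[5] = 0xdeadbeefUL;       /* a new kernel mapping appears */
        sync_entry(5);
        printf("cpu2 slot5 = %#lx\n", percpu[2][5]);
        return 0;
}
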
28695diff --git a/arch/x86/mm/iomap_32.c b/arch/x86/mm/iomap_32.c
28696index 7b179b4..6bd17777 100644
28697--- a/arch/x86/mm/iomap_32.c
28698+++ b/arch/x86/mm/iomap_32.c
28699@@ -64,7 +64,11 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
28700 type = kmap_atomic_idx_push();
28701 idx = type + KM_TYPE_NR * smp_processor_id();
28702 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
28703+
28704+ pax_open_kernel();
28705 set_pte(kmap_pte - idx, pfn_pte(pfn, prot));
28706+ pax_close_kernel();
28707+
28708 arch_flush_lazy_mmu_mode();
28709
28710 return (void *)vaddr;
28711diff --git a/arch/x86/mm/ioremap.c b/arch/x86/mm/ioremap.c
28712index 9a1e658..da003f3 100644
28713--- a/arch/x86/mm/ioremap.c
28714+++ b/arch/x86/mm/ioremap.c
28715@@ -97,7 +97,7 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
28716 for (pfn = phys_addr >> PAGE_SHIFT; pfn <= last_pfn; pfn++) {
28717 int is_ram = page_is_ram(pfn);
28718
28719- if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
28720+ if (is_ram && pfn_valid(pfn) && (pfn >= 0x100 || !PageReserved(pfn_to_page(pfn))))
28721 return NULL;
28722 WARN_ON_ONCE(is_ram);
28723 }
28724@@ -256,7 +256,7 @@ EXPORT_SYMBOL(ioremap_prot);
28725 *
28726 * Caller must ensure there is only one unmapping for the same pointer.
28727 */
28728-void iounmap(volatile void __iomem *addr)
28729+void iounmap(const volatile void __iomem *addr)
28730 {
28731 struct vm_struct *p, *o;
28732
28733@@ -310,6 +310,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
28734
28735 /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
28736 if (page_is_ram(start >> PAGE_SHIFT))
28737+#ifdef CONFIG_HIGHMEM
28738+ if ((start >> PAGE_SHIFT) < max_low_pfn)
28739+#endif
28740 return __va(phys);
28741
28742 addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
28743@@ -322,6 +325,9 @@ void *xlate_dev_mem_ptr(unsigned long phys)
28744 void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
28745 {
28746 if (page_is_ram(phys >> PAGE_SHIFT))
28747+#ifdef CONFIG_HIGHMEM
28748+ if ((phys >> PAGE_SHIFT) < max_low_pfn)
28749+#endif
28750 return;
28751
28752 iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
28753@@ -339,7 +345,7 @@ static int __init early_ioremap_debug_setup(char *str)
28754 early_param("early_ioremap_debug", early_ioremap_debug_setup);
28755
28756 static __initdata int after_paging_init;
28757-static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
28758+static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __read_only __aligned(PAGE_SIZE);
28759
28760 static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
28761 {
28762@@ -376,8 +382,7 @@ void __init early_ioremap_init(void)
28763 slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);
28764
28765 pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
28766- memset(bm_pte, 0, sizeof(bm_pte));
28767- pmd_populate_kernel(&init_mm, pmd, bm_pte);
28768+ pmd_populate_user(&init_mm, pmd, bm_pte);
28769
28770 /*
28771 * The boot-ioremap range spans multiple pmds, for which
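
The CONFIG_HIGHMEM guards added to xlate_dev_mem_ptr()/unxlate_dev_mem_ptr()
above restrict the fast __va() path to RAM that actually sits in the direct
mapping (pfn below max_low_pfn); highmem RAM falls through to the ioremap path.
A sketch of the routing decision, with an assumed lowmem boundary:

#include <stdio.h>

#define PAGE_SHIFT      12
static unsigned long max_low_pfn = 0x38000;     /* assumed ~896 MiB lowmem */

static const char *dev_mem_path(unsigned long phys, int is_ram)
{
        if (is_ram && (phys >> PAGE_SHIFT) < max_low_pfn)
                return "__va() direct map";
        return "ioremap_cache() fallback";
}

int main(void)
{
        printf("%s\n", dev_mem_path(0x100000UL, 1));    /* lowmem RAM */
        printf("%s\n", dev_mem_path(0x40000000UL, 1));  /* highmem RAM */
        return 0;
}
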
28772diff --git a/arch/x86/mm/kmemcheck/kmemcheck.c b/arch/x86/mm/kmemcheck/kmemcheck.c
28773index d87dd6d..bf3fa66 100644
28774--- a/arch/x86/mm/kmemcheck/kmemcheck.c
28775+++ b/arch/x86/mm/kmemcheck/kmemcheck.c
28776@@ -622,9 +622,9 @@ bool kmemcheck_fault(struct pt_regs *regs, unsigned long address,
28777 * memory (e.g. tracked pages)? For now, we need this to avoid
28778 * invoking kmemcheck for PnP BIOS calls.
28779 */
28780- if (regs->flags & X86_VM_MASK)
28781+ if (v8086_mode(regs))
28782 return false;
28783- if (regs->cs != __KERNEL_CS)
28784+ if (regs->cs != __KERNEL_CS && regs->cs != __KERNEXEC_KERNEL_CS)
28785 return false;
28786
28787 pte = kmemcheck_pte_lookup(address);
28788diff --git a/arch/x86/mm/mmap.c b/arch/x86/mm/mmap.c
28789index 845df68..1d8d29f 100644
28790--- a/arch/x86/mm/mmap.c
28791+++ b/arch/x86/mm/mmap.c
28792@@ -52,7 +52,7 @@ static unsigned int stack_maxrandom_size(void)
28793 * Leave an at least ~128 MB hole with possible stack randomization.
28794 */
28795 #define MIN_GAP (128*1024*1024UL + stack_maxrandom_size())
28796-#define MAX_GAP (TASK_SIZE/6*5)
28797+#define MAX_GAP (pax_task_size/6*5)
28798
28799 static int mmap_is_legacy(void)
28800 {
28801@@ -82,27 +82,40 @@ static unsigned long mmap_rnd(void)
28802 return rnd << PAGE_SHIFT;
28803 }
28804
28805-static unsigned long mmap_base(void)
28806+static unsigned long mmap_base(struct mm_struct *mm)
28807 {
28808 unsigned long gap = rlimit(RLIMIT_STACK);
28809+ unsigned long pax_task_size = TASK_SIZE;
28810+
28811+#ifdef CONFIG_PAX_SEGMEXEC
28812+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
28813+ pax_task_size = SEGMEXEC_TASK_SIZE;
28814+#endif
28815
28816 if (gap < MIN_GAP)
28817 gap = MIN_GAP;
28818 else if (gap > MAX_GAP)
28819 gap = MAX_GAP;
28820
28821- return PAGE_ALIGN(TASK_SIZE - gap - mmap_rnd());
28822+ return PAGE_ALIGN(pax_task_size - gap - mmap_rnd());
28823 }
28824
28825 /*
28826 * Bottom-up (legacy) layout on X86_32 did not support randomization, X86_64
28827 * does, but not when emulating X86_32
28828 */
28829-static unsigned long mmap_legacy_base(void)
28830+static unsigned long mmap_legacy_base(struct mm_struct *mm)
28831 {
28832- if (mmap_is_ia32())
28833+ if (mmap_is_ia32()) {
28834+
28835+#ifdef CONFIG_PAX_SEGMEXEC
28836+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
28837+ return SEGMEXEC_TASK_UNMAPPED_BASE;
28838+ else
28839+#endif
28840+
28841 return TASK_UNMAPPED_BASE;
28842- else
28843+ } else
28844 return TASK_UNMAPPED_BASE + mmap_rnd();
28845 }
28846
28847@@ -113,11 +126,23 @@ static unsigned long mmap_legacy_base(void)
28848 void arch_pick_mmap_layout(struct mm_struct *mm)
28849 {
28850 if (mmap_is_legacy()) {
28851- mm->mmap_base = mmap_legacy_base();
28852+ mm->mmap_base = mmap_legacy_base(mm);
28853+
28854+#ifdef CONFIG_PAX_RANDMMAP
28855+ if (mm->pax_flags & MF_PAX_RANDMMAP)
28856+ mm->mmap_base += mm->delta_mmap;
28857+#endif
28858+
28859 mm->get_unmapped_area = arch_get_unmapped_area;
28860 mm->unmap_area = arch_unmap_area;
28861 } else {
28862- mm->mmap_base = mmap_base();
28863+ mm->mmap_base = mmap_base(mm);
28864+
28865+#ifdef CONFIG_PAX_RANDMMAP
28866+ if (mm->pax_flags & MF_PAX_RANDMMAP)
28867+ mm->mmap_base -= mm->delta_mmap + mm->delta_stack;
28868+#endif
28869+
28870 mm->get_unmapped_area = arch_get_unmapped_area_topdown;
28871 mm->unmap_area = arch_unmap_area_topdown;
28872 }
28873diff --git a/arch/x86/mm/mmio-mod.c b/arch/x86/mm/mmio-mod.c
28874index dc0b727..f612039 100644
28875--- a/arch/x86/mm/mmio-mod.c
28876+++ b/arch/x86/mm/mmio-mod.c
28877@@ -194,7 +194,7 @@ static void pre(struct kmmio_probe *p, struct pt_regs *regs,
28878 break;
28879 default:
28880 {
28881- unsigned char *ip = (unsigned char *)instptr;
28882+ unsigned char *ip = (unsigned char *)ktla_ktva(instptr);
28883 my_trace->opcode = MMIO_UNKNOWN_OP;
28884 my_trace->width = 0;
28885 my_trace->value = (*ip) << 16 | *(ip + 1) << 8 |
28886@@ -234,7 +234,7 @@ static void post(struct kmmio_probe *p, unsigned long condition,
28887 static void ioremap_trace_core(resource_size_t offset, unsigned long size,
28888 void __iomem *addr)
28889 {
28890- static atomic_t next_id;
28891+ static atomic_unchecked_t next_id;
28892 struct remap_trace *trace = kmalloc(sizeof(*trace), GFP_KERNEL);
28893 /* These are page-unaligned. */
28894 struct mmiotrace_map map = {
28895@@ -258,7 +258,7 @@ static void ioremap_trace_core(resource_size_t offset, unsigned long size,
28896 .private = trace
28897 },
28898 .phys = offset,
28899- .id = atomic_inc_return(&next_id)
28900+ .id = atomic_inc_return_unchecked(&next_id)
28901 };
28902 map.map_id = trace->id;
28903
28904@@ -290,7 +290,7 @@ void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
28905 ioremap_trace_core(offset, size, addr);
28906 }
28907
28908-static void iounmap_trace_core(volatile void __iomem *addr)
28909+static void iounmap_trace_core(const volatile void __iomem *addr)
28910 {
28911 struct mmiotrace_map map = {
28912 .phys = 0,
28913@@ -328,7 +328,7 @@ not_enabled:
28914 }
28915 }
28916
28917-void mmiotrace_iounmap(volatile void __iomem *addr)
28918+void mmiotrace_iounmap(const volatile void __iomem *addr)
28919 {
28920 might_sleep();
28921 if (is_enabled()) /* recheck and proper locking in *_core() */
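
[note] next_id above is a pure trace-ID generator, so wrapping it is harmless; converting it to atomic_unchecked_t opts it out of PaX's REFCOUNT protection, which would otherwise react to signed overflow of an atomic_t. A rough userspace model of the two behaviours (the type and the trap reaction are stand-ins, not the kernel's):

    #include <limits.h>
    #include <stdio.h>

    struct atomic_like { int counter; };

    /* checked = REFCOUNT-style atomic_t, unchecked = atomic_unchecked_t */
    static int inc_return(struct atomic_like *v, int checked)
    {
            if (checked && v->counter == INT_MAX)
                    __builtin_trap();   /* kernel would report and refuse */
            /* unchecked: plain wrap (via unsigned math; result is
             * implementation-defined, two's complement in practice) */
            v->counter = (int)((unsigned int)v->counter + 1u);
            return v->counter;
    }

    int main(void)
    {
            struct atomic_like id = { INT_MAX };
            printf("unchecked wraps to %d\n", inc_return(&id, 0));
            return 0;
    }
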
28922diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
28923index a71c4e2..301ae44 100644
28924--- a/arch/x86/mm/numa.c
28925+++ b/arch/x86/mm/numa.c
28926@@ -474,7 +474,7 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
28927 return true;
28928 }
28929
28930-static int __init numa_register_memblks(struct numa_meminfo *mi)
28931+static int __init __intentional_overflow(-1) numa_register_memblks(struct numa_meminfo *mi)
28932 {
28933 unsigned long uninitialized_var(pfn_align);
28934 int i, nid;
28935diff --git a/arch/x86/mm/pageattr-test.c b/arch/x86/mm/pageattr-test.c
28936index d0b1773..4c3327c 100644
28937--- a/arch/x86/mm/pageattr-test.c
28938+++ b/arch/x86/mm/pageattr-test.c
28939@@ -36,7 +36,7 @@ enum {
28940
28941 static int pte_testbit(pte_t pte)
28942 {
28943- return pte_flags(pte) & _PAGE_UNUSED1;
28944+ return pte_flags(pte) & _PAGE_CPA_TEST;
28945 }
28946
28947 struct split_state {
28948diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
28949index bb32480..aef8278 100644
28950--- a/arch/x86/mm/pageattr.c
28951+++ b/arch/x86/mm/pageattr.c
28952@@ -261,7 +261,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
28953 */
28954 #ifdef CONFIG_PCI_BIOS
28955 if (pcibios_enabled && within(pfn, BIOS_BEGIN >> PAGE_SHIFT, BIOS_END >> PAGE_SHIFT))
28956- pgprot_val(forbidden) |= _PAGE_NX;
28957+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
28958 #endif
28959
28960 /*
28961@@ -269,9 +269,10 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
28962 * Does not cover __inittext since that is gone later on. On
28963 * 64bit we do not enforce !NX on the low mapping
28964 */
28965- if (within(address, (unsigned long)_text, (unsigned long)_etext))
28966- pgprot_val(forbidden) |= _PAGE_NX;
28967+ if (within(address, ktla_ktva((unsigned long)_text), ktla_ktva((unsigned long)_etext)))
28968+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
28969
28970+#ifdef CONFIG_DEBUG_RODATA
28971 /*
28972 * The .rodata section needs to be read-only. Using the pfn
28973 * catches all aliases.
28974@@ -279,6 +280,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
28975 if (within(pfn, __pa_symbol(__start_rodata) >> PAGE_SHIFT,
28976 __pa_symbol(__end_rodata) >> PAGE_SHIFT))
28977 pgprot_val(forbidden) |= _PAGE_RW;
28978+#endif
28979
28980 #if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA)
28981 /*
28982@@ -317,6 +319,13 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
28983 }
28984 #endif
28985
28986+#ifdef CONFIG_PAX_KERNEXEC
28987+ if (within(pfn, __pa(ktla_ktva((unsigned long)&_text)), __pa((unsigned long)&_sdata))) {
28988+ pgprot_val(forbidden) |= _PAGE_RW;
28989+ pgprot_val(forbidden) |= _PAGE_NX & __supported_pte_mask;
28990+ }
28991+#endif
28992+
28993 prot = __pgprot(pgprot_val(prot) & ~pgprot_val(forbidden));
28994
28995 return prot;
28996@@ -400,23 +409,37 @@ EXPORT_SYMBOL_GPL(slow_virt_to_phys);
28997 static void __set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
28998 {
28999 /* change init_mm */
29000+ pax_open_kernel();
29001 set_pte_atomic(kpte, pte);
29002+
29003 #ifdef CONFIG_X86_32
29004 if (!SHARED_KERNEL_PMD) {
29005+
29006+#ifdef CONFIG_PAX_PER_CPU_PGD
29007+ unsigned long cpu;
29008+#else
29009 struct page *page;
29010+#endif
29011
29012+#ifdef CONFIG_PAX_PER_CPU_PGD
29013+ for (cpu = 0; cpu < nr_cpu_ids; ++cpu) {
29014+ pgd_t *pgd = get_cpu_pgd(cpu);
29015+#else
29016 list_for_each_entry(page, &pgd_list, lru) {
29017- pgd_t *pgd;
29018+ pgd_t *pgd = (pgd_t *)page_address(page);
29019+#endif
29020+
29021 pud_t *pud;
29022 pmd_t *pmd;
29023
29024- pgd = (pgd_t *)page_address(page) + pgd_index(address);
29025+ pgd += pgd_index(address);
29026 pud = pud_offset(pgd, address);
29027 pmd = pmd_offset(pud, address);
29028 set_pte_atomic((pte_t *)pmd, pte);
29029 }
29030 }
29031 #endif
29032+ pax_close_kernel();
29033 }
29034
29035 static int
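
[note] static_protections() works by accumulating a "forbidden" mask and stripping it from the requested protections at the end. The KERNEXEC hunk adds the kernel image (text through the start of data) to the ranges that must stay non-writable and non-executable-as-data, and every _PAGE_NX use is filtered through __supported_pte_mask so the bit is never set on hardware without NX. A condensed sketch of the pattern — flag values and range tests are placeholders:

    #define _PAGE_RW 0x002UL
    #define _PAGE_NX (1UL << 63)

    static unsigned long apply_static_protections(unsigned long prot,
                                                  int in_text, int in_rodata,
                                                  unsigned long supported_pte_mask)
    {
            unsigned long forbidden = 0;

            if (in_text) {
                    /* keep kernel text executable ... */
                    forbidden |= _PAGE_NX & supported_pte_mask;
                    /* ... and, KERNEXEC-style, never writable */
                    forbidden |= _PAGE_RW;
            }
            if (in_rodata)                  /* keep .rodata read-only */
                    forbidden |= _PAGE_RW;

            return prot & ~forbidden;
    }
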
29036diff --git a/arch/x86/mm/pat.c b/arch/x86/mm/pat.c
29037index 6574388..87e9bef 100644
29038--- a/arch/x86/mm/pat.c
29039+++ b/arch/x86/mm/pat.c
29040@@ -376,7 +376,7 @@ int free_memtype(u64 start, u64 end)
29041
29042 if (!entry) {
29043 printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
29044- current->comm, current->pid, start, end - 1);
29045+ current->comm, task_pid_nr(current), start, end - 1);
29046 return -EINVAL;
29047 }
29048
29049@@ -506,8 +506,8 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
29050
29051 while (cursor < to) {
29052 if (!devmem_is_allowed(pfn)) {
29053- printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx]\n",
29054- current->comm, from, to - 1);
29055+ printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx] (%#010Lx)\n",
29056+ current->comm, from, to - 1, cursor);
29057 return 0;
29058 }
29059 cursor += PAGE_SIZE;
29060@@ -577,7 +577,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size, unsigned long flags)
29061 if (ioremap_change_attr((unsigned long)__va(base), id_sz, flags) < 0) {
29062 printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
29063 "for [mem %#010Lx-%#010Lx]\n",
29064- current->comm, current->pid,
29065+ current->comm, task_pid_nr(current),
29066 cattr_name(flags),
29067 base, (unsigned long long)(base + size-1));
29068 return -EINVAL;
29069@@ -612,7 +612,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
29070 flags = lookup_memtype(paddr);
29071 if (want_flags != flags) {
29072 printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
29073- current->comm, current->pid,
29074+ current->comm, task_pid_nr(current),
29075 cattr_name(want_flags),
29076 (unsigned long long)paddr,
29077 (unsigned long long)(paddr + size - 1),
29078@@ -634,7 +634,7 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
29079 free_memtype(paddr, paddr + size);
29080 printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
29081 " for [mem %#010Lx-%#010Lx], got %s\n",
29082- current->comm, current->pid,
29083+ current->comm, task_pid_nr(current),
29084 cattr_name(want_flags),
29085 (unsigned long long)paddr,
29086 (unsigned long long)(paddr + size - 1),
29087diff --git a/arch/x86/mm/pat_rbtree.c b/arch/x86/mm/pat_rbtree.c
29088index 415f6c4..d319983 100644
29089--- a/arch/x86/mm/pat_rbtree.c
29090+++ b/arch/x86/mm/pat_rbtree.c
29091@@ -160,7 +160,7 @@ success:
29092
29093 failure:
29094 printk(KERN_INFO "%s:%d conflicting memory types "
29095- "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
29096+ "%Lx-%Lx %s<->%s\n", current->comm, task_pid_nr(current), start,
29097 end, cattr_name(found_type), cattr_name(match->type));
29098 return -EBUSY;
29099 }
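
[note] The current->pid to task_pid_nr(current) substitutions in the last two files are all of the same shape: diagnostic printks go through the sanctioned accessor for the global PID instead of poking the raw task field, which keeps callers uniform and namespace-auditable. The preferred form, for reference:

    /* same output for global-namespace tasks, but via the accessor */
    printk(KERN_INFO "%s:%d ...\n", current->comm, task_pid_nr(current));
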
29100diff --git a/arch/x86/mm/pf_in.c b/arch/x86/mm/pf_in.c
29101index 9f0614d..92ae64a 100644
29102--- a/arch/x86/mm/pf_in.c
29103+++ b/arch/x86/mm/pf_in.c
29104@@ -148,7 +148,7 @@ enum reason_type get_ins_type(unsigned long ins_addr)
29105 int i;
29106 enum reason_type rv = OTHERS;
29107
29108- p = (unsigned char *)ins_addr;
29109+ p = (unsigned char *)ktla_ktva(ins_addr);
29110 p += skip_prefix(p, &prf);
29111 p += get_opcode(p, &opcode);
29112
29113@@ -168,7 +168,7 @@ static unsigned int get_ins_reg_width(unsigned long ins_addr)
29114 struct prefix_bits prf;
29115 int i;
29116
29117- p = (unsigned char *)ins_addr;
29118+ p = (unsigned char *)ktla_ktva(ins_addr);
29119 p += skip_prefix(p, &prf);
29120 p += get_opcode(p, &opcode);
29121
29122@@ -191,7 +191,7 @@ unsigned int get_ins_mem_width(unsigned long ins_addr)
29123 struct prefix_bits prf;
29124 int i;
29125
29126- p = (unsigned char *)ins_addr;
29127+ p = (unsigned char *)ktla_ktva(ins_addr);
29128 p += skip_prefix(p, &prf);
29129 p += get_opcode(p, &opcode);
29130
29131@@ -415,7 +415,7 @@ unsigned long get_ins_reg_val(unsigned long ins_addr, struct pt_regs *regs)
29132 struct prefix_bits prf;
29133 int i;
29134
29135- p = (unsigned char *)ins_addr;
29136+ p = (unsigned char *)ktla_ktva(ins_addr);
29137 p += skip_prefix(p, &prf);
29138 p += get_opcode(p, &opcode);
29139 for (i = 0; i < ARRAY_SIZE(reg_rop); i++)
29140@@ -470,7 +470,7 @@ unsigned long get_ins_imm_val(unsigned long ins_addr)
29141 struct prefix_bits prf;
29142 int i;
29143
29144- p = (unsigned char *)ins_addr;
29145+ p = (unsigned char *)ktla_ktva(ins_addr);
29146 p += skip_prefix(p, &prf);
29147 p += get_opcode(p, &opcode);
29148 for (i = 0; i < ARRAY_SIZE(imm_wop); i++)
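
[note] Every instruction-decode path above now fetches opcode bytes through ktla_ktva(). Under KERNEXEC on i386 the kernel text lives at a different virtual address than its alias in the linear mapping, so a text address must be translated before it can be dereferenced as data. A toy model of that translation — the offset is an assumption, not the real per-config constant:

    /* ktla_ktva: "kernel text linear address" -> "kernel text virtual address" */
    #define KTEXT_OFFSET   0x10000000UL            /* placeholder offset */
    #define ktla_ktva(addr) ((addr) + KTEXT_OFFSET)

    static unsigned char fetch_opcode_byte(unsigned long ins_addr)
    {
            const unsigned char *p = (const unsigned char *)ktla_ktva(ins_addr);
            return *p;   /* read the instruction stream through the data alias */
    }
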
29149diff --git a/arch/x86/mm/pgtable.c b/arch/x86/mm/pgtable.c
29150index 17fda6a..489c74a 100644
29151--- a/arch/x86/mm/pgtable.c
29152+++ b/arch/x86/mm/pgtable.c
29153@@ -91,10 +91,64 @@ static inline void pgd_list_del(pgd_t *pgd)
29154 list_del(&page->lru);
29155 }
29156
29157-#define UNSHARED_PTRS_PER_PGD \
29158- (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
29159+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29160+pgdval_t clone_pgd_mask __read_only = ~_PAGE_PRESENT;
29161
29162+void __shadow_user_pgds(pgd_t *dst, const pgd_t *src)
29163+{
29164+ unsigned int count = USER_PGD_PTRS;
29165
29166+ while (count--)
29167+ *dst++ = __pgd((pgd_val(*src++) | (_PAGE_NX & __supported_pte_mask)) & ~_PAGE_USER);
29168+}
29169+#endif
29170+
29171+#ifdef CONFIG_PAX_PER_CPU_PGD
29172+void __clone_user_pgds(pgd_t *dst, const pgd_t *src)
29173+{
29174+ unsigned int count = USER_PGD_PTRS;
29175+
29176+ while (count--) {
29177+ pgd_t pgd;
29178+
29179+#ifdef CONFIG_X86_64
29180+ pgd = __pgd(pgd_val(*src++) | _PAGE_USER);
29181+#else
29182+ pgd = *src++;
29183+#endif
29184+
29185+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
29186+ pgd = __pgd(pgd_val(pgd) & clone_pgd_mask);
29187+#endif
29188+
29189+ *dst++ = pgd;
29190+ }
29191+
29192+}
29193+#endif
29194+
29195+#ifdef CONFIG_X86_64
29196+#define pxd_t pud_t
29197+#define pyd_t pgd_t
29198+#define paravirt_release_pxd(pfn) paravirt_release_pud(pfn)
29199+#define pxd_free(mm, pud) pud_free((mm), (pud))
29200+#define pyd_populate(mm, pgd, pud) pgd_populate((mm), (pgd), (pud))
29201+#define pyd_offset(mm, address) pgd_offset((mm), (address))
29202+#define PYD_SIZE PGDIR_SIZE
29203+#else
29204+#define pxd_t pmd_t
29205+#define pyd_t pud_t
29206+#define paravirt_release_pxd(pfn) paravirt_release_pmd(pfn)
29207+#define pxd_free(mm, pud) pmd_free((mm), (pud))
29208+#define pyd_populate(mm, pgd, pud) pud_populate((mm), (pgd), (pud))
29209+#define pyd_offset(mm, address) pud_offset((mm), (address))
29210+#define PYD_SIZE PUD_SIZE
29211+#endif
29212+
29213+#ifdef CONFIG_PAX_PER_CPU_PGD
29214+static inline void pgd_ctor(struct mm_struct *mm, pgd_t *pgd) {}
29215+static inline void pgd_dtor(pgd_t *pgd) {}
29216+#else
29217 static void pgd_set_mm(pgd_t *pgd, struct mm_struct *mm)
29218 {
29219 BUILD_BUG_ON(sizeof(virt_to_page(pgd)->index) < sizeof(mm));
29220@@ -135,6 +189,7 @@ static void pgd_dtor(pgd_t *pgd)
29221 pgd_list_del(pgd);
29222 spin_unlock(&pgd_lock);
29223 }
29224+#endif
29225
29226 /*
29227 * List of all pgd's needed for non-PAE so it can invalidate entries
29228@@ -147,7 +202,7 @@ static void pgd_dtor(pgd_t *pgd)
29229 * -- nyc
29230 */
29231
29232-#ifdef CONFIG_X86_PAE
29233+#if defined(CONFIG_X86_32) && defined(CONFIG_X86_PAE)
29234 /*
29235 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
29236 * updating the top-level pagetable entries to guarantee the
29237@@ -159,7 +214,7 @@ static void pgd_dtor(pgd_t *pgd)
29238 * not shared between pagetables (!SHARED_KERNEL_PMDS), we allocate
29239 * and initialize the kernel pmds here.
29240 */
29241-#define PREALLOCATED_PMDS UNSHARED_PTRS_PER_PGD
29242+#define PREALLOCATED_PXDS (SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)
29243
29244 void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
29245 {
29246@@ -177,36 +232,38 @@ void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
29247 */
29248 flush_tlb_mm(mm);
29249 }
29250+#elif defined(CONFIG_X86_64) && defined(CONFIG_PAX_PER_CPU_PGD)
29251+#define PREALLOCATED_PXDS USER_PGD_PTRS
29252 #else /* !CONFIG_X86_PAE */
29253
29254 /* No need to prepopulate any pagetable entries in non-PAE modes. */
29255-#define PREALLOCATED_PMDS 0
29256+#define PREALLOCATED_PXDS 0
29257
29258 #endif /* CONFIG_X86_PAE */
29259
29260-static void free_pmds(pmd_t *pmds[])
29261+static void free_pxds(pxd_t *pxds[])
29262 {
29263 int i;
29264
29265- for(i = 0; i < PREALLOCATED_PMDS; i++)
29266- if (pmds[i])
29267- free_page((unsigned long)pmds[i]);
29268+ for(i = 0; i < PREALLOCATED_PXDS; i++)
29269+ if (pxds[i])
29270+ free_page((unsigned long)pxds[i]);
29271 }
29272
29273-static int preallocate_pmds(pmd_t *pmds[])
29274+static int preallocate_pxds(pxd_t *pxds[])
29275 {
29276 int i;
29277 bool failed = false;
29278
29279- for(i = 0; i < PREALLOCATED_PMDS; i++) {
29280- pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
29281- if (pmd == NULL)
29282+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
29283+ pxd_t *pxd = (pxd_t *)__get_free_page(PGALLOC_GFP);
29284+ if (pxd == NULL)
29285 failed = true;
29286- pmds[i] = pmd;
29287+ pxds[i] = pxd;
29288 }
29289
29290 if (failed) {
29291- free_pmds(pmds);
29292+ free_pxds(pxds);
29293 return -ENOMEM;
29294 }
29295
29296@@ -219,51 +276,55 @@ static int preallocate_pmds(pmd_t *pmds[])
29297 * preallocate which never got a corresponding vma will need to be
29298 * freed manually.
29299 */
29300-static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
29301+static void pgd_mop_up_pxds(struct mm_struct *mm, pgd_t *pgdp)
29302 {
29303 int i;
29304
29305- for(i = 0; i < PREALLOCATED_PMDS; i++) {
29306+ for(i = 0; i < PREALLOCATED_PXDS; i++) {
29307 pgd_t pgd = pgdp[i];
29308
29309 if (pgd_val(pgd) != 0) {
29310- pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);
29311+ pxd_t *pxd = (pxd_t *)pgd_page_vaddr(pgd);
29312
29313- pgdp[i] = native_make_pgd(0);
29314+ set_pgd(pgdp + i, native_make_pgd(0));
29315
29316- paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
29317- pmd_free(mm, pmd);
29318+ paravirt_release_pxd(pgd_val(pgd) >> PAGE_SHIFT);
29319+ pxd_free(mm, pxd);
29320 }
29321 }
29322 }
29323
29324-static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
29325+static void pgd_prepopulate_pxd(struct mm_struct *mm, pgd_t *pgd, pxd_t *pxds[])
29326 {
29327- pud_t *pud;
29328+ pyd_t *pyd;
29329 unsigned long addr;
29330 int i;
29331
29332- if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
29333+ if (PREALLOCATED_PXDS == 0) /* Work around gcc-3.4.x bug */
29334 return;
29335
29336- pud = pud_offset(pgd, 0);
29337+#ifdef CONFIG_X86_64
29338+ pyd = pyd_offset(mm, 0L);
29339+#else
29340+ pyd = pyd_offset(pgd, 0L);
29341+#endif
29342
29343- for (addr = i = 0; i < PREALLOCATED_PMDS;
29344- i++, pud++, addr += PUD_SIZE) {
29345- pmd_t *pmd = pmds[i];
29346+ for (addr = i = 0; i < PREALLOCATED_PXDS;
29347+ i++, pyd++, addr += PYD_SIZE) {
29348+ pxd_t *pxd = pxds[i];
29349
29350 if (i >= KERNEL_PGD_BOUNDARY)
29351- memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
29352- sizeof(pmd_t) * PTRS_PER_PMD);
29353+ memcpy(pxd, (pxd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
29354+ sizeof(pxd_t) * PTRS_PER_PMD);
29355
29356- pud_populate(mm, pud, pmd);
29357+ pyd_populate(mm, pyd, pxd);
29358 }
29359 }
29360
29361 pgd_t *pgd_alloc(struct mm_struct *mm)
29362 {
29363 pgd_t *pgd;
29364- pmd_t *pmds[PREALLOCATED_PMDS];
29365+ pxd_t *pxds[PREALLOCATED_PXDS];
29366
29367 pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);
29368
29369@@ -272,11 +333,11 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
29370
29371 mm->pgd = pgd;
29372
29373- if (preallocate_pmds(pmds) != 0)
29374+ if (preallocate_pxds(pxds) != 0)
29375 goto out_free_pgd;
29376
29377 if (paravirt_pgd_alloc(mm) != 0)
29378- goto out_free_pmds;
29379+ goto out_free_pxds;
29380
29381 /*
29382 * Make sure that pre-populating the pmds is atomic with
29383@@ -286,14 +347,14 @@ pgd_t *pgd_alloc(struct mm_struct *mm)
29384 spin_lock(&pgd_lock);
29385
29386 pgd_ctor(mm, pgd);
29387- pgd_prepopulate_pmd(mm, pgd, pmds);
29388+ pgd_prepopulate_pxd(mm, pgd, pxds);
29389
29390 spin_unlock(&pgd_lock);
29391
29392 return pgd;
29393
29394-out_free_pmds:
29395- free_pmds(pmds);
29396+out_free_pxds:
29397+ free_pxds(pxds);
29398 out_free_pgd:
29399 free_page((unsigned long)pgd);
29400 out:
29401@@ -302,7 +363,7 @@ out:
29402
29403 void pgd_free(struct mm_struct *mm, pgd_t *pgd)
29404 {
29405- pgd_mop_up_pmds(mm, pgd);
29406+ pgd_mop_up_pxds(mm, pgd);
29407 pgd_dtor(pgd);
29408 paravirt_pgd_free(mm, pgd);
29409 free_page((unsigned long)pgd);
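
[note] The pxd/pyd renames above make one preallocation path serve two configurations: pxd is the page-table level being preallocated, pyd its parent. Spelled out, the aliases from the #define block resolve as:

                    64-bit (PER_CPU_PGD)    32-bit (PAE)
    pxd_t           pud_t                   pmd_t
    pyd_t           pgd_t                   pud_t
    PYD_SIZE        PGDIR_SIZE              PUD_SIZE
    pyd_populate    pgd_populate            pud_populate

so pgd_prepopulate_pxd() walks PREALLOCATED_PXDS parent slots and plugs a preallocated child page into each, whichever pair of levels that is for the build.
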
29410diff --git a/arch/x86/mm/pgtable_32.c b/arch/x86/mm/pgtable_32.c
29411index a69bcb8..19068ab 100644
29412--- a/arch/x86/mm/pgtable_32.c
29413+++ b/arch/x86/mm/pgtable_32.c
29414@@ -47,10 +47,13 @@ void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
29415 return;
29416 }
29417 pte = pte_offset_kernel(pmd, vaddr);
29418+
29419+ pax_open_kernel();
29420 if (pte_val(pteval))
29421 set_pte_at(&init_mm, vaddr, pte, pteval);
29422 else
29423 pte_clear(&init_mm, vaddr, pte);
29424+ pax_close_kernel();
29425
29426 /*
29427 * It's enough to flush this one mapping.
29428diff --git a/arch/x86/mm/physaddr.c b/arch/x86/mm/physaddr.c
29429index e666cbb..61788c45 100644
29430--- a/arch/x86/mm/physaddr.c
29431+++ b/arch/x86/mm/physaddr.c
29432@@ -10,7 +10,7 @@
29433 #ifdef CONFIG_X86_64
29434
29435 #ifdef CONFIG_DEBUG_VIRTUAL
29436-unsigned long __phys_addr(unsigned long x)
29437+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
29438 {
29439 unsigned long y = x - __START_KERNEL_map;
29440
29441@@ -67,7 +67,7 @@ EXPORT_SYMBOL(__virt_addr_valid);
29442 #else
29443
29444 #ifdef CONFIG_DEBUG_VIRTUAL
29445-unsigned long __phys_addr(unsigned long x)
29446+unsigned long __intentional_overflow(-1) __phys_addr(unsigned long x)
29447 {
29448 unsigned long phys_addr = x - PAGE_OFFSET;
29449 /* VMALLOC_* aren't constants */
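
[note] __intentional_overflow(-1), here and in the numa.c hunk earlier, is an annotation consumed by the size_overflow gcc plugin: it marks the function's return value (-1 selects the return) as deliberately wrap-capable, so the plugin emits no overflow check for it. Without the plugin the macro should expand to nothing, roughly (a sketch, not the exact compiler.h definition):

    #ifdef SIZE_OVERFLOW_PLUGIN
    #define __intentional_overflow(...) \
            __attribute__((intentional_overflow(__VA_ARGS__)))
    #else
    #define __intentional_overflow(...)
    #endif
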
29450diff --git a/arch/x86/mm/setup_nx.c b/arch/x86/mm/setup_nx.c
29451index 410531d..0f16030 100644
29452--- a/arch/x86/mm/setup_nx.c
29453+++ b/arch/x86/mm/setup_nx.c
29454@@ -5,8 +5,10 @@
29455 #include <asm/pgtable.h>
29456 #include <asm/proto.h>
29457
29458+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
29459 static int disable_nx __cpuinitdata;
29460
29461+#ifndef CONFIG_PAX_PAGEEXEC
29462 /*
29463 * noexec = on|off
29464 *
29465@@ -28,12 +30,17 @@ static int __init noexec_setup(char *str)
29466 return 0;
29467 }
29468 early_param("noexec", noexec_setup);
29469+#endif
29470+
29471+#endif
29472
29473 void __cpuinit x86_configure_nx(void)
29474 {
29475+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
29476 if (cpu_has_nx && !disable_nx)
29477 __supported_pte_mask |= _PAGE_NX;
29478 else
29479+#endif
29480 __supported_pte_mask &= ~_PAGE_NX;
29481 }
29482
29483diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
29484index 282375f..e03a98f 100644
29485--- a/arch/x86/mm/tlb.c
29486+++ b/arch/x86/mm/tlb.c
29487@@ -48,7 +48,11 @@ void leave_mm(int cpu)
29488 BUG();
29489 if (cpumask_test_cpu(cpu, mm_cpumask(active_mm))) {
29490 cpumask_clear_cpu(cpu, mm_cpumask(active_mm));
29491+
29492+#ifndef CONFIG_PAX_PER_CPU_PGD
29493 load_cr3(swapper_pg_dir);
29494+#endif
29495+
29496 }
29497 }
29498 EXPORT_SYMBOL_GPL(leave_mm);
29499diff --git a/arch/x86/net/bpf_jit.S b/arch/x86/net/bpf_jit.S
29500index 877b9a1..a8ecf42 100644
29501--- a/arch/x86/net/bpf_jit.S
29502+++ b/arch/x86/net/bpf_jit.S
29503@@ -9,6 +9,7 @@
29504 */
29505 #include <linux/linkage.h>
29506 #include <asm/dwarf2.h>
29507+#include <asm/alternative-asm.h>
29508
29509 /*
29510 * Calling convention :
29511@@ -35,6 +36,7 @@ sk_load_word_positive_offset:
29512 jle bpf_slow_path_word
29513 mov (SKBDATA,%rsi),%eax
29514 bswap %eax /* ntohl() */
29515+ pax_force_retaddr
29516 ret
29517
29518 sk_load_half:
29519@@ -52,6 +54,7 @@ sk_load_half_positive_offset:
29520 jle bpf_slow_path_half
29521 movzwl (SKBDATA,%rsi),%eax
29522 rol $8,%ax # ntohs()
29523+ pax_force_retaddr
29524 ret
29525
29526 sk_load_byte:
29527@@ -66,6 +69,7 @@ sk_load_byte_positive_offset:
29528 cmp %esi,%r9d /* if (offset >= hlen) goto bpf_slow_path_byte */
29529 jle bpf_slow_path_byte
29530 movzbl (SKBDATA,%rsi),%eax
29531+ pax_force_retaddr
29532 ret
29533
29534 /**
29535@@ -87,6 +91,7 @@ sk_load_byte_msh_positive_offset:
29536 movzbl (SKBDATA,%rsi),%ebx
29537 and $15,%bl
29538 shl $2,%bl
29539+ pax_force_retaddr
29540 ret
29541
29542 /* rsi contains offset and can be scratched */
29543@@ -109,6 +114,7 @@ bpf_slow_path_word:
29544 js bpf_error
29545 mov -12(%rbp),%eax
29546 bswap %eax
29547+ pax_force_retaddr
29548 ret
29549
29550 bpf_slow_path_half:
29551@@ -117,12 +123,14 @@ bpf_slow_path_half:
29552 mov -12(%rbp),%ax
29553 rol $8,%ax
29554 movzwl %ax,%eax
29555+ pax_force_retaddr
29556 ret
29557
29558 bpf_slow_path_byte:
29559 bpf_slow_path_common(1)
29560 js bpf_error
29561 movzbl -12(%rbp),%eax
29562+ pax_force_retaddr
29563 ret
29564
29565 bpf_slow_path_byte_msh:
29566@@ -133,6 +141,7 @@ bpf_slow_path_byte_msh:
29567 and $15,%al
29568 shl $2,%al
29569 xchg %eax,%ebx
29570+ pax_force_retaddr
29571 ret
29572
29573 #define sk_negative_common(SIZE) \
29574@@ -157,6 +166,7 @@ sk_load_word_negative_offset:
29575 sk_negative_common(4)
29576 mov (%rax), %eax
29577 bswap %eax
29578+ pax_force_retaddr
29579 ret
29580
29581 bpf_slow_path_half_neg:
29582@@ -168,6 +178,7 @@ sk_load_half_negative_offset:
29583 mov (%rax),%ax
29584 rol $8,%ax
29585 movzwl %ax,%eax
29586+ pax_force_retaddr
29587 ret
29588
29589 bpf_slow_path_byte_neg:
29590@@ -177,6 +188,7 @@ sk_load_byte_negative_offset:
29591 .globl sk_load_byte_negative_offset
29592 sk_negative_common(1)
29593 movzbl (%rax), %eax
29594+ pax_force_retaddr
29595 ret
29596
29597 bpf_slow_path_byte_msh_neg:
29598@@ -190,6 +202,7 @@ sk_load_byte_msh_negative_offset:
29599 and $15,%al
29600 shl $2,%al
29601 xchg %eax,%ebx
29602+ pax_force_retaddr
29603 ret
29604
29605 bpf_error:
29606@@ -197,4 +210,5 @@ bpf_error:
29607 xor %eax,%eax
29608 mov -8(%rbp),%rbx
29609 leaveq
29610+ pax_force_retaddr
29611 ret
29612diff --git a/arch/x86/net/bpf_jit_comp.c b/arch/x86/net/bpf_jit_comp.c
29613index f66b540..3e88dfb 100644
29614--- a/arch/x86/net/bpf_jit_comp.c
29615+++ b/arch/x86/net/bpf_jit_comp.c
29616@@ -12,6 +12,7 @@
29617 #include <linux/netdevice.h>
29618 #include <linux/filter.h>
29619 #include <linux/if_vlan.h>
29620+#include <linux/random.h>
29621
29622 /*
29623 * Conventions :
29624@@ -49,13 +50,90 @@ static inline u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
29625 return ptr + len;
29626 }
29627
29628+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29629+#define MAX_INSTR_CODE_SIZE 96
29630+#else
29631+#define MAX_INSTR_CODE_SIZE 64
29632+#endif
29633+
29634 #define EMIT(bytes, len) do { prog = emit_code(prog, bytes, len); } while (0)
29635
29636 #define EMIT1(b1) EMIT(b1, 1)
29637 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
29638 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
29639 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
29640+
29641+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29642+/* original constant will appear in ecx */
29643+#define DILUTE_CONST_SEQUENCE(_off, _key) \
29644+do { \
29645+ /* mov ecx, randkey */ \
29646+ EMIT1(0xb9); \
29647+ EMIT(_key, 4); \
29648+ /* xor ecx, randkey ^ off */ \
29649+ EMIT2(0x81, 0xf1); \
29650+ EMIT((_key) ^ (_off), 4); \
29651+} while (0)
29652+
29653+#define EMIT1_off32(b1, _off) \
29654+do { \
29655+ switch (b1) { \
29656+ case 0x05: /* add eax, imm32 */ \
29657+ case 0x2d: /* sub eax, imm32 */ \
29658+ case 0x25: /* and eax, imm32 */ \
29659+ case 0x0d: /* or eax, imm32 */ \
29660+ case 0xb8: /* mov eax, imm32 */ \
29661+ case 0x35: /* xor eax, imm32 */ \
29662+ case 0x3d: /* cmp eax, imm32 */ \
29663+ case 0xa9: /* test eax, imm32 */ \
29664+ DILUTE_CONST_SEQUENCE(_off, randkey); \
29665+ EMIT2((b1) - 4, 0xc8); /* convert imm instruction to eax, ecx */\
29666+ break; \
29667+ case 0xbb: /* mov ebx, imm32 */ \
29668+ DILUTE_CONST_SEQUENCE(_off, randkey); \
29669+ /* mov ebx, ecx */ \
29670+ EMIT2(0x89, 0xcb); \
29671+ break; \
29672+ case 0xbe: /* mov esi, imm32 */ \
29673+ DILUTE_CONST_SEQUENCE(_off, randkey); \
29674+ /* mov esi, ecx */ \
29675+ EMIT2(0x89, 0xce); \
29676+ break; \
29677+ case 0xe8: /* call rel imm32, always to known funcs */ \
29678+ EMIT1(b1); \
29679+ EMIT(_off, 4); \
29680+ break; \
29681+ case 0xe9: /* jmp rel imm32 */ \
29682+ EMIT1(b1); \
29683+ EMIT(_off, 4); \
29684+ /* prevent fall-through, we're not called if off = 0 */ \
29685+ EMIT(0xcccccccc, 4); \
29686+ EMIT(0xcccccccc, 4); \
29687+ break; \
29688+ default: \
29689+ BUILD_BUG(); \
29690+ } \
29691+} while (0)
29692+
29693+#define EMIT2_off32(b1, b2, _off) \
29694+do { \
29695+ if ((b1) == 0x8d && (b2) == 0xb3) { /* lea esi, [rbx+imm32] */ \
29696+ EMIT2(0x8d, 0xb3); /* lea esi, [rbx+randkey] */ \
29697+ EMIT(randkey, 4); \
29698+ EMIT2(0x8d, 0xb6); /* lea esi, [esi+off-randkey] */ \
29699+ EMIT((_off) - randkey, 4); \
29700+ } else if ((b1) == 0x69 && (b2) == 0xc0) { /* imul eax, imm32 */\
29701+ DILUTE_CONST_SEQUENCE(_off, randkey); \
29702+ /* imul eax, ecx */ \
29703+ EMIT3(0x0f, 0xaf, 0xc1); \
29704+ } else { \
29705+ BUILD_BUG(); \
29706+ } \
29707+} while (0)
29708+#else
29709 #define EMIT1_off32(b1, off) do { EMIT1(b1); EMIT(off, 4);} while (0)
29710+#define EMIT2_off32(b1, b2, off) do { EMIT2(b1, b2); EMIT(off, 4);} while (0)
29711+#endif
29712
29713 #define CLEAR_A() EMIT2(0x31, 0xc0) /* xor %eax,%eax */
29714 #define CLEAR_X() EMIT2(0x31, 0xdb) /* xor %ebx,%ebx */
29715@@ -90,6 +168,24 @@ do { \
29716 #define X86_JBE 0x76
29717 #define X86_JA 0x77
29718
29719+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29720+#define APPEND_FLOW_VERIFY() \
29721+do { \
29722+ /* mov ecx, randkey */ \
29723+ EMIT1(0xb9); \
29724+ EMIT(randkey, 4); \
29725+ /* cmp ecx, randkey */ \
29726+ EMIT2(0x81, 0xf9); \
29727+ EMIT(randkey, 4); \
29728+ /* jz after 8 int 3s */ \
29729+ EMIT2(0x74, 0x08); \
29730+ EMIT(0xcccccccc, 4); \
29731+ EMIT(0xcccccccc, 4); \
29732+} while (0)
29733+#else
29734+#define APPEND_FLOW_VERIFY() do { } while (0)
29735+#endif
29736+
29737 #define EMIT_COND_JMP(op, offset) \
29738 do { \
29739 if (is_near(offset)) \
29740@@ -97,6 +193,7 @@ do { \
29741 else { \
29742 EMIT2(0x0f, op + 0x10); \
29743 EMIT(offset, 4); /* jxx .+off32 */ \
29744+ APPEND_FLOW_VERIFY(); \
29745 } \
29746 } while (0)
29747
29748@@ -121,6 +218,11 @@ static inline void bpf_flush_icache(void *start, void *end)
29749 set_fs(old_fs);
29750 }
29751
29752+struct bpf_jit_work {
29753+ struct work_struct work;
29754+ void *image;
29755+};
29756+
29757 #define CHOOSE_LOAD_FUNC(K, func) \
29758 ((int)K < 0 ? ((int)K >= SKF_LL_OFF ? func##_negative_offset : func) : func##_positive_offset)
29759
29760@@ -146,7 +248,7 @@ static int pkt_type_offset(void)
29761
29762 void bpf_jit_compile(struct sk_filter *fp)
29763 {
29764- u8 temp[64];
29765+ u8 temp[MAX_INSTR_CODE_SIZE];
29766 u8 *prog;
29767 unsigned int proglen, oldproglen = 0;
29768 int ilen, i;
29769@@ -159,6 +261,9 @@ void bpf_jit_compile(struct sk_filter *fp)
29770 unsigned int *addrs;
29771 const struct sock_filter *filter = fp->insns;
29772 int flen = fp->len;
29773+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29774+ unsigned int randkey;
29775+#endif
29776
29777 if (!bpf_jit_enable)
29778 return;
29779@@ -167,11 +272,19 @@ void bpf_jit_compile(struct sk_filter *fp)
29780 if (addrs == NULL)
29781 return;
29782
29783+ fp->work = kmalloc(sizeof(*fp->work), GFP_KERNEL);
29784+ if (!fp->work)
29785+ goto out;
29786+
29787+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29788+ randkey = get_random_int();
29789+#endif
29790+
29791 /* Before first pass, make a rough estimation of addrs[]
29792- * each bpf instruction is translated to less than 64 bytes
29793+ * each bpf instruction is translated to less than MAX_INSTR_CODE_SIZE bytes
29794 */
29795 for (proglen = 0, i = 0; i < flen; i++) {
29796- proglen += 64;
29797+ proglen += MAX_INSTR_CODE_SIZE;
29798 addrs[i] = proglen;
29799 }
29800 cleanup_addr = proglen; /* epilogue address */
29801@@ -282,10 +395,8 @@ void bpf_jit_compile(struct sk_filter *fp)
29802 case BPF_S_ALU_MUL_K: /* A *= K */
29803 if (is_imm8(K))
29804 EMIT3(0x6b, 0xc0, K); /* imul imm8,%eax,%eax */
29805- else {
29806- EMIT2(0x69, 0xc0); /* imul imm32,%eax */
29807- EMIT(K, 4);
29808- }
29809+ else
29810+ EMIT2_off32(0x69, 0xc0, K); /* imul imm32,%eax */
29811 break;
29812 case BPF_S_ALU_DIV_X: /* A /= X; */
29813 seen |= SEEN_XREG;
29814@@ -325,13 +436,23 @@ void bpf_jit_compile(struct sk_filter *fp)
29815 break;
29816 case BPF_S_ALU_MOD_K: /* A %= K; */
29817 EMIT2(0x31, 0xd2); /* xor %edx,%edx */
29818+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29819+ DILUTE_CONST_SEQUENCE(K, randkey);
29820+#else
29821 EMIT1(0xb9);EMIT(K, 4); /* mov imm32,%ecx */
29822+#endif
29823 EMIT2(0xf7, 0xf1); /* div %ecx */
29824 EMIT2(0x89, 0xd0); /* mov %edx,%eax */
29825 break;
29826 case BPF_S_ALU_DIV_K: /* A = reciprocal_divide(A, K); */
29827+#ifdef CONFIG_GRKERNSEC_JIT_HARDEN
29828+ DILUTE_CONST_SEQUENCE(K, randkey);
29829+ // imul rax, rcx
29830+ EMIT4(0x48, 0x0f, 0xaf, 0xc1);
29831+#else
29832 EMIT3(0x48, 0x69, 0xc0); /* imul imm32,%rax,%rax */
29833 EMIT(K, 4);
29834+#endif
29835 EMIT4(0x48, 0xc1, 0xe8, 0x20); /* shr $0x20,%rax */
29836 break;
29837 case BPF_S_ALU_AND_X:
29838@@ -602,8 +723,7 @@ common_load_ind: seen |= SEEN_DATAREF | SEEN_XREG;
29839 if (is_imm8(K)) {
29840 EMIT3(0x8d, 0x73, K); /* lea imm8(%rbx), %esi */
29841 } else {
29842- EMIT2(0x8d, 0xb3); /* lea imm32(%rbx),%esi */
29843- EMIT(K, 4);
29844+ EMIT2_off32(0x8d, 0xb3, K); /* lea imm32(%rbx),%esi */
29845 }
29846 } else {
29847 EMIT2(0x89,0xde); /* mov %ebx,%esi */
29848@@ -686,17 +806,18 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
29849 break;
29850 default:
29851 /* hmm, too complex filter, give up with jit compiler */
29852- goto out;
29853+ goto error;
29854 }
29855 ilen = prog - temp;
29856 if (image) {
29857 if (unlikely(proglen + ilen > oldproglen)) {
29858 pr_err("bpb_jit_compile fatal error\n");
29859- kfree(addrs);
29860- module_free(NULL, image);
29861- return;
29862+ module_free_exec(NULL, image);
29863+ goto error;
29864 }
29865+ pax_open_kernel();
29866 memcpy(image + proglen, temp, ilen);
29867+ pax_close_kernel();
29868 }
29869 proglen += ilen;
29870 addrs[i] = proglen;
29871@@ -717,11 +838,9 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
29872 break;
29873 }
29874 if (proglen == oldproglen) {
29875- image = module_alloc(max_t(unsigned int,
29876- proglen,
29877- sizeof(struct work_struct)));
29878+ image = module_alloc_exec(proglen);
29879 if (!image)
29880- goto out;
29881+ goto error;
29882 }
29883 oldproglen = proglen;
29884 }
29885@@ -732,7 +851,10 @@ cond_branch: f_offset = addrs[i + filter[i].jf] - addrs[i];
29886 if (image) {
29887 bpf_flush_icache(image, image + proglen);
29888 fp->bpf_func = (void *)image;
29889- }
29890+ } else
29891+error:
29892+ kfree(fp->work);
29893+
29894 out:
29895 kfree(addrs);
29896 return;
29897@@ -740,18 +862,20 @@ out:
29898
29899 static void jit_free_defer(struct work_struct *arg)
29900 {
29901- module_free(NULL, arg);
29902+ module_free_exec(NULL, ((struct bpf_jit_work *)arg)->image);
29903+ kfree(arg);
29904 }
29905
29906 /* run from softirq, we must use a work_struct to call
29907- * module_free() from process context
29908+ * module_free_exec() from process context
29909 */
29910 void bpf_jit_free(struct sk_filter *fp)
29911 {
29912 if (fp->bpf_func != sk_run_filter) {
29913- struct work_struct *work = (struct work_struct *)fp->bpf_func;
29914+ struct work_struct *work = &fp->work->work;
29915
29916 INIT_WORK(work, jit_free_defer);
29917+ fp->work->image = fp->bpf_func;
29918 schedule_work(work);
29919 }
29920 }
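
[note] GRKERNSEC_JIT_HARDEN's central trick above is DILUTE_CONST_SEQUENCE: an attacker who controls a filter's 32-bit immediates must never see them byte-for-byte in executable JIT output (the classic JIT-spray vector), so each immediate is split into a random key and key^imm and recombined at run time. The identity it relies on, in plain C:

    #include <assert.h>
    #include <stdint.h>

    /* never emit imm verbatim: the instruction stream only ever contains
     * randkey and randkey^imm; the XOR at run time reconstructs imm */
    static uint32_t diluted_const(uint32_t imm, uint32_t randkey)
    {
            uint32_t ecx = randkey;        /* mov ecx, randkey       */
            ecx ^= randkey ^ imm;          /* xor ecx, randkey ^ imm */
            return ecx;                    /* == imm                 */
    }

    int main(void)
    {
            assert(diluted_const(0xdeadbeef, 0x12345678u) == 0xdeadbeef);
            return 0;
    }
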
29921diff --git a/arch/x86/oprofile/backtrace.c b/arch/x86/oprofile/backtrace.c
29922index d6aa6e8..266395a 100644
29923--- a/arch/x86/oprofile/backtrace.c
29924+++ b/arch/x86/oprofile/backtrace.c
29925@@ -46,11 +46,11 @@ dump_user_backtrace_32(struct stack_frame_ia32 *head)
29926 struct stack_frame_ia32 *fp;
29927 unsigned long bytes;
29928
29929- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
29930+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
29931 if (bytes != sizeof(bufhead))
29932 return NULL;
29933
29934- fp = (struct stack_frame_ia32 *) compat_ptr(bufhead[0].next_frame);
29935+ fp = (struct stack_frame_ia32 __force_kernel *) compat_ptr(bufhead[0].next_frame);
29936
29937 oprofile_add_trace(bufhead[0].return_address);
29938
29939@@ -92,7 +92,7 @@ static struct stack_frame *dump_user_backtrace(struct stack_frame *head)
29940 struct stack_frame bufhead[2];
29941 unsigned long bytes;
29942
29943- bytes = copy_from_user_nmi(bufhead, head, sizeof(bufhead));
29944+ bytes = copy_from_user_nmi(bufhead, (const char __force_user *)head, sizeof(bufhead));
29945 if (bytes != sizeof(bufhead))
29946 return NULL;
29947
29948@@ -111,7 +111,7 @@ x86_backtrace(struct pt_regs * const regs, unsigned int depth)
29949 {
29950 struct stack_frame *head = (struct stack_frame *)frame_pointer(regs);
29951
29952- if (!user_mode_vm(regs)) {
29953+ if (!user_mode(regs)) {
29954 unsigned long stack = kernel_stack_pointer(regs);
29955 if (depth)
29956 dump_trace(NULL, regs, (unsigned long *)stack, 0,
29957diff --git a/arch/x86/oprofile/nmi_int.c b/arch/x86/oprofile/nmi_int.c
29958index 48768df..ba9143c 100644
29959--- a/arch/x86/oprofile/nmi_int.c
29960+++ b/arch/x86/oprofile/nmi_int.c
29961@@ -23,6 +23,7 @@
29962 #include <asm/nmi.h>
29963 #include <asm/msr.h>
29964 #include <asm/apic.h>
29965+#include <asm/pgtable.h>
29966
29967 #include "op_counter.h"
29968 #include "op_x86_model.h"
29969@@ -774,8 +775,11 @@ int __init op_nmi_init(struct oprofile_operations *ops)
29970 if (ret)
29971 return ret;
29972
29973- if (!model->num_virt_counters)
29974- model->num_virt_counters = model->num_counters;
29975+ if (!model->num_virt_counters) {
29976+ pax_open_kernel();
29977+ *(unsigned int *)&model->num_virt_counters = model->num_counters;
29978+ pax_close_kernel();
29979+ }
29980
29981 mux_init(ops);
29982
29983diff --git a/arch/x86/oprofile/op_model_amd.c b/arch/x86/oprofile/op_model_amd.c
29984index b2b9443..be58856 100644
29985--- a/arch/x86/oprofile/op_model_amd.c
29986+++ b/arch/x86/oprofile/op_model_amd.c
29987@@ -519,9 +519,11 @@ static int op_amd_init(struct oprofile_operations *ops)
29988 num_counters = AMD64_NUM_COUNTERS;
29989 }
29990
29991- op_amd_spec.num_counters = num_counters;
29992- op_amd_spec.num_controls = num_counters;
29993- op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
29994+ pax_open_kernel();
29995+ *(unsigned int *)&op_amd_spec.num_counters = num_counters;
29996+ *(unsigned int *)&op_amd_spec.num_controls = num_counters;
29997+ *(unsigned int *)&op_amd_spec.num_virt_counters = max(num_counters, NUM_VIRT_COUNTERS);
29998+ pax_close_kernel();
29999
30000 return 0;
30001 }
30002diff --git a/arch/x86/oprofile/op_model_ppro.c b/arch/x86/oprofile/op_model_ppro.c
30003index d90528e..0127e2b 100644
30004--- a/arch/x86/oprofile/op_model_ppro.c
30005+++ b/arch/x86/oprofile/op_model_ppro.c
30006@@ -19,6 +19,7 @@
30007 #include <asm/msr.h>
30008 #include <asm/apic.h>
30009 #include <asm/nmi.h>
30010+#include <asm/pgtable.h>
30011
30012 #include "op_x86_model.h"
30013 #include "op_counter.h"
30014@@ -221,8 +222,10 @@ static void arch_perfmon_setup_counters(void)
30015
30016 num_counters = min((int)eax.split.num_counters, OP_MAX_COUNTER);
30017
30018- op_arch_perfmon_spec.num_counters = num_counters;
30019- op_arch_perfmon_spec.num_controls = num_counters;
30020+ pax_open_kernel();
30021+ *(unsigned int *)&op_arch_perfmon_spec.num_counters = num_counters;
30022+ *(unsigned int *)&op_arch_perfmon_spec.num_controls = num_counters;
30023+ pax_close_kernel();
30024 }
30025
30026 static int arch_perfmon_init(struct oprofile_operations *ignore)
30027diff --git a/arch/x86/oprofile/op_x86_model.h b/arch/x86/oprofile/op_x86_model.h
30028index 71e8a67..6a313bb 100644
30029--- a/arch/x86/oprofile/op_x86_model.h
30030+++ b/arch/x86/oprofile/op_x86_model.h
30031@@ -52,7 +52,7 @@ struct op_x86_model_spec {
30032 void (*switch_ctrl)(struct op_x86_model_spec const *model,
30033 struct op_msrs const * const msrs);
30034 #endif
30035-};
30036+} __do_const;
30037
30038 struct op_counter_config;
30039
30040diff --git a/arch/x86/pci/amd_bus.c b/arch/x86/pci/amd_bus.c
30041index e9e6ed5..e47ae67 100644
30042--- a/arch/x86/pci/amd_bus.c
30043+++ b/arch/x86/pci/amd_bus.c
30044@@ -337,7 +337,7 @@ static int __cpuinit amd_cpu_notify(struct notifier_block *self,
30045 return NOTIFY_OK;
30046 }
30047
30048-static struct notifier_block __cpuinitdata amd_cpu_notifier = {
30049+static struct notifier_block amd_cpu_notifier = {
30050 .notifier_call = amd_cpu_notify,
30051 };
30052
30053diff --git a/arch/x86/pci/irq.c b/arch/x86/pci/irq.c
30054index 372e9b8..e775a6c 100644
30055--- a/arch/x86/pci/irq.c
30056+++ b/arch/x86/pci/irq.c
30057@@ -50,7 +50,7 @@ struct irq_router {
30058 struct irq_router_handler {
30059 u16 vendor;
30060 int (*probe)(struct irq_router *r, struct pci_dev *router, u16 device);
30061-};
30062+} __do_const;
30063
30064 int (*pcibios_enable_irq)(struct pci_dev *dev) = pirq_enable_irq;
30065 void (*pcibios_disable_irq)(struct pci_dev *dev) = NULL;
30066@@ -794,7 +794,7 @@ static __init int pico_router_probe(struct irq_router *r, struct pci_dev *router
30067 return 0;
30068 }
30069
30070-static __initdata struct irq_router_handler pirq_routers[] = {
30071+static __initconst const struct irq_router_handler pirq_routers[] = {
30072 { PCI_VENDOR_ID_INTEL, intel_router_probe },
30073 { PCI_VENDOR_ID_AL, ali_router_probe },
30074 { PCI_VENDOR_ID_ITE, ite_router_probe },
30075@@ -821,7 +821,7 @@ static struct pci_dev *pirq_router_dev;
30076 static void __init pirq_find_router(struct irq_router *r)
30077 {
30078 struct irq_routing_table *rt = pirq_table;
30079- struct irq_router_handler *h;
30080+ const struct irq_router_handler *h;
30081
30082 #ifdef CONFIG_PCI_BIOS
30083 if (!rt->signature) {
30084@@ -1094,7 +1094,7 @@ static int __init fix_acer_tm360_irqrouting(const struct dmi_system_id *d)
30085 return 0;
30086 }
30087
30088-static struct dmi_system_id __initdata pciirq_dmi_table[] = {
30089+static const struct dmi_system_id __initconst pciirq_dmi_table[] = {
30090 {
30091 .callback = fix_broken_hp_bios_irq9,
30092 .ident = "HP Pavilion N5400 Series Laptop",
30093diff --git a/arch/x86/pci/mrst.c b/arch/x86/pci/mrst.c
30094index 6eb18c4..20d83de 100644
30095--- a/arch/x86/pci/mrst.c
30096+++ b/arch/x86/pci/mrst.c
30097@@ -238,7 +238,9 @@ int __init pci_mrst_init(void)
30098 printk(KERN_INFO "Intel MID platform detected, using MID PCI ops\n");
30099 pci_mmcfg_late_init();
30100 pcibios_enable_irq = mrst_pci_irq_enable;
30101- pci_root_ops = pci_mrst_ops;
30102+ pax_open_kernel();
30103+ memcpy((void *)&pci_root_ops, &pci_mrst_ops, sizeof(pci_mrst_ops));
30104+ pax_close_kernel();
30105 pci_soc_mode = 1;
30106 /* Continue with standard init */
30107 return 1;
30108diff --git a/arch/x86/pci/pcbios.c b/arch/x86/pci/pcbios.c
30109index c77b24a..c979855 100644
30110--- a/arch/x86/pci/pcbios.c
30111+++ b/arch/x86/pci/pcbios.c
30112@@ -79,7 +79,7 @@ union bios32 {
30113 static struct {
30114 unsigned long address;
30115 unsigned short segment;
30116-} bios32_indirect = { 0, __KERNEL_CS };
30117+} bios32_indirect __read_only = { 0, __PCIBIOS_CS };
30118
30119 /*
30120 * Returns the entry point for the given service, NULL on error
30121@@ -92,37 +92,80 @@ static unsigned long bios32_service(unsigned long service)
30122 unsigned long length; /* %ecx */
30123 unsigned long entry; /* %edx */
30124 unsigned long flags;
30125+ struct desc_struct d, *gdt;
30126
30127 local_irq_save(flags);
30128- __asm__("lcall *(%%edi); cld"
30129+
30130+ gdt = get_cpu_gdt_table(smp_processor_id());
30131+
30132+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x9B, 0xC);
30133+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
30134+ pack_descriptor(&d, 0UL, 0xFFFFFUL, 0x93, 0xC);
30135+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
30136+
30137+ __asm__("movw %w7, %%ds; lcall *(%%edi); push %%ss; pop %%ds; cld"
30138 : "=a" (return_code),
30139 "=b" (address),
30140 "=c" (length),
30141 "=d" (entry)
30142 : "0" (service),
30143 "1" (0),
30144- "D" (&bios32_indirect));
30145+ "D" (&bios32_indirect),
30146+ "r"(__PCIBIOS_DS)
30147+ : "memory");
30148+
30149+ pax_open_kernel();
30150+ gdt[GDT_ENTRY_PCIBIOS_CS].a = 0;
30151+ gdt[GDT_ENTRY_PCIBIOS_CS].b = 0;
30152+ gdt[GDT_ENTRY_PCIBIOS_DS].a = 0;
30153+ gdt[GDT_ENTRY_PCIBIOS_DS].b = 0;
30154+ pax_close_kernel();
30155+
30156 local_irq_restore(flags);
30157
30158 switch (return_code) {
30159- case 0:
30160- return address + entry;
30161- case 0x80: /* Not present */
30162- printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
30163- return 0;
30164- default: /* Shouldn't happen */
30165- printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
30166- service, return_code);
30167+ case 0: {
30168+ int cpu;
30169+ unsigned char flags;
30170+
30171+ printk(KERN_INFO "bios32_service: base:%08lx length:%08lx entry:%08lx\n", address, length, entry);
30172+ if (address >= 0xFFFF0 || length > 0x100000 - address || length <= entry) {
30173+ printk(KERN_WARNING "bios32_service: not valid\n");
30174 return 0;
30175+ }
30176+ address = address + PAGE_OFFSET;
30177+ length += 16UL; /* some BIOSs underreport this... */
30178+ flags = 4;
30179+ if (length >= 64*1024*1024) {
30180+ length >>= PAGE_SHIFT;
30181+ flags |= 8;
30182+ }
30183+
30184+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
30185+ gdt = get_cpu_gdt_table(cpu);
30186+ pack_descriptor(&d, address, length, 0x9b, flags);
30187+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_CS, &d, DESCTYPE_S);
30188+ pack_descriptor(&d, address, length, 0x93, flags);
30189+ write_gdt_entry(gdt, GDT_ENTRY_PCIBIOS_DS, &d, DESCTYPE_S);
30190+ }
30191+ return entry;
30192+ }
30193+ case 0x80: /* Not present */
30194+ printk(KERN_WARNING "bios32_service(0x%lx): not present\n", service);
30195+ return 0;
30196+ default: /* Shouldn't happen */
30197+ printk(KERN_WARNING "bios32_service(0x%lx): returned 0x%x -- BIOS bug!\n",
30198+ service, return_code);
30199+ return 0;
30200 }
30201 }
30202
30203 static struct {
30204 unsigned long address;
30205 unsigned short segment;
30206-} pci_indirect = { 0, __KERNEL_CS };
30207+} pci_indirect __read_only = { 0, __PCIBIOS_CS };
30208
30209-static int pci_bios_present;
30210+static int pci_bios_present __read_only;
30211
30212 static int check_pcibios(void)
30213 {
30214@@ -131,11 +174,13 @@ static int check_pcibios(void)
30215 unsigned long flags, pcibios_entry;
30216
30217 if ((pcibios_entry = bios32_service(PCI_SERVICE))) {
30218- pci_indirect.address = pcibios_entry + PAGE_OFFSET;
30219+ pci_indirect.address = pcibios_entry;
30220
30221 local_irq_save(flags);
30222- __asm__(
30223- "lcall *(%%edi); cld\n\t"
30224+ __asm__("movw %w6, %%ds\n\t"
30225+ "lcall *%%ss:(%%edi); cld\n\t"
30226+ "push %%ss\n\t"
30227+ "pop %%ds\n\t"
30228 "jc 1f\n\t"
30229 "xor %%ah, %%ah\n"
30230 "1:"
30231@@ -144,7 +189,8 @@ static int check_pcibios(void)
30232 "=b" (ebx),
30233 "=c" (ecx)
30234 : "1" (PCIBIOS_PCI_BIOS_PRESENT),
30235- "D" (&pci_indirect)
30236+ "D" (&pci_indirect),
30237+ "r" (__PCIBIOS_DS)
30238 : "memory");
30239 local_irq_restore(flags);
30240
30241@@ -189,7 +235,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30242
30243 switch (len) {
30244 case 1:
30245- __asm__("lcall *(%%esi); cld\n\t"
30246+ __asm__("movw %w6, %%ds\n\t"
30247+ "lcall *%%ss:(%%esi); cld\n\t"
30248+ "push %%ss\n\t"
30249+ "pop %%ds\n\t"
30250 "jc 1f\n\t"
30251 "xor %%ah, %%ah\n"
30252 "1:"
30253@@ -198,7 +247,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30254 : "1" (PCIBIOS_READ_CONFIG_BYTE),
30255 "b" (bx),
30256 "D" ((long)reg),
30257- "S" (&pci_indirect));
30258+ "S" (&pci_indirect),
30259+ "r" (__PCIBIOS_DS));
30260 /*
30261 * Zero-extend the result beyond 8 bits, do not trust the
30262 * BIOS having done it:
30263@@ -206,7 +256,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30264 *value &= 0xff;
30265 break;
30266 case 2:
30267- __asm__("lcall *(%%esi); cld\n\t"
30268+ __asm__("movw %w6, %%ds\n\t"
30269+ "lcall *%%ss:(%%esi); cld\n\t"
30270+ "push %%ss\n\t"
30271+ "pop %%ds\n\t"
30272 "jc 1f\n\t"
30273 "xor %%ah, %%ah\n"
30274 "1:"
30275@@ -215,7 +268,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30276 : "1" (PCIBIOS_READ_CONFIG_WORD),
30277 "b" (bx),
30278 "D" ((long)reg),
30279- "S" (&pci_indirect));
30280+ "S" (&pci_indirect),
30281+ "r" (__PCIBIOS_DS));
30282 /*
30283 * Zero-extend the result beyond 16 bits, do not trust the
30284 * BIOS having done it:
30285@@ -223,7 +277,10 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30286 *value &= 0xffff;
30287 break;
30288 case 4:
30289- __asm__("lcall *(%%esi); cld\n\t"
30290+ __asm__("movw %w6, %%ds\n\t"
30291+ "lcall *%%ss:(%%esi); cld\n\t"
30292+ "push %%ss\n\t"
30293+ "pop %%ds\n\t"
30294 "jc 1f\n\t"
30295 "xor %%ah, %%ah\n"
30296 "1:"
30297@@ -232,7 +289,8 @@ static int pci_bios_read(unsigned int seg, unsigned int bus,
30298 : "1" (PCIBIOS_READ_CONFIG_DWORD),
30299 "b" (bx),
30300 "D" ((long)reg),
30301- "S" (&pci_indirect));
30302+ "S" (&pci_indirect),
30303+ "r" (__PCIBIOS_DS));
30304 break;
30305 }
30306
30307@@ -256,7 +314,10 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
30308
30309 switch (len) {
30310 case 1:
30311- __asm__("lcall *(%%esi); cld\n\t"
30312+ __asm__("movw %w6, %%ds\n\t"
30313+ "lcall *%%ss:(%%esi); cld\n\t"
30314+ "push %%ss\n\t"
30315+ "pop %%ds\n\t"
30316 "jc 1f\n\t"
30317 "xor %%ah, %%ah\n"
30318 "1:"
30319@@ -265,10 +326,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
30320 "c" (value),
30321 "b" (bx),
30322 "D" ((long)reg),
30323- "S" (&pci_indirect));
30324+ "S" (&pci_indirect),
30325+ "r" (__PCIBIOS_DS));
30326 break;
30327 case 2:
30328- __asm__("lcall *(%%esi); cld\n\t"
30329+ __asm__("movw %w6, %%ds\n\t"
30330+ "lcall *%%ss:(%%esi); cld\n\t"
30331+ "push %%ss\n\t"
30332+ "pop %%ds\n\t"
30333 "jc 1f\n\t"
30334 "xor %%ah, %%ah\n"
30335 "1:"
30336@@ -277,10 +342,14 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
30337 "c" (value),
30338 "b" (bx),
30339 "D" ((long)reg),
30340- "S" (&pci_indirect));
30341+ "S" (&pci_indirect),
30342+ "r" (__PCIBIOS_DS));
30343 break;
30344 case 4:
30345- __asm__("lcall *(%%esi); cld\n\t"
30346+ __asm__("movw %w6, %%ds\n\t"
30347+ "lcall *%%ss:(%%esi); cld\n\t"
30348+ "push %%ss\n\t"
30349+ "pop %%ds\n\t"
30350 "jc 1f\n\t"
30351 "xor %%ah, %%ah\n"
30352 "1:"
30353@@ -289,7 +358,8 @@ static int pci_bios_write(unsigned int seg, unsigned int bus,
30354 "c" (value),
30355 "b" (bx),
30356 "D" ((long)reg),
30357- "S" (&pci_indirect));
30358+ "S" (&pci_indirect),
30359+ "r" (__PCIBIOS_DS));
30360 break;
30361 }
30362
30363@@ -394,10 +464,13 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
30364
30365 DBG("PCI: Fetching IRQ routing table... ");
30366 __asm__("push %%es\n\t"
30367+ "movw %w8, %%ds\n\t"
30368 "push %%ds\n\t"
30369 "pop %%es\n\t"
30370- "lcall *(%%esi); cld\n\t"
30371+ "lcall *%%ss:(%%esi); cld\n\t"
30372 "pop %%es\n\t"
30373+ "push %%ss\n\t"
30374+ "pop %%ds\n"
30375 "jc 1f\n\t"
30376 "xor %%ah, %%ah\n"
30377 "1:"
30378@@ -408,7 +481,8 @@ struct irq_routing_table * pcibios_get_irq_routing_table(void)
30379 "1" (0),
30380 "D" ((long) &opt),
30381 "S" (&pci_indirect),
30382- "m" (opt)
30383+ "m" (opt),
30384+ "r" (__PCIBIOS_DS)
30385 : "memory");
30386 DBG("OK ret=%d, size=%d, map=%x\n", ret, opt.size, map);
30387 if (ret & 0xff00)
30388@@ -432,7 +506,10 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
30389 {
30390 int ret;
30391
30392- __asm__("lcall *(%%esi); cld\n\t"
30393+ __asm__("movw %w5, %%ds\n\t"
30394+ "lcall *%%ss:(%%esi); cld\n\t"
30395+ "push %%ss\n\t"
30396+ "pop %%ds\n"
30397 "jc 1f\n\t"
30398 "xor %%ah, %%ah\n"
30399 "1:"
30400@@ -440,7 +517,8 @@ int pcibios_set_irq_routing(struct pci_dev *dev, int pin, int irq)
30401 : "0" (PCIBIOS_SET_PCI_HW_INT),
30402 "b" ((dev->bus->number << 8) | dev->devfn),
30403 "c" ((irq << 8) | (pin + 10)),
30404- "S" (&pci_indirect));
30405+ "S" (&pci_indirect),
30406+ "r" (__PCIBIOS_DS));
30407 return !(ret & 0xff00);
30408 }
30409 EXPORT_SYMBOL(pcibios_set_irq_routing);
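
[note] The theme of this file: instead of lcall-ing into the 32-bit PCI BIOS through the flat __KERNEL_CS/__KERNEL_DS, dedicated __PCIBIOS_CS/__PCIBIOS_DS descriptors are built whose base and limit cover only the BIOS service area, so a buggy or hostile BIOS access outside its own range faults instead of scribbling over the kernel. A sketch of the sizing step (the hunk checks at 64 MB, well above the architectural bound; the sketch uses the tight 20-bit byte-limit rule):

    #define PAGE_SHIFT 12

    /* GDT limits are 20 bits; ranges too big to express in bytes switch to
     * 4 KiB granularity, mirroring the flags logic in bios32_service() */
    static void size_segment(unsigned long length,
                             unsigned long *limit, unsigned char *flags)
    {
            *flags = 0x4;                 /* 32-bit default operand size */
            if (length > (1UL << 20)) {   /* won't fit a byte-granular limit */
                    length >>= PAGE_SHIFT;
                    *flags |= 0x8;        /* granularity = pages */
            }
            *limit = length - 1;          /* limit field is inclusive */
    }
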
30410diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
30411index 40e4469..0592924 100644
30412--- a/arch/x86/platform/efi/efi_32.c
30413+++ b/arch/x86/platform/efi/efi_32.c
30414@@ -44,11 +44,22 @@ void efi_call_phys_prelog(void)
30415 {
30416 struct desc_ptr gdt_descr;
30417
30418+#ifdef CONFIG_PAX_KERNEXEC
30419+ struct desc_struct d;
30420+#endif
30421+
30422 local_irq_save(efi_rt_eflags);
30423
30424 load_cr3(initial_page_table);
30425 __flush_tlb_all();
30426
30427+#ifdef CONFIG_PAX_KERNEXEC
30428+ pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
30429+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
30430+ pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
30431+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
30432+#endif
30433+
30434 gdt_descr.address = __pa(get_cpu_gdt_table(0));
30435 gdt_descr.size = GDT_SIZE - 1;
30436 load_gdt(&gdt_descr);
30437@@ -58,11 +69,24 @@ void efi_call_phys_epilog(void)
30438 {
30439 struct desc_ptr gdt_descr;
30440
30441+#ifdef CONFIG_PAX_KERNEXEC
30442+ struct desc_struct d;
30443+
30444+ memset(&d, 0, sizeof d);
30445+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
30446+ write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
30447+#endif
30448+
30449 gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
30450 gdt_descr.size = GDT_SIZE - 1;
30451 load_gdt(&gdt_descr);
30452
30453+#ifdef CONFIG_PAX_PER_CPU_PGD
30454+ load_cr3(get_cpu_pgd(smp_processor_id()));
30455+#else
30456 load_cr3(swapper_pg_dir);
30457+#endif
30458+
30459 __flush_tlb_all();
30460
30461 local_irq_restore(efi_rt_eflags);
30462diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
30463index 39a0e7f1..ecc2f1e 100644
30464--- a/arch/x86/platform/efi/efi_64.c
30465+++ b/arch/x86/platform/efi/efi_64.c
30466@@ -76,6 +76,11 @@ void __init efi_call_phys_prelog(void)
30467 vaddress = (unsigned long)__va(pgd * PGDIR_SIZE);
30468 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), *pgd_offset_k(vaddress));
30469 }
30470+
30471+#ifdef CONFIG_PAX_PER_CPU_PGD
30472+ load_cr3(swapper_pg_dir);
30473+#endif
30474+
30475 __flush_tlb_all();
30476 }
30477
30478@@ -89,6 +94,11 @@ void __init efi_call_phys_epilog(void)
30479 for (pgd = 0; pgd < n_pgds; pgd++)
30480 set_pgd(pgd_offset_k(pgd * PGDIR_SIZE), save_pgd[pgd]);
30481 kfree(save_pgd);
30482+
30483+#ifdef CONFIG_PAX_PER_CPU_PGD
30484+ load_cr3(get_cpu_pgd(smp_processor_id()));
30485+#endif
30486+
30487 __flush_tlb_all();
30488 local_irq_restore(efi_flags);
30489 early_code_mapping_set_exec(0);
30490diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
30491index fbe66e6..eae5e38 100644
30492--- a/arch/x86/platform/efi/efi_stub_32.S
30493+++ b/arch/x86/platform/efi/efi_stub_32.S
30494@@ -6,7 +6,9 @@
30495 */
30496
30497 #include <linux/linkage.h>
30498+#include <linux/init.h>
30499 #include <asm/page_types.h>
30500+#include <asm/segment.h>
30501
30502 /*
30503 * efi_call_phys(void *, ...) is a function with variable parameters.
30504@@ -20,7 +22,7 @@
30505 * service functions will comply with gcc calling convention, too.
30506 */
30507
30508-.text
30509+__INIT
30510 ENTRY(efi_call_phys)
30511 /*
30512 * 0. The function can only be called in Linux kernel. So CS has been
30513@@ -36,10 +38,24 @@ ENTRY(efi_call_phys)
30514 * The mapping of lower virtual memory has been created in prelog and
30515 * epilog.
30516 */
30517- movl $1f, %edx
30518- subl $__PAGE_OFFSET, %edx
30519- jmp *%edx
30520+#ifdef CONFIG_PAX_KERNEXEC
30521+ movl $(__KERNEXEC_EFI_DS), %edx
30522+ mov %edx, %ds
30523+ mov %edx, %es
30524+ mov %edx, %ss
30525+ addl $2f,(1f)
30526+ ljmp *(1f)
30527+
30528+__INITDATA
30529+1: .long __LOAD_PHYSICAL_ADDR, __KERNEXEC_EFI_CS
30530+.previous
30531+
30532+2:
30533+ subl $2b,(1b)
30534+#else
30535+ jmp 1f-__PAGE_OFFSET
30536 1:
30537+#endif
30538
30539 /*
30540 * 2. Now on the top of stack is the return
30541@@ -47,14 +63,8 @@ ENTRY(efi_call_phys)
30542 * parameter 2, ..., param n. To make things easy, we save the return
30543 * address of efi_call_phys in a global variable.
30544 */
30545- popl %edx
30546- movl %edx, saved_return_addr
30547- /* get the function pointer into ECX*/
30548- popl %ecx
30549- movl %ecx, efi_rt_function_ptr
30550- movl $2f, %edx
30551- subl $__PAGE_OFFSET, %edx
30552- pushl %edx
30553+ popl (saved_return_addr)
30554+ popl (efi_rt_function_ptr)
30555
30556 /*
30557 * 3. Clear PG bit in %CR0.
30558@@ -73,9 +83,8 @@ ENTRY(efi_call_phys)
30559 /*
30560 * 5. Call the physical function.
30561 */
30562- jmp *%ecx
30563+ call *(efi_rt_function_ptr-__PAGE_OFFSET)
30564
30565-2:
30566 /*
30567 * 6. After EFI runtime service returns, control will return to
30568 * following instruction. We'd better readjust stack pointer first.
30569@@ -88,35 +97,36 @@ ENTRY(efi_call_phys)
30570 movl %cr0, %edx
30571 orl $0x80000000, %edx
30572 movl %edx, %cr0
30573- jmp 1f
30574-1:
30575+
30576 /*
30577 * 8. Now restore the virtual mode from flat mode by
30578 * adding EIP with PAGE_OFFSET.
30579 */
30580- movl $1f, %edx
30581- jmp *%edx
30582+#ifdef CONFIG_PAX_KERNEXEC
30583+ movl $(__KERNEL_DS), %edx
30584+ mov %edx, %ds
30585+ mov %edx, %es
30586+ mov %edx, %ss
30587+ ljmp $(__KERNEL_CS),$1f
30588+#else
30589+ jmp 1f+__PAGE_OFFSET
30590+#endif
30591 1:
30592
30593 /*
30594 * 9. Balance the stack. And because EAX contain the return value,
30595 * we'd better not clobber it.
30596 */
30597- leal efi_rt_function_ptr, %edx
30598- movl (%edx), %ecx
30599- pushl %ecx
30600+ pushl (efi_rt_function_ptr)
30601
30602 /*
30603- * 10. Push the saved return address onto the stack and return.
30604+ * 10. Return to the saved return address.
30605 */
30606- leal saved_return_addr, %edx
30607- movl (%edx), %ecx
30608- pushl %ecx
30609- ret
30610+ jmpl *(saved_return_addr)
30611 ENDPROC(efi_call_phys)
30612 .previous
30613
30614-.data
30615+__INITDATA
30616 saved_return_addr:
30617 .long 0
30618 efi_rt_function_ptr:
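
[editor's note] Beyond the KERNEXEC segment switching, this hunk moves the stub from .text/.data into __INIT/__INITDATA: efi_call_phys() only runs during boot, and init sections are discarded afterwards, so the physical-mode code and its saved-pointer slots never stay resident. Section placement itself is ordinary toolchain machinery; a userspace stand-in (hypothetical section names, GCC/Clang attribute syntax):

#include <stdio.h>

/* Stand-ins for the kernel's __INIT/__INITDATA: the linker groups these
 * symbols into dedicated sections, which the kernel frees after boot. */
__attribute__((section(".init.demo.text"), used))
static void init_only_helper(void) { }

__attribute__((section(".init.demo.data"), used))
static long saved_return_addr;

int main(void)
{
    printf("init-section data lives at %p\n", (void *)&saved_return_addr);
    return 0;
}
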
30619diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
30620index 4c07cca..2c8427d 100644
30621--- a/arch/x86/platform/efi/efi_stub_64.S
30622+++ b/arch/x86/platform/efi/efi_stub_64.S
30623@@ -7,6 +7,7 @@
30624 */
30625
30626 #include <linux/linkage.h>
30627+#include <asm/alternative-asm.h>
30628
30629 #define SAVE_XMM \
30630 mov %rsp, %rax; \
30631@@ -40,6 +41,7 @@ ENTRY(efi_call0)
30632 call *%rdi
30633 addq $32, %rsp
30634 RESTORE_XMM
30635+ pax_force_retaddr 0, 1
30636 ret
30637 ENDPROC(efi_call0)
30638
30639@@ -50,6 +52,7 @@ ENTRY(efi_call1)
30640 call *%rdi
30641 addq $32, %rsp
30642 RESTORE_XMM
30643+ pax_force_retaddr 0, 1
30644 ret
30645 ENDPROC(efi_call1)
30646
30647@@ -60,6 +63,7 @@ ENTRY(efi_call2)
30648 call *%rdi
30649 addq $32, %rsp
30650 RESTORE_XMM
30651+ pax_force_retaddr 0, 1
30652 ret
30653 ENDPROC(efi_call2)
30654
30655@@ -71,6 +75,7 @@ ENTRY(efi_call3)
30656 call *%rdi
30657 addq $32, %rsp
30658 RESTORE_XMM
30659+ pax_force_retaddr 0, 1
30660 ret
30661 ENDPROC(efi_call3)
30662
30663@@ -83,6 +88,7 @@ ENTRY(efi_call4)
30664 call *%rdi
30665 addq $32, %rsp
30666 RESTORE_XMM
30667+ pax_force_retaddr 0, 1
30668 ret
30669 ENDPROC(efi_call4)
30670
30671@@ -96,6 +102,7 @@ ENTRY(efi_call5)
30672 call *%rdi
30673 addq $48, %rsp
30674 RESTORE_XMM
30675+ pax_force_retaddr 0, 1
30676 ret
30677 ENDPROC(efi_call5)
30678
30679@@ -112,5 +119,6 @@ ENTRY(efi_call6)
30680 call *%rdi
30681 addq $48, %rsp
30682 RESTORE_XMM
30683+ pax_force_retaddr 0, 1
30684 ret
30685 ENDPROC(efi_call6)
30686diff --git a/arch/x86/platform/mrst/mrst.c b/arch/x86/platform/mrst/mrst.c
30687index a0a0a43..a48e233 100644
30688--- a/arch/x86/platform/mrst/mrst.c
30689+++ b/arch/x86/platform/mrst/mrst.c
30690@@ -78,13 +78,15 @@ struct sfi_rtc_table_entry sfi_mrtc_array[SFI_MRTC_MAX];
30691 EXPORT_SYMBOL_GPL(sfi_mrtc_array);
30692 int sfi_mrtc_num;
30693
30694-static void mrst_power_off(void)
30695+static __noreturn void mrst_power_off(void)
30696 {
30697+ BUG();
30698 }
30699
30700-static void mrst_reboot(void)
30701+static __noreturn void mrst_reboot(void)
30702 {
30703 intel_scu_ipc_simple_command(IPCMSG_COLD_BOOT, 0);
30704+ BUG();
30705 }
30706
30707 /* parse all the mtimer info to a static mtimer array */
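
[editor's note] The mrst hunk is part of a sweep marking power-off/reboot handlers __noreturn; the added BUG() makes the annotation true even when the SCU IPC command fails to reset the machine. The contract is the same as C11's _Noreturn, as in this runnable sketch (names hypothetical):

#include <stdio.h>
#include <stdlib.h>

/* The body must guarantee control never comes back -- here via abort(),
 * which plays the role BUG() plays in the hunk above. */
static _Noreturn void power_off_failed(const char *why)
{
    fprintf(stderr, "power off failed: %s\n", why);
    abort();
}

int main(void)
{
    power_off_failed("ipc command rejected");
}
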
30708diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
30709index d6ee929..3637cb5 100644
30710--- a/arch/x86/platform/olpc/olpc_dt.c
30711+++ b/arch/x86/platform/olpc/olpc_dt.c
30712@@ -156,7 +156,7 @@ void * __init prom_early_alloc(unsigned long size)
30713 return res;
30714 }
30715
30716-static struct of_pdt_ops prom_olpc_ops __initdata = {
30717+static struct of_pdt_ops prom_olpc_ops __initconst = {
30718 .nextprop = olpc_dt_nextprop,
30719 .getproplen = olpc_dt_getproplen,
30720 .getproperty = olpc_dt_getproperty,
30721diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c
30722index 1cf5b30..fd45732 100644
30723--- a/arch/x86/power/cpu.c
30724+++ b/arch/x86/power/cpu.c
30725@@ -137,11 +137,8 @@ static void do_fpu_end(void)
30726 static void fix_processor_context(void)
30727 {
30728 int cpu = smp_processor_id();
30729- struct tss_struct *t = &per_cpu(init_tss, cpu);
30730-#ifdef CONFIG_X86_64
30731- struct desc_struct *desc = get_cpu_gdt_table(cpu);
30732- tss_desc tss;
30733-#endif
30734+ struct tss_struct *t = init_tss + cpu;
30735+
30736 set_tss_desc(cpu, t); /*
30737 * This just modifies memory; should not be
30738 * necessary. But... This is necessary, because
30739@@ -150,10 +147,6 @@ static void fix_processor_context(void)
30740 */
30741
30742 #ifdef CONFIG_X86_64
30743- memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc));
30744- tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */
30745- write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS);
30746-
30747 syscall_init(); /* This sets MSR_*STAR and related */
30748 #endif
30749 load_TR_desc(); /* This does ltr */
30750diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
30751index a44f457..9140171 100644
30752--- a/arch/x86/realmode/init.c
30753+++ b/arch/x86/realmode/init.c
30754@@ -70,7 +70,13 @@ void __init setup_real_mode(void)
30755 __va(real_mode_header->trampoline_header);
30756
30757 #ifdef CONFIG_X86_32
30758- trampoline_header->start = __pa_symbol(startup_32_smp);
30759+ trampoline_header->start = __pa_symbol(ktla_ktva(startup_32_smp));
30760+
30761+#ifdef CONFIG_PAX_KERNEXEC
30762+ trampoline_header->start -= LOAD_PHYSICAL_ADDR;
30763+#endif
30764+
30765+ trampoline_header->boot_cs = __BOOT_CS;
30766 trampoline_header->gdt_limit = __BOOT_DS + 7;
30767 trampoline_header->gdt_base = __pa_symbol(boot_gdt);
30768 #else
30769@@ -86,7 +92,7 @@ void __init setup_real_mode(void)
30770 *trampoline_cr4_features = read_cr4();
30771
30772 trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
30773- trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd;
30774+ trampoline_pgd[0] = init_level4_pgt[pgd_index(__PAGE_OFFSET)].pgd & ~_PAGE_NX;
30775 trampoline_pgd[511] = init_level4_pgt[511].pgd;
30776 #endif
30777 }
30778diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
30779index 8869287..d577672 100644
30780--- a/arch/x86/realmode/rm/Makefile
30781+++ b/arch/x86/realmode/rm/Makefile
30782@@ -78,5 +78,8 @@ KBUILD_CFLAGS := $(LINUXINCLUDE) -m32 -g -Os -D_SETUP -D__KERNEL__ -D_WAKEUP \
30783 $(call cc-option, -fno-unit-at-a-time)) \
30784 $(call cc-option, -fno-stack-protector) \
30785 $(call cc-option, -mpreferred-stack-boundary=2)
30786+ifdef CONSTIFY_PLUGIN
30787+KBUILD_CFLAGS += -fplugin-arg-constify_plugin-no-constify
30788+endif
30789 KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
30790 GCOV_PROFILE := n
30791diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
30792index a28221d..93c40f1 100644
30793--- a/arch/x86/realmode/rm/header.S
30794+++ b/arch/x86/realmode/rm/header.S
30795@@ -30,7 +30,9 @@ GLOBAL(real_mode_header)
30796 #endif
30797 /* APM/BIOS reboot */
30798 .long pa_machine_real_restart_asm
30799-#ifdef CONFIG_X86_64
30800+#ifdef CONFIG_X86_32
30801+ .long __KERNEL_CS
30802+#else
30803 .long __KERNEL32_CS
30804 #endif
30805 END(real_mode_header)
30806diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
30807index c1b2791..f9e31c7 100644
30808--- a/arch/x86/realmode/rm/trampoline_32.S
30809+++ b/arch/x86/realmode/rm/trampoline_32.S
30810@@ -25,6 +25,12 @@
30811 #include <asm/page_types.h>
30812 #include "realmode.h"
30813
30814+#ifdef CONFIG_PAX_KERNEXEC
30815+#define ta(X) (X)
30816+#else
30817+#define ta(X) (pa_ ## X)
30818+#endif
30819+
30820 .text
30821 .code16
30822
30823@@ -39,8 +45,6 @@ ENTRY(trampoline_start)
30824
30825 cli # We should be safe anyway
30826
30827- movl tr_start, %eax # where we need to go
30828-
30829 movl $0xA5A5A5A5, trampoline_status
30830 # write marker for master knows we're running
30831
30832@@ -56,7 +60,7 @@ ENTRY(trampoline_start)
30833 movw $1, %dx # protected mode (PE) bit
30834 lmsw %dx # into protected mode
30835
30836- ljmpl $__BOOT_CS, $pa_startup_32
30837+ ljmpl *(trampoline_header)
30838
30839 .section ".text32","ax"
30840 .code32
30841@@ -67,7 +71,7 @@ ENTRY(startup_32) # note: also used from wakeup_asm.S
30842 .balign 8
30843 GLOBAL(trampoline_header)
30844 tr_start: .space 4
30845- tr_gdt_pad: .space 2
30846+ tr_boot_cs: .space 2
30847 tr_gdt: .space 6
30848 END(trampoline_header)
30849
30850diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
30851index bb360dc..3e5945f 100644
30852--- a/arch/x86/realmode/rm/trampoline_64.S
30853+++ b/arch/x86/realmode/rm/trampoline_64.S
30854@@ -107,7 +107,7 @@ ENTRY(startup_32)
30855 wrmsr
30856
30857 # Enable paging and in turn activate Long Mode
30858- movl $(X86_CR0_PG | X86_CR0_WP | X86_CR0_PE), %eax
30859+ movl $(X86_CR0_PG | X86_CR0_PE), %eax
30860 movl %eax, %cr0
30861
30862 /*
30863diff --git a/arch/x86/tools/Makefile b/arch/x86/tools/Makefile
30864index e812034..c747134 100644
30865--- a/arch/x86/tools/Makefile
30866+++ b/arch/x86/tools/Makefile
30867@@ -37,7 +37,7 @@ $(obj)/test_get_len.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/in
30868
30869 $(obj)/insn_sanity.o: $(srctree)/arch/x86/lib/insn.c $(srctree)/arch/x86/lib/inat.c $(srctree)/arch/x86/include/asm/inat_types.h $(srctree)/arch/x86/include/asm/inat.h $(srctree)/arch/x86/include/asm/insn.h $(objtree)/arch/x86/lib/inat-tables.c
30870
30871-HOST_EXTRACFLAGS += -I$(srctree)/tools/include
30872+HOST_EXTRACFLAGS += -I$(srctree)/tools/include -ggdb
30873 hostprogs-y += relocs
30874 relocs-objs := relocs_32.o relocs_64.o relocs_common.o
30875 relocs: $(obj)/relocs
30876diff --git a/arch/x86/tools/relocs.c b/arch/x86/tools/relocs.c
30877index f7bab68..b6d9886 100644
30878--- a/arch/x86/tools/relocs.c
30879+++ b/arch/x86/tools/relocs.c
30880@@ -1,5 +1,7 @@
30881 /* This is included from relocs_32/64.c */
30882
30883+#include "../../../include/generated/autoconf.h"
30884+
30885 #define ElfW(type) _ElfW(ELF_BITS, type)
30886 #define _ElfW(bits, type) __ElfW(bits, type)
30887 #define __ElfW(bits, type) Elf##bits##_##type
30888@@ -11,6 +13,7 @@
30889 #define Elf_Sym ElfW(Sym)
30890
30891 static Elf_Ehdr ehdr;
30892+static Elf_Phdr *phdr;
30893
30894 struct relocs {
30895 uint32_t *offset;
30896@@ -383,9 +386,39 @@ static void read_ehdr(FILE *fp)
30897 }
30898 }
30899
30900+static void read_phdrs(FILE *fp)
30901+{
30902+ unsigned int i;
30903+
30904+ phdr = calloc(ehdr.e_phnum, sizeof(Elf_Phdr));
30905+ if (!phdr) {
30906+ die("Unable to allocate %d program headers\n",
30907+ ehdr.e_phnum);
30908+ }
30909+ if (fseek(fp, ehdr.e_phoff, SEEK_SET) < 0) {
30910+ die("Seek to %d failed: %s\n",
30911+ ehdr.e_phoff, strerror(errno));
30912+ }
30913+ if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum) {
30914+ die("Cannot read ELF program headers: %s\n",
30915+ strerror(errno));
30916+ }
30917+ for(i = 0; i < ehdr.e_phnum; i++) {
30918+ phdr[i].p_type = elf_word_to_cpu(phdr[i].p_type);
30919+ phdr[i].p_offset = elf_off_to_cpu(phdr[i].p_offset);
30920+ phdr[i].p_vaddr = elf_addr_to_cpu(phdr[i].p_vaddr);
30921+ phdr[i].p_paddr = elf_addr_to_cpu(phdr[i].p_paddr);
30922+ phdr[i].p_filesz = elf_word_to_cpu(phdr[i].p_filesz);
30923+ phdr[i].p_memsz = elf_word_to_cpu(phdr[i].p_memsz);
30924+ phdr[i].p_flags = elf_word_to_cpu(phdr[i].p_flags);
30925+ phdr[i].p_align = elf_word_to_cpu(phdr[i].p_align);
30926+ }
30927+
30928+}
30929+
30930 static void read_shdrs(FILE *fp)
30931 {
30932- int i;
30933+ unsigned int i;
30934 Elf_Shdr shdr;
30935
30936 secs = calloc(ehdr.e_shnum, sizeof(struct section));
30937@@ -420,7 +453,7 @@ static void read_shdrs(FILE *fp)
30938
30939 static void read_strtabs(FILE *fp)
30940 {
30941- int i;
30942+ unsigned int i;
30943 for (i = 0; i < ehdr.e_shnum; i++) {
30944 struct section *sec = &secs[i];
30945 if (sec->shdr.sh_type != SHT_STRTAB) {
30946@@ -445,7 +478,7 @@ static void read_strtabs(FILE *fp)
30947
30948 static void read_symtabs(FILE *fp)
30949 {
30950- int i,j;
30951+ unsigned int i,j;
30952 for (i = 0; i < ehdr.e_shnum; i++) {
30953 struct section *sec = &secs[i];
30954 if (sec->shdr.sh_type != SHT_SYMTAB) {
30955@@ -476,9 +509,11 @@ static void read_symtabs(FILE *fp)
30956 }
30957
30958
30959-static void read_relocs(FILE *fp)
30960+static void read_relocs(FILE *fp, int use_real_mode)
30961 {
30962- int i,j;
30963+ unsigned int i,j;
30964+ uint32_t base;
30965+
30966 for (i = 0; i < ehdr.e_shnum; i++) {
30967 struct section *sec = &secs[i];
30968 if (sec->shdr.sh_type != SHT_REL_TYPE) {
30969@@ -498,9 +533,22 @@ static void read_relocs(FILE *fp)
30970 die("Cannot read symbol table: %s\n",
30971 strerror(errno));
30972 }
30973+ base = 0;
30974+
30975+#ifdef CONFIG_X86_32
30976+ for (j = 0; !use_real_mode && j < ehdr.e_phnum; j++) {
30977+ if (phdr[j].p_type != PT_LOAD )
30978+ continue;
30979+ if (secs[sec->shdr.sh_info].shdr.sh_offset < phdr[j].p_offset || secs[sec->shdr.sh_info].shdr.sh_offset >= phdr[j].p_offset + phdr[j].p_filesz)
30980+ continue;
30981+ base = CONFIG_PAGE_OFFSET + phdr[j].p_paddr - phdr[j].p_vaddr;
30982+ break;
30983+ }
30984+#endif
30985+
30986 for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
30987 Elf_Rel *rel = &sec->reltab[j];
30988- rel->r_offset = elf_addr_to_cpu(rel->r_offset);
30989+ rel->r_offset = elf_addr_to_cpu(rel->r_offset) + base;
30990 rel->r_info = elf_xword_to_cpu(rel->r_info);
30991 #if (SHT_REL_TYPE == SHT_RELA)
30992 rel->r_addend = elf_xword_to_cpu(rel->r_addend);
30993@@ -512,7 +560,7 @@ static void read_relocs(FILE *fp)
30994
30995 static void print_absolute_symbols(void)
30996 {
30997- int i;
30998+ unsigned int i;
30999 const char *format;
31000
31001 if (ELF_BITS == 64)
31002@@ -525,7 +573,7 @@ static void print_absolute_symbols(void)
31003 for (i = 0; i < ehdr.e_shnum; i++) {
31004 struct section *sec = &secs[i];
31005 char *sym_strtab;
31006- int j;
31007+ unsigned int j;
31008
31009 if (sec->shdr.sh_type != SHT_SYMTAB) {
31010 continue;
31011@@ -552,7 +600,7 @@ static void print_absolute_symbols(void)
31012
31013 static void print_absolute_relocs(void)
31014 {
31015- int i, printed = 0;
31016+ unsigned int i, printed = 0;
31017 const char *format;
31018
31019 if (ELF_BITS == 64)
31020@@ -565,7 +613,7 @@ static void print_absolute_relocs(void)
31021 struct section *sec_applies, *sec_symtab;
31022 char *sym_strtab;
31023 Elf_Sym *sh_symtab;
31024- int j;
31025+ unsigned int j;
31026 if (sec->shdr.sh_type != SHT_REL_TYPE) {
31027 continue;
31028 }
31029@@ -642,13 +690,13 @@ static void add_reloc(struct relocs *r, uint32_t offset)
31030 static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
31031 Elf_Sym *sym, const char *symname))
31032 {
31033- int i;
31034+ unsigned int i;
31035 /* Walk through the relocations */
31036 for (i = 0; i < ehdr.e_shnum; i++) {
31037 char *sym_strtab;
31038 Elf_Sym *sh_symtab;
31039 struct section *sec_applies, *sec_symtab;
31040- int j;
31041+ unsigned int j;
31042 struct section *sec = &secs[i];
31043
31044 if (sec->shdr.sh_type != SHT_REL_TYPE) {
31045@@ -812,6 +860,23 @@ static int do_reloc32(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
31046 {
31047 unsigned r_type = ELF32_R_TYPE(rel->r_info);
31048 int shn_abs = (sym->st_shndx == SHN_ABS) && !is_reloc(S_REL, symname);
31049+ char *sym_strtab = sec->link->link->strtab;
31050+
31051+ /* Don't relocate actual per-cpu variables, they are absolute indices, not addresses */
31052+ if (!strcmp(sec_name(sym->st_shndx), ".data..percpu") && strcmp(sym_name(sym_strtab, sym), "__per_cpu_load"))
31053+ return 0;
31054+
31055+#ifdef CONFIG_PAX_KERNEXEC
31056+ /* Don't relocate actual code, they are relocated implicitly by the base address of KERNEL_CS */
31057+ if (!strcmp(sec_name(sym->st_shndx), ".text.end") && !strcmp(sym_name(sym_strtab, sym), "_etext"))
31058+ return 0;
31059+ if (!strcmp(sec_name(sym->st_shndx), ".init.text"))
31060+ return 0;
31061+ if (!strcmp(sec_name(sym->st_shndx), ".exit.text"))
31062+ return 0;
31063+ if (!strcmp(sec_name(sym->st_shndx), ".text") && strcmp(sym_name(sym_strtab, sym), "__LOAD_PHYSICAL_ADDR"))
31064+ return 0;
31065+#endif
31066
31067 switch (r_type) {
31068 case R_386_NONE:
31069@@ -950,7 +1015,7 @@ static int write32_as_text(uint32_t v, FILE *f)
31070
31071 static void emit_relocs(int as_text, int use_real_mode)
31072 {
31073- int i;
31074+ unsigned int i;
31075 int (*write_reloc)(uint32_t, FILE *) = write32;
31076 int (*do_reloc)(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
31077 const char *symname);
31078@@ -1026,10 +1091,11 @@ void process(FILE *fp, int use_real_mode, int as_text,
31079 {
31080 regex_init(use_real_mode);
31081 read_ehdr(fp);
31082+ read_phdrs(fp);
31083 read_shdrs(fp);
31084 read_strtabs(fp);
31085 read_symtabs(fp);
31086- read_relocs(fp);
31087+ read_relocs(fp, use_real_mode);
31088 if (ELF_BITS == 64)
31089 percpu_init();
31090 if (show_absolute_syms) {
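
[editor's note] read_phdrs() added above is plain ELF parsing: seek to e_phoff, read e_phnum fixed-size Elf_Phdr records, then byte-swap every field with the elf_*_to_cpu helpers. A standalone userspace sketch of the same steps for 32-bit ELF (error paths trimmed; assumes a little-endian host, so no swapping is shown):

#include <elf.h>
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
    if (argc != 2)
        return 1;
    FILE *fp = fopen(argv[1], "rb");
    if (!fp)
        return 1;

    Elf32_Ehdr ehdr;
    if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1)
        return 1;

    /* one record per program header, as in the hunk's calloc() */
    Elf32_Phdr *phdr = calloc(ehdr.e_phnum, sizeof(*phdr));
    if (!phdr || fseek(fp, ehdr.e_phoff, SEEK_SET) < 0)
        return 1;
    if (fread(phdr, sizeof(*phdr), ehdr.e_phnum, fp) != ehdr.e_phnum)
        return 1;

    for (unsigned i = 0; i < ehdr.e_phnum; i++)
        if (phdr[i].p_type == PT_LOAD)
            printf("LOAD vaddr=%#x filesz=%#x\n",
                   (unsigned)phdr[i].p_vaddr,
                   (unsigned)phdr[i].p_filesz);
    free(phdr);
    fclose(fp);
    return 0;
}

The patch uses the PT_LOAD p_paddr/p_vaddr delta to rebase relocation offsets under CONFIG_X86_32, which is why the program headers are needed at all.
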
31091diff --git a/arch/x86/um/tls_32.c b/arch/x86/um/tls_32.c
31092index 80ffa5b..a33bd15 100644
31093--- a/arch/x86/um/tls_32.c
31094+++ b/arch/x86/um/tls_32.c
31095@@ -260,7 +260,7 @@ out:
31096 if (unlikely(task == current &&
31097 !t->arch.tls_array[idx - GDT_ENTRY_TLS_MIN].flushed)) {
31098 printk(KERN_ERR "get_tls_entry: task with pid %d got here "
31099- "without flushed TLS.", current->pid);
31100+ "without flushed TLS.", task_pid_nr(current));
31101 }
31102
31103 return 0;
31104diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
31105index fd14be1..e3c79c0 100644
31106--- a/arch/x86/vdso/Makefile
31107+++ b/arch/x86/vdso/Makefile
31108@@ -181,7 +181,7 @@ quiet_cmd_vdso = VDSO $@
31109 -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
31110 sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
31111
31112-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
31113+VDSO_LDFLAGS = -fPIC -shared -Wl,--no-undefined $(call cc-ldoption, -Wl$(comma)--hash-style=sysv)
31114 GCOV_PROFILE := n
31115
31116 #
31117diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
31118index 0faad64..39ef157 100644
31119--- a/arch/x86/vdso/vdso32-setup.c
31120+++ b/arch/x86/vdso/vdso32-setup.c
31121@@ -25,6 +25,7 @@
31122 #include <asm/tlbflush.h>
31123 #include <asm/vdso.h>
31124 #include <asm/proto.h>
31125+#include <asm/mman.h>
31126
31127 enum {
31128 VDSO_DISABLED = 0,
31129@@ -226,7 +227,7 @@ static inline void map_compat_vdso(int map)
31130 void enable_sep_cpu(void)
31131 {
31132 int cpu = get_cpu();
31133- struct tss_struct *tss = &per_cpu(init_tss, cpu);
31134+ struct tss_struct *tss = init_tss + cpu;
31135
31136 if (!boot_cpu_has(X86_FEATURE_SEP)) {
31137 put_cpu();
31138@@ -249,7 +250,7 @@ static int __init gate_vma_init(void)
31139 gate_vma.vm_start = FIXADDR_USER_START;
31140 gate_vma.vm_end = FIXADDR_USER_END;
31141 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
31142- gate_vma.vm_page_prot = __P101;
31143+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
31144
31145 return 0;
31146 }
31147@@ -330,14 +331,14 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
31148 if (compat)
31149 addr = VDSO_HIGH_BASE;
31150 else {
31151- addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, 0);
31152+ addr = get_unmapped_area(NULL, 0, PAGE_SIZE, 0, MAP_EXECUTABLE);
31153 if (IS_ERR_VALUE(addr)) {
31154 ret = addr;
31155 goto up_fail;
31156 }
31157 }
31158
31159- current->mm->context.vdso = (void *)addr;
31160+ current->mm->context.vdso = addr;
31161
31162 if (compat_uses_vma || !compat) {
31163 /*
31164@@ -353,11 +354,11 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
31165 }
31166
31167 current_thread_info()->sysenter_return =
31168- VDSO32_SYMBOL(addr, SYSENTER_RETURN);
31169+ (__force void __user *)VDSO32_SYMBOL(addr, SYSENTER_RETURN);
31170
31171 up_fail:
31172 if (ret)
31173- current->mm->context.vdso = NULL;
31174+ current->mm->context.vdso = 0;
31175
31176 up_write(&mm->mmap_sem);
31177
31178@@ -404,8 +405,14 @@ __initcall(ia32_binfmt_init);
31179
31180 const char *arch_vma_name(struct vm_area_struct *vma)
31181 {
31182- if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
31183+ if (vma->vm_mm && vma->vm_start == vma->vm_mm->context.vdso)
31184 return "[vdso]";
31185+
31186+#ifdef CONFIG_PAX_SEGMEXEC
31187+ if (vma->vm_mm && vma->vm_mirror && vma->vm_mirror->vm_start == vma->vm_mm->context.vdso)
31188+ return "[vdso]";
31189+#endif
31190+
31191 return NULL;
31192 }
31193
31194@@ -415,7 +422,7 @@ struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
31195 * Check to see if the corresponding task was created in compat vdso
31196 * mode.
31197 */
31198- if (mm && mm->context.vdso == (void *)VDSO_HIGH_BASE)
31199+ if (mm && mm->context.vdso == VDSO_HIGH_BASE)
31200 return &gate_vma;
31201 return NULL;
31202 }
31203diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
31204index 431e875..cbb23f3 100644
31205--- a/arch/x86/vdso/vma.c
31206+++ b/arch/x86/vdso/vma.c
31207@@ -16,8 +16,6 @@
31208 #include <asm/vdso.h>
31209 #include <asm/page.h>
31210
31211-unsigned int __read_mostly vdso_enabled = 1;
31212-
31213 extern char vdso_start[], vdso_end[];
31214 extern unsigned short vdso_sync_cpuid;
31215
31216@@ -141,7 +139,6 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
31217 * unaligned here as a result of stack start randomization.
31218 */
31219 addr = PAGE_ALIGN(addr);
31220- addr = align_vdso_addr(addr);
31221
31222 return addr;
31223 }
31224@@ -154,30 +151,31 @@ static int setup_additional_pages(struct linux_binprm *bprm,
31225 unsigned size)
31226 {
31227 struct mm_struct *mm = current->mm;
31228- unsigned long addr;
31229+ unsigned long addr = 0;
31230 int ret;
31231
31232- if (!vdso_enabled)
31233- return 0;
31234-
31235 down_write(&mm->mmap_sem);
31236+
31237+#ifdef CONFIG_PAX_RANDMMAP
31238+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
31239+#endif
31240+
31241 addr = vdso_addr(mm->start_stack, size);
31242+ addr = align_vdso_addr(addr);
31243 addr = get_unmapped_area(NULL, addr, size, 0, 0);
31244 if (IS_ERR_VALUE(addr)) {
31245 ret = addr;
31246 goto up_fail;
31247 }
31248
31249- current->mm->context.vdso = (void *)addr;
31250+ mm->context.vdso = addr;
31251
31252 ret = install_special_mapping(mm, addr, size,
31253 VM_READ|VM_EXEC|
31254 VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
31255 pages);
31256- if (ret) {
31257- current->mm->context.vdso = NULL;
31258- goto up_fail;
31259- }
31260+ if (ret)
31261+ mm->context.vdso = 0;
31262
31263 up_fail:
31264 up_write(&mm->mmap_sem);
31265@@ -197,10 +195,3 @@ int x32_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
31266 vdsox32_size);
31267 }
31268 #endif
31269-
31270-static __init int vdso_setup(char *s)
31271-{
31272- vdso_enabled = simple_strtoul(s, NULL, 0);
31273- return 0;
31274-}
31275-__setup("vdso=", vdso_setup);
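
[editor's note] The vma.c change makes vDSO placement respect PaX RANDMMAP: with the flag set, the stack-relative hint is skipped and addr stays 0, so get_unmapped_area() chooses a (randomized) address itself. The same hint-versus-no-hint behaviour is visible with plain mmap() (runnable sketch):

#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
    /* addr == NULL: the kernel chooses, subject to mmap randomization */
    void *a = mmap(NULL, 4096, PROT_READ,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    /* non-NULL addr: a placement hint the kernel tries to honour */
    void *b = mmap((void *)0x70000000, 4096, PROT_READ,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    printf("kernel-chosen: %p, hinted: %p\n", a, b);
    return 0;
}
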
31276diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c
31277index a492be2..08678da 100644
31278--- a/arch/x86/xen/enlighten.c
31279+++ b/arch/x86/xen/enlighten.c
31280@@ -123,8 +123,6 @@ EXPORT_SYMBOL_GPL(xen_start_info);
31281
31282 struct shared_info xen_dummy_shared_info;
31283
31284-void *xen_initial_gdt;
31285-
31286 RESERVE_BRK(shared_info_page_brk, PAGE_SIZE);
31287 __read_mostly int xen_have_vector_callback;
31288 EXPORT_SYMBOL_GPL(xen_have_vector_callback);
31289@@ -542,8 +540,7 @@ static void xen_load_gdt(const struct desc_ptr *dtr)
31290 {
31291 unsigned long va = dtr->address;
31292 unsigned int size = dtr->size + 1;
31293- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
31294- unsigned long frames[pages];
31295+ unsigned long frames[65536 / PAGE_SIZE];
31296 int f;
31297
31298 /*
31299@@ -591,8 +588,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
31300 {
31301 unsigned long va = dtr->address;
31302 unsigned int size = dtr->size + 1;
31303- unsigned pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
31304- unsigned long frames[pages];
31305+ unsigned long frames[(GDT_SIZE + PAGE_SIZE - 1) / PAGE_SIZE];
31306 int f;
31307
31308 /*
31309@@ -600,7 +596,7 @@ static void __init xen_load_gdt_boot(const struct desc_ptr *dtr)
31310 * 8-byte entries, or 16 4k pages..
31311 */
31312
31313- BUG_ON(size > 65536);
31314+ BUG_ON(size > GDT_SIZE);
31315 BUG_ON(va & ~PAGE_MASK);
31316
31317 for (f = 0; va < dtr->address + size; va += PAGE_SIZE, f++) {
31318@@ -985,7 +981,7 @@ static u32 xen_safe_apic_wait_icr_idle(void)
31319 return 0;
31320 }
31321
31322-static void set_xen_basic_apic_ops(void)
31323+static void __init set_xen_basic_apic_ops(void)
31324 {
31325 apic->read = xen_apic_read;
31326 apic->write = xen_apic_write;
31327@@ -1290,30 +1286,30 @@ static const struct pv_apic_ops xen_apic_ops __initconst = {
31328 #endif
31329 };
31330
31331-static void xen_reboot(int reason)
31332+static __noreturn void xen_reboot(int reason)
31333 {
31334 struct sched_shutdown r = { .reason = reason };
31335
31336- if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r))
31337- BUG();
31338+ HYPERVISOR_sched_op(SCHEDOP_shutdown, &r);
31339+ BUG();
31340 }
31341
31342-static void xen_restart(char *msg)
31343+static __noreturn void xen_restart(char *msg)
31344 {
31345 xen_reboot(SHUTDOWN_reboot);
31346 }
31347
31348-static void xen_emergency_restart(void)
31349+static __noreturn void xen_emergency_restart(void)
31350 {
31351 xen_reboot(SHUTDOWN_reboot);
31352 }
31353
31354-static void xen_machine_halt(void)
31355+static __noreturn void xen_machine_halt(void)
31356 {
31357 xen_reboot(SHUTDOWN_poweroff);
31358 }
31359
31360-static void xen_machine_power_off(void)
31361+static __noreturn void xen_machine_power_off(void)
31362 {
31363 if (pm_power_off)
31364 pm_power_off();
31365@@ -1464,7 +1460,17 @@ asmlinkage void __init xen_start_kernel(void)
31366 __userpte_alloc_gfp &= ~__GFP_HIGHMEM;
31367
31368 /* Work out if we support NX */
31369- x86_configure_nx();
31370+#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
31371+ if ((cpuid_eax(0x80000000) & 0xffff0000) == 0x80000000 &&
31372+ (cpuid_edx(0x80000001) & (1U << (X86_FEATURE_NX & 31)))) {
31373+ unsigned l, h;
31374+
31375+ __supported_pte_mask |= _PAGE_NX;
31376+ rdmsr(MSR_EFER, l, h);
31377+ l |= EFER_NX;
31378+ wrmsr(MSR_EFER, l, h);
31379+ }
31380+#endif
31381
31382 xen_setup_features();
31383
31384@@ -1495,13 +1501,6 @@ asmlinkage void __init xen_start_kernel(void)
31385
31386 machine_ops = xen_machine_ops;
31387
31388- /*
31389- * The only reliable way to retain the initial address of the
31390- * percpu gdt_page is to remember it here, so we can go and
31391- * mark it RW later, when the initial percpu area is freed.
31392- */
31393- xen_initial_gdt = &per_cpu(gdt_page, 0);
31394-
31395 xen_smp_init();
31396
31397 #ifdef CONFIG_ACPI_NUMA
31398@@ -1700,7 +1699,7 @@ static int __cpuinit xen_hvm_cpu_notify(struct notifier_block *self,
31399 return NOTIFY_OK;
31400 }
31401
31402-static struct notifier_block xen_hvm_cpu_notifier __cpuinitdata = {
31403+static struct notifier_block xen_hvm_cpu_notifier = {
31404 .notifier_call = xen_hvm_cpu_notify,
31405 };
31406
31407diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c
31408index fdc3ba2..3daee39 100644
31409--- a/arch/x86/xen/mmu.c
31410+++ b/arch/x86/xen/mmu.c
31411@@ -1894,6 +1894,9 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
31412 /* L3_k[510] -> level2_kernel_pgt
31413 * L3_i[511] -> level2_fixmap_pgt */
31414 convert_pfn_mfn(level3_kernel_pgt);
31415+ convert_pfn_mfn(level3_vmalloc_start_pgt);
31416+ convert_pfn_mfn(level3_vmalloc_end_pgt);
31417+ convert_pfn_mfn(level3_vmemmap_pgt);
31418
31419 /* We get [511][511] and have Xen's version of level2_kernel_pgt */
31420 l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd);
31421@@ -1923,8 +1926,12 @@ void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn)
31422 set_page_prot(init_level4_pgt, PAGE_KERNEL_RO);
31423 set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO);
31424 set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO);
31425+ set_page_prot(level3_vmalloc_start_pgt, PAGE_KERNEL_RO);
31426+ set_page_prot(level3_vmalloc_end_pgt, PAGE_KERNEL_RO);
31427+ set_page_prot(level3_vmemmap_pgt, PAGE_KERNEL_RO);
31428 set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO);
31429 set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO);
31430+ set_page_prot(level2_vmemmap_pgt, PAGE_KERNEL_RO);
31431 set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO);
31432 set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO);
31433
31434@@ -2108,6 +2115,7 @@ static void __init xen_post_allocator_init(void)
31435 pv_mmu_ops.set_pud = xen_set_pud;
31436 #if PAGETABLE_LEVELS == 4
31437 pv_mmu_ops.set_pgd = xen_set_pgd;
31438+ pv_mmu_ops.set_pgd_batched = xen_set_pgd;
31439 #endif
31440
31441 /* This will work as long as patching hasn't happened yet
31442@@ -2186,6 +2194,7 @@ static const struct pv_mmu_ops xen_mmu_ops __initconst = {
31443 .pud_val = PV_CALLEE_SAVE(xen_pud_val),
31444 .make_pud = PV_CALLEE_SAVE(xen_make_pud),
31445 .set_pgd = xen_set_pgd_hyper,
31446+ .set_pgd_batched = xen_set_pgd_hyper,
31447
31448 .alloc_pud = xen_alloc_pmd_init,
31449 .release_pud = xen_release_pmd_init,
31450diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c
31451index d99cae8..18401e1 100644
31452--- a/arch/x86/xen/smp.c
31453+++ b/arch/x86/xen/smp.c
31454@@ -240,11 +240,6 @@ static void __init xen_smp_prepare_boot_cpu(void)
31455 {
31456 BUG_ON(smp_processor_id() != 0);
31457 native_smp_prepare_boot_cpu();
31458-
31459- /* We've switched to the "real" per-cpu gdt, so make sure the
31460- old memory can be recycled */
31461- make_lowmem_page_readwrite(xen_initial_gdt);
31462-
31463 xen_filter_cpu_maps();
31464 xen_setup_vcpu_info_placement();
31465 }
31466@@ -314,7 +309,7 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
31467 ctxt->user_regs.ss = __KERNEL_DS;
31468 #ifdef CONFIG_X86_32
31469 ctxt->user_regs.fs = __KERNEL_PERCPU;
31470- ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
31471+ savesegment(gs, ctxt->user_regs.gs);
31472 #else
31473 ctxt->gs_base_kernel = per_cpu_offset(cpu);
31474 #endif
31475@@ -324,8 +319,8 @@ cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
31476
31477 {
31478 ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
31479- ctxt->user_regs.ds = __USER_DS;
31480- ctxt->user_regs.es = __USER_DS;
31481+ ctxt->user_regs.ds = __KERNEL_DS;
31482+ ctxt->user_regs.es = __KERNEL_DS;
31483
31484 xen_copy_trap_info(ctxt->trap_ctxt);
31485
31486@@ -370,13 +365,12 @@ static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle)
31487 int rc;
31488
31489 per_cpu(current_task, cpu) = idle;
31490+ per_cpu(current_tinfo, cpu) = &idle->tinfo;
31491 #ifdef CONFIG_X86_32
31492 irq_ctx_init(cpu);
31493 #else
31494 clear_tsk_thread_flag(idle, TIF_FORK);
31495- per_cpu(kernel_stack, cpu) =
31496- (unsigned long)task_stack_page(idle) -
31497- KERNEL_STACK_OFFSET + THREAD_SIZE;
31498+ per_cpu(kernel_stack, cpu) = (unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
31499 #endif
31500 xen_setup_runstate_info(cpu);
31501 xen_setup_timer(cpu);
31502@@ -651,7 +645,7 @@ static const struct smp_ops xen_smp_ops __initconst = {
31503
31504 void __init xen_smp_init(void)
31505 {
31506- smp_ops = xen_smp_ops;
31507+ memcpy((void *)&smp_ops, &xen_smp_ops, sizeof smp_ops);
31508 xen_fill_possible_map();
31509 xen_init_spinlocks();
31510 }
31511diff --git a/arch/x86/xen/xen-asm_32.S b/arch/x86/xen/xen-asm_32.S
31512index 33ca6e4..0ded929 100644
31513--- a/arch/x86/xen/xen-asm_32.S
31514+++ b/arch/x86/xen/xen-asm_32.S
31515@@ -84,14 +84,14 @@ ENTRY(xen_iret)
31516 ESP_OFFSET=4 # bytes pushed onto stack
31517
31518 /*
31519- * Store vcpu_info pointer for easy access. Do it this way to
31520- * avoid having to reload %fs
31521+ * Store vcpu_info pointer for easy access.
31522 */
31523 #ifdef CONFIG_SMP
31524- GET_THREAD_INFO(%eax)
31525- movl %ss:TI_cpu(%eax), %eax
31526- movl %ss:__per_cpu_offset(,%eax,4), %eax
31527- mov %ss:xen_vcpu(%eax), %eax
31528+ push %fs
31529+ mov $(__KERNEL_PERCPU), %eax
31530+ mov %eax, %fs
31531+ mov PER_CPU_VAR(xen_vcpu), %eax
31532+ pop %fs
31533 #else
31534 movl %ss:xen_vcpu, %eax
31535 #endif
31536diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S
31537index 7faed58..ba4427c 100644
31538--- a/arch/x86/xen/xen-head.S
31539+++ b/arch/x86/xen/xen-head.S
31540@@ -19,6 +19,17 @@ ENTRY(startup_xen)
31541 #ifdef CONFIG_X86_32
31542 mov %esi,xen_start_info
31543 mov $init_thread_union+THREAD_SIZE,%esp
31544+#ifdef CONFIG_SMP
31545+ movl $cpu_gdt_table,%edi
31546+ movl $__per_cpu_load,%eax
31547+ movw %ax,__KERNEL_PERCPU + 2(%edi)
31548+ rorl $16,%eax
31549+ movb %al,__KERNEL_PERCPU + 4(%edi)
31550+ movb %ah,__KERNEL_PERCPU + 7(%edi)
31551+ movl $__per_cpu_end - 1,%eax
31552+ subl $__per_cpu_start,%eax
31553+ movw %ax,__KERNEL_PERCPU + 0(%edi)
31554+#endif
31555 #else
31556 mov %rsi,xen_start_info
31557 mov $init_thread_union+THREAD_SIZE,%rsp
31558diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
31559index a95b417..b6dbd0b 100644
31560--- a/arch/x86/xen/xen-ops.h
31561+++ b/arch/x86/xen/xen-ops.h
31562@@ -10,8 +10,6 @@
31563 extern const char xen_hypervisor_callback[];
31564 extern const char xen_failsafe_callback[];
31565
31566-extern void *xen_initial_gdt;
31567-
31568 struct trap_info;
31569 void xen_copy_trap_info(struct trap_info *traps);
31570
31571diff --git a/arch/xtensa/variants/dc232b/include/variant/core.h b/arch/xtensa/variants/dc232b/include/variant/core.h
31572index 525bd3d..ef888b1 100644
31573--- a/arch/xtensa/variants/dc232b/include/variant/core.h
31574+++ b/arch/xtensa/variants/dc232b/include/variant/core.h
31575@@ -119,9 +119,9 @@
31576 ----------------------------------------------------------------------*/
31577
31578 #define XCHAL_ICACHE_LINESIZE 32 /* I-cache line size in bytes */
31579-#define XCHAL_DCACHE_LINESIZE 32 /* D-cache line size in bytes */
31580 #define XCHAL_ICACHE_LINEWIDTH 5 /* log2(I line size in bytes) */
31581 #define XCHAL_DCACHE_LINEWIDTH 5 /* log2(D line size in bytes) */
31582+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
31583
31584 #define XCHAL_ICACHE_SIZE 16384 /* I-cache size in bytes or 0 */
31585 #define XCHAL_DCACHE_SIZE 16384 /* D-cache size in bytes or 0 */
31586diff --git a/arch/xtensa/variants/fsf/include/variant/core.h b/arch/xtensa/variants/fsf/include/variant/core.h
31587index 2f33760..835e50a 100644
31588--- a/arch/xtensa/variants/fsf/include/variant/core.h
31589+++ b/arch/xtensa/variants/fsf/include/variant/core.h
31590@@ -11,6 +11,7 @@
31591 #ifndef _XTENSA_CORE_H
31592 #define _XTENSA_CORE_H
31593
31594+#include <linux/const.h>
31595
31596 /****************************************************************************
31597 Parameters Useful for Any Code, USER or PRIVILEGED
31598@@ -112,9 +113,9 @@
31599 ----------------------------------------------------------------------*/
31600
31601 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
31602-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
31603 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
31604 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
31605+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
31606
31607 #define XCHAL_ICACHE_SIZE 8192 /* I-cache size in bytes or 0 */
31608 #define XCHAL_DCACHE_SIZE 8192 /* D-cache size in bytes or 0 */
31609diff --git a/arch/xtensa/variants/s6000/include/variant/core.h b/arch/xtensa/variants/s6000/include/variant/core.h
31610index af00795..2bb8105 100644
31611--- a/arch/xtensa/variants/s6000/include/variant/core.h
31612+++ b/arch/xtensa/variants/s6000/include/variant/core.h
31613@@ -11,6 +11,7 @@
31614 #ifndef _XTENSA_CORE_CONFIGURATION_H
31615 #define _XTENSA_CORE_CONFIGURATION_H
31616
31617+#include <linux/const.h>
31618
31619 /****************************************************************************
31620 Parameters Useful for Any Code, USER or PRIVILEGED
31621@@ -118,9 +119,9 @@
31622 ----------------------------------------------------------------------*/
31623
31624 #define XCHAL_ICACHE_LINESIZE 16 /* I-cache line size in bytes */
31625-#define XCHAL_DCACHE_LINESIZE 16 /* D-cache line size in bytes */
31626 #define XCHAL_ICACHE_LINEWIDTH 4 /* log2(I line size in bytes) */
31627 #define XCHAL_DCACHE_LINEWIDTH 4 /* log2(D line size in bytes) */
31628+#define XCHAL_DCACHE_LINESIZE (_AC(1,UL) << XCHAL_DCACHE_LINEWIDTH) /* D-cache line size in bytes */
31629
31630 #define XCHAL_ICACHE_SIZE 32768 /* I-cache size in bytes or 0 */
31631 #define XCHAL_DCACHE_SIZE 32768 /* D-cache size in bytes or 0 */
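
[editor's note] The three xtensa hunks replace the hard-coded D-cache line size with a value derived from its log2 width, so the two constants can never drift apart; _AC() from <linux/const.h> merely drops the UL suffix when the header is included from assembly. The identity itself can be checked at compile time (plain-C stand-in for _AC):

#include <assert.h>

#define DCACHE_LINEWIDTH 5
#define DCACHE_LINESIZE  (1UL << DCACHE_LINEWIDTH)

int main(void)
{
    /* line size must be exactly 2^linewidth, here 32 bytes */
    static_assert(DCACHE_LINESIZE == 32, "derived size matches width");
    return 0;
}
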
31632diff --git a/block/blk-iopoll.c b/block/blk-iopoll.c
31633index 58916af..eb9dbcf6 100644
31634--- a/block/blk-iopoll.c
31635+++ b/block/blk-iopoll.c
31636@@ -77,7 +77,7 @@ void blk_iopoll_complete(struct blk_iopoll *iopoll)
31637 }
31638 EXPORT_SYMBOL(blk_iopoll_complete);
31639
31640-static void blk_iopoll_softirq(struct softirq_action *h)
31641+static void blk_iopoll_softirq(void)
31642 {
31643 struct list_head *list = &__get_cpu_var(blk_cpu_iopoll);
31644 int rearm = 0, budget = blk_iopoll_budget;
31645@@ -209,7 +209,7 @@ static int __cpuinit blk_iopoll_cpu_notify(struct notifier_block *self,
31646 return NOTIFY_OK;
31647 }
31648
31649-static struct notifier_block __cpuinitdata blk_iopoll_cpu_notifier = {
31650+static struct notifier_block blk_iopoll_cpu_notifier = {
31651 .notifier_call = blk_iopoll_cpu_notify,
31652 };
31653
31654diff --git a/block/blk-map.c b/block/blk-map.c
31655index 623e1cd..ca1e109 100644
31656--- a/block/blk-map.c
31657+++ b/block/blk-map.c
31658@@ -302,7 +302,7 @@ int blk_rq_map_kern(struct request_queue *q, struct request *rq, void *kbuf,
31659 if (!len || !kbuf)
31660 return -EINVAL;
31661
31662- do_copy = !blk_rq_aligned(q, addr, len) || object_is_on_stack(kbuf);
31663+ do_copy = !blk_rq_aligned(q, addr, len) || object_starts_on_stack(kbuf);
31664 if (do_copy)
31665 bio = bio_copy_kern(q, kbuf, len, gfp_mask, reading);
31666 else
31667diff --git a/block/blk-softirq.c b/block/blk-softirq.c
31668index 467c8de..f3628c5 100644
31669--- a/block/blk-softirq.c
31670+++ b/block/blk-softirq.c
31671@@ -18,7 +18,7 @@ static DEFINE_PER_CPU(struct list_head, blk_cpu_done);
31672 * Softirq action handler - move entries to local list and loop over them
31673 * while passing them to the queue registered handler.
31674 */
31675-static void blk_done_softirq(struct softirq_action *h)
31676+static void blk_done_softirq(void)
31677 {
31678 struct list_head *cpu_list, local_list;
31679
31680@@ -98,7 +98,7 @@ static int __cpuinit blk_cpu_notify(struct notifier_block *self,
31681 return NOTIFY_OK;
31682 }
31683
31684-static struct notifier_block __cpuinitdata blk_cpu_notifier = {
31685+static struct notifier_block blk_cpu_notifier = {
31686 .notifier_call = blk_cpu_notify,
31687 };
31688
31689diff --git a/block/bsg.c b/block/bsg.c
31690index 420a5a9..23834aa 100644
31691--- a/block/bsg.c
31692+++ b/block/bsg.c
31693@@ -176,16 +176,24 @@ static int blk_fill_sgv4_hdr_rq(struct request_queue *q, struct request *rq,
31694 struct sg_io_v4 *hdr, struct bsg_device *bd,
31695 fmode_t has_write_perm)
31696 {
31697+ unsigned char tmpcmd[sizeof(rq->__cmd)];
31698+ unsigned char *cmdptr;
31699+
31700 if (hdr->request_len > BLK_MAX_CDB) {
31701 rq->cmd = kzalloc(hdr->request_len, GFP_KERNEL);
31702 if (!rq->cmd)
31703 return -ENOMEM;
31704- }
31705+ cmdptr = rq->cmd;
31706+ } else
31707+ cmdptr = tmpcmd;
31708
31709- if (copy_from_user(rq->cmd, (void __user *)(unsigned long)hdr->request,
31710+ if (copy_from_user(cmdptr, (void __user *)(unsigned long)hdr->request,
31711 hdr->request_len))
31712 return -EFAULT;
31713
31714+ if (cmdptr != rq->cmd)
31715+ memcpy(rq->cmd, cmdptr, hdr->request_len);
31716+
31717 if (hdr->subprotocol == BSG_SUB_PROTOCOL_SCSI_CMD) {
31718 if (blk_verify_command(rq->cmd, has_write_perm))
31719 return -EPERM;
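
[editor's note] The bsg change above (and the matching scsi_ioctl.c changes below) closes a double-fetch window: the SCSI CDB is copied from user space exactly once, into a kernel-side bounce buffer, and only then copied into rq->cmd, so a racing user thread cannot change the bytes between blk_verify_command()'s check and their use. A userspace analogue of the bounce-buffer pattern (runnable sketch, names hypothetical):

#include <stddef.h>
#include <string.h>

/* 'user' stands in for a buffer another thread may rewrite at any time.
 * Snapshot it once, validate the snapshot, then use only the snapshot. */
static int fill_command(unsigned char *cmd, size_t cmd_size,
                        const volatile unsigned char *user, size_t len)
{
    unsigned char tmpcmd[32];

    if (len > sizeof(tmpcmd) || len > cmd_size)
        return -1;
    for (size_t i = 0; i < len; i++)
        tmpcmd[i] = user[i];            /* the single fetch */
    /* permission checks go here, against tmpcmd[] only */
    memcpy(cmd, tmpcmd, len);
    return 0;
}

int main(void)
{
    unsigned char dst[32];
    volatile unsigned char src[4] = { 0x12, 0x00, 0x00, 0x00 };
    return fill_command(dst, sizeof(dst), src, sizeof(src));
}
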
31720diff --git a/block/compat_ioctl.c b/block/compat_ioctl.c
31721index 7c668c8..db3521c 100644
31722--- a/block/compat_ioctl.c
31723+++ b/block/compat_ioctl.c
31724@@ -340,7 +340,7 @@ static int compat_fd_ioctl(struct block_device *bdev, fmode_t mode,
31725 err |= __get_user(f->spec1, &uf->spec1);
31726 err |= __get_user(f->fmt_gap, &uf->fmt_gap);
31727 err |= __get_user(name, &uf->name);
31728- f->name = compat_ptr(name);
31729+ f->name = (void __force_kernel *)compat_ptr(name);
31730 if (err) {
31731 err = -EFAULT;
31732 goto out;
31733diff --git a/block/genhd.c b/block/genhd.c
31734index cdeb527..10aa34d 100644
31735--- a/block/genhd.c
31736+++ b/block/genhd.c
31737@@ -467,21 +467,24 @@ static char *bdevt_str(dev_t devt, char *buf)
31738
31739 /*
31740 * Register device numbers dev..(dev+range-1)
31741- * range must be nonzero
31742+ * Noop if @range is zero.
31743 * The hash chain is sorted on range, so that subranges can override.
31744 */
31745 void blk_register_region(dev_t devt, unsigned long range, struct module *module,
31746 struct kobject *(*probe)(dev_t, int *, void *),
31747 int (*lock)(dev_t, void *), void *data)
31748 {
31749- kobj_map(bdev_map, devt, range, module, probe, lock, data);
31750+ if (range)
31751+ kobj_map(bdev_map, devt, range, module, probe, lock, data);
31752 }
31753
31754 EXPORT_SYMBOL(blk_register_region);
31755
31756+/* undo blk_register_region(), noop if @range is zero */
31757 void blk_unregister_region(dev_t devt, unsigned long range)
31758 {
31759- kobj_unmap(bdev_map, devt, range);
31760+ if (range)
31761+ kobj_unmap(bdev_map, devt, range);
31762 }
31763
31764 EXPORT_SYMBOL(blk_unregister_region);
31765diff --git a/block/partitions/efi.c b/block/partitions/efi.c
31766index c85fc89..51e690b 100644
31767--- a/block/partitions/efi.c
31768+++ b/block/partitions/efi.c
31769@@ -234,14 +234,14 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
31770 if (!gpt)
31771 return NULL;
31772
31773+ if (!le32_to_cpu(gpt->num_partition_entries))
31774+ return NULL;
31775+ pte = kcalloc(le32_to_cpu(gpt->num_partition_entries), le32_to_cpu(gpt->sizeof_partition_entry), GFP_KERNEL);
31776+ if (!pte)
31777+ return NULL;
31778+
31779 count = le32_to_cpu(gpt->num_partition_entries) *
31780 le32_to_cpu(gpt->sizeof_partition_entry);
31781- if (!count)
31782- return NULL;
31783- pte = kmalloc(count, GFP_KERNEL);
31784- if (!pte)
31785- return NULL;
31786-
31787 if (read_lba(state, le64_to_cpu(gpt->partition_entry_lba),
31788 (u8 *) pte,
31789 count) < count) {
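
[editor's note] alloc_read_gpt_entries() used kmalloc(count) where count was the product of two 32-bit values read straight off the disk, so a crafted GPT header could overflow the multiplication and yield an undersized buffer. kcalloc(), like userspace calloc(), performs the multiplication with overflow checking: it allocates the full amount or fails, never wrapping. A runnable illustration:

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    uint32_t nr = 0x20000000u, size = 0x20u;

    /* the 32-bit product wraps to 0 -- the undersized-buffer bug */
    printf("naive 32-bit product: %u\n", nr * size);

    /* calloc() checks nr * size: it returns the full allocation or
     * NULL, exactly the guarantee kcalloc gives the hunk above */
    void *p = calloc(nr, size);
    printf("calloc: %s\n", p ? "allocated full size" : "refused");
    free(p);
    return 0;
}
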
31790diff --git a/block/scsi_ioctl.c b/block/scsi_ioctl.c
31791index a5ffcc9..3cedc9c 100644
31792--- a/block/scsi_ioctl.c
31793+++ b/block/scsi_ioctl.c
31794@@ -224,8 +224,20 @@ EXPORT_SYMBOL(blk_verify_command);
31795 static int blk_fill_sghdr_rq(struct request_queue *q, struct request *rq,
31796 struct sg_io_hdr *hdr, fmode_t mode)
31797 {
31798- if (copy_from_user(rq->cmd, hdr->cmdp, hdr->cmd_len))
31799+ unsigned char tmpcmd[sizeof(rq->__cmd)];
31800+ unsigned char *cmdptr;
31801+
31802+ if (rq->cmd != rq->__cmd)
31803+ cmdptr = rq->cmd;
31804+ else
31805+ cmdptr = tmpcmd;
31806+
31807+ if (copy_from_user(cmdptr, hdr->cmdp, hdr->cmd_len))
31808 return -EFAULT;
31809+
31810+ if (cmdptr != rq->cmd)
31811+ memcpy(rq->cmd, cmdptr, hdr->cmd_len);
31812+
31813 if (blk_verify_command(rq->cmd, mode & FMODE_WRITE))
31814 return -EPERM;
31815
31816@@ -434,6 +446,8 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
31817 int err;
31818 unsigned int in_len, out_len, bytes, opcode, cmdlen;
31819 char *buffer = NULL, sense[SCSI_SENSE_BUFFERSIZE];
31820+ unsigned char tmpcmd[sizeof(rq->__cmd)];
31821+ unsigned char *cmdptr;
31822
31823 if (!sic)
31824 return -EINVAL;
31825@@ -467,9 +481,18 @@ int sg_scsi_ioctl(struct request_queue *q, struct gendisk *disk, fmode_t mode,
31826 */
31827 err = -EFAULT;
31828 rq->cmd_len = cmdlen;
31829- if (copy_from_user(rq->cmd, sic->data, cmdlen))
31830+
31831+ if (rq->cmd != rq->__cmd)
31832+ cmdptr = rq->cmd;
31833+ else
31834+ cmdptr = tmpcmd;
31835+
31836+ if (copy_from_user(cmdptr, sic->data, cmdlen))
31837 goto error;
31838
31839+ if (rq->cmd != cmdptr)
31840+ memcpy(rq->cmd, cmdptr, cmdlen);
31841+
31842 if (in_len && copy_from_user(buffer, sic->data + cmdlen, in_len))
31843 goto error;
31844
31845diff --git a/crypto/cryptd.c b/crypto/cryptd.c
31846index 7bdd61b..afec999 100644
31847--- a/crypto/cryptd.c
31848+++ b/crypto/cryptd.c
31849@@ -63,7 +63,7 @@ struct cryptd_blkcipher_ctx {
31850
31851 struct cryptd_blkcipher_request_ctx {
31852 crypto_completion_t complete;
31853-};
31854+} __no_const;
31855
31856 struct cryptd_hash_ctx {
31857 struct crypto_shash *child;
31858@@ -80,7 +80,7 @@ struct cryptd_aead_ctx {
31859
31860 struct cryptd_aead_request_ctx {
31861 crypto_completion_t complete;
31862-};
31863+} __no_const;
31864
31865 static void cryptd_queue_worker(struct work_struct *work);
31866
31867diff --git a/crypto/pcrypt.c b/crypto/pcrypt.c
31868index b2c99dc..476c9fb 100644
31869--- a/crypto/pcrypt.c
31870+++ b/crypto/pcrypt.c
31871@@ -440,7 +440,7 @@ static int pcrypt_sysfs_add(struct padata_instance *pinst, const char *name)
31872 int ret;
31873
31874 pinst->kobj.kset = pcrypt_kset;
31875- ret = kobject_add(&pinst->kobj, NULL, name);
31876+ ret = kobject_add(&pinst->kobj, NULL, "%s", name);
31877 if (!ret)
31878 kobject_uevent(&pinst->kobj, KOBJ_ADD);
31879
31880@@ -455,8 +455,8 @@ static int pcrypt_init_padata(struct padata_pcrypt *pcrypt,
31881
31882 get_online_cpus();
31883
31884- pcrypt->wq = alloc_workqueue(name,
31885- WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1);
31886+ pcrypt->wq = alloc_workqueue("%s",
31887+ WQ_MEM_RECLAIM | WQ_CPU_INTENSIVE, 1, name);
31888 if (!pcrypt->wq)
31889 goto err;
31890
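[editor's note] Both pcrypt hunks are format-string hardening: a caller-supplied name must never be passed where a printf-style format is expected, or '%' sequences inside it get interpreted. The fix is always the same one-character-class change, "%s" plus the name as an argument. A minimal userspace demonstration (hypothetical names):

#include <stdio.h>

static void register_name(const char *name)
{
    char label[64];

    /* BAD: snprintf(label, sizeof(label), name);
     * a name containing "%s" or "%n" would be interpreted */
    snprintf(label, sizeof(label), "%s", name);     /* safe */
    puts(label);
}

int main(void)
{
    register_name("evil-%s-%n");    /* printed literally, not expanded */
    return 0;
}
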
31891diff --git a/drivers/acpi/apei/apei-internal.h b/drivers/acpi/apei/apei-internal.h
31892index f220d64..d359ad6 100644
31893--- a/drivers/acpi/apei/apei-internal.h
31894+++ b/drivers/acpi/apei/apei-internal.h
31895@@ -20,7 +20,7 @@ typedef int (*apei_exec_ins_func_t)(struct apei_exec_context *ctx,
31896 struct apei_exec_ins_type {
31897 u32 flags;
31898 apei_exec_ins_func_t run;
31899-};
31900+} __do_const;
31901
31902 struct apei_exec_context {
31903 u32 ip;
31904diff --git a/drivers/acpi/apei/cper.c b/drivers/acpi/apei/cper.c
31905index 33dc6a0..4b24b47 100644
31906--- a/drivers/acpi/apei/cper.c
31907+++ b/drivers/acpi/apei/cper.c
31908@@ -39,12 +39,12 @@
31909 */
31910 u64 cper_next_record_id(void)
31911 {
31912- static atomic64_t seq;
31913+ static atomic64_unchecked_t seq;
31914
31915- if (!atomic64_read(&seq))
31916- atomic64_set(&seq, ((u64)get_seconds()) << 32);
31917+ if (!atomic64_read_unchecked(&seq))
31918+ atomic64_set_unchecked(&seq, ((u64)get_seconds()) << 32);
31919
31920- return atomic64_inc_return(&seq);
31921+ return atomic64_inc_return_unchecked(&seq);
31922 }
31923 EXPORT_SYMBOL_GPL(cper_next_record_id);
31924
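[editor's note] cper_next_record_id() hands out IDs from a monotonic counter, not a reference count, so the patch moves it to the *_unchecked atomic variants, which PaX's REFCOUNT overflow trap deliberately ignores. The surrounding pattern -- lazily seeding the high 32 bits from the clock, then returning post-increment values -- looks like this in plain C (sketch with userspace stand-ins for the kernel atomics; the first-use seeding race is benign, as in the original):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>
#include <time.h>

static _Atomic uint64_t seq;

static uint64_t next_record_id(void)
{
    if (atomic_load(&seq) == 0)
        atomic_store(&seq, (uint64_t)time(NULL) << 32);
    /* fetch_add returns the old value; +1 mirrors atomic64_inc_return */
    return atomic_fetch_add(&seq, 1) + 1;
}

int main(void)
{
    printf("%llu\n", (unsigned long long)next_record_id());
    return 0;
}
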
31925diff --git a/drivers/acpi/bgrt.c b/drivers/acpi/bgrt.c
31926index be60399..778b33e8 100644
31927--- a/drivers/acpi/bgrt.c
31928+++ b/drivers/acpi/bgrt.c
31929@@ -87,8 +87,10 @@ static int __init bgrt_init(void)
31930 return -ENODEV;
31931
31932 sysfs_bin_attr_init(&image_attr);
31933- image_attr.private = bgrt_image;
31934- image_attr.size = bgrt_image_size;
31935+ pax_open_kernel();
31936+ *(void **)&image_attr.private = bgrt_image;
31937+ *(size_t *)&image_attr.size = bgrt_image_size;
31938+ pax_close_kernel();
31939
31940 bgrt_kobj = kobject_create_and_add("bgrt", acpi_kobj);
31941 if (!bgrt_kobj)
31942diff --git a/drivers/acpi/blacklist.c b/drivers/acpi/blacklist.c
31943index cb96296..b81293b 100644
31944--- a/drivers/acpi/blacklist.c
31945+++ b/drivers/acpi/blacklist.c
31946@@ -52,7 +52,7 @@ struct acpi_blacklist_item {
31947 u32 is_critical_error;
31948 };
31949
31950-static struct dmi_system_id acpi_osi_dmi_table[] __initdata;
31951+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst;
31952
31953 /*
31954 * POLICY: If *anything* doesn't work, put it on the blacklist.
31955@@ -193,7 +193,7 @@ static int __init dmi_disable_osi_win7(const struct dmi_system_id *d)
31956 return 0;
31957 }
31958
31959-static struct dmi_system_id acpi_osi_dmi_table[] __initdata = {
31960+static const struct dmi_system_id acpi_osi_dmi_table[] __initconst = {
31961 {
31962 .callback = dmi_disable_osi_vista,
31963 .ident = "Fujitsu Siemens",
31964diff --git a/drivers/acpi/ec_sys.c b/drivers/acpi/ec_sys.c
31965index 7586544..636a2f0 100644
31966--- a/drivers/acpi/ec_sys.c
31967+++ b/drivers/acpi/ec_sys.c
31968@@ -12,6 +12,7 @@
31969 #include <linux/acpi.h>
31970 #include <linux/debugfs.h>
31971 #include <linux/module.h>
31972+#include <linux/uaccess.h>
31973 #include "internal.h"
31974
31975 MODULE_AUTHOR("Thomas Renninger <trenn@suse.de>");
31976@@ -34,7 +35,7 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
31977 * struct acpi_ec *ec = ((struct seq_file *)f->private_data)->private;
31978 */
31979 unsigned int size = EC_SPACE_SIZE;
31980- u8 *data = (u8 *) buf;
31981+ u8 data;
31982 loff_t init_off = *off;
31983 int err = 0;
31984
31985@@ -47,9 +48,11 @@ static ssize_t acpi_ec_read_io(struct file *f, char __user *buf,
31986 size = count;
31987
31988 while (size) {
31989- err = ec_read(*off, &data[*off - init_off]);
31990+ err = ec_read(*off, &data);
31991 if (err)
31992 return err;
31993+ if (put_user(data, &buf[*off - init_off]))
31994+ return -EFAULT;
31995 *off += 1;
31996 size--;
31997 }
31998@@ -65,7 +68,6 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
31999
32000 unsigned int size = count;
32001 loff_t init_off = *off;
32002- u8 *data = (u8 *) buf;
32003 int err = 0;
32004
32005 if (*off >= EC_SPACE_SIZE)
32006@@ -76,7 +78,9 @@ static ssize_t acpi_ec_write_io(struct file *f, const char __user *buf,
32007 }
32008
32009 while (size) {
32010- u8 byte_write = data[*off - init_off];
32011+ u8 byte_write;
32012+ if (get_user(byte_write, &buf[*off - init_off]))
32013+ return -EFAULT;
32014 err = ec_write(*off, byte_write);
32015 if (err)
32016 return err;
32017diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
32018index eb133c7..f571552 100644
32019--- a/drivers/acpi/processor_idle.c
32020+++ b/drivers/acpi/processor_idle.c
32021@@ -994,7 +994,7 @@ static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
32022 {
32023 int i, count = CPUIDLE_DRIVER_STATE_START;
32024 struct acpi_processor_cx *cx;
32025- struct cpuidle_state *state;
32026+ cpuidle_state_no_const *state;
32027 struct cpuidle_driver *drv = &acpi_idle_driver;
32028
32029 if (!pr->flags.power_setup_done)
32030diff --git a/drivers/acpi/sysfs.c b/drivers/acpi/sysfs.c
32031index fcae5fa..e9f71ea 100644
32032--- a/drivers/acpi/sysfs.c
32033+++ b/drivers/acpi/sysfs.c
32034@@ -423,11 +423,11 @@ static u32 num_counters;
32035 static struct attribute **all_attrs;
32036 static u32 acpi_gpe_count;
32037
32038-static struct attribute_group interrupt_stats_attr_group = {
32039+static attribute_group_no_const interrupt_stats_attr_group = {
32040 .name = "interrupts",
32041 };
32042
32043-static struct kobj_attribute *counter_attrs;
32044+static kobj_attribute_no_const *counter_attrs;
32045
32046 static void delete_gpe_attr_array(void)
32047 {
32048diff --git a/drivers/ata/libahci.c b/drivers/ata/libahci.c
32049index 7b9bdd8..37638ca 100644
32050--- a/drivers/ata/libahci.c
32051+++ b/drivers/ata/libahci.c
32052@@ -1230,7 +1230,7 @@ int ahci_kick_engine(struct ata_port *ap)
32053 }
32054 EXPORT_SYMBOL_GPL(ahci_kick_engine);
32055
32056-static int ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
32057+static int __intentional_overflow(-1) ahci_exec_polled_cmd(struct ata_port *ap, int pmp,
32058 struct ata_taskfile *tf, int is_cmd, u16 flags,
32059 unsigned long timeout_msec)
32060 {
32061diff --git a/drivers/ata/libata-core.c b/drivers/ata/libata-core.c
32062index adf002a..39bb8f9 100644
32063--- a/drivers/ata/libata-core.c
32064+++ b/drivers/ata/libata-core.c
32065@@ -4792,7 +4792,7 @@ void ata_qc_free(struct ata_queued_cmd *qc)
32066 struct ata_port *ap;
32067 unsigned int tag;
32068
32069- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
32070+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
32071 ap = qc->ap;
32072
32073 qc->flags = 0;
32074@@ -4808,7 +4808,7 @@ void __ata_qc_complete(struct ata_queued_cmd *qc)
32075 struct ata_port *ap;
32076 struct ata_link *link;
32077
32078- WARN_ON_ONCE(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
32079+ BUG_ON(qc == NULL); /* ata_qc_from_tag _might_ return NULL */
32080 WARN_ON_ONCE(!(qc->flags & ATA_QCFLAG_ACTIVE));
32081 ap = qc->ap;
32082 link = qc->dev->link;
32083@@ -5926,6 +5926,7 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
32084 return;
32085
32086 spin_lock(&lock);
32087+ pax_open_kernel();
32088
32089 for (cur = ops->inherits; cur; cur = cur->inherits) {
32090 void **inherit = (void **)cur;
32091@@ -5939,8 +5940,9 @@ static void ata_finalize_port_ops(struct ata_port_operations *ops)
32092 if (IS_ERR(*pp))
32093 *pp = NULL;
32094
32095- ops->inherits = NULL;
32096+ *(struct ata_port_operations **)&ops->inherits = NULL;
32097
32098+ pax_close_kernel();
32099 spin_unlock(&lock);
32100 }
32101
32102diff --git a/drivers/ata/pata_arasan_cf.c b/drivers/ata/pata_arasan_cf.c
32103index 7638121..357a965 100644
32104--- a/drivers/ata/pata_arasan_cf.c
32105+++ b/drivers/ata/pata_arasan_cf.c
32106@@ -865,7 +865,9 @@ static int arasan_cf_probe(struct platform_device *pdev)
32107 /* Handle platform specific quirks */
32108 if (quirk) {
32109 if (quirk & CF_BROKEN_PIO) {
32110- ap->ops->set_piomode = NULL;
32111+ pax_open_kernel();
32112+ *(void **)&ap->ops->set_piomode = NULL;
32113+ pax_close_kernel();
32114 ap->pio_mask = 0;
32115 }
32116 if (quirk & CF_BROKEN_MWDMA)
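
[editor's note] The libata and pata_arasan hunks above show the patch's "constify" idiom, which recurs through the rest of this section's drivers: ops structures become read-only, and the few legitimate writes are bracketed by pax_open_kernel()/pax_close_kernel() (which toggle CR0.WP) with a cast stripping the const qualifier. A userspace caricature of the write half, with mprotect() standing in for the WP toggle (sketch only; writing to a const object is formally undefined in ISO C and is shown purely to illustrate the kernel pattern):

#include <stdint.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

struct ops { void (*handler)(void); };

static void none(void)    { }
static void patched(void) { puts("handler installed"); }

/* lives in .rodata, like a constified ops table in the kernel */
static const struct ops my_ops = { none };

int main(void)
{
    long pg = sysconf(_SC_PAGESIZE);
    void *page = (void *)((uintptr_t)&my_ops & ~((uintptr_t)pg - 1));

    /* rough analogue of pax_open_kernel(): drop write protection */
    mprotect(page, pg, PROT_READ | PROT_WRITE);
    *(void (**)(void))&my_ops.handler = patched;    /* cast away const */
    /* rough analogue of pax_close_kernel() */
    mprotect(page, pg, PROT_READ);

    my_ops.handler();
    return 0;
}
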
32117diff --git a/drivers/atm/adummy.c b/drivers/atm/adummy.c
32118index f9b983a..887b9d8 100644
32119--- a/drivers/atm/adummy.c
32120+++ b/drivers/atm/adummy.c
32121@@ -114,7 +114,7 @@ adummy_send(struct atm_vcc *vcc, struct sk_buff *skb)
32122 vcc->pop(vcc, skb);
32123 else
32124 dev_kfree_skb_any(skb);
32125- atomic_inc(&vcc->stats->tx);
32126+ atomic_inc_unchecked(&vcc->stats->tx);
32127
32128 return 0;
32129 }
32130diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
32131index 77a7480d..05cde58 100644
32132--- a/drivers/atm/ambassador.c
32133+++ b/drivers/atm/ambassador.c
32134@@ -454,7 +454,7 @@ static void tx_complete (amb_dev * dev, tx_out * tx) {
32135 PRINTD (DBG_FLOW|DBG_TX, "tx_complete %p %p", dev, tx);
32136
32137 // VC layer stats
32138- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
32139+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
32140
32141 // free the descriptor
32142 kfree (tx_descr);
32143@@ -495,7 +495,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
32144 dump_skb ("<<<", vc, skb);
32145
32146 // VC layer stats
32147- atomic_inc(&atm_vcc->stats->rx);
32148+ atomic_inc_unchecked(&atm_vcc->stats->rx);
32149 __net_timestamp(skb);
32150 // end of our responsibility
32151 atm_vcc->push (atm_vcc, skb);
32152@@ -510,7 +510,7 @@ static void rx_complete (amb_dev * dev, rx_out * rx) {
32153 } else {
32154 PRINTK (KERN_INFO, "dropped over-size frame");
32155 // should we count this?
32156- atomic_inc(&atm_vcc->stats->rx_drop);
32157+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
32158 }
32159
32160 } else {
32161@@ -1338,7 +1338,7 @@ static int amb_send (struct atm_vcc * atm_vcc, struct sk_buff * skb) {
32162 }
32163
32164 if (check_area (skb->data, skb->len)) {
32165- atomic_inc(&atm_vcc->stats->tx_err);
32166+ atomic_inc_unchecked(&atm_vcc->stats->tx_err);
32167 return -ENOMEM; // ?
32168 }
32169
32170diff --git a/drivers/atm/atmtcp.c b/drivers/atm/atmtcp.c
32171index 0e3f8f9..765a7a5 100644
32172--- a/drivers/atm/atmtcp.c
32173+++ b/drivers/atm/atmtcp.c
32174@@ -206,7 +206,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
32175 if (vcc->pop) vcc->pop(vcc,skb);
32176 else dev_kfree_skb(skb);
32177 if (dev_data) return 0;
32178- atomic_inc(&vcc->stats->tx_err);
32179+ atomic_inc_unchecked(&vcc->stats->tx_err);
32180 return -ENOLINK;
32181 }
32182 size = skb->len+sizeof(struct atmtcp_hdr);
32183@@ -214,7 +214,7 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
32184 if (!new_skb) {
32185 if (vcc->pop) vcc->pop(vcc,skb);
32186 else dev_kfree_skb(skb);
32187- atomic_inc(&vcc->stats->tx_err);
32188+ atomic_inc_unchecked(&vcc->stats->tx_err);
32189 return -ENOBUFS;
32190 }
32191 hdr = (void *) skb_put(new_skb,sizeof(struct atmtcp_hdr));
32192@@ -225,8 +225,8 @@ static int atmtcp_v_send(struct atm_vcc *vcc,struct sk_buff *skb)
32193 if (vcc->pop) vcc->pop(vcc,skb);
32194 else dev_kfree_skb(skb);
32195 out_vcc->push(out_vcc,new_skb);
32196- atomic_inc(&vcc->stats->tx);
32197- atomic_inc(&out_vcc->stats->rx);
32198+ atomic_inc_unchecked(&vcc->stats->tx);
32199+ atomic_inc_unchecked(&out_vcc->stats->rx);
32200 return 0;
32201 }
32202
32203@@ -299,7 +299,7 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
32204 out_vcc = find_vcc(dev, ntohs(hdr->vpi), ntohs(hdr->vci));
32205 read_unlock(&vcc_sklist_lock);
32206 if (!out_vcc) {
32207- atomic_inc(&vcc->stats->tx_err);
32208+ atomic_inc_unchecked(&vcc->stats->tx_err);
32209 goto done;
32210 }
32211 skb_pull(skb,sizeof(struct atmtcp_hdr));
32212@@ -311,8 +311,8 @@ static int atmtcp_c_send(struct atm_vcc *vcc,struct sk_buff *skb)
32213 __net_timestamp(new_skb);
32214 skb_copy_from_linear_data(skb, skb_put(new_skb, skb->len), skb->len);
32215 out_vcc->push(out_vcc,new_skb);
32216- atomic_inc(&vcc->stats->tx);
32217- atomic_inc(&out_vcc->stats->rx);
32218+ atomic_inc_unchecked(&vcc->stats->tx);
32219+ atomic_inc_unchecked(&out_vcc->stats->rx);
32220 done:
32221 if (vcc->pop) vcc->pop(vcc,skb);
32222 else dev_kfree_skb(skb);
32223diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c
32224index b1955ba..b179940 100644
32225--- a/drivers/atm/eni.c
32226+++ b/drivers/atm/eni.c
32227@@ -522,7 +522,7 @@ static int rx_aal0(struct atm_vcc *vcc)
32228 DPRINTK(DEV_LABEL "(itf %d): trashing empty cell\n",
32229 vcc->dev->number);
32230 length = 0;
32231- atomic_inc(&vcc->stats->rx_err);
32232+ atomic_inc_unchecked(&vcc->stats->rx_err);
32233 }
32234 else {
32235 length = ATM_CELL_SIZE-1; /* no HEC */
32236@@ -577,7 +577,7 @@ static int rx_aal5(struct atm_vcc *vcc)
32237 size);
32238 }
32239 eff = length = 0;
32240- atomic_inc(&vcc->stats->rx_err);
32241+ atomic_inc_unchecked(&vcc->stats->rx_err);
32242 }
32243 else {
32244 size = (descr & MID_RED_COUNT)*(ATM_CELL_PAYLOAD >> 2);
32245@@ -594,7 +594,7 @@ static int rx_aal5(struct atm_vcc *vcc)
32246 "(VCI=%d,length=%ld,size=%ld (descr 0x%lx))\n",
32247 vcc->dev->number,vcc->vci,length,size << 2,descr);
32248 length = eff = 0;
32249- atomic_inc(&vcc->stats->rx_err);
32250+ atomic_inc_unchecked(&vcc->stats->rx_err);
32251 }
32252 }
32253 skb = eff ? atm_alloc_charge(vcc,eff << 2,GFP_ATOMIC) : NULL;
32254@@ -767,7 +767,7 @@ rx_dequeued++;
32255 vcc->push(vcc,skb);
32256 pushed++;
32257 }
32258- atomic_inc(&vcc->stats->rx);
32259+ atomic_inc_unchecked(&vcc->stats->rx);
32260 }
32261 wake_up(&eni_dev->rx_wait);
32262 }
32263@@ -1227,7 +1227,7 @@ static void dequeue_tx(struct atm_dev *dev)
32264 PCI_DMA_TODEVICE);
32265 if (vcc->pop) vcc->pop(vcc,skb);
32266 else dev_kfree_skb_irq(skb);
32267- atomic_inc(&vcc->stats->tx);
32268+ atomic_inc_unchecked(&vcc->stats->tx);
32269 wake_up(&eni_dev->tx_wait);
32270 dma_complete++;
32271 }
32272diff --git a/drivers/atm/firestream.c b/drivers/atm/firestream.c
32273index b41c948..a002b17 100644
32274--- a/drivers/atm/firestream.c
32275+++ b/drivers/atm/firestream.c
32276@@ -749,7 +749,7 @@ static void process_txdone_queue (struct fs_dev *dev, struct queue *q)
32277 }
32278 }
32279
32280- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
32281+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
32282
32283 fs_dprintk (FS_DEBUG_TXMEM, "i");
32284 fs_dprintk (FS_DEBUG_ALLOC, "Free t-skb: %p\n", skb);
32285@@ -816,7 +816,7 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
32286 #endif
32287 skb_put (skb, qe->p1 & 0xffff);
32288 ATM_SKB(skb)->vcc = atm_vcc;
32289- atomic_inc(&atm_vcc->stats->rx);
32290+ atomic_inc_unchecked(&atm_vcc->stats->rx);
32291 __net_timestamp(skb);
32292 fs_dprintk (FS_DEBUG_ALLOC, "Free rec-skb: %p (pushed)\n", skb);
32293 atm_vcc->push (atm_vcc, skb);
32294@@ -837,12 +837,12 @@ static void process_incoming (struct fs_dev *dev, struct queue *q)
32295 kfree (pe);
32296 }
32297 if (atm_vcc)
32298- atomic_inc(&atm_vcc->stats->rx_drop);
32299+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
32300 break;
32301 case 0x1f: /* Reassembly abort: no buffers. */
32302 /* Silently increment error counter. */
32303 if (atm_vcc)
32304- atomic_inc(&atm_vcc->stats->rx_drop);
32305+ atomic_inc_unchecked(&atm_vcc->stats->rx_drop);
32306 break;
32307 default: /* Hmm. Haven't written the code to handle the others yet... -- REW */
32308 printk (KERN_WARNING "Don't know what to do with RX status %x: %s.\n",
32309diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
32310index 204814e..cede831 100644
32311--- a/drivers/atm/fore200e.c
32312+++ b/drivers/atm/fore200e.c
32313@@ -931,9 +931,9 @@ fore200e_tx_irq(struct fore200e* fore200e)
32314 #endif
32315 /* check error condition */
32316 if (*entry->status & STATUS_ERROR)
32317- atomic_inc(&vcc->stats->tx_err);
32318+ atomic_inc_unchecked(&vcc->stats->tx_err);
32319 else
32320- atomic_inc(&vcc->stats->tx);
32321+ atomic_inc_unchecked(&vcc->stats->tx);
32322 }
32323 }
32324
32325@@ -1082,7 +1082,7 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
32326 if (skb == NULL) {
32327 DPRINTK(2, "unable to alloc new skb, rx PDU length = %d\n", pdu_len);
32328
32329- atomic_inc(&vcc->stats->rx_drop);
32330+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32331 return -ENOMEM;
32332 }
32333
32334@@ -1125,14 +1125,14 @@ fore200e_push_rpd(struct fore200e* fore200e, struct atm_vcc* vcc, struct rpd* rp
32335
32336 dev_kfree_skb_any(skb);
32337
32338- atomic_inc(&vcc->stats->rx_drop);
32339+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32340 return -ENOMEM;
32341 }
32342
32343 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
32344
32345 vcc->push(vcc, skb);
32346- atomic_inc(&vcc->stats->rx);
32347+ atomic_inc_unchecked(&vcc->stats->rx);
32348
32349 ASSERT(atomic_read(&sk_atm(vcc)->sk_wmem_alloc) >= 0);
32350
32351@@ -1210,7 +1210,7 @@ fore200e_rx_irq(struct fore200e* fore200e)
32352 DPRINTK(2, "damaged PDU on %d.%d.%d\n",
32353 fore200e->atm_dev->number,
32354 entry->rpd->atm_header.vpi, entry->rpd->atm_header.vci);
32355- atomic_inc(&vcc->stats->rx_err);
32356+ atomic_inc_unchecked(&vcc->stats->rx_err);
32357 }
32358 }
32359
32360@@ -1655,7 +1655,7 @@ fore200e_send(struct atm_vcc *vcc, struct sk_buff *skb)
32361 goto retry_here;
32362 }
32363
32364- atomic_inc(&vcc->stats->tx_err);
32365+ atomic_inc_unchecked(&vcc->stats->tx_err);
32366
32367 fore200e->tx_sat++;
32368 DPRINTK(2, "tx queue of device %s is saturated, PDU dropped - heartbeat is %08x\n",
32369diff --git a/drivers/atm/he.c b/drivers/atm/he.c
32370index 507362a..a845e57 100644
32371--- a/drivers/atm/he.c
32372+++ b/drivers/atm/he.c
32373@@ -1698,7 +1698,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
32374
32375 if (RBRQ_HBUF_ERR(he_dev->rbrq_head)) {
32376 hprintk("HBUF_ERR! (cid 0x%x)\n", cid);
32377- atomic_inc(&vcc->stats->rx_drop);
32378+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32379 goto return_host_buffers;
32380 }
32381
32382@@ -1725,7 +1725,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
32383 RBRQ_LEN_ERR(he_dev->rbrq_head)
32384 ? "LEN_ERR" : "",
32385 vcc->vpi, vcc->vci);
32386- atomic_inc(&vcc->stats->rx_err);
32387+ atomic_inc_unchecked(&vcc->stats->rx_err);
32388 goto return_host_buffers;
32389 }
32390
32391@@ -1777,7 +1777,7 @@ he_service_rbrq(struct he_dev *he_dev, int group)
32392 vcc->push(vcc, skb);
32393 spin_lock(&he_dev->global_lock);
32394
32395- atomic_inc(&vcc->stats->rx);
32396+ atomic_inc_unchecked(&vcc->stats->rx);
32397
32398 return_host_buffers:
32399 ++pdus_assembled;
32400@@ -2103,7 +2103,7 @@ __enqueue_tpd(struct he_dev *he_dev, struct he_tpd *tpd, unsigned cid)
32401 tpd->vcc->pop(tpd->vcc, tpd->skb);
32402 else
32403 dev_kfree_skb_any(tpd->skb);
32404- atomic_inc(&tpd->vcc->stats->tx_err);
32405+ atomic_inc_unchecked(&tpd->vcc->stats->tx_err);
32406 }
32407 pci_pool_free(he_dev->tpd_pool, tpd, TPD_ADDR(tpd->status));
32408 return;
32409@@ -2515,7 +2515,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
32410 vcc->pop(vcc, skb);
32411 else
32412 dev_kfree_skb_any(skb);
32413- atomic_inc(&vcc->stats->tx_err);
32414+ atomic_inc_unchecked(&vcc->stats->tx_err);
32415 return -EINVAL;
32416 }
32417
32418@@ -2526,7 +2526,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
32419 vcc->pop(vcc, skb);
32420 else
32421 dev_kfree_skb_any(skb);
32422- atomic_inc(&vcc->stats->tx_err);
32423+ atomic_inc_unchecked(&vcc->stats->tx_err);
32424 return -EINVAL;
32425 }
32426 #endif
32427@@ -2538,7 +2538,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
32428 vcc->pop(vcc, skb);
32429 else
32430 dev_kfree_skb_any(skb);
32431- atomic_inc(&vcc->stats->tx_err);
32432+ atomic_inc_unchecked(&vcc->stats->tx_err);
32433 spin_unlock_irqrestore(&he_dev->global_lock, flags);
32434 return -ENOMEM;
32435 }
32436@@ -2580,7 +2580,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
32437 vcc->pop(vcc, skb);
32438 else
32439 dev_kfree_skb_any(skb);
32440- atomic_inc(&vcc->stats->tx_err);
32441+ atomic_inc_unchecked(&vcc->stats->tx_err);
32442 spin_unlock_irqrestore(&he_dev->global_lock, flags);
32443 return -ENOMEM;
32444 }
32445@@ -2611,7 +2611,7 @@ he_send(struct atm_vcc *vcc, struct sk_buff *skb)
32446 __enqueue_tpd(he_dev, tpd, cid);
32447 spin_unlock_irqrestore(&he_dev->global_lock, flags);
32448
32449- atomic_inc(&vcc->stats->tx);
32450+ atomic_inc_unchecked(&vcc->stats->tx);
32451
32452 return 0;
32453 }
32454diff --git a/drivers/atm/horizon.c b/drivers/atm/horizon.c
32455index 1dc0519..1aadaf7 100644
32456--- a/drivers/atm/horizon.c
32457+++ b/drivers/atm/horizon.c
32458@@ -1034,7 +1034,7 @@ static void rx_schedule (hrz_dev * dev, int irq) {
32459 {
32460 struct atm_vcc * vcc = ATM_SKB(skb)->vcc;
32461 // VC layer stats
32462- atomic_inc(&vcc->stats->rx);
32463+ atomic_inc_unchecked(&vcc->stats->rx);
32464 __net_timestamp(skb);
32465 // end of our responsibility
32466 vcc->push (vcc, skb);
32467@@ -1186,7 +1186,7 @@ static void tx_schedule (hrz_dev * const dev, int irq) {
32468 dev->tx_iovec = NULL;
32469
32470 // VC layer stats
32471- atomic_inc(&ATM_SKB(skb)->vcc->stats->tx);
32472+ atomic_inc_unchecked(&ATM_SKB(skb)->vcc->stats->tx);
32473
32474 // free the skb
32475 hrz_kfree_skb (skb);
32476diff --git a/drivers/atm/idt77252.c b/drivers/atm/idt77252.c
32477index 272f009..a18ba55 100644
32478--- a/drivers/atm/idt77252.c
32479+++ b/drivers/atm/idt77252.c
32480@@ -812,7 +812,7 @@ drain_scq(struct idt77252_dev *card, struct vc_map *vc)
32481 else
32482 dev_kfree_skb(skb);
32483
32484- atomic_inc(&vcc->stats->tx);
32485+ atomic_inc_unchecked(&vcc->stats->tx);
32486 }
32487
32488 atomic_dec(&scq->used);
32489@@ -1075,13 +1075,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32490 if ((sb = dev_alloc_skb(64)) == NULL) {
32491 printk("%s: Can't allocate buffers for aal0.\n",
32492 card->name);
32493- atomic_add(i, &vcc->stats->rx_drop);
32494+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
32495 break;
32496 }
32497 if (!atm_charge(vcc, sb->truesize)) {
32498 RXPRINTK("%s: atm_charge() dropped aal0 packets.\n",
32499 card->name);
32500- atomic_add(i - 1, &vcc->stats->rx_drop);
32501+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop);
32502 dev_kfree_skb(sb);
32503 break;
32504 }
32505@@ -1098,7 +1098,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32506 ATM_SKB(sb)->vcc = vcc;
32507 __net_timestamp(sb);
32508 vcc->push(vcc, sb);
32509- atomic_inc(&vcc->stats->rx);
32510+ atomic_inc_unchecked(&vcc->stats->rx);
32511
32512 cell += ATM_CELL_PAYLOAD;
32513 }
32514@@ -1135,13 +1135,13 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32515 "(CDC: %08x)\n",
32516 card->name, len, rpp->len, readl(SAR_REG_CDC));
32517 recycle_rx_pool_skb(card, rpp);
32518- atomic_inc(&vcc->stats->rx_err);
32519+ atomic_inc_unchecked(&vcc->stats->rx_err);
32520 return;
32521 }
32522 if (stat & SAR_RSQE_CRC) {
32523 RXPRINTK("%s: AAL5 CRC error.\n", card->name);
32524 recycle_rx_pool_skb(card, rpp);
32525- atomic_inc(&vcc->stats->rx_err);
32526+ atomic_inc_unchecked(&vcc->stats->rx_err);
32527 return;
32528 }
32529 if (skb_queue_len(&rpp->queue) > 1) {
32530@@ -1152,7 +1152,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32531 RXPRINTK("%s: Can't alloc RX skb.\n",
32532 card->name);
32533 recycle_rx_pool_skb(card, rpp);
32534- atomic_inc(&vcc->stats->rx_err);
32535+ atomic_inc_unchecked(&vcc->stats->rx_err);
32536 return;
32537 }
32538 if (!atm_charge(vcc, skb->truesize)) {
32539@@ -1171,7 +1171,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32540 __net_timestamp(skb);
32541
32542 vcc->push(vcc, skb);
32543- atomic_inc(&vcc->stats->rx);
32544+ atomic_inc_unchecked(&vcc->stats->rx);
32545
32546 return;
32547 }
32548@@ -1193,7 +1193,7 @@ dequeue_rx(struct idt77252_dev *card, struct rsq_entry *rsqe)
32549 __net_timestamp(skb);
32550
32551 vcc->push(vcc, skb);
32552- atomic_inc(&vcc->stats->rx);
32553+ atomic_inc_unchecked(&vcc->stats->rx);
32554
32555 if (skb->truesize > SAR_FB_SIZE_3)
32556 add_rx_skb(card, 3, SAR_FB_SIZE_3, 1);
32557@@ -1304,14 +1304,14 @@ idt77252_rx_raw(struct idt77252_dev *card)
32558 if (vcc->qos.aal != ATM_AAL0) {
32559 RPRINTK("%s: raw cell for non AAL0 vc %u.%u\n",
32560 card->name, vpi, vci);
32561- atomic_inc(&vcc->stats->rx_drop);
32562+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32563 goto drop;
32564 }
32565
32566 if ((sb = dev_alloc_skb(64)) == NULL) {
32567 printk("%s: Can't allocate buffers for AAL0.\n",
32568 card->name);
32569- atomic_inc(&vcc->stats->rx_err);
32570+ atomic_inc_unchecked(&vcc->stats->rx_err);
32571 goto drop;
32572 }
32573
32574@@ -1330,7 +1330,7 @@ idt77252_rx_raw(struct idt77252_dev *card)
32575 ATM_SKB(sb)->vcc = vcc;
32576 __net_timestamp(sb);
32577 vcc->push(vcc, sb);
32578- atomic_inc(&vcc->stats->rx);
32579+ atomic_inc_unchecked(&vcc->stats->rx);
32580
32581 drop:
32582 skb_pull(queue, 64);
32583@@ -1955,13 +1955,13 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
32584
32585 if (vc == NULL) {
32586 printk("%s: NULL connection in send().\n", card->name);
32587- atomic_inc(&vcc->stats->tx_err);
32588+ atomic_inc_unchecked(&vcc->stats->tx_err);
32589 dev_kfree_skb(skb);
32590 return -EINVAL;
32591 }
32592 if (!test_bit(VCF_TX, &vc->flags)) {
32593 printk("%s: Trying to transmit on a non-tx VC.\n", card->name);
32594- atomic_inc(&vcc->stats->tx_err);
32595+ atomic_inc_unchecked(&vcc->stats->tx_err);
32596 dev_kfree_skb(skb);
32597 return -EINVAL;
32598 }
32599@@ -1973,14 +1973,14 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
32600 break;
32601 default:
32602 printk("%s: Unsupported AAL: %d\n", card->name, vcc->qos.aal);
32603- atomic_inc(&vcc->stats->tx_err);
32604+ atomic_inc_unchecked(&vcc->stats->tx_err);
32605 dev_kfree_skb(skb);
32606 return -EINVAL;
32607 }
32608
32609 if (skb_shinfo(skb)->nr_frags != 0) {
32610 printk("%s: No scatter-gather yet.\n", card->name);
32611- atomic_inc(&vcc->stats->tx_err);
32612+ atomic_inc_unchecked(&vcc->stats->tx_err);
32613 dev_kfree_skb(skb);
32614 return -EINVAL;
32615 }
32616@@ -1988,7 +1988,7 @@ idt77252_send_skb(struct atm_vcc *vcc, struct sk_buff *skb, int oam)
32617
32618 err = queue_skb(card, vc, skb, oam);
32619 if (err) {
32620- atomic_inc(&vcc->stats->tx_err);
32621+ atomic_inc_unchecked(&vcc->stats->tx_err);
32622 dev_kfree_skb(skb);
32623 return err;
32624 }
32625@@ -2011,7 +2011,7 @@ idt77252_send_oam(struct atm_vcc *vcc, void *cell, int flags)
32626 skb = dev_alloc_skb(64);
32627 if (!skb) {
32628 printk("%s: Out of memory in send_oam().\n", card->name);
32629- atomic_inc(&vcc->stats->tx_err);
32630+ atomic_inc_unchecked(&vcc->stats->tx_err);
32631 return -ENOMEM;
32632 }
32633 atomic_add(skb->truesize, &sk_atm(vcc)->sk_wmem_alloc);
32634diff --git a/drivers/atm/iphase.c b/drivers/atm/iphase.c
32635index 4217f29..88f547a 100644
32636--- a/drivers/atm/iphase.c
32637+++ b/drivers/atm/iphase.c
32638@@ -1145,7 +1145,7 @@ static int rx_pkt(struct atm_dev *dev)
32639 status = (u_short) (buf_desc_ptr->desc_mode);
32640 if (status & (RX_CER | RX_PTE | RX_OFL))
32641 {
32642- atomic_inc(&vcc->stats->rx_err);
32643+ atomic_inc_unchecked(&vcc->stats->rx_err);
32644 IF_ERR(printk("IA: bad packet, dropping it");)
32645 if (status & RX_CER) {
32646 IF_ERR(printk(" cause: packet CRC error\n");)
32647@@ -1168,7 +1168,7 @@ static int rx_pkt(struct atm_dev *dev)
32648 len = dma_addr - buf_addr;
32649 if (len > iadev->rx_buf_sz) {
32650 printk("Over %d bytes sdu received, dropped!!!\n", iadev->rx_buf_sz);
32651- atomic_inc(&vcc->stats->rx_err);
32652+ atomic_inc_unchecked(&vcc->stats->rx_err);
32653 goto out_free_desc;
32654 }
32655
32656@@ -1318,7 +1318,7 @@ static void rx_dle_intr(struct atm_dev *dev)
32657 ia_vcc = INPH_IA_VCC(vcc);
32658 if (ia_vcc == NULL)
32659 {
32660- atomic_inc(&vcc->stats->rx_err);
32661+ atomic_inc_unchecked(&vcc->stats->rx_err);
32662 atm_return(vcc, skb->truesize);
32663 dev_kfree_skb_any(skb);
32664 goto INCR_DLE;
32665@@ -1330,7 +1330,7 @@ static void rx_dle_intr(struct atm_dev *dev)
32666 if ((length > iadev->rx_buf_sz) || (length >
32667 (skb->len - sizeof(struct cpcs_trailer))))
32668 {
32669- atomic_inc(&vcc->stats->rx_err);
32670+ atomic_inc_unchecked(&vcc->stats->rx_err);
32671 IF_ERR(printk("rx_dle_intr: Bad AAL5 trailer %d (skb len %d)",
32672 length, skb->len);)
32673 atm_return(vcc, skb->truesize);
32674@@ -1346,7 +1346,7 @@ static void rx_dle_intr(struct atm_dev *dev)
32675
32676 IF_RX(printk("rx_dle_intr: skb push");)
32677 vcc->push(vcc,skb);
32678- atomic_inc(&vcc->stats->rx);
32679+ atomic_inc_unchecked(&vcc->stats->rx);
32680 iadev->rx_pkt_cnt++;
32681 }
32682 INCR_DLE:
32683@@ -2826,15 +2826,15 @@ static int ia_ioctl(struct atm_dev *dev, unsigned int cmd, void __user *arg)
32684 {
32685 struct k_sonet_stats *stats;
32686 stats = &PRIV(_ia_dev[board])->sonet_stats;
32687- printk("section_bip: %d\n", atomic_read(&stats->section_bip));
32688- printk("line_bip : %d\n", atomic_read(&stats->line_bip));
32689- printk("path_bip : %d\n", atomic_read(&stats->path_bip));
32690- printk("line_febe : %d\n", atomic_read(&stats->line_febe));
32691- printk("path_febe : %d\n", atomic_read(&stats->path_febe));
32692- printk("corr_hcs : %d\n", atomic_read(&stats->corr_hcs));
32693- printk("uncorr_hcs : %d\n", atomic_read(&stats->uncorr_hcs));
32694- printk("tx_cells : %d\n", atomic_read(&stats->tx_cells));
32695- printk("rx_cells : %d\n", atomic_read(&stats->rx_cells));
32696+ printk("section_bip: %d\n", atomic_read_unchecked(&stats->section_bip));
32697+ printk("line_bip : %d\n", atomic_read_unchecked(&stats->line_bip));
32698+ printk("path_bip : %d\n", atomic_read_unchecked(&stats->path_bip));
32699+ printk("line_febe : %d\n", atomic_read_unchecked(&stats->line_febe));
32700+ printk("path_febe : %d\n", atomic_read_unchecked(&stats->path_febe));
32701+ printk("corr_hcs : %d\n", atomic_read_unchecked(&stats->corr_hcs));
32702+ printk("uncorr_hcs : %d\n", atomic_read_unchecked(&stats->uncorr_hcs));
32703+ printk("tx_cells : %d\n", atomic_read_unchecked(&stats->tx_cells));
32704+ printk("rx_cells : %d\n", atomic_read_unchecked(&stats->rx_cells));
32705 }
32706 ia_cmds.status = 0;
32707 break;
32708@@ -2939,7 +2939,7 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
32709 if ((desc == 0) || (desc > iadev->num_tx_desc))
32710 {
32711 IF_ERR(printk(DEV_LABEL "invalid desc for send: %d\n", desc);)
32712- atomic_inc(&vcc->stats->tx);
32713+ atomic_inc_unchecked(&vcc->stats->tx);
32714 if (vcc->pop)
32715 vcc->pop(vcc, skb);
32716 else
32717@@ -3044,14 +3044,14 @@ static int ia_pkt_tx (struct atm_vcc *vcc, struct sk_buff *skb) {
32718 ATM_DESC(skb) = vcc->vci;
32719 skb_queue_tail(&iadev->tx_dma_q, skb);
32720
32721- atomic_inc(&vcc->stats->tx);
32722+ atomic_inc_unchecked(&vcc->stats->tx);
32723 iadev->tx_pkt_cnt++;
32724 /* Increment transaction counter */
32725 writel(2, iadev->dma+IPHASE5575_TX_COUNTER);
32726
32727 #if 0
32728 /* add flow control logic */
32729- if (atomic_read(&vcc->stats->tx) % 20 == 0) {
32730+ if (atomic_read_unchecked(&vcc->stats->tx) % 20 == 0) {
32731 if (iavcc->vc_desc_cnt > 10) {
32732 vcc->tx_quota = vcc->tx_quota * 3 / 4;
32733 printk("Tx1: vcc->tx_quota = %d \n", (u32)vcc->tx_quota );
32734diff --git a/drivers/atm/lanai.c b/drivers/atm/lanai.c
32735index fa7d701..1e404c7 100644
32736--- a/drivers/atm/lanai.c
32737+++ b/drivers/atm/lanai.c
32738@@ -1303,7 +1303,7 @@ static void lanai_send_one_aal5(struct lanai_dev *lanai,
32739 vcc_tx_add_aal5_trailer(lvcc, skb->len, 0, 0);
32740 lanai_endtx(lanai, lvcc);
32741 lanai_free_skb(lvcc->tx.atmvcc, skb);
32742- atomic_inc(&lvcc->tx.atmvcc->stats->tx);
32743+ atomic_inc_unchecked(&lvcc->tx.atmvcc->stats->tx);
32744 }
32745
32746 /* Try to fill the buffer - don't call unless there is backlog */
32747@@ -1426,7 +1426,7 @@ static void vcc_rx_aal5(struct lanai_vcc *lvcc, int endptr)
32748 ATM_SKB(skb)->vcc = lvcc->rx.atmvcc;
32749 __net_timestamp(skb);
32750 lvcc->rx.atmvcc->push(lvcc->rx.atmvcc, skb);
32751- atomic_inc(&lvcc->rx.atmvcc->stats->rx);
32752+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx);
32753 out:
32754 lvcc->rx.buf.ptr = end;
32755 cardvcc_write(lvcc, endptr, vcc_rxreadptr);
32756@@ -1667,7 +1667,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
32757 DPRINTK("(itf %d) got RX service entry 0x%X for non-AAL5 "
32758 "vcc %d\n", lanai->number, (unsigned int) s, vci);
32759 lanai->stats.service_rxnotaal5++;
32760- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
32761+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
32762 return 0;
32763 }
32764 if (likely(!(s & (SERVICE_TRASH | SERVICE_STREAM | SERVICE_CRCERR)))) {
32765@@ -1679,7 +1679,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
32766 int bytes;
32767 read_unlock(&vcc_sklist_lock);
32768 DPRINTK("got trashed rx pdu on vci %d\n", vci);
32769- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
32770+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
32771 lvcc->stats.x.aal5.service_trash++;
32772 bytes = (SERVICE_GET_END(s) * 16) -
32773 (((unsigned long) lvcc->rx.buf.ptr) -
32774@@ -1691,7 +1691,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
32775 }
32776 if (s & SERVICE_STREAM) {
32777 read_unlock(&vcc_sklist_lock);
32778- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
32779+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
32780 lvcc->stats.x.aal5.service_stream++;
32781 printk(KERN_ERR DEV_LABEL "(itf %d): Got AAL5 stream "
32782 "PDU on VCI %d!\n", lanai->number, vci);
32783@@ -1699,7 +1699,7 @@ static int handle_service(struct lanai_dev *lanai, u32 s)
32784 return 0;
32785 }
32786 DPRINTK("got rx crc error on vci %d\n", vci);
32787- atomic_inc(&lvcc->rx.atmvcc->stats->rx_err);
32788+ atomic_inc_unchecked(&lvcc->rx.atmvcc->stats->rx_err);
32789 lvcc->stats.x.aal5.service_rxcrc++;
32790 lvcc->rx.buf.ptr = &lvcc->rx.buf.start[SERVICE_GET_END(s) * 4];
32791 cardvcc_write(lvcc, SERVICE_GET_END(s), vcc_rxreadptr);
32792diff --git a/drivers/atm/nicstar.c b/drivers/atm/nicstar.c
32793index 6587dc2..149833d 100644
32794--- a/drivers/atm/nicstar.c
32795+++ b/drivers/atm/nicstar.c
32796@@ -1641,7 +1641,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
32797 if ((vc = (vc_map *) vcc->dev_data) == NULL) {
32798 printk("nicstar%d: vcc->dev_data == NULL on ns_send().\n",
32799 card->index);
32800- atomic_inc(&vcc->stats->tx_err);
32801+ atomic_inc_unchecked(&vcc->stats->tx_err);
32802 dev_kfree_skb_any(skb);
32803 return -EINVAL;
32804 }
32805@@ -1649,7 +1649,7 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
32806 if (!vc->tx) {
32807 printk("nicstar%d: Trying to transmit on a non-tx VC.\n",
32808 card->index);
32809- atomic_inc(&vcc->stats->tx_err);
32810+ atomic_inc_unchecked(&vcc->stats->tx_err);
32811 dev_kfree_skb_any(skb);
32812 return -EINVAL;
32813 }
32814@@ -1657,14 +1657,14 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
32815 if (vcc->qos.aal != ATM_AAL5 && vcc->qos.aal != ATM_AAL0) {
32816 printk("nicstar%d: Only AAL0 and AAL5 are supported.\n",
32817 card->index);
32818- atomic_inc(&vcc->stats->tx_err);
32819+ atomic_inc_unchecked(&vcc->stats->tx_err);
32820 dev_kfree_skb_any(skb);
32821 return -EINVAL;
32822 }
32823
32824 if (skb_shinfo(skb)->nr_frags != 0) {
32825 printk("nicstar%d: No scatter-gather yet.\n", card->index);
32826- atomic_inc(&vcc->stats->tx_err);
32827+ atomic_inc_unchecked(&vcc->stats->tx_err);
32828 dev_kfree_skb_any(skb);
32829 return -EINVAL;
32830 }
32831@@ -1712,11 +1712,11 @@ static int ns_send(struct atm_vcc *vcc, struct sk_buff *skb)
32832 }
32833
32834 if (push_scqe(card, vc, scq, &scqe, skb) != 0) {
32835- atomic_inc(&vcc->stats->tx_err);
32836+ atomic_inc_unchecked(&vcc->stats->tx_err);
32837 dev_kfree_skb_any(skb);
32838 return -EIO;
32839 }
32840- atomic_inc(&vcc->stats->tx);
32841+ atomic_inc_unchecked(&vcc->stats->tx);
32842
32843 return 0;
32844 }
32845@@ -2033,14 +2033,14 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32846 printk
32847 ("nicstar%d: Can't allocate buffers for aal0.\n",
32848 card->index);
32849- atomic_add(i, &vcc->stats->rx_drop);
32850+ atomic_add_unchecked(i, &vcc->stats->rx_drop);
32851 break;
32852 }
32853 if (!atm_charge(vcc, sb->truesize)) {
32854 RXPRINTK
32855 ("nicstar%d: atm_charge() dropped aal0 packets.\n",
32856 card->index);
32857- atomic_add(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
32858+ atomic_add_unchecked(i - 1, &vcc->stats->rx_drop); /* already increased by 1 */
32859 dev_kfree_skb_any(sb);
32860 break;
32861 }
32862@@ -2055,7 +2055,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32863 ATM_SKB(sb)->vcc = vcc;
32864 __net_timestamp(sb);
32865 vcc->push(vcc, sb);
32866- atomic_inc(&vcc->stats->rx);
32867+ atomic_inc_unchecked(&vcc->stats->rx);
32868 cell += ATM_CELL_PAYLOAD;
32869 }
32870
32871@@ -2072,7 +2072,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32872 if (iovb == NULL) {
32873 printk("nicstar%d: Out of iovec buffers.\n",
32874 card->index);
32875- atomic_inc(&vcc->stats->rx_drop);
32876+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32877 recycle_rx_buf(card, skb);
32878 return;
32879 }
32880@@ -2096,7 +2096,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32881 small or large buffer itself. */
32882 } else if (NS_PRV_IOVCNT(iovb) >= NS_MAX_IOVECS) {
32883 printk("nicstar%d: received too big AAL5 SDU.\n", card->index);
32884- atomic_inc(&vcc->stats->rx_err);
32885+ atomic_inc_unchecked(&vcc->stats->rx_err);
32886 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
32887 NS_MAX_IOVECS);
32888 NS_PRV_IOVCNT(iovb) = 0;
32889@@ -2116,7 +2116,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32890 ("nicstar%d: Expected a small buffer, and this is not one.\n",
32891 card->index);
32892 which_list(card, skb);
32893- atomic_inc(&vcc->stats->rx_err);
32894+ atomic_inc_unchecked(&vcc->stats->rx_err);
32895 recycle_rx_buf(card, skb);
32896 vc->rx_iov = NULL;
32897 recycle_iov_buf(card, iovb);
32898@@ -2129,7 +2129,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32899 ("nicstar%d: Expected a large buffer, and this is not one.\n",
32900 card->index);
32901 which_list(card, skb);
32902- atomic_inc(&vcc->stats->rx_err);
32903+ atomic_inc_unchecked(&vcc->stats->rx_err);
32904 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
32905 NS_PRV_IOVCNT(iovb));
32906 vc->rx_iov = NULL;
32907@@ -2152,7 +2152,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32908 printk(" - PDU size mismatch.\n");
32909 else
32910 printk(".\n");
32911- atomic_inc(&vcc->stats->rx_err);
32912+ atomic_inc_unchecked(&vcc->stats->rx_err);
32913 recycle_iovec_rx_bufs(card, (struct iovec *)iovb->data,
32914 NS_PRV_IOVCNT(iovb));
32915 vc->rx_iov = NULL;
32916@@ -2166,7 +2166,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32917 /* skb points to a small buffer */
32918 if (!atm_charge(vcc, skb->truesize)) {
32919 push_rxbufs(card, skb);
32920- atomic_inc(&vcc->stats->rx_drop);
32921+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32922 } else {
32923 skb_put(skb, len);
32924 dequeue_sm_buf(card, skb);
32925@@ -2176,7 +2176,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32926 ATM_SKB(skb)->vcc = vcc;
32927 __net_timestamp(skb);
32928 vcc->push(vcc, skb);
32929- atomic_inc(&vcc->stats->rx);
32930+ atomic_inc_unchecked(&vcc->stats->rx);
32931 }
32932 } else if (NS_PRV_IOVCNT(iovb) == 2) { /* One small plus one large buffer */
32933 struct sk_buff *sb;
32934@@ -2187,7 +2187,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32935 if (len <= NS_SMBUFSIZE) {
32936 if (!atm_charge(vcc, sb->truesize)) {
32937 push_rxbufs(card, sb);
32938- atomic_inc(&vcc->stats->rx_drop);
32939+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32940 } else {
32941 skb_put(sb, len);
32942 dequeue_sm_buf(card, sb);
32943@@ -2197,7 +2197,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32944 ATM_SKB(sb)->vcc = vcc;
32945 __net_timestamp(sb);
32946 vcc->push(vcc, sb);
32947- atomic_inc(&vcc->stats->rx);
32948+ atomic_inc_unchecked(&vcc->stats->rx);
32949 }
32950
32951 push_rxbufs(card, skb);
32952@@ -2206,7 +2206,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32953
32954 if (!atm_charge(vcc, skb->truesize)) {
32955 push_rxbufs(card, skb);
32956- atomic_inc(&vcc->stats->rx_drop);
32957+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32958 } else {
32959 dequeue_lg_buf(card, skb);
32960 #ifdef NS_USE_DESTRUCTORS
32961@@ -2219,7 +2219,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32962 ATM_SKB(skb)->vcc = vcc;
32963 __net_timestamp(skb);
32964 vcc->push(vcc, skb);
32965- atomic_inc(&vcc->stats->rx);
32966+ atomic_inc_unchecked(&vcc->stats->rx);
32967 }
32968
32969 push_rxbufs(card, sb);
32970@@ -2240,7 +2240,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32971 printk
32972 ("nicstar%d: Out of huge buffers.\n",
32973 card->index);
32974- atomic_inc(&vcc->stats->rx_drop);
32975+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32976 recycle_iovec_rx_bufs(card,
32977 (struct iovec *)
32978 iovb->data,
32979@@ -2291,7 +2291,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32980 card->hbpool.count++;
32981 } else
32982 dev_kfree_skb_any(hb);
32983- atomic_inc(&vcc->stats->rx_drop);
32984+ atomic_inc_unchecked(&vcc->stats->rx_drop);
32985 } else {
32986 /* Copy the small buffer to the huge buffer */
32987 sb = (struct sk_buff *)iov->iov_base;
32988@@ -2328,7 +2328,7 @@ static void dequeue_rx(ns_dev * card, ns_rsqe * rsqe)
32989 #endif /* NS_USE_DESTRUCTORS */
32990 __net_timestamp(hb);
32991 vcc->push(vcc, hb);
32992- atomic_inc(&vcc->stats->rx);
32993+ atomic_inc_unchecked(&vcc->stats->rx);
32994 }
32995 }
32996
32997diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
32998index 32784d1..4a8434a 100644
32999--- a/drivers/atm/solos-pci.c
33000+++ b/drivers/atm/solos-pci.c
33001@@ -838,7 +838,7 @@ void solos_bh(unsigned long card_arg)
33002 }
33003 atm_charge(vcc, skb->truesize);
33004 vcc->push(vcc, skb);
33005- atomic_inc(&vcc->stats->rx);
33006+ atomic_inc_unchecked(&vcc->stats->rx);
33007 break;
33008
33009 case PKT_STATUS:
33010@@ -1116,7 +1116,7 @@ static uint32_t fpga_tx(struct solos_card *card)
33011 vcc = SKB_CB(oldskb)->vcc;
33012
33013 if (vcc) {
33014- atomic_inc(&vcc->stats->tx);
33015+ atomic_inc_unchecked(&vcc->stats->tx);
33016 solos_pop(vcc, oldskb);
33017 } else {
33018 dev_kfree_skb_irq(oldskb);
33019diff --git a/drivers/atm/suni.c b/drivers/atm/suni.c
33020index 0215934..ce9f5b1 100644
33021--- a/drivers/atm/suni.c
33022+++ b/drivers/atm/suni.c
33023@@ -49,8 +49,8 @@ static DEFINE_SPINLOCK(sunis_lock);
33024
33025
33026 #define ADD_LIMITED(s,v) \
33027- atomic_add((v),&stats->s); \
33028- if (atomic_read(&stats->s) < 0) atomic_set(&stats->s,INT_MAX);
33029+ atomic_add_unchecked((v),&stats->s); \
33030+ if (atomic_read_unchecked(&stats->s) < 0) atomic_set_unchecked(&stats->s,INT_MAX);
33031
33032
33033 static void suni_hz(unsigned long from_timer)
33034diff --git a/drivers/atm/uPD98402.c b/drivers/atm/uPD98402.c
33035index 5120a96..e2572bd 100644
33036--- a/drivers/atm/uPD98402.c
33037+++ b/drivers/atm/uPD98402.c
33038@@ -42,7 +42,7 @@ static int fetch_stats(struct atm_dev *dev,struct sonet_stats __user *arg,int ze
33039 struct sonet_stats tmp;
33040 int error = 0;
33041
33042- atomic_add(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
33043+ atomic_add_unchecked(GET(HECCT),&PRIV(dev)->sonet_stats.uncorr_hcs);
33044 sonet_copy_stats(&PRIV(dev)->sonet_stats,&tmp);
33045 if (arg) error = copy_to_user(arg,&tmp,sizeof(tmp));
33046 if (zero && !error) {
33047@@ -161,9 +161,9 @@ static int uPD98402_ioctl(struct atm_dev *dev,unsigned int cmd,void __user *arg)
33048
33049
33050 #define ADD_LIMITED(s,v) \
33051- { atomic_add(GET(v),&PRIV(dev)->sonet_stats.s); \
33052- if (atomic_read(&PRIV(dev)->sonet_stats.s) < 0) \
33053- atomic_set(&PRIV(dev)->sonet_stats.s,INT_MAX); }
33054+ { atomic_add_unchecked(GET(v),&PRIV(dev)->sonet_stats.s); \
33055+ if (atomic_read_unchecked(&PRIV(dev)->sonet_stats.s) < 0) \
33056+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.s,INT_MAX); }
33057
33058
33059 static void stat_event(struct atm_dev *dev)
33060@@ -194,7 +194,7 @@ static void uPD98402_int(struct atm_dev *dev)
33061 if (reason & uPD98402_INT_PFM) stat_event(dev);
33062 if (reason & uPD98402_INT_PCO) {
33063 (void) GET(PCOCR); /* clear interrupt cause */
33064- atomic_add(GET(HECCT),
33065+ atomic_add_unchecked(GET(HECCT),
33066 &PRIV(dev)->sonet_stats.uncorr_hcs);
33067 }
33068 if ((reason & uPD98402_INT_RFO) &&
33069@@ -222,9 +222,9 @@ static int uPD98402_start(struct atm_dev *dev)
33070 PUT(~(uPD98402_INT_PFM | uPD98402_INT_ALM | uPD98402_INT_RFO |
33071 uPD98402_INT_LOS),PIMR); /* enable them */
33072 (void) fetch_stats(dev,NULL,1); /* clear kernel counters */
33073- atomic_set(&PRIV(dev)->sonet_stats.corr_hcs,-1);
33074- atomic_set(&PRIV(dev)->sonet_stats.tx_cells,-1);
33075- atomic_set(&PRIV(dev)->sonet_stats.rx_cells,-1);
33076+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.corr_hcs,-1);
33077+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.tx_cells,-1);
33078+ atomic_set_unchecked(&PRIV(dev)->sonet_stats.rx_cells,-1);
33079 return 0;
33080 }
33081
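
[Editor's note] The suni.c and uPD98402.c hunks above rewrite the ADD_LIMITED macro rather than a plain increment: these SONET counters add an arbitrary delta and then clamp to INT_MAX once the signed value goes negative. With checked atomics the add itself would trap before the clamp could run, so the macro is rewritten with the unchecked operations while keeping the original saturate-at-INT_MAX behavior. The idiom in plain C (assuming -fwrapv, as the kernel builds with, so signed wrap is defined):

    /* The saturation idiom behind ADD_LIMITED, as plain C. */
    #include <limits.h>
    #include <stdio.h>

    static int add_limited(int s, int v)
    {
        s += v;             /* may wrap; checked atomics would trap here */
        if (s < 0)
            s = INT_MAX;    /* saturate rather than report a negative count */
        return s;
    }

    int main(void)
    {
        int hcs = INT_MAX - 2;
        hcs = add_limited(hcs, 10);
        printf("saturated counter: %d (INT_MAX=%d)\n", hcs, INT_MAX);
        return 0;
    }
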
33082diff --git a/drivers/atm/zatm.c b/drivers/atm/zatm.c
33083index 969c3c2..9b72956 100644
33084--- a/drivers/atm/zatm.c
33085+++ b/drivers/atm/zatm.c
33086@@ -459,7 +459,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
33087 }
33088 if (!size) {
33089 dev_kfree_skb_irq(skb);
33090- if (vcc) atomic_inc(&vcc->stats->rx_err);
33091+ if (vcc) atomic_inc_unchecked(&vcc->stats->rx_err);
33092 continue;
33093 }
33094 if (!atm_charge(vcc,skb->truesize)) {
33095@@ -469,7 +469,7 @@ printk("dummy: 0x%08lx, 0x%08lx\n",dummy[0],dummy[1]);
33096 skb->len = size;
33097 ATM_SKB(skb)->vcc = vcc;
33098 vcc->push(vcc,skb);
33099- atomic_inc(&vcc->stats->rx);
33100+ atomic_inc_unchecked(&vcc->stats->rx);
33101 }
33102 zout(pos & 0xffff,MTA(mbx));
33103 #if 0 /* probably a stupid idea */
33104@@ -733,7 +733,7 @@ if (*ZATM_PRV_DSC(skb) != (uPD98401_TXPD_V | uPD98401_TXPD_DP |
33105 skb_queue_head(&zatm_vcc->backlog,skb);
33106 break;
33107 }
33108- atomic_inc(&vcc->stats->tx);
33109+ atomic_inc_unchecked(&vcc->stats->tx);
33110 wake_up(&zatm_vcc->tx_wait);
33111 }
33112
33113diff --git a/drivers/base/attribute_container.c b/drivers/base/attribute_container.c
33114index d78b204..ecc1929 100644
33115--- a/drivers/base/attribute_container.c
33116+++ b/drivers/base/attribute_container.c
33117@@ -167,7 +167,7 @@ attribute_container_add_device(struct device *dev,
33118 ic->classdev.parent = get_device(dev);
33119 ic->classdev.class = cont->class;
33120 cont->class->dev_release = attribute_container_release;
33121- dev_set_name(&ic->classdev, dev_name(dev));
33122+ dev_set_name(&ic->classdev, "%s", dev_name(dev));
33123 if (fn)
33124 fn(cont, dev, &ic->classdev);
33125 else
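
[Editor's note] The attribute_container.c change above is a classic format-string fix: dev_set_name() takes a printf-style format, so passing dev_name(dev) directly lets any '%' in a device name be interpreted as a conversion specifier. Routing it through "%s" keeps the name literal. The same pattern is fixed again in drivers/base/power/sysfs.c further down (sprintf(buf, p) becomes sprintf(buf, "%s", p)). A small runnable demonstration of why this matters — the device name here is hypothetical, chosen only to show the effect, and -Wformat-security will rightly flag the bad line:

    /* Why formatter(buf, name) is unsafe when name is not a literal. */
    #include <stdio.h>

    int main(void)
    {
        const char *devname = "eth%x%x";    /* attacker-influenced name */
        char bad[64], good[64];

        snprintf(bad, sizeof(bad), devname);         /* %x reads stack garbage */
        snprintf(good, sizeof(good), "%s", devname); /* '%' stays literal */

        printf("bad : %s\ngood: %s\n", bad, good);
        return 0;
    }
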
33126diff --git a/drivers/base/bus.c b/drivers/base/bus.c
33127index d414331..b4dd4ba 100644
33128--- a/drivers/base/bus.c
33129+++ b/drivers/base/bus.c
33130@@ -1163,7 +1163,7 @@ int subsys_interface_register(struct subsys_interface *sif)
33131 return -EINVAL;
33132
33133 mutex_lock(&subsys->p->mutex);
33134- list_add_tail(&sif->node, &subsys->p->interfaces);
33135+ pax_list_add_tail((struct list_head *)&sif->node, &subsys->p->interfaces);
33136 if (sif->add_dev) {
33137 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
33138 while ((dev = subsys_dev_iter_next(&iter)))
33139@@ -1188,7 +1188,7 @@ void subsys_interface_unregister(struct subsys_interface *sif)
33140 subsys = sif->subsys;
33141
33142 mutex_lock(&subsys->p->mutex);
33143- list_del_init(&sif->node);
33144+ pax_list_del_init((struct list_head *)&sif->node);
33145 if (sif->remove_dev) {
33146 subsys_dev_iter_init(&iter, subsys, NULL, NULL);
33147 while ((dev = subsys_dev_iter_next(&iter)))
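
[Editor's note] The bus.c hunks above (and the syscore.c hunks below) swap list_add_tail/list_del for PaX's pax_list_* helpers. Under the constification side of this patch, structures such as subsys_interface can end up in read-only memory, so their embedded list_head cannot be linked with the ordinary helpers; the pax_list_* versions briefly lift write protection around the pointer stores (pax_open_kernel/pax_close_kernel). A user-space model, with the write window approximated by mprotect() — the kernel versions toggle CR0.WP or page-table bits instead, and a 4 KiB page size is assumed:

    #include <stdio.h>
    #include <sys/mman.h>

    struct list_head { struct list_head *next, *prev; };

    static void open_ro(void *page)  { mprotect(page, 4096, PROT_READ | PROT_WRITE); }
    static void close_ro(void *page) { mprotect(page, 4096, PROT_READ); }

    static void pax_list_add_tail(struct list_head *new, struct list_head *head,
                                  void *ro_page)
    {
        open_ro(ro_page);               /* models pax_open_kernel() */
        new->prev = head->prev;
        new->next = head;
        head->prev->next = new;
        head->prev = new;
        close_ro(ro_page);              /* models pax_close_kernel() */
    }

    int main(void)
    {
        void *page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                          MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        struct list_head *node = page;      /* the "constified" node */
        struct list_head head = { &head, &head };

        mprotect(page, 4096, PROT_READ);    /* node is now read-only */
        pax_list_add_tail(node, &head, page);
        printf("linked: %d\n", head.prev == node);
        return 0;
    }

The casts to (struct list_head *) in the patch exist because the node fields are const-qualified once the containing structure is constified.
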
33148diff --git a/drivers/base/devtmpfs.c b/drivers/base/devtmpfs.c
33149index 7413d06..79155fa 100644
33150--- a/drivers/base/devtmpfs.c
33151+++ b/drivers/base/devtmpfs.c
33152@@ -354,7 +354,7 @@ int devtmpfs_mount(const char *mntdir)
33153 if (!thread)
33154 return 0;
33155
33156- err = sys_mount("devtmpfs", (char *)mntdir, "devtmpfs", MS_SILENT, NULL);
33157+ err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)mntdir, (char __force_user *)"devtmpfs", MS_SILENT, NULL);
33158 if (err)
33159 printk(KERN_INFO "devtmpfs: error mounting %i\n", err);
33160 else
33161@@ -380,11 +380,11 @@ static int devtmpfsd(void *p)
33162 *err = sys_unshare(CLONE_NEWNS);
33163 if (*err)
33164 goto out;
33165- *err = sys_mount("devtmpfs", "/", "devtmpfs", MS_SILENT, options);
33166+ *err = sys_mount((char __force_user *)"devtmpfs", (char __force_user *)"/", (char __force_user *)"devtmpfs", MS_SILENT, (char __force_user *)options);
33167 if (*err)
33168 goto out;
33169- sys_chdir("/.."); /* will traverse into overmounted root */
33170- sys_chroot(".");
33171+ sys_chdir((char __force_user *)"/.."); /* will traverse into overmounted root */
33172+ sys_chroot((char __force_user *)".");
33173 complete(&setup_done);
33174 while (1) {
33175 spin_lock(&req_lock);
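
[Editor's note] The devtmpfs hunks above add __force_user casts: sys_mount/sys_chdir/sys_chroot are declared to take __user pointers, but this kernel thread passes kernel-space strings. With sparse (and PaX's stricter checking) enforcing pointer address spaces, the cast documents that the mismatch is intentional and safe in this context. The annotation machinery, as it appears in kernel headers, with a stub so the sketch compiles standalone:

    /* The sparse address-space machinery behind __force_user. */
    #include <stdio.h>

    #ifdef __CHECKER__
    # define __user  __attribute__((noderef, address_space(1)))
    # define __force __attribute__((force))
    #else
    # define __user
    # define __force
    #endif
    #define __force_user __force __user

    /* stub standing in for the real syscall, which wants a user pointer */
    long sys_chroot(const char __user *path) { (void)path; return 0; }

    int main(void)
    {
        /* kernel string, deliberately passed as if it were a user pointer */
        sys_chroot((const char __force_user *)".");
        puts("ok");
        return 0;
    }

Under plain compilation the macros expand to nothing; only the checker sees (and silences) the address-space conversion.
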
33176diff --git a/drivers/base/node.c b/drivers/base/node.c
33177index 7616a77c..8f57f51 100644
33178--- a/drivers/base/node.c
33179+++ b/drivers/base/node.c
33180@@ -626,7 +626,7 @@ static ssize_t print_nodes_state(enum node_states state, char *buf)
33181 struct node_attr {
33182 struct device_attribute attr;
33183 enum node_states state;
33184-};
33185+} __do_const;
33186
33187 static ssize_t show_node_state(struct device *dev,
33188 struct device_attribute *attr, char *buf)
33189diff --git a/drivers/base/power/domain.c b/drivers/base/power/domain.c
33190index 7072404..76dcebd 100644
33191--- a/drivers/base/power/domain.c
33192+++ b/drivers/base/power/domain.c
33193@@ -1850,7 +1850,7 @@ int pm_genpd_attach_cpuidle(struct generic_pm_domain *genpd, int state)
33194 {
33195 struct cpuidle_driver *cpuidle_drv;
33196 struct gpd_cpu_data *cpu_data;
33197- struct cpuidle_state *idle_state;
33198+ cpuidle_state_no_const *idle_state;
33199 int ret = 0;
33200
33201 if (IS_ERR_OR_NULL(genpd) || state < 0)
33202@@ -1918,7 +1918,7 @@ int pm_genpd_name_attach_cpuidle(const char *name, int state)
33203 int pm_genpd_detach_cpuidle(struct generic_pm_domain *genpd)
33204 {
33205 struct gpd_cpu_data *cpu_data;
33206- struct cpuidle_state *idle_state;
33207+ cpuidle_state_no_const *idle_state;
33208 int ret = 0;
33209
33210 if (IS_ERR_OR_NULL(genpd))
33211diff --git a/drivers/base/power/sysfs.c b/drivers/base/power/sysfs.c
33212index a53ebd2..8f73eeb 100644
33213--- a/drivers/base/power/sysfs.c
33214+++ b/drivers/base/power/sysfs.c
33215@@ -185,7 +185,7 @@ static ssize_t rtpm_status_show(struct device *dev,
33216 return -EIO;
33217 }
33218 }
33219- return sprintf(buf, p);
33220+ return sprintf(buf, "%s", p);
33221 }
33222
33223 static DEVICE_ATTR(runtime_status, 0444, rtpm_status_show, NULL);
33224diff --git a/drivers/base/power/wakeup.c b/drivers/base/power/wakeup.c
33225index 79715e7..df06b3b 100644
33226--- a/drivers/base/power/wakeup.c
33227+++ b/drivers/base/power/wakeup.c
33228@@ -29,14 +29,14 @@ bool events_check_enabled __read_mostly;
33229 * They need to be modified together atomically, so it's better to use one
33230 * atomic variable to hold them both.
33231 */
33232-static atomic_t combined_event_count = ATOMIC_INIT(0);
33233+static atomic_unchecked_t combined_event_count = ATOMIC_INIT(0);
33234
33235 #define IN_PROGRESS_BITS (sizeof(int) * 4)
33236 #define MAX_IN_PROGRESS ((1 << IN_PROGRESS_BITS) - 1)
33237
33238 static void split_counters(unsigned int *cnt, unsigned int *inpr)
33239 {
33240- unsigned int comb = atomic_read(&combined_event_count);
33241+ unsigned int comb = atomic_read_unchecked(&combined_event_count);
33242
33243 *cnt = (comb >> IN_PROGRESS_BITS);
33244 *inpr = comb & MAX_IN_PROGRESS;
33245@@ -395,7 +395,7 @@ static void wakeup_source_activate(struct wakeup_source *ws)
33246 ws->start_prevent_time = ws->last_time;
33247
33248 /* Increment the counter of events in progress. */
33249- cec = atomic_inc_return(&combined_event_count);
33250+ cec = atomic_inc_return_unchecked(&combined_event_count);
33251
33252 trace_wakeup_source_activate(ws->name, cec);
33253 }
33254@@ -521,7 +521,7 @@ static void wakeup_source_deactivate(struct wakeup_source *ws)
33255 * Increment the counter of registered wakeup events and decrement the
33256 * couter of wakeup events in progress simultaneously.
33257 */
33258- cec = atomic_add_return(MAX_IN_PROGRESS, &combined_event_count);
33259+ cec = atomic_add_return_unchecked(MAX_IN_PROGRESS, &combined_event_count);
33260 trace_wakeup_source_deactivate(ws->name, cec);
33261
33262 split_counters(&cnt, &inpr);
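
[Editor's note] The wakeup.c hunks above are worth unpacking: combined_event_count packs two counters into one word — the low IN_PROGRESS_BITS bits count wakeup events in progress, the high bits count total registered events. Activation adds 1; deactivation adds MAX_IN_PROGRESS, which equals (1 << IN_PROGRESS_BITS) - 1, i.e. +1 to the high field and -1 to the low field in a single atomic add. Both fields wrap by design, which is exactly why the counter is retyped atomic_unchecked_t here. A runnable demonstration of the packing:

    #include <stdio.h>

    #define IN_PROGRESS_BITS (sizeof(int) * 4)           /* 16 on common ABIs */
    #define MAX_IN_PROGRESS  ((1 << IN_PROGRESS_BITS) - 1)

    static unsigned int comb;   /* atomic_unchecked_t in the kernel */

    static void split_counters(unsigned int *cnt, unsigned int *inpr)
    {
        *cnt  = comb >> IN_PROGRESS_BITS;
        *inpr = comb & MAX_IN_PROGRESS;
    }

    int main(void)
    {
        unsigned int cnt, inpr;

        comb += 1;                  /* wakeup_source_activate        */
        comb += 1;                  /* a second event activates      */
        comb += MAX_IN_PROGRESS;    /* wakeup_source_deactivate:
                                       +1 registered, -1 in progress */
        split_counters(&cnt, &inpr);
        printf("registered=%u in_progress=%u\n", cnt, inpr);  /* 1 and 1 */
        return 0;
    }
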
33263diff --git a/drivers/base/syscore.c b/drivers/base/syscore.c
33264index e8d11b6..7b1b36f 100644
33265--- a/drivers/base/syscore.c
33266+++ b/drivers/base/syscore.c
33267@@ -21,7 +21,7 @@ static DEFINE_MUTEX(syscore_ops_lock);
33268 void register_syscore_ops(struct syscore_ops *ops)
33269 {
33270 mutex_lock(&syscore_ops_lock);
33271- list_add_tail(&ops->node, &syscore_ops_list);
33272+ pax_list_add_tail((struct list_head *)&ops->node, &syscore_ops_list);
33273 mutex_unlock(&syscore_ops_lock);
33274 }
33275 EXPORT_SYMBOL_GPL(register_syscore_ops);
33276@@ -33,7 +33,7 @@ EXPORT_SYMBOL_GPL(register_syscore_ops);
33277 void unregister_syscore_ops(struct syscore_ops *ops)
33278 {
33279 mutex_lock(&syscore_ops_lock);
33280- list_del(&ops->node);
33281+ pax_list_del((struct list_head *)&ops->node);
33282 mutex_unlock(&syscore_ops_lock);
33283 }
33284 EXPORT_SYMBOL_GPL(unregister_syscore_ops);
33285diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
33286index 62b6c2c..4a11354 100644
33287--- a/drivers/block/cciss.c
33288+++ b/drivers/block/cciss.c
33289@@ -1189,6 +1189,8 @@ static int cciss_ioctl32_passthru(struct block_device *bdev, fmode_t mode,
33290 int err;
33291 u32 cp;
33292
33293+ memset(&arg64, 0, sizeof(arg64));
33294+
33295 err = 0;
33296 err |=
33297 copy_from_user(&arg64.LUN_info, &arg32->LUN_info,
33298@@ -3010,7 +3012,7 @@ static void start_io(ctlr_info_t *h)
33299 while (!list_empty(&h->reqQ)) {
33300 c = list_entry(h->reqQ.next, CommandList_struct, list);
33301 /* can't do anything if fifo is full */
33302- if ((h->access.fifo_full(h))) {
33303+ if ((h->access->fifo_full(h))) {
33304 dev_warn(&h->pdev->dev, "fifo full\n");
33305 break;
33306 }
33307@@ -3020,7 +3022,7 @@ static void start_io(ctlr_info_t *h)
33308 h->Qdepth--;
33309
33310 /* Tell the controller execute command */
33311- h->access.submit_command(h, c);
33312+ h->access->submit_command(h, c);
33313
33314 /* Put job onto the completed Q */
33315 addQ(&h->cmpQ, c);
33316@@ -3446,17 +3448,17 @@ startio:
33317
33318 static inline unsigned long get_next_completion(ctlr_info_t *h)
33319 {
33320- return h->access.command_completed(h);
33321+ return h->access->command_completed(h);
33322 }
33323
33324 static inline int interrupt_pending(ctlr_info_t *h)
33325 {
33326- return h->access.intr_pending(h);
33327+ return h->access->intr_pending(h);
33328 }
33329
33330 static inline long interrupt_not_for_us(ctlr_info_t *h)
33331 {
33332- return ((h->access.intr_pending(h) == 0) ||
33333+ return ((h->access->intr_pending(h) == 0) ||
33334 (h->interrupts_enabled == 0));
33335 }
33336
33337@@ -3489,7 +3491,7 @@ static inline u32 next_command(ctlr_info_t *h)
33338 u32 a;
33339
33340 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
33341- return h->access.command_completed(h);
33342+ return h->access->command_completed(h);
33343
33344 if ((*(h->reply_pool_head) & 1) == (h->reply_pool_wraparound)) {
33345 a = *(h->reply_pool_head); /* Next cmd in ring buffer */
33346@@ -4046,7 +4048,7 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
33347 trans_support & CFGTBL_Trans_use_short_tags);
33348
33349 /* Change the access methods to the performant access methods */
33350- h->access = SA5_performant_access;
33351+ h->access = &SA5_performant_access;
33352 h->transMethod = CFGTBL_Trans_Performant;
33353
33354 return;
33355@@ -4319,7 +4321,7 @@ static int cciss_pci_init(ctlr_info_t *h)
33356 if (prod_index < 0)
33357 return -ENODEV;
33358 h->product_name = products[prod_index].product_name;
33359- h->access = *(products[prod_index].access);
33360+ h->access = products[prod_index].access;
33361
33362 if (cciss_board_disabled(h)) {
33363 dev_warn(&h->pdev->dev, "controller appears to be disabled\n");
33364@@ -5051,7 +5053,7 @@ reinit_after_soft_reset:
33365 }
33366
33367 /* make sure the board interrupts are off */
33368- h->access.set_intr_mask(h, CCISS_INTR_OFF);
33369+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
33370 rc = cciss_request_irq(h, do_cciss_msix_intr, do_cciss_intx);
33371 if (rc)
33372 goto clean2;
33373@@ -5101,7 +5103,7 @@ reinit_after_soft_reset:
33374 * fake ones to scoop up any residual completions.
33375 */
33376 spin_lock_irqsave(&h->lock, flags);
33377- h->access.set_intr_mask(h, CCISS_INTR_OFF);
33378+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
33379 spin_unlock_irqrestore(&h->lock, flags);
33380 free_irq(h->intr[h->intr_mode], h);
33381 rc = cciss_request_irq(h, cciss_msix_discard_completions,
33382@@ -5121,9 +5123,9 @@ reinit_after_soft_reset:
33383 dev_info(&h->pdev->dev, "Board READY.\n");
33384 dev_info(&h->pdev->dev,
33385 "Waiting for stale completions to drain.\n");
33386- h->access.set_intr_mask(h, CCISS_INTR_ON);
33387+ h->access->set_intr_mask(h, CCISS_INTR_ON);
33388 msleep(10000);
33389- h->access.set_intr_mask(h, CCISS_INTR_OFF);
33390+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
33391
33392 rc = controller_reset_failed(h->cfgtable);
33393 if (rc)
33394@@ -5146,7 +5148,7 @@ reinit_after_soft_reset:
33395 cciss_scsi_setup(h);
33396
33397 /* Turn the interrupts on so we can service requests */
33398- h->access.set_intr_mask(h, CCISS_INTR_ON);
33399+ h->access->set_intr_mask(h, CCISS_INTR_ON);
33400
33401 /* Get the firmware version */
33402 inq_buff = kzalloc(sizeof(InquiryData_struct), GFP_KERNEL);
33403@@ -5218,7 +5220,7 @@ static void cciss_shutdown(struct pci_dev *pdev)
33404 kfree(flush_buf);
33405 if (return_code != IO_OK)
33406 dev_warn(&h->pdev->dev, "Error flushing cache\n");
33407- h->access.set_intr_mask(h, CCISS_INTR_OFF);
33408+ h->access->set_intr_mask(h, CCISS_INTR_OFF);
33409 free_irq(h->intr[h->intr_mode], h);
33410 }
33411
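
[Editor's note] Two distinct fixes ride in the cciss.c hunks above. The memset(&arg64, 0, sizeof(arg64)) added to the compat ioctl path stops an information leak: the structure is populated field by field on the kernel stack before being copied out, so padding bytes and never-written members would otherwise carry stale stack contents to user space. The same scrub appears below in cpqarray.c's ida_pci_info_struct path; the h->access struct-to-pointer change in these hunks is taken up after the cpqarray.h diff below. A user-space model of the leak — whether the old bytes actually survive depends on compiler and optimization level, so build with -O0 to see the effect:

    #include <stdio.h>
    #include <string.h>

    struct reply {          /* stand-in for the ioctl argument struct */
        char cmd;           /* 7 padding bytes follow on LP64 */
        long value;
    };

    static void leave_secret_on_stack(void)
    {
        char secret[sizeof(struct reply)];
        memset(secret, 0x41, sizeof(secret));   /* pretend: a key, a pointer */
    }

    static void build_reply(struct reply *out, int scrub)
    {
        struct reply r;                 /* overlays the old stack bytes */
        if (scrub)
            memset(&r, 0, sizeof(r));   /* the fix added in this patch  */
        r.cmd = 1;                      /* padding never written        */
        r.value = 42;
        memcpy(out, &r, sizeof(r));     /* models copy_to_user()        */
    }

    int main(void)
    {
        struct reply out;
        unsigned char *p = (unsigned char *)&out;

        leave_secret_on_stack();
        build_reply(&out, 0);
        printf("unscrubbed padding byte: 0x%02x\n", p[1]);  /* often 0x41 */

        leave_secret_on_stack();
        build_reply(&out, 1);
        printf("scrubbed padding byte:   0x%02x\n", p[1]);  /* always 0x00 */
        return 0;
    }
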
33412diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
33413index 7fda30e..eb5dfe0 100644
33414--- a/drivers/block/cciss.h
33415+++ b/drivers/block/cciss.h
33416@@ -101,7 +101,7 @@ struct ctlr_info
33417 /* information about each logical volume */
33418 drive_info_struct *drv[CISS_MAX_LUN];
33419
33420- struct access_method access;
33421+ struct access_method *access;
33422
33423 /* queue and queue Info */
33424 struct list_head reqQ;
33425diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
33426index 639d26b..fd6ad1f 100644
33427--- a/drivers/block/cpqarray.c
33428+++ b/drivers/block/cpqarray.c
33429@@ -404,7 +404,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
33430 if (register_blkdev(COMPAQ_SMART2_MAJOR+i, hba[i]->devname)) {
33431 goto Enomem4;
33432 }
33433- hba[i]->access.set_intr_mask(hba[i], 0);
33434+ hba[i]->access->set_intr_mask(hba[i], 0);
33435 if (request_irq(hba[i]->intr, do_ida_intr,
33436 IRQF_DISABLED|IRQF_SHARED, hba[i]->devname, hba[i]))
33437 {
33438@@ -459,7 +459,7 @@ static int cpqarray_register_ctlr(int i, struct pci_dev *pdev)
33439 add_timer(&hba[i]->timer);
33440
33441 /* Enable IRQ now that spinlock and rate limit timer are set up */
33442- hba[i]->access.set_intr_mask(hba[i], FIFO_NOT_EMPTY);
33443+ hba[i]->access->set_intr_mask(hba[i], FIFO_NOT_EMPTY);
33444
33445 for(j=0; j<NWD; j++) {
33446 struct gendisk *disk = ida_gendisk[i][j];
33447@@ -694,7 +694,7 @@ DBGINFO(
33448 for(i=0; i<NR_PRODUCTS; i++) {
33449 if (board_id == products[i].board_id) {
33450 c->product_name = products[i].product_name;
33451- c->access = *(products[i].access);
33452+ c->access = products[i].access;
33453 break;
33454 }
33455 }
33456@@ -792,7 +792,7 @@ static int cpqarray_eisa_detect(void)
33457 hba[ctlr]->intr = intr;
33458 sprintf(hba[ctlr]->devname, "ida%d", nr_ctlr);
33459 hba[ctlr]->product_name = products[j].product_name;
33460- hba[ctlr]->access = *(products[j].access);
33461+ hba[ctlr]->access = products[j].access;
33462 hba[ctlr]->ctlr = ctlr;
33463 hba[ctlr]->board_id = board_id;
33464 hba[ctlr]->pci_dev = NULL; /* not PCI */
33465@@ -978,7 +978,7 @@ static void start_io(ctlr_info_t *h)
33466
33467 while((c = h->reqQ) != NULL) {
33468 /* Can't do anything if we're busy */
33469- if (h->access.fifo_full(h) == 0)
33470+ if (h->access->fifo_full(h) == 0)
33471 return;
33472
33473 /* Get the first entry from the request Q */
33474@@ -986,7 +986,7 @@ static void start_io(ctlr_info_t *h)
33475 h->Qdepth--;
33476
33477 /* Tell the controller to do our bidding */
33478- h->access.submit_command(h, c);
33479+ h->access->submit_command(h, c);
33480
33481 /* Get onto the completion Q */
33482 addQ(&h->cmpQ, c);
33483@@ -1048,7 +1048,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
33484 unsigned long flags;
33485 __u32 a,a1;
33486
33487- istat = h->access.intr_pending(h);
33488+ istat = h->access->intr_pending(h);
33489 /* Is this interrupt for us? */
33490 if (istat == 0)
33491 return IRQ_NONE;
33492@@ -1059,7 +1059,7 @@ static irqreturn_t do_ida_intr(int irq, void *dev_id)
33493 */
33494 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
33495 if (istat & FIFO_NOT_EMPTY) {
33496- while((a = h->access.command_completed(h))) {
33497+ while((a = h->access->command_completed(h))) {
33498 a1 = a; a &= ~3;
33499 if ((c = h->cmpQ) == NULL)
33500 {
33501@@ -1193,6 +1193,7 @@ out_passthru:
33502 ida_pci_info_struct pciinfo;
33503
33504 if (!arg) return -EINVAL;
33505+ memset(&pciinfo, 0, sizeof(pciinfo));
33506 pciinfo.bus = host->pci_dev->bus->number;
33507 pciinfo.dev_fn = host->pci_dev->devfn;
33508 pciinfo.board_id = host->board_id;
33509@@ -1447,11 +1448,11 @@ static int sendcmd(
33510 /*
33511 * Disable interrupt
33512 */
33513- info_p->access.set_intr_mask(info_p, 0);
33514+ info_p->access->set_intr_mask(info_p, 0);
33515 /* Make sure there is room in the command FIFO */
33516 /* Actually it should be completely empty at this time. */
33517 for (i = 200000; i > 0; i--) {
33518- temp = info_p->access.fifo_full(info_p);
33519+ temp = info_p->access->fifo_full(info_p);
33520 if (temp != 0) {
33521 break;
33522 }
33523@@ -1464,7 +1465,7 @@ DBG(
33524 /*
33525 * Send the cmd
33526 */
33527- info_p->access.submit_command(info_p, c);
33528+ info_p->access->submit_command(info_p, c);
33529 complete = pollcomplete(ctlr);
33530
33531 pci_unmap_single(info_p->pci_dev, (dma_addr_t) c->req.sg[0].addr,
33532@@ -1547,9 +1548,9 @@ static int revalidate_allvol(ctlr_info_t *host)
33533 * we check the new geometry. Then turn interrupts back on when
33534 * we're done.
33535 */
33536- host->access.set_intr_mask(host, 0);
33537+ host->access->set_intr_mask(host, 0);
33538 getgeometry(ctlr);
33539- host->access.set_intr_mask(host, FIFO_NOT_EMPTY);
33540+ host->access->set_intr_mask(host, FIFO_NOT_EMPTY);
33541
33542 for(i=0; i<NWD; i++) {
33543 struct gendisk *disk = ida_gendisk[ctlr][i];
33544@@ -1589,7 +1590,7 @@ static int pollcomplete(int ctlr)
33545 /* Wait (up to 2 seconds) for a command to complete */
33546
33547 for (i = 200000; i > 0; i--) {
33548- done = hba[ctlr]->access.command_completed(hba[ctlr]);
33549+ done = hba[ctlr]->access->command_completed(hba[ctlr]);
33550 if (done == 0) {
33551 udelay(10); /* a short fixed delay */
33552 } else
33553diff --git a/drivers/block/cpqarray.h b/drivers/block/cpqarray.h
33554index be73e9d..7fbf140 100644
33555--- a/drivers/block/cpqarray.h
33556+++ b/drivers/block/cpqarray.h
33557@@ -99,7 +99,7 @@ struct ctlr_info {
33558 drv_info_t drv[NWD];
33559 struct proc_dir_entry *proc;
33560
33561- struct access_method access;
33562+ struct access_method *access;
33563
33564 cmdlist_t *reqQ;
33565 cmdlist_t *cmpQ;
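
[Editor's note] The cciss.h and cpqarray.h hunks above complete the access-method conversion seen throughout the cciss.c and cpqarray.c diffs: both drivers used to copy a struct access_method (a bundle of function pointers) into each writable controller structure; the patch stores a pointer to the shared table instead, and every call site changes from h->access.op() to h->access->op(). The likely motivation, consistent with the constify changes elsewhere in this patch, is that the product ops tables become const and live in .rodata — a per-controller writable copy would reintroduce writable function pointers. The shape of the change, reduced to a sketch:

    #include <stdio.h>

    struct access_method {
        void (*submit_command)(const char *what);
        int  (*intr_pending)(void);
    };

    static void sa5_submit(const char *what) { printf("submit: %s\n", what); }
    static int  sa5_pending(void)            { return 0; }

    /* one shared, read-only ops table */
    static const struct access_method SA5_access = { sa5_submit, sa5_pending };

    struct ctlr_info {
        /* was: struct access_method access;   (writable per-device copy) */
        const struct access_method *access;    /* now: pointer into .rodata */
    };

    int main(void)
    {
        struct ctlr_info h = { .access = &SA5_access };
        h.access->submit_command("cmd");   /* was: h.access.submit_command */
        return 0;
    }
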
33566diff --git a/drivers/block/drbd/drbd_int.h b/drivers/block/drbd/drbd_int.h
33567index f943aac..99bfd19 100644
33568--- a/drivers/block/drbd/drbd_int.h
33569+++ b/drivers/block/drbd/drbd_int.h
33570@@ -582,7 +582,7 @@ struct drbd_epoch {
33571 struct drbd_tconn *tconn;
33572 struct list_head list;
33573 unsigned int barrier_nr;
33574- atomic_t epoch_size; /* increased on every request added. */
33575+ atomic_unchecked_t epoch_size; /* increased on every request added. */
33576 atomic_t active; /* increased on every req. added, and dec on every finished. */
33577 unsigned long flags;
33578 };
33579@@ -1021,7 +1021,7 @@ struct drbd_conf {
33580 unsigned int al_tr_number;
33581 int al_tr_cycle;
33582 wait_queue_head_t seq_wait;
33583- atomic_t packet_seq;
33584+ atomic_unchecked_t packet_seq;
33585 unsigned int peer_seq;
33586 spinlock_t peer_seq_lock;
33587 unsigned int minor;
33588@@ -1562,7 +1562,7 @@ static inline int drbd_setsockopt(struct socket *sock, int level, int optname,
33589 char __user *uoptval;
33590 int err;
33591
33592- uoptval = (char __user __force *)optval;
33593+ uoptval = (char __force_user *)optval;
33594
33595 set_fs(KERNEL_DS);
33596 if (level == SOL_SOCKET)
33597diff --git a/drivers/block/drbd/drbd_main.c b/drivers/block/drbd/drbd_main.c
33598index a5dca6a..bb27967 100644
33599--- a/drivers/block/drbd/drbd_main.c
33600+++ b/drivers/block/drbd/drbd_main.c
33601@@ -1317,7 +1317,7 @@ static int _drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
33602 p->sector = sector;
33603 p->block_id = block_id;
33604 p->blksize = blksize;
33605- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
33606+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
33607 return drbd_send_command(mdev, sock, cmd, sizeof(*p), NULL, 0);
33608 }
33609
33610@@ -1619,7 +1619,7 @@ int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req)
33611 return -EIO;
33612 p->sector = cpu_to_be64(req->i.sector);
33613 p->block_id = (unsigned long)req;
33614- p->seq_num = cpu_to_be32(atomic_inc_return(&mdev->packet_seq));
33615+ p->seq_num = cpu_to_be32(atomic_inc_return_unchecked(&mdev->packet_seq));
33616 dp_flags = bio_flags_to_wire(mdev, req->master_bio->bi_rw);
33617 if (mdev->state.conn >= C_SYNC_SOURCE &&
33618 mdev->state.conn <= C_PAUSED_SYNC_T)
33619@@ -2574,8 +2574,8 @@ void conn_destroy(struct kref *kref)
33620 {
33621 struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref);
33622
33623- if (atomic_read(&tconn->current_epoch->epoch_size) != 0)
33624- conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size));
33625+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size) != 0)
33626+ conn_err(tconn, "epoch_size:%d\n", atomic_read_unchecked(&tconn->current_epoch->epoch_size));
33627 kfree(tconn->current_epoch);
33628
33629 idr_destroy(&tconn->volumes);
33630diff --git a/drivers/block/drbd/drbd_receiver.c b/drivers/block/drbd/drbd_receiver.c
33631index 4222aff..1f79506 100644
33632--- a/drivers/block/drbd/drbd_receiver.c
33633+++ b/drivers/block/drbd/drbd_receiver.c
33634@@ -834,7 +834,7 @@ int drbd_connected(struct drbd_conf *mdev)
33635 {
33636 int err;
33637
33638- atomic_set(&mdev->packet_seq, 0);
33639+ atomic_set_unchecked(&mdev->packet_seq, 0);
33640 mdev->peer_seq = 0;
33641
33642 mdev->state_mutex = mdev->tconn->agreed_pro_version < 100 ?
33643@@ -1193,7 +1193,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
33644 do {
33645 next_epoch = NULL;
33646
33647- epoch_size = atomic_read(&epoch->epoch_size);
33648+ epoch_size = atomic_read_unchecked(&epoch->epoch_size);
33649
33650 switch (ev & ~EV_CLEANUP) {
33651 case EV_PUT:
33652@@ -1233,7 +1233,7 @@ static enum finish_epoch drbd_may_finish_epoch(struct drbd_tconn *tconn,
33653 rv = FE_DESTROYED;
33654 } else {
33655 epoch->flags = 0;
33656- atomic_set(&epoch->epoch_size, 0);
33657+ atomic_set_unchecked(&epoch->epoch_size, 0);
33658 /* atomic_set(&epoch->active, 0); is already zero */
33659 if (rv == FE_STILL_LIVE)
33660 rv = FE_RECYCLED;
33661@@ -1451,7 +1451,7 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
33662 conn_wait_active_ee_empty(tconn);
33663 drbd_flush(tconn);
33664
33665- if (atomic_read(&tconn->current_epoch->epoch_size)) {
33666+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
33667 epoch = kmalloc(sizeof(struct drbd_epoch), GFP_NOIO);
33668 if (epoch)
33669 break;
33670@@ -1464,11 +1464,11 @@ static int receive_Barrier(struct drbd_tconn *tconn, struct packet_info *pi)
33671 }
33672
33673 epoch->flags = 0;
33674- atomic_set(&epoch->epoch_size, 0);
33675+ atomic_set_unchecked(&epoch->epoch_size, 0);
33676 atomic_set(&epoch->active, 0);
33677
33678 spin_lock(&tconn->epoch_lock);
33679- if (atomic_read(&tconn->current_epoch->epoch_size)) {
33680+ if (atomic_read_unchecked(&tconn->current_epoch->epoch_size)) {
33681 list_add(&epoch->list, &tconn->current_epoch->list);
33682 tconn->current_epoch = epoch;
33683 tconn->epochs++;
33684@@ -2172,7 +2172,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
33685
33686 err = wait_for_and_update_peer_seq(mdev, peer_seq);
33687 drbd_send_ack_dp(mdev, P_NEG_ACK, p, pi->size);
33688- atomic_inc(&tconn->current_epoch->epoch_size);
33689+ atomic_inc_unchecked(&tconn->current_epoch->epoch_size);
33690 err2 = drbd_drain_block(mdev, pi->size);
33691 if (!err)
33692 err = err2;
33693@@ -2206,7 +2206,7 @@ static int receive_Data(struct drbd_tconn *tconn, struct packet_info *pi)
33694
33695 spin_lock(&tconn->epoch_lock);
33696 peer_req->epoch = tconn->current_epoch;
33697- atomic_inc(&peer_req->epoch->epoch_size);
33698+ atomic_inc_unchecked(&peer_req->epoch->epoch_size);
33699 atomic_inc(&peer_req->epoch->active);
33700 spin_unlock(&tconn->epoch_lock);
33701
33702@@ -4347,7 +4347,7 @@ struct data_cmd {
33703 int expect_payload;
33704 size_t pkt_size;
33705 int (*fn)(struct drbd_tconn *, struct packet_info *);
33706-};
33707+} __do_const;
33708
33709 static struct data_cmd drbd_cmd_handler[] = {
33710 [P_DATA] = { 1, sizeof(struct p_data), receive_Data },
33711@@ -4467,7 +4467,7 @@ static void conn_disconnect(struct drbd_tconn *tconn)
33712 if (!list_empty(&tconn->current_epoch->list))
33713 conn_err(tconn, "ASSERTION FAILED: tconn->current_epoch->list not empty\n");
33714 /* ok, no more ee's on the fly, it is safe to reset the epoch_size */
33715- atomic_set(&tconn->current_epoch->epoch_size, 0);
33716+ atomic_set_unchecked(&tconn->current_epoch->epoch_size, 0);
33717 tconn->send.seen_any_write_yet = false;
33718
33719 conn_info(tconn, "Connection closed\n");
33720@@ -5223,7 +5223,7 @@ static int tconn_finish_peer_reqs(struct drbd_tconn *tconn)
33721 struct asender_cmd {
33722 size_t pkt_size;
33723 int (*fn)(struct drbd_tconn *tconn, struct packet_info *);
33724-};
33725+} __do_const;
33726
33727 static struct asender_cmd asender_tbl[] = {
33728 [P_PING] = { 0, got_Ping },
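
Note on the atomic_*_unchecked conversions above: under the PaX REFCOUNT hardening, plain atomic_inc()/atomic_inc_return() trap on signed overflow to stop reference-count overflow exploits. Counters that are allowed to wrap (drbd's packet sequence number and per-epoch block counts here, the IPMI and EDAC statistics further down) are therefore moved to atomic_unchecked_t, whose operations carry no overflow trap. A minimal sketch, assuming the atomic_unchecked_t helpers this patch introduces elsewhere (next_seq() is a hypothetical name):

/* a sequence counter that may legitimately wrap at 2^32, so it is
 * deliberately exempted from REFCOUNT overflow detection */
static atomic_unchecked_t packet_seq = ATOMIC_INIT(0);

static u32 next_seq(void)
{
	/* no overflow trap: silent wraparound is the intended behaviour */
	return (u32)atomic_inc_return_unchecked(&packet_seq);
}
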
33729diff --git a/drivers/block/loop.c b/drivers/block/loop.c
33730index d92d50f..a7e9d97 100644
33731--- a/drivers/block/loop.c
33732+++ b/drivers/block/loop.c
33733@@ -232,7 +232,7 @@ static int __do_lo_send_write(struct file *file,
33734
33735 file_start_write(file);
33736 set_fs(get_ds());
33737- bw = file->f_op->write(file, buf, len, &pos);
33738+ bw = file->f_op->write(file, (const char __force_user *)buf, len, &pos);
33739 set_fs(old_fs);
33740 file_end_write(file);
33741 if (likely(bw == len))
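
The cast above belongs to the surrounding set_fs(get_ds()) window: once the thread's address limit covers kernel space, a kernel buffer is valid where a __user pointer is expected, and the patch-provided __force_user annotation tells sparse the reclassification is deliberate rather than an address-space bug. A condensed sketch of the idiom (kernel_write_compat() is a hypothetical name, not part of the patch):

static ssize_t kernel_write_compat(struct file *file, const char *kbuf,
				   size_t len, loff_t *pos)
{
	mm_segment_t old_fs = get_fs();
	ssize_t ret;

	set_fs(get_ds());	/* uaccess checks now admit kernel pointers */
	ret = file->f_op->write(file, (const char __force_user *)kbuf,
				len, pos);
	set_fs(old_fs);		/* always restore the old limit */
	return ret;
}
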
33742diff --git a/drivers/block/pktcdvd.c b/drivers/block/pktcdvd.c
33743index f5d0ea1..c62380a 100644
33744--- a/drivers/block/pktcdvd.c
33745+++ b/drivers/block/pktcdvd.c
33746@@ -84,7 +84,7 @@
33747 #define MAX_SPEED 0xffff
33748
33749 #define ZONE(sector, pd) (((sector) + (pd)->offset) & \
33750- ~(sector_t)((pd)->settings.size - 1))
33751+ ~(sector_t)((pd)->settings.size - 1UL))
33752
33753 static DEFINE_MUTEX(pktcdvd_mutex);
33754 static struct pktcdvd_device *pkt_devs[MAX_WRITERS];
33755diff --git a/drivers/cdrom/cdrom.c b/drivers/cdrom/cdrom.c
33756index 8a3aff7..d7538c2 100644
33757--- a/drivers/cdrom/cdrom.c
33758+++ b/drivers/cdrom/cdrom.c
33759@@ -416,7 +416,6 @@ int register_cdrom(struct cdrom_device_info *cdi)
33760 ENSURE(reset, CDC_RESET);
33761 ENSURE(generic_packet, CDC_GENERIC_PACKET);
33762 cdi->mc_flags = 0;
33763- cdo->n_minors = 0;
33764 cdi->options = CDO_USE_FFLAGS;
33765
33766 if (autoclose==1 && CDROM_CAN(CDC_CLOSE_TRAY))
33767@@ -436,8 +435,11 @@ int register_cdrom(struct cdrom_device_info *cdi)
33768 else
33769 cdi->cdda_method = CDDA_OLD;
33770
33771- if (!cdo->generic_packet)
33772- cdo->generic_packet = cdrom_dummy_generic_packet;
33773+ if (!cdo->generic_packet) {
33774+ pax_open_kernel();
33775+ *(void **)&cdo->generic_packet = cdrom_dummy_generic_packet;
33776+ pax_close_kernel();
33777+ }
33778
33779 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" registered\n", cdi->name);
33780 mutex_lock(&cdrom_mutex);
33781@@ -458,7 +460,6 @@ void unregister_cdrom(struct cdrom_device_info *cdi)
33782 if (cdi->exit)
33783 cdi->exit(cdi);
33784
33785- cdi->ops->n_minors--;
33786 cdinfo(CD_REG_UNREG, "drive \"/dev/%s\" unregistered\n", cdi->name);
33787 }
33788
33789@@ -2107,7 +2108,7 @@ static int cdrom_read_cdda_old(struct cdrom_device_info *cdi, __u8 __user *ubuf,
33790 */
33791 nr = nframes;
33792 do {
33793- cgc.buffer = kmalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
33794+ cgc.buffer = kzalloc(CD_FRAMESIZE_RAW * nr, GFP_KERNEL);
33795 if (cgc.buffer)
33796 break;
33797
33798@@ -3429,7 +3430,7 @@ static int cdrom_print_info(const char *header, int val, char *info,
33799 struct cdrom_device_info *cdi;
33800 int ret;
33801
33802- ret = scnprintf(info + *pos, max_size - *pos, header);
33803+ ret = scnprintf(info + *pos, max_size - *pos, "%s", header);
33804 if (!ret)
33805 return 1;
33806
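
The generic_packet hunk above is the first occurrence in this stretch of the write-rarely pattern used throughout the patch: ops structures are constified (the __do_const annotations elsewhere do the same for whole struct types) so their function pointers live in read-only memory, and the rare legitimate update is bracketed by pax_open_kernel()/pax_close_kernel(), which briefly lift the write protection. A sketch of the shape, assuming those PaX primitives; example_ops, default_frob and set_frob are hypothetical names:

struct example_ops {
	void (*frob)(void);
};

static void default_frob(void) { }

static struct example_ops ex_ops __read_only = {
	.frob = default_frob,
};

static void set_frob(void (*f)(void))
{
	pax_open_kernel();		/* temporarily allow writes to rodata */
	*(void **)&ex_ops.frob = f;	/* data cast sidesteps the const view */
	pax_close_kernel();
}
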
33807diff --git a/drivers/cdrom/gdrom.c b/drivers/cdrom/gdrom.c
33808index 4afcb65..a68a32d 100644
33809--- a/drivers/cdrom/gdrom.c
33810+++ b/drivers/cdrom/gdrom.c
33811@@ -491,7 +491,6 @@ static struct cdrom_device_ops gdrom_ops = {
33812 .audio_ioctl = gdrom_audio_ioctl,
33813 .capability = CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
33814 CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
33815- .n_minors = 1,
33816 };
33817
33818 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
33819diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
33820index 3bb6fa3..34013fb 100644
33821--- a/drivers/char/Kconfig
33822+++ b/drivers/char/Kconfig
33823@@ -8,7 +8,8 @@ source "drivers/tty/Kconfig"
33824
33825 config DEVKMEM
33826 bool "/dev/kmem virtual device support"
33827- default y
33828+ default n
33829+ depends on !GRKERNSEC_KMEM
33830 help
33831 Say Y here if you want to support the /dev/kmem device. The
33832 /dev/kmem device is rarely used, but can be used for certain
33833@@ -582,6 +583,7 @@ config DEVPORT
33834 bool
33835 depends on !M68K
33836 depends on ISA || PCI
33837+ depends on !GRKERNSEC_KMEM
33838 default y
33839
33840 source "drivers/s390/char/Kconfig"
33841diff --git a/drivers/char/agp/compat_ioctl.c b/drivers/char/agp/compat_ioctl.c
33842index a48e05b..6bac831 100644
33843--- a/drivers/char/agp/compat_ioctl.c
33844+++ b/drivers/char/agp/compat_ioctl.c
33845@@ -108,7 +108,7 @@ static int compat_agpioc_reserve_wrap(struct agp_file_private *priv, void __user
33846 return -ENOMEM;
33847 }
33848
33849- if (copy_from_user(usegment, (void __user *) ureserve.seg_list,
33850+ if (copy_from_user(usegment, (void __force_user *) ureserve.seg_list,
33851 sizeof(*usegment) * ureserve.seg_count)) {
33852 kfree(usegment);
33853 kfree(ksegment);
33854diff --git a/drivers/char/agp/frontend.c b/drivers/char/agp/frontend.c
33855index 2e04433..771f2cc 100644
33856--- a/drivers/char/agp/frontend.c
33857+++ b/drivers/char/agp/frontend.c
33858@@ -817,7 +817,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
33859 if (copy_from_user(&reserve, arg, sizeof(struct agp_region)))
33860 return -EFAULT;
33861
33862- if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment))
33863+ if ((unsigned) reserve.seg_count >= ~0U/sizeof(struct agp_segment_priv))
33864 return -EFAULT;
33865
33866 client = agp_find_client_by_pid(reserve.pid);
33867@@ -847,7 +847,7 @@ static int agpioc_reserve_wrap(struct agp_file_private *priv, void __user *arg)
33868 if (segment == NULL)
33869 return -ENOMEM;
33870
33871- if (copy_from_user(segment, (void __user *) reserve.seg_list,
33872+ if (copy_from_user(segment, (void __force_user *) reserve.seg_list,
33873 sizeof(struct agp_segment) * reserve.seg_count)) {
33874 kfree(segment);
33875 return -EFAULT;
33876diff --git a/drivers/char/genrtc.c b/drivers/char/genrtc.c
33877index 4f94375..413694e 100644
33878--- a/drivers/char/genrtc.c
33879+++ b/drivers/char/genrtc.c
33880@@ -273,6 +273,7 @@ static int gen_rtc_ioctl(struct file *file,
33881 switch (cmd) {
33882
33883 case RTC_PLL_GET:
33884+ memset(&pll, 0, sizeof(pll));
33885 if (get_rtc_pll(&pll))
33886 return -EINVAL;
33887 else
33888diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
33889index d784650..e8bfd69 100644
33890--- a/drivers/char/hpet.c
33891+++ b/drivers/char/hpet.c
33892@@ -559,7 +559,7 @@ static inline unsigned long hpet_time_div(struct hpets *hpets,
33893 }
33894
33895 static int
33896-hpet_ioctl_common(struct hpet_dev *devp, int cmd, unsigned long arg,
33897+hpet_ioctl_common(struct hpet_dev *devp, unsigned int cmd, unsigned long arg,
33898 struct hpet_info *info)
33899 {
33900 struct hpet_timer __iomem *timer;
33901diff --git a/drivers/char/hw_random/intel-rng.c b/drivers/char/hw_random/intel-rng.c
33902index 86fe45c..c0ea948 100644
33903--- a/drivers/char/hw_random/intel-rng.c
33904+++ b/drivers/char/hw_random/intel-rng.c
33905@@ -314,7 +314,7 @@ PFX "RNG, try using the 'no_fwh_detect' option.\n";
33906
33907 if (no_fwh_detect)
33908 return -ENODEV;
33909- printk(warning);
33910+ printk("%s", warning);
33911 return -EBUSY;
33912 }
33913
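
The printk change above closes a classic format-string hazard: handing variable text to printk as the format means any '%' in it is parsed as a conversion that consumes nonexistent arguments. Routing it through "%s" prints the text verbatim. A worked example (msg is a hypothetical string):

const char *msg = "detection 100% complete";	/* contains '%' */

printk(msg);		/* BAD: "% c" is parsed as a conversion and
			 * reads a garbage vararg off the stack */
printk("%s", msg);	/* OK: the '%' is printed literally */
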
33914diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
33915index 4445fa1..7c6de37 100644
33916--- a/drivers/char/ipmi/ipmi_msghandler.c
33917+++ b/drivers/char/ipmi/ipmi_msghandler.c
33918@@ -420,7 +420,7 @@ struct ipmi_smi {
33919 struct proc_dir_entry *proc_dir;
33920 char proc_dir_name[10];
33921
33922- atomic_t stats[IPMI_NUM_STATS];
33923+ atomic_unchecked_t stats[IPMI_NUM_STATS];
33924
33925 /*
33926 * run_to_completion duplicate of smb_info, smi_info
33927@@ -453,9 +453,9 @@ static DEFINE_MUTEX(smi_watchers_mutex);
33928
33929
33930 #define ipmi_inc_stat(intf, stat) \
33931- atomic_inc(&(intf)->stats[IPMI_STAT_ ## stat])
33932+ atomic_inc_unchecked(&(intf)->stats[IPMI_STAT_ ## stat])
33933 #define ipmi_get_stat(intf, stat) \
33934- ((unsigned int) atomic_read(&(intf)->stats[IPMI_STAT_ ## stat]))
33935+ ((unsigned int) atomic_read_unchecked(&(intf)->stats[IPMI_STAT_ ## stat]))
33936
33937 static int is_lan_addr(struct ipmi_addr *addr)
33938 {
33939@@ -2883,7 +2883,7 @@ int ipmi_register_smi(struct ipmi_smi_handlers *handlers,
33940 INIT_LIST_HEAD(&intf->cmd_rcvrs);
33941 init_waitqueue_head(&intf->waitq);
33942 for (i = 0; i < IPMI_NUM_STATS; i++)
33943- atomic_set(&intf->stats[i], 0);
33944+ atomic_set_unchecked(&intf->stats[i], 0);
33945
33946 intf->proc_dir = NULL;
33947
33948diff --git a/drivers/char/ipmi/ipmi_si_intf.c b/drivers/char/ipmi/ipmi_si_intf.c
33949index af4b23f..79806fc 100644
33950--- a/drivers/char/ipmi/ipmi_si_intf.c
33951+++ b/drivers/char/ipmi/ipmi_si_intf.c
33952@@ -275,7 +275,7 @@ struct smi_info {
33953 unsigned char slave_addr;
33954
33955 /* Counters and things for the proc filesystem. */
33956- atomic_t stats[SI_NUM_STATS];
33957+ atomic_unchecked_t stats[SI_NUM_STATS];
33958
33959 struct task_struct *thread;
33960
33961@@ -284,9 +284,9 @@ struct smi_info {
33962 };
33963
33964 #define smi_inc_stat(smi, stat) \
33965- atomic_inc(&(smi)->stats[SI_STAT_ ## stat])
33966+ atomic_inc_unchecked(&(smi)->stats[SI_STAT_ ## stat])
33967 #define smi_get_stat(smi, stat) \
33968- ((unsigned int) atomic_read(&(smi)->stats[SI_STAT_ ## stat]))
33969+ ((unsigned int) atomic_read_unchecked(&(smi)->stats[SI_STAT_ ## stat]))
33970
33971 #define SI_MAX_PARMS 4
33972
33973@@ -3258,7 +3258,7 @@ static int try_smi_init(struct smi_info *new_smi)
33974 atomic_set(&new_smi->req_events, 0);
33975 new_smi->run_to_completion = 0;
33976 for (i = 0; i < SI_NUM_STATS; i++)
33977- atomic_set(&new_smi->stats[i], 0);
33978+ atomic_set_unchecked(&new_smi->stats[i], 0);
33979
33980 new_smi->interrupt_disabled = 1;
33981 atomic_set(&new_smi->stop_operation, 0);
33982diff --git a/drivers/char/mem.c b/drivers/char/mem.c
33983index 1ccbe94..6ad651a 100644
33984--- a/drivers/char/mem.c
33985+++ b/drivers/char/mem.c
33986@@ -18,6 +18,7 @@
33987 #include <linux/raw.h>
33988 #include <linux/tty.h>
33989 #include <linux/capability.h>
33990+#include <linux/security.h>
33991 #include <linux/ptrace.h>
33992 #include <linux/device.h>
33993 #include <linux/highmem.h>
33994@@ -38,6 +39,10 @@
33995
33996 #define DEVPORT_MINOR 4
33997
33998+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
33999+extern const struct file_operations grsec_fops;
34000+#endif
34001+
34002 static inline unsigned long size_inside_page(unsigned long start,
34003 unsigned long size)
34004 {
34005@@ -69,9 +74,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
34006
34007 while (cursor < to) {
34008 if (!devmem_is_allowed(pfn)) {
34009+#ifdef CONFIG_GRKERNSEC_KMEM
34010+ gr_handle_mem_readwrite(from, to);
34011+#else
34012 printk(KERN_INFO
34013 "Program %s tried to access /dev/mem between %Lx->%Lx.\n",
34014 current->comm, from, to);
34015+#endif
34016 return 0;
34017 }
34018 cursor += PAGE_SIZE;
34019@@ -79,6 +88,11 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
34020 }
34021 return 1;
34022 }
34023+#elif defined(CONFIG_GRKERNSEC_KMEM)
34024+static inline int range_is_allowed(unsigned long pfn, unsigned long size)
34025+{
34026+ return 0;
34027+}
34028 #else
34029 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
34030 {
34031@@ -121,6 +135,7 @@ static ssize_t read_mem(struct file *file, char __user *buf,
34032
34033 while (count > 0) {
34034 unsigned long remaining;
34035+ char *temp;
34036
34037 sz = size_inside_page(p, count);
34038
34039@@ -136,7 +151,23 @@ static ssize_t read_mem(struct file *file, char __user *buf,
34040 if (!ptr)
34041 return -EFAULT;
34042
34043- remaining = copy_to_user(buf, ptr, sz);
34044+#ifdef CONFIG_PAX_USERCOPY
34045+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
34046+ if (!temp) {
34047+ unxlate_dev_mem_ptr(p, ptr);
34048+ return -ENOMEM;
34049+ }
34050+ memcpy(temp, ptr, sz);
34051+#else
34052+ temp = ptr;
34053+#endif
34054+
34055+ remaining = copy_to_user(buf, temp, sz);
34056+
34057+#ifdef CONFIG_PAX_USERCOPY
34058+ kfree(temp);
34059+#endif
34060+
34061 unxlate_dev_mem_ptr(p, ptr);
34062 if (remaining)
34063 return -EFAULT;
34064@@ -379,7 +410,7 @@ static ssize_t read_oldmem(struct file *file, char __user *buf,
34065 else
34066 csize = count;
34067
34068- rc = copy_oldmem_page(pfn, buf, csize, offset, 1);
34069+ rc = copy_oldmem_page(pfn, (char __force_kernel *)buf, csize, offset, 1);
34070 if (rc < 0)
34071 return rc;
34072 buf += csize;
34073@@ -399,9 +430,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
34074 size_t count, loff_t *ppos)
34075 {
34076 unsigned long p = *ppos;
34077- ssize_t low_count, read, sz;
34078+ ssize_t low_count, read, sz, err = 0;
34079 char *kbuf; /* k-addr because vread() takes vmlist_lock rwlock */
34080- int err = 0;
34081
34082 read = 0;
34083 if (p < (unsigned long) high_memory) {
34084@@ -423,6 +453,8 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
34085 }
34086 #endif
34087 while (low_count > 0) {
34088+ char *temp;
34089+
34090 sz = size_inside_page(p, low_count);
34091
34092 /*
34093@@ -432,7 +464,22 @@ static ssize_t read_kmem(struct file *file, char __user *buf,
34094 */
34095 kbuf = xlate_dev_kmem_ptr((char *)p);
34096
34097- if (copy_to_user(buf, kbuf, sz))
34098+#ifdef CONFIG_PAX_USERCOPY
34099+ temp = kmalloc(sz, GFP_KERNEL|GFP_USERCOPY);
34100+ if (!temp)
34101+ return -ENOMEM;
34102+ memcpy(temp, kbuf, sz);
34103+#else
34104+ temp = kbuf;
34105+#endif
34106+
34107+ err = copy_to_user(buf, temp, sz);
34108+
34109+#ifdef CONFIG_PAX_USERCOPY
34110+ kfree(temp);
34111+#endif
34112+
34113+ if (err)
34114 return -EFAULT;
34115 buf += sz;
34116 p += sz;
34117@@ -869,6 +916,9 @@ static const struct memdev {
34118 #ifdef CONFIG_CRASH_DUMP
34119 [12] = { "oldmem", 0, &oldmem_fops, NULL },
34120 #endif
34121+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
34122+ [13] = { "grsec",S_IRUSR | S_IWUGO, &grsec_fops, NULL },
34123+#endif
34124 };
34125
34126 static int memory_open(struct inode *inode, struct file *filp)
34127@@ -940,7 +990,7 @@ static int __init chr_dev_init(void)
34128 continue;
34129
34130 device_create(mem_class, NULL, MKDEV(MEM_MAJOR, minor),
34131- NULL, devlist[minor].name);
34132+ NULL, "%s", devlist[minor].name);
34133 }
34134
34135 return tty_init();
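
The bounce buffers added to read_mem()/read_kmem() above serve PAX_USERCOPY: that check validates every copy_to_user() against the bounds of a known slab object, and a raw /dev/mem or vmalloc window is not one. Copying first into a kmalloc'd object of exactly sz bytes gives the checker a well-defined source. A condensed sketch, assuming the GFP_USERCOPY flag this patch introduces (bounce_to_user() is a hypothetical helper):

static ssize_t bounce_to_user(char __user *ubuf, const void *src, size_t sz)
{
	char *tmp = kmalloc(sz, GFP_KERNEL | GFP_USERCOPY);
	ssize_t ret = 0;

	if (!tmp)
		return -ENOMEM;
	memcpy(tmp, src, sz);			/* in-kernel copy, unchecked */
	if (copy_to_user(ubuf, tmp, sz))	/* checked against tmp's slab */
		ret = -EFAULT;
	kfree(tmp);
	return ret;
}
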
34136diff --git a/drivers/char/mwave/tp3780i.c b/drivers/char/mwave/tp3780i.c
34137index c689697..04e6d6a2 100644
34138--- a/drivers/char/mwave/tp3780i.c
34139+++ b/drivers/char/mwave/tp3780i.c
34140@@ -479,6 +479,7 @@ int tp3780I_QueryAbilities(THINKPAD_BD_DATA * pBDData, MW_ABILITIES * pAbilities
34141 PRINTK_2(TRACE_TP3780I,
34142 "tp3780i::tp3780I_QueryAbilities entry pBDData %p\n", pBDData);
34143
34144+ memset(pAbilities, 0, sizeof(*pAbilities));
34145 /* fill out standard constant fields */
34146 pAbilities->instr_per_sec = pBDData->rDspSettings.uIps;
34147 pAbilities->data_size = pBDData->rDspSettings.uDStoreSize;
34148diff --git a/drivers/char/nvram.c b/drivers/char/nvram.c
34149index 9df78e2..01ba9ae 100644
34150--- a/drivers/char/nvram.c
34151+++ b/drivers/char/nvram.c
34152@@ -247,7 +247,7 @@ static ssize_t nvram_read(struct file *file, char __user *buf,
34153
34154 spin_unlock_irq(&rtc_lock);
34155
34156- if (copy_to_user(buf, contents, tmp - contents))
34157+ if (tmp - contents > sizeof(contents) || copy_to_user(buf, contents, tmp - contents))
34158 return -EFAULT;
34159
34160 *ppos = i;
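
The extra length test above is a belt-and-braces clamp: should the NVRAM walk ever advance tmp past the end of the on-stack contents[] buffer, the copy is refused instead of leaking adjacent stack memory to user space. The random.c hunk further down adds the same guard shape. In general form (names hypothetical):

size_t produced = tmp - contents;	/* bytes actually generated */

if (produced > sizeof(contents) ||	/* never trust the cursor */
    copy_to_user(buf, contents, produced))
	return -EFAULT;
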
34161diff --git a/drivers/char/pcmcia/synclink_cs.c b/drivers/char/pcmcia/synclink_cs.c
34162index 5c5cc00..ac9edb7 100644
34163--- a/drivers/char/pcmcia/synclink_cs.c
34164+++ b/drivers/char/pcmcia/synclink_cs.c
34165@@ -2345,9 +2345,9 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
34166
34167 if (debug_level >= DEBUG_LEVEL_INFO)
34168 printk("%s(%d):mgslpc_close(%s) entry, count=%d\n",
34169- __FILE__, __LINE__, info->device_name, port->count);
34170+ __FILE__, __LINE__, info->device_name, atomic_read(&port->count));
34171
34172- WARN_ON(!port->count);
34173+ WARN_ON(!atomic_read(&port->count));
34174
34175 if (tty_port_close_start(port, tty, filp) == 0)
34176 goto cleanup;
34177@@ -2365,7 +2365,7 @@ static void mgslpc_close(struct tty_struct *tty, struct file * filp)
34178 cleanup:
34179 if (debug_level >= DEBUG_LEVEL_INFO)
34180 printk("%s(%d):mgslpc_close(%s) exit, count=%d\n", __FILE__, __LINE__,
34181- tty->driver->name, port->count);
34182+ tty->driver->name, atomic_read(&port->count));
34183 }
34184
34185 /* Wait until the transmitter is empty.
34186@@ -2507,7 +2507,7 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
34187
34188 if (debug_level >= DEBUG_LEVEL_INFO)
34189 printk("%s(%d):mgslpc_open(%s), old ref count = %d\n",
34190- __FILE__, __LINE__, tty->driver->name, port->count);
34191+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
34192
34193 /* If port is closing, signal caller to try again */
34194 if (tty_hung_up_p(filp) || port->flags & ASYNC_CLOSING){
34195@@ -2527,11 +2527,11 @@ static int mgslpc_open(struct tty_struct *tty, struct file * filp)
34196 goto cleanup;
34197 }
34198 spin_lock(&port->lock);
34199- port->count++;
34200+ atomic_inc(&port->count);
34201 spin_unlock(&port->lock);
34202 spin_unlock_irqrestore(&info->netlock, flags);
34203
34204- if (port->count == 1) {
34205+ if (atomic_read(&port->count) == 1) {
34206 /* 1st open on this device, init hardware */
34207 retval = startup(info, tty);
34208 if (retval < 0)
34209@@ -3920,7 +3920,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
34210 unsigned short new_crctype;
34211
34212 /* return error if TTY interface open */
34213- if (info->port.count)
34214+ if (atomic_read(&info->port.count))
34215 return -EBUSY;
34216
34217 switch (encoding)
34218@@ -4024,7 +4024,7 @@ static int hdlcdev_open(struct net_device *dev)
34219
34220 /* arbitrate between network and tty opens */
34221 spin_lock_irqsave(&info->netlock, flags);
34222- if (info->port.count != 0 || info->netcount != 0) {
34223+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
34224 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
34225 spin_unlock_irqrestore(&info->netlock, flags);
34226 return -EBUSY;
34227@@ -4114,7 +4114,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
34228 printk("%s:hdlcdev_ioctl(%s)\n", __FILE__, dev->name);
34229
34230 /* return error if TTY interface open */
34231- if (info->port.count)
34232+ if (atomic_read(&info->port.count))
34233 return -EBUSY;
34234
34235 if (cmd != SIOCWANDEV)
34236diff --git a/drivers/char/random.c b/drivers/char/random.c
34237index 35487e8..dac8bd1 100644
34238--- a/drivers/char/random.c
34239+++ b/drivers/char/random.c
34240@@ -272,8 +272,13 @@
34241 /*
34242 * Configuration information
34243 */
34244+#ifdef CONFIG_GRKERNSEC_RANDNET
34245+#define INPUT_POOL_WORDS 512
34246+#define OUTPUT_POOL_WORDS 128
34247+#else
34248 #define INPUT_POOL_WORDS 128
34249 #define OUTPUT_POOL_WORDS 32
34250+#endif
34251 #define SEC_XFER_SIZE 512
34252 #define EXTRACT_SIZE 10
34253
34254@@ -313,10 +318,17 @@ static struct poolinfo {
34255 int poolwords;
34256 int tap1, tap2, tap3, tap4, tap5;
34257 } poolinfo_table[] = {
34258+#ifdef CONFIG_GRKERNSEC_RANDNET
34259+ /* x^512 + x^411 + x^308 + x^208 +x^104 + x + 1 -- 225 */
34260+ { 512, 411, 308, 208, 104, 1 },
34261+ /* x^128 + x^103 + x^76 + x^51 + x^25 + x + 1 -- 105 */
34262+ { 128, 103, 76, 51, 25, 1 },
34263+#else
34264 /* x^128 + x^103 + x^76 + x^51 +x^25 + x + 1 -- 105 */
34265 { 128, 103, 76, 51, 25, 1 },
34266 /* x^32 + x^26 + x^20 + x^14 + x^7 + x + 1 -- 15 */
34267 { 32, 26, 20, 14, 7, 1 },
34268+#endif
34269 #if 0
34270 /* x^2048 + x^1638 + x^1231 + x^819 + x^411 + x + 1 -- 115 */
34271 { 2048, 1638, 1231, 819, 411, 1 },
34272@@ -524,8 +536,8 @@ static void _mix_pool_bytes(struct entropy_store *r, const void *in,
34273 input_rotate += i ? 7 : 14;
34274 }
34275
34276- ACCESS_ONCE(r->input_rotate) = input_rotate;
34277- ACCESS_ONCE(r->add_ptr) = i;
34278+ ACCESS_ONCE_RW(r->input_rotate) = input_rotate;
34279+ ACCESS_ONCE_RW(r->add_ptr) = i;
34280 smp_wmb();
34281
34282 if (out)
34283@@ -1032,7 +1044,7 @@ static ssize_t extract_entropy_user(struct entropy_store *r, void __user *buf,
34284
34285 extract_buf(r, tmp);
34286 i = min_t(int, nbytes, EXTRACT_SIZE);
34287- if (copy_to_user(buf, tmp, i)) {
34288+ if (i > sizeof(tmp) || copy_to_user(buf, tmp, i)) {
34289 ret = -EFAULT;
34290 break;
34291 }
34292@@ -1368,7 +1380,7 @@ EXPORT_SYMBOL(generate_random_uuid);
34293 #include <linux/sysctl.h>
34294
34295 static int min_read_thresh = 8, min_write_thresh;
34296-static int max_read_thresh = INPUT_POOL_WORDS * 32;
34297+static int max_read_thresh = OUTPUT_POOL_WORDS * 32;
34298 static int max_write_thresh = INPUT_POOL_WORDS * 32;
34299 static char sysctl_bootid[16];
34300
34301@@ -1384,7 +1396,7 @@ static char sysctl_bootid[16];
34302 static int proc_do_uuid(ctl_table *table, int write,
34303 void __user *buffer, size_t *lenp, loff_t *ppos)
34304 {
34305- ctl_table fake_table;
34306+ ctl_table_no_const fake_table;
34307 unsigned char buf[64], tmp_uuid[16], *uuid;
34308
34309 uuid = table->data;
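
ACCESS_ONCE_RW above exists because the hardened tree narrows ACCESS_ONCE with a const-qualified cast, so the plain form can only read and every intentional store has to announce itself. A sketch of the pair as this patch is understood to define them elsewhere (bump_rotate() is a hypothetical user):

#define ACCESS_ONCE(x)		(*(volatile const typeof(x) *)&(x))
#define ACCESS_ONCE_RW(x)	(*(volatile typeof(x) *)&(x))

static unsigned int input_rotate;

static void bump_rotate(unsigned int step)
{
	unsigned int cur = ACCESS_ONCE(input_rotate);	/* read-only snapshot */

	ACCESS_ONCE_RW(input_rotate) = cur + step;	/* store must use _RW */
}
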
34310diff --git a/drivers/char/sonypi.c b/drivers/char/sonypi.c
34311index bf2349db..5456d53 100644
34312--- a/drivers/char/sonypi.c
34313+++ b/drivers/char/sonypi.c
34314@@ -54,6 +54,7 @@
34315
34316 #include <asm/uaccess.h>
34317 #include <asm/io.h>
34318+#include <asm/local.h>
34319
34320 #include <linux/sonypi.h>
34321
34322@@ -490,7 +491,7 @@ static struct sonypi_device {
34323 spinlock_t fifo_lock;
34324 wait_queue_head_t fifo_proc_list;
34325 struct fasync_struct *fifo_async;
34326- int open_count;
34327+ local_t open_count;
34328 int model;
34329 struct input_dev *input_jog_dev;
34330 struct input_dev *input_key_dev;
34331@@ -897,7 +898,7 @@ static int sonypi_misc_fasync(int fd, struct file *filp, int on)
34332 static int sonypi_misc_release(struct inode *inode, struct file *file)
34333 {
34334 mutex_lock(&sonypi_device.lock);
34335- sonypi_device.open_count--;
34336+ local_dec(&sonypi_device.open_count);
34337 mutex_unlock(&sonypi_device.lock);
34338 return 0;
34339 }
34340@@ -906,9 +907,9 @@ static int sonypi_misc_open(struct inode *inode, struct file *file)
34341 {
34342 mutex_lock(&sonypi_device.lock);
34343 /* Flush input queue on first open */
34344- if (!sonypi_device.open_count)
34345+ if (!local_read(&sonypi_device.open_count))
34346 kfifo_reset(&sonypi_device.fifo);
34347- sonypi_device.open_count++;
34348+ local_inc(&sonypi_device.open_count);
34349 mutex_unlock(&sonypi_device.lock);
34350
34351 return 0;
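
open_count only ever needs increment, decrement and read, all under sonypi_device.lock, so local_t suffices: it makes the counter itself tear-free without any heavier machinery. A small sketch of the same shape (dev_open() is a hypothetical name; the surrounding mutex is assumed, as in the driver above):

#include <asm/local.h>

static local_t open_count = LOCAL_INIT(0);

static int dev_open(void)
{
	/* called with the device mutex held */
	if (!local_read(&open_count)) {
		/* first opener: reset any queued input here */
	}
	local_inc(&open_count);
	return 0;
}
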
34352diff --git a/drivers/char/tpm/tpm_acpi.c b/drivers/char/tpm/tpm_acpi.c
34353index 64420b3..5c40b56 100644
34354--- a/drivers/char/tpm/tpm_acpi.c
34355+++ b/drivers/char/tpm/tpm_acpi.c
34356@@ -98,11 +98,12 @@ int read_log(struct tpm_bios_log *log)
34357 virt = acpi_os_map_memory(start, len);
34358 if (!virt) {
34359 kfree(log->bios_event_log);
34360+ log->bios_event_log = NULL;
34361 printk("%s: ERROR - Unable to map memory\n", __func__);
34362 return -EIO;
34363 }
34364
34365- memcpy_fromio(log->bios_event_log, virt, len);
34366+ memcpy_fromio(log->bios_event_log, (const char __force_kernel *)virt, len);
34367
34368 acpi_os_unmap_memory(virt, len);
34369 return 0;
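
Setting log->bios_event_log to NULL after the kfree() above is dangling-pointer hygiene: the pointer lives on in the caller's structure, and a later teardown path could otherwise free it a second time. Since kfree(NULL) is defined to do nothing, the NULL store turns a potential double free into a no-op. As a reusable shape (free_and_null() is a hypothetical helper, not something this patch adds):

static inline void free_and_null(void **p)
{
	kfree(*p);
	*p = NULL;	/* later kfree() is a no-op; a stray deref oopses loudly */
}
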
34370diff --git a/drivers/char/tpm/tpm_eventlog.c b/drivers/char/tpm/tpm_eventlog.c
34371index 84ddc55..1d32f1e 100644
34372--- a/drivers/char/tpm/tpm_eventlog.c
34373+++ b/drivers/char/tpm/tpm_eventlog.c
34374@@ -95,7 +95,7 @@ static void *tpm_bios_measurements_start(struct seq_file *m, loff_t *pos)
34375 event = addr;
34376
34377 if ((event->event_type == 0 && event->event_size == 0) ||
34378- ((addr + sizeof(struct tcpa_event) + event->event_size) >= limit))
34379+ (event->event_size >= limit - addr - sizeof(struct tcpa_event)))
34380 return NULL;
34381
34382 return addr;
34383@@ -120,7 +120,7 @@ static void *tpm_bios_measurements_next(struct seq_file *m, void *v,
34384 return NULL;
34385
34386 if ((event->event_type == 0 && event->event_size == 0) ||
34387- ((v + sizeof(struct tcpa_event) + event->event_size) >= limit))
34388+ (event->event_size >= limit - v - sizeof(struct tcpa_event)))
34389 return NULL;
34390
34391 (*pos)++;
34392@@ -213,7 +213,8 @@ static int tpm_binary_bios_measurements_show(struct seq_file *m, void *v)
34393 int i;
34394
34395 for (i = 0; i < sizeof(struct tcpa_event) + event->event_size; i++)
34396- seq_putc(m, data[i]);
34397+ if (!seq_putc(m, data[i]))
34398+ return -EFAULT;
34399
34400 return 0;
34401 }
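
The two reworked comparisons above are the overflow-safe form of a bounds test. In the old form, addr + sizeof(struct tcpa_event) + event->event_size can wrap for a huge, attacker-influenced event_size, compare as small, and admit an out-of-bounds event; keeping the variable term alone on the left of a subtraction holds every intermediate value inside [0, limit - addr], where nothing can wrap. A standalone sketch of the transformation (event_fits() is a hypothetical name):

/* BAD:  addr + hdr_len + ev_size >= limit   (the sum may wrap)
 * GOOD: ev_size >= limit - addr - hdr_len   (subtraction cannot wrap
 *       once addr + hdr_len <= limit is established) */
static bool event_fits(unsigned long addr, size_t hdr_len,
		       size_t ev_size, unsigned long limit)
{
	if (addr > limit || limit - addr < hdr_len)
		return false;
	return ev_size < limit - addr - hdr_len;
}
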
34402diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c
34403index 1b456fe..2510242 100644
34404--- a/drivers/char/virtio_console.c
34405+++ b/drivers/char/virtio_console.c
34406@@ -679,7 +679,7 @@ static ssize_t fill_readbuf(struct port *port, char *out_buf, size_t out_count,
34407 if (to_user) {
34408 ssize_t ret;
34409
34410- ret = copy_to_user(out_buf, buf->buf + buf->offset, out_count);
34411+ ret = copy_to_user((char __force_user *)out_buf, buf->buf + buf->offset, out_count);
34412 if (ret)
34413 return -EFAULT;
34414 } else {
34415@@ -778,7 +778,7 @@ static ssize_t port_fops_read(struct file *filp, char __user *ubuf,
34416 if (!port_has_data(port) && !port->host_connected)
34417 return 0;
34418
34419- return fill_readbuf(port, ubuf, count, true);
34420+ return fill_readbuf(port, (char __force_kernel *)ubuf, count, true);
34421 }
34422
34423 static int wait_port_writable(struct port *port, bool nonblock)
34424diff --git a/drivers/clk/clk-composite.c b/drivers/clk/clk-composite.c
34425index a33f46f..a720eed 100644
34426--- a/drivers/clk/clk-composite.c
34427+++ b/drivers/clk/clk-composite.c
34428@@ -122,7 +122,7 @@ struct clk *clk_register_composite(struct device *dev, const char *name,
34429 struct clk *clk;
34430 struct clk_init_data init;
34431 struct clk_composite *composite;
34432- struct clk_ops *clk_composite_ops;
34433+ clk_ops_no_const *clk_composite_ops;
34434
34435 composite = kzalloc(sizeof(*composite), GFP_KERNEL);
34436 if (!composite) {
34437diff --git a/drivers/clk/socfpga/clk.c b/drivers/clk/socfpga/clk.c
34438index bd11315..7f87098 100644
34439--- a/drivers/clk/socfpga/clk.c
34440+++ b/drivers/clk/socfpga/clk.c
34441@@ -22,6 +22,7 @@
34442 #include <linux/clk-provider.h>
34443 #include <linux/io.h>
34444 #include <linux/of.h>
34445+#include <asm/pgtable.h>
34446
34447 /* Clock Manager offsets */
34448 #define CLKMGR_CTRL 0x0
34449@@ -135,8 +136,10 @@ static __init struct clk *socfpga_clk_init(struct device_node *node,
34450 if (strcmp(clk_name, "main_pll") || strcmp(clk_name, "periph_pll") ||
34451 strcmp(clk_name, "sdram_pll")) {
34452 socfpga_clk->hw.bit_idx = SOCFPGA_PLL_EXT_ENA;
34453- clk_pll_ops.enable = clk_gate_ops.enable;
34454- clk_pll_ops.disable = clk_gate_ops.disable;
34455+ pax_open_kernel();
34456+ *(void **)&clk_pll_ops.enable = clk_gate_ops.enable;
34457+ *(void **)&clk_pll_ops.disable = clk_gate_ops.disable;
34458+ pax_close_kernel();
34459 }
34460
34461 clk = clk_register(NULL, &socfpga_clk->hw.hw);
34462diff --git a/drivers/clocksource/arm_arch_timer.c b/drivers/clocksource/arm_arch_timer.c
34463index a2b2541..bc1e7ff 100644
34464--- a/drivers/clocksource/arm_arch_timer.c
34465+++ b/drivers/clocksource/arm_arch_timer.c
34466@@ -264,7 +264,7 @@ static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
34467 return NOTIFY_OK;
34468 }
34469
34470-static struct notifier_block arch_timer_cpu_nb __cpuinitdata = {
34471+static struct notifier_block arch_timer_cpu_nb = {
34472 .notifier_call = arch_timer_cpu_notify,
34473 };
34474
34475diff --git a/drivers/clocksource/metag_generic.c b/drivers/clocksource/metag_generic.c
34476index ade7513..069445f 100644
34477--- a/drivers/clocksource/metag_generic.c
34478+++ b/drivers/clocksource/metag_generic.c
34479@@ -169,7 +169,7 @@ static int __cpuinit arch_timer_cpu_notify(struct notifier_block *self,
34480 return NOTIFY_OK;
34481 }
34482
34483-static struct notifier_block __cpuinitdata arch_timer_cpu_nb = {
34484+static struct notifier_block arch_timer_cpu_nb = {
34485 .notifier_call = arch_timer_cpu_notify,
34486 };
34487
34488diff --git a/drivers/cpufreq/acpi-cpufreq.c b/drivers/cpufreq/acpi-cpufreq.c
34489index edc089e..bc7c0bc 100644
34490--- a/drivers/cpufreq/acpi-cpufreq.c
34491+++ b/drivers/cpufreq/acpi-cpufreq.c
34492@@ -172,7 +172,7 @@ static ssize_t show_global_boost(struct kobject *kobj,
34493 return sprintf(buf, "%u\n", boost_enabled);
34494 }
34495
34496-static struct global_attr global_boost = __ATTR(boost, 0644,
34497+static global_attr_no_const global_boost = __ATTR(boost, 0644,
34498 show_global_boost,
34499 store_global_boost);
34500
34501@@ -705,8 +705,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
34502 data->acpi_data = per_cpu_ptr(acpi_perf_data, cpu);
34503 per_cpu(acfreq_data, cpu) = data;
34504
34505- if (cpu_has(c, X86_FEATURE_CONSTANT_TSC))
34506- acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
34507+ if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
34508+ pax_open_kernel();
34509+ *(u8 *)&acpi_cpufreq_driver.flags |= CPUFREQ_CONST_LOOPS;
34510+ pax_close_kernel();
34511+ }
34512
34513 result = acpi_processor_register_performance(data->acpi_data, cpu);
34514 if (result)
34515@@ -832,7 +835,9 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
34516 policy->cur = acpi_cpufreq_guess_freq(data, policy->cpu);
34517 break;
34518 case ACPI_ADR_SPACE_FIXED_HARDWARE:
34519- acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
34520+ pax_open_kernel();
34521+ *(void **)&acpi_cpufreq_driver.get = get_cur_freq_on_cpu;
34522+ pax_close_kernel();
34523 policy->cur = get_cur_freq_on_cpu(cpu);
34524 break;
34525 default:
34526@@ -843,8 +848,11 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
34527 acpi_processor_notify_smm(THIS_MODULE);
34528
34529 /* Check for APERF/MPERF support in hardware */
34530- if (boot_cpu_has(X86_FEATURE_APERFMPERF))
34531- acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
34532+ if (boot_cpu_has(X86_FEATURE_APERFMPERF)) {
34533+ pax_open_kernel();
34534+ *(void **)&acpi_cpufreq_driver.getavg = cpufreq_get_measured_perf;
34535+ pax_close_kernel();
34536+ }
34537
34538 pr_debug("CPU%u - ACPI performance management activated.\n", cpu);
34539 for (i = 0; i < perf->state_count; i++)
34540diff --git a/drivers/cpufreq/cpufreq.c b/drivers/cpufreq/cpufreq.c
34541index 178fe7a..5ee8501 100644
34542--- a/drivers/cpufreq/cpufreq.c
34543+++ b/drivers/cpufreq/cpufreq.c
34544@@ -1853,7 +1853,7 @@ static int __cpuinit cpufreq_cpu_callback(struct notifier_block *nfb,
34545 return NOTIFY_OK;
34546 }
34547
34548-static struct notifier_block __refdata cpufreq_cpu_notifier = {
34549+static struct notifier_block cpufreq_cpu_notifier = {
34550 .notifier_call = cpufreq_cpu_callback,
34551 };
34552
34553@@ -1885,8 +1885,11 @@ int cpufreq_register_driver(struct cpufreq_driver *driver_data)
34554
34555 pr_debug("trying to register driver %s\n", driver_data->name);
34556
34557- if (driver_data->setpolicy)
34558- driver_data->flags |= CPUFREQ_CONST_LOOPS;
34559+ if (driver_data->setpolicy) {
34560+ pax_open_kernel();
34561+ *(u8 *)&driver_data->flags |= CPUFREQ_CONST_LOOPS;
34562+ pax_close_kernel();
34563+ }
34564
34565 write_lock_irqsave(&cpufreq_driver_lock, flags);
34566 if (cpufreq_driver) {
34567diff --git a/drivers/cpufreq/cpufreq_governor.c b/drivers/cpufreq/cpufreq_governor.c
34568index 5af40ad..ddf907b 100644
34569--- a/drivers/cpufreq/cpufreq_governor.c
34570+++ b/drivers/cpufreq/cpufreq_governor.c
34571@@ -235,7 +235,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
34572 struct dbs_data *dbs_data;
34573 struct od_cpu_dbs_info_s *od_dbs_info = NULL;
34574 struct cs_cpu_dbs_info_s *cs_dbs_info = NULL;
34575- struct od_ops *od_ops = NULL;
34576+ const struct od_ops *od_ops = NULL;
34577 struct od_dbs_tuners *od_tuners = NULL;
34578 struct cs_dbs_tuners *cs_tuners = NULL;
34579 struct cpu_dbs_common_info *cpu_cdbs;
34580@@ -298,7 +298,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
34581
34582 if ((cdata->governor == GOV_CONSERVATIVE) &&
34583 (!policy->governor->initialized)) {
34584- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
34585+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
34586
34587 cpufreq_register_notifier(cs_ops->notifier_block,
34588 CPUFREQ_TRANSITION_NOTIFIER);
34589@@ -315,7 +315,7 @@ int cpufreq_governor_dbs(struct cpufreq_policy *policy,
34590
34591 if ((dbs_data->cdata->governor == GOV_CONSERVATIVE) &&
34592 (policy->governor->initialized == 1)) {
34593- struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
34594+ const struct cs_ops *cs_ops = dbs_data->cdata->gov_ops;
34595
34596 cpufreq_unregister_notifier(cs_ops->notifier_block,
34597 CPUFREQ_TRANSITION_NOTIFIER);
34598diff --git a/drivers/cpufreq/cpufreq_governor.h b/drivers/cpufreq/cpufreq_governor.h
34599index e16a961..0e68927 100644
34600--- a/drivers/cpufreq/cpufreq_governor.h
34601+++ b/drivers/cpufreq/cpufreq_governor.h
34602@@ -204,7 +204,7 @@ struct common_dbs_data {
34603 void (*exit)(struct dbs_data *dbs_data);
34604
34605 /* Governor specific ops, see below */
34606- void *gov_ops;
34607+ const void *gov_ops;
34608 };
34609
34610 /* Governer Per policy data */
34611diff --git a/drivers/cpufreq/cpufreq_ondemand.c b/drivers/cpufreq/cpufreq_ondemand.c
34612index 93eb5cb..f8ab572 100644
34613--- a/drivers/cpufreq/cpufreq_ondemand.c
34614+++ b/drivers/cpufreq/cpufreq_ondemand.c
34615@@ -615,14 +615,18 @@ void od_register_powersave_bias_handler(unsigned int (*f)
34616 (struct cpufreq_policy *, unsigned int, unsigned int),
34617 unsigned int powersave_bias)
34618 {
34619- od_ops.powersave_bias_target = f;
34620+ pax_open_kernel();
34621+ *(void **)&od_ops.powersave_bias_target = f;
34622+ pax_close_kernel();
34623 od_set_powersave_bias(powersave_bias);
34624 }
34625 EXPORT_SYMBOL_GPL(od_register_powersave_bias_handler);
34626
34627 void od_unregister_powersave_bias_handler(void)
34628 {
34629- od_ops.powersave_bias_target = generic_powersave_bias_target;
34630+ pax_open_kernel();
34631+ *(void **)&od_ops.powersave_bias_target = generic_powersave_bias_target;
34632+ pax_close_kernel();
34633 od_set_powersave_bias(0);
34634 }
34635 EXPORT_SYMBOL_GPL(od_unregister_powersave_bias_handler);
34636diff --git a/drivers/cpufreq/cpufreq_stats.c b/drivers/cpufreq/cpufreq_stats.c
34637index bfd6273..e39dd63 100644
34638--- a/drivers/cpufreq/cpufreq_stats.c
34639+++ b/drivers/cpufreq/cpufreq_stats.c
34640@@ -365,7 +365,7 @@ static int __cpuinit cpufreq_stat_cpu_callback(struct notifier_block *nfb,
34641 }
34642
34643 /* priority=1 so this will get called before cpufreq_remove_dev */
34644-static struct notifier_block cpufreq_stat_cpu_notifier __refdata = {
34645+static struct notifier_block cpufreq_stat_cpu_notifier = {
34646 .notifier_call = cpufreq_stat_cpu_callback,
34647 .priority = 1,
34648 };
34649diff --git a/drivers/cpufreq/p4-clockmod.c b/drivers/cpufreq/p4-clockmod.c
34650index 421ef37..e708530c 100644
34651--- a/drivers/cpufreq/p4-clockmod.c
34652+++ b/drivers/cpufreq/p4-clockmod.c
34653@@ -160,10 +160,14 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
34654 case 0x0F: /* Core Duo */
34655 case 0x16: /* Celeron Core */
34656 case 0x1C: /* Atom */
34657- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34658+ pax_open_kernel();
34659+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34660+ pax_close_kernel();
34661 return speedstep_get_frequency(SPEEDSTEP_CPU_PCORE);
34662 case 0x0D: /* Pentium M (Dothan) */
34663- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34664+ pax_open_kernel();
34665+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34666+ pax_close_kernel();
34667 /* fall through */
34668 case 0x09: /* Pentium M (Banias) */
34669 return speedstep_get_frequency(SPEEDSTEP_CPU_PM);
34670@@ -175,7 +179,9 @@ static unsigned int cpufreq_p4_get_frequency(struct cpuinfo_x86 *c)
34671
34672 /* on P-4s, the TSC runs with constant frequency independent whether
34673 * throttling is active or not. */
34674- p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34675+ pax_open_kernel();
34676+ *(u8 *)&p4clockmod_driver.flags |= CPUFREQ_CONST_LOOPS;
34677+ pax_close_kernel();
34678
34679 if (speedstep_detect_processor() == SPEEDSTEP_CPU_P4M) {
34680 printk(KERN_WARNING PFX "Warning: Pentium 4-M detected. "
34681diff --git a/drivers/cpufreq/sparc-us3-cpufreq.c b/drivers/cpufreq/sparc-us3-cpufreq.c
34682index c71ee14..7c2e183 100644
34683--- a/drivers/cpufreq/sparc-us3-cpufreq.c
34684+++ b/drivers/cpufreq/sparc-us3-cpufreq.c
34685@@ -18,14 +18,12 @@
34686 #include <asm/head.h>
34687 #include <asm/timer.h>
34688
34689-static struct cpufreq_driver *cpufreq_us3_driver;
34690-
34691 struct us3_freq_percpu_info {
34692 struct cpufreq_frequency_table table[4];
34693 };
34694
34695 /* Indexed by cpu number. */
34696-static struct us3_freq_percpu_info *us3_freq_table;
34697+static struct us3_freq_percpu_info us3_freq_table[NR_CPUS];
34698
34699 /* UltraSPARC-III has three dividers: 1, 2, and 32. These are controlled
34700 * in the Safari config register.
34701@@ -186,12 +184,25 @@ static int __init us3_freq_cpu_init(struct cpufreq_policy *policy)
34702
34703 static int us3_freq_cpu_exit(struct cpufreq_policy *policy)
34704 {
34705- if (cpufreq_us3_driver)
34706- us3_set_cpu_divider_index(policy, 0);
34707+ us3_set_cpu_divider_index(policy->cpu, 0);
34708
34709 return 0;
34710 }
34711
34712+static int __init us3_freq_init(void);
34713+static void __exit us3_freq_exit(void);
34714+
34715+static struct cpufreq_driver cpufreq_us3_driver = {
34716+ .init = us3_freq_cpu_init,
34717+ .verify = us3_freq_verify,
34718+ .target = us3_freq_target,
34719+ .get = us3_freq_get,
34720+ .exit = us3_freq_cpu_exit,
34721+ .owner = THIS_MODULE,
34722+ .name = "UltraSPARC-III",
34723+
34724+};
34725+
34726 static int __init us3_freq_init(void)
34727 {
34728 unsigned long manuf, impl, ver;
34729@@ -208,57 +219,15 @@ static int __init us3_freq_init(void)
34730 (impl == CHEETAH_IMPL ||
34731 impl == CHEETAH_PLUS_IMPL ||
34732 impl == JAGUAR_IMPL ||
34733- impl == PANTHER_IMPL)) {
34734- struct cpufreq_driver *driver;
34735-
34736- ret = -ENOMEM;
34737- driver = kzalloc(sizeof(struct cpufreq_driver), GFP_KERNEL);
34738- if (!driver)
34739- goto err_out;
34740-
34741- us3_freq_table = kzalloc(
34742- (NR_CPUS * sizeof(struct us3_freq_percpu_info)),
34743- GFP_KERNEL);
34744- if (!us3_freq_table)
34745- goto err_out;
34746-
34747- driver->init = us3_freq_cpu_init;
34748- driver->verify = us3_freq_verify;
34749- driver->target = us3_freq_target;
34750- driver->get = us3_freq_get;
34751- driver->exit = us3_freq_cpu_exit;
34752- driver->owner = THIS_MODULE,
34753- strcpy(driver->name, "UltraSPARC-III");
34754-
34755- cpufreq_us3_driver = driver;
34756- ret = cpufreq_register_driver(driver);
34757- if (ret)
34758- goto err_out;
34759-
34760- return 0;
34761-
34762-err_out:
34763- if (driver) {
34764- kfree(driver);
34765- cpufreq_us3_driver = NULL;
34766- }
34767- kfree(us3_freq_table);
34768- us3_freq_table = NULL;
34769- return ret;
34770- }
34771+ impl == PANTHER_IMPL))
34772+ return cpufreq_register_driver(&cpufreq_us3_driver);
34773
34774 return -ENODEV;
34775 }
34776
34777 static void __exit us3_freq_exit(void)
34778 {
34779- if (cpufreq_us3_driver) {
34780- cpufreq_unregister_driver(cpufreq_us3_driver);
34781- kfree(cpufreq_us3_driver);
34782- cpufreq_us3_driver = NULL;
34783- kfree(us3_freq_table);
34784- us3_freq_table = NULL;
34785- }
34786+ cpufreq_unregister_driver(&cpufreq_us3_driver);
34787 }
34788
34789 MODULE_AUTHOR("David S. Miller <davem@redhat.com>");
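
The sparc rewrite above trades a kzalloc'd, pointer-patched cpufreq_driver for a static definition. Besides deleting the allocation and error-unwind boilerplate, a static object is what lets the constification machinery in the rest of this patch keep the driver's function pointers in write-protected memory, out of reach of a heap overwrite. Condensed from the hunk itself:

static struct cpufreq_driver cpufreq_us3_driver = {
	.init	= us3_freq_cpu_init,
	.verify	= us3_freq_verify,
	.target	= us3_freq_target,
	.get	= us3_freq_get,
	.exit	= us3_freq_cpu_exit,
	.owner	= THIS_MODULE,
	.name	= "UltraSPARC-III",
};

/* registration then collapses to cpufreq_register_driver(&cpufreq_us3_driver),
 * with nothing to clean up on failure */
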
34790diff --git a/drivers/cpufreq/speedstep-centrino.c b/drivers/cpufreq/speedstep-centrino.c
34791index 618e6f4..e89d915 100644
34792--- a/drivers/cpufreq/speedstep-centrino.c
34793+++ b/drivers/cpufreq/speedstep-centrino.c
34794@@ -353,8 +353,11 @@ static int centrino_cpu_init(struct cpufreq_policy *policy)
34795 !cpu_has(cpu, X86_FEATURE_EST))
34796 return -ENODEV;
34797
34798- if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC))
34799- centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
34800+ if (cpu_has(cpu, X86_FEATURE_CONSTANT_TSC)) {
34801+ pax_open_kernel();
34802+ *(u8 *)&centrino_driver.flags |= CPUFREQ_CONST_LOOPS;
34803+ pax_close_kernel();
34804+ }
34805
34806 if (policy->cpu != 0)
34807 return -ENODEV;
34808diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c
34809index c3a93fe..e808f24 100644
34810--- a/drivers/cpuidle/cpuidle.c
34811+++ b/drivers/cpuidle/cpuidle.c
34812@@ -254,7 +254,7 @@ static int poll_idle(struct cpuidle_device *dev,
34813
34814 static void poll_idle_init(struct cpuidle_driver *drv)
34815 {
34816- struct cpuidle_state *state = &drv->states[0];
34817+ cpuidle_state_no_const *state = &drv->states[0];
34818
34819 snprintf(state->name, CPUIDLE_NAME_LEN, "POLL");
34820 snprintf(state->desc, CPUIDLE_DESC_LEN, "CPUIDLE CORE POLL IDLE");
34821diff --git a/drivers/cpuidle/governor.c b/drivers/cpuidle/governor.c
34822index ea2f8e7..70ac501 100644
34823--- a/drivers/cpuidle/governor.c
34824+++ b/drivers/cpuidle/governor.c
34825@@ -87,7 +87,7 @@ int cpuidle_register_governor(struct cpuidle_governor *gov)
34826 mutex_lock(&cpuidle_lock);
34827 if (__cpuidle_find_governor(gov->name) == NULL) {
34828 ret = 0;
34829- list_add_tail(&gov->governor_list, &cpuidle_governors);
34830+ pax_list_add_tail((struct list_head *)&gov->governor_list, &cpuidle_governors);
34831 if (!cpuidle_curr_governor ||
34832 cpuidle_curr_governor->rating < gov->rating)
34833 cpuidle_switch_governor(gov);
34834@@ -135,7 +135,7 @@ void cpuidle_unregister_governor(struct cpuidle_governor *gov)
34835 new_gov = cpuidle_replace_governor(gov->rating);
34836 cpuidle_switch_governor(new_gov);
34837 }
34838- list_del(&gov->governor_list);
34839+ pax_list_del((struct list_head *)&gov->governor_list);
34840 mutex_unlock(&cpuidle_lock);
34841 }
34842
34843diff --git a/drivers/cpuidle/sysfs.c b/drivers/cpuidle/sysfs.c
34844index 428754a..8bdf9cc 100644
34845--- a/drivers/cpuidle/sysfs.c
34846+++ b/drivers/cpuidle/sysfs.c
34847@@ -131,7 +131,7 @@ static struct attribute *cpuidle_switch_attrs[] = {
34848 NULL
34849 };
34850
34851-static struct attribute_group cpuidle_attr_group = {
34852+static attribute_group_no_const cpuidle_attr_group = {
34853 .attrs = cpuidle_default_attrs,
34854 .name = "cpuidle",
34855 };
34856diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c
34857index 3b36797..db0b0c0 100644
34858--- a/drivers/devfreq/devfreq.c
34859+++ b/drivers/devfreq/devfreq.c
34860@@ -477,7 +477,7 @@ struct devfreq *devfreq_add_device(struct device *dev,
34861 GFP_KERNEL);
34862 devfreq->last_stat_updated = jiffies;
34863
34864- dev_set_name(&devfreq->dev, dev_name(dev));
34865+ dev_set_name(&devfreq->dev, "%s", dev_name(dev));
34866 err = device_register(&devfreq->dev);
34867 if (err) {
34868 put_device(&devfreq->dev);
34869@@ -588,7 +588,7 @@ int devfreq_add_governor(struct devfreq_governor *governor)
34870 goto err_out;
34871 }
34872
34873- list_add(&governor->node, &devfreq_governor_list);
34874+ pax_list_add((struct list_head *)&governor->node, &devfreq_governor_list);
34875
34876 list_for_each_entry(devfreq, &devfreq_list, node) {
34877 int ret = 0;
34878@@ -676,7 +676,7 @@ int devfreq_remove_governor(struct devfreq_governor *governor)
34879 }
34880 }
34881
34882- list_del(&governor->node);
34883+ pax_list_del((struct list_head *)&governor->node);
34884 err_out:
34885 mutex_unlock(&devfreq_list_lock);
34886
34887diff --git a/drivers/dma/sh/shdma.c b/drivers/dma/sh/shdma.c
34888index b70709b..1d8d02a 100644
34889--- a/drivers/dma/sh/shdma.c
34890+++ b/drivers/dma/sh/shdma.c
34891@@ -476,7 +476,7 @@ static int sh_dmae_nmi_handler(struct notifier_block *self,
34892 return ret;
34893 }
34894
34895-static struct notifier_block sh_dmae_nmi_notifier __read_mostly = {
34896+static struct notifier_block sh_dmae_nmi_notifier = {
34897 .notifier_call = sh_dmae_nmi_handler,
34898
34899 /* Run before NMI debug handler and KGDB */
34900diff --git a/drivers/edac/edac_mc_sysfs.c b/drivers/edac/edac_mc_sysfs.c
34901index 67610a6..30f8a11 100644
34902--- a/drivers/edac/edac_mc_sysfs.c
34903+++ b/drivers/edac/edac_mc_sysfs.c
34904@@ -148,7 +148,7 @@ static const char * const edac_caps[] = {
34905 struct dev_ch_attribute {
34906 struct device_attribute attr;
34907 int channel;
34908-};
34909+} __do_const;
34910
34911 #define DEVICE_CHANNEL(_name, _mode, _show, _store, _var) \
34912 struct dev_ch_attribute dev_attr_legacy_##_name = \
34913@@ -1003,14 +1003,16 @@ int edac_create_sysfs_mci_device(struct mem_ctl_info *mci)
34914 }
34915
34916 if (mci->set_sdram_scrub_rate || mci->get_sdram_scrub_rate) {
34917+ pax_open_kernel();
34918 if (mci->get_sdram_scrub_rate) {
34919- dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
34920- dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
34921+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IRUGO;
34922+ *(void **)&dev_attr_sdram_scrub_rate.show = &mci_sdram_scrub_rate_show;
34923 }
34924 if (mci->set_sdram_scrub_rate) {
34925- dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
34926- dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
34927+ *(umode_t *)&dev_attr_sdram_scrub_rate.attr.mode |= S_IWUSR;
34928+ *(void **)&dev_attr_sdram_scrub_rate.store = &mci_sdram_scrub_rate_store;
34929 }
34930+ pax_close_kernel();
34931 err = device_create_file(&mci->dev,
34932 &dev_attr_sdram_scrub_rate);
34933 if (err) {
34934diff --git a/drivers/edac/edac_pci_sysfs.c b/drivers/edac/edac_pci_sysfs.c
34935index e8658e4..22746d6 100644
34936--- a/drivers/edac/edac_pci_sysfs.c
34937+++ b/drivers/edac/edac_pci_sysfs.c
34938@@ -26,8 +26,8 @@ static int edac_pci_log_pe = 1; /* log PCI parity errors */
34939 static int edac_pci_log_npe = 1; /* log PCI non-parity error errors */
34940 static int edac_pci_poll_msec = 1000; /* one second workq period */
34941
34942-static atomic_t pci_parity_count = ATOMIC_INIT(0);
34943-static atomic_t pci_nonparity_count = ATOMIC_INIT(0);
34944+static atomic_unchecked_t pci_parity_count = ATOMIC_INIT(0);
34945+static atomic_unchecked_t pci_nonparity_count = ATOMIC_INIT(0);
34946
34947 static struct kobject *edac_pci_top_main_kobj;
34948 static atomic_t edac_pci_sysfs_refcount = ATOMIC_INIT(0);
34949@@ -235,7 +235,7 @@ struct edac_pci_dev_attribute {
34950 void *value;
34951 ssize_t(*show) (void *, char *);
34952 ssize_t(*store) (void *, const char *, size_t);
34953-};
34954+} __do_const;
34955
34956 /* Set of show/store abstract level functions for PCI Parity object */
34957 static ssize_t edac_pci_dev_show(struct kobject *kobj, struct attribute *attr,
34958@@ -579,7 +579,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34959 edac_printk(KERN_CRIT, EDAC_PCI,
34960 "Signaled System Error on %s\n",
34961 pci_name(dev));
34962- atomic_inc(&pci_nonparity_count);
34963+ atomic_inc_unchecked(&pci_nonparity_count);
34964 }
34965
34966 if (status & (PCI_STATUS_PARITY)) {
34967@@ -587,7 +587,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34968 "Master Data Parity Error on %s\n",
34969 pci_name(dev));
34970
34971- atomic_inc(&pci_parity_count);
34972+ atomic_inc_unchecked(&pci_parity_count);
34973 }
34974
34975 if (status & (PCI_STATUS_DETECTED_PARITY)) {
34976@@ -595,7 +595,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34977 "Detected Parity Error on %s\n",
34978 pci_name(dev));
34979
34980- atomic_inc(&pci_parity_count);
34981+ atomic_inc_unchecked(&pci_parity_count);
34982 }
34983 }
34984
34985@@ -618,7 +618,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34986 edac_printk(KERN_CRIT, EDAC_PCI, "Bridge "
34987 "Signaled System Error on %s\n",
34988 pci_name(dev));
34989- atomic_inc(&pci_nonparity_count);
34990+ atomic_inc_unchecked(&pci_nonparity_count);
34991 }
34992
34993 if (status & (PCI_STATUS_PARITY)) {
34994@@ -626,7 +626,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
34995 "Master Data Parity Error on "
34996 "%s\n", pci_name(dev));
34997
34998- atomic_inc(&pci_parity_count);
34999+ atomic_inc_unchecked(&pci_parity_count);
35000 }
35001
35002 if (status & (PCI_STATUS_DETECTED_PARITY)) {
35003@@ -634,7 +634,7 @@ static void edac_pci_dev_parity_test(struct pci_dev *dev)
35004 "Detected Parity Error on %s\n",
35005 pci_name(dev));
35006
35007- atomic_inc(&pci_parity_count);
35008+ atomic_inc_unchecked(&pci_parity_count);
35009 }
35010 }
35011 }
35012@@ -672,7 +672,7 @@ void edac_pci_do_parity_check(void)
35013 if (!check_pci_errors)
35014 return;
35015
35016- before_count = atomic_read(&pci_parity_count);
35017+ before_count = atomic_read_unchecked(&pci_parity_count);
35018
35019 /* scan all PCI devices looking for a Parity Error on devices and
35020 * bridges.
35021@@ -684,7 +684,7 @@ void edac_pci_do_parity_check(void)
35022 /* Only if operator has selected panic on PCI Error */
35023 if (edac_pci_get_panic_on_pe()) {
35024 /* If the count is different 'after' from 'before' */
35025- if (before_count != atomic_read(&pci_parity_count))
35026+ if (before_count != atomic_read_unchecked(&pci_parity_count))
35027 panic("EDAC: PCI Parity Error");
35028 }
35029 }
35030diff --git a/drivers/edac/mce_amd.h b/drivers/edac/mce_amd.h
35031index 51b7e3a..aa8a3e8 100644
35032--- a/drivers/edac/mce_amd.h
35033+++ b/drivers/edac/mce_amd.h
35034@@ -77,7 +77,7 @@ struct amd_decoder_ops {
35035 bool (*mc0_mce)(u16, u8);
35036 bool (*mc1_mce)(u16, u8);
35037 bool (*mc2_mce)(u16, u8);
35038-};
35039+} __no_const;
35040
35041 void amd_report_gart_errors(bool);
35042 void amd_register_ecc_decoder(void (*f)(int, struct mce *));
35043diff --git a/drivers/firewire/core-card.c b/drivers/firewire/core-card.c
35044index 57ea7f4..789e3c3 100644
35045--- a/drivers/firewire/core-card.c
35046+++ b/drivers/firewire/core-card.c
35047@@ -680,7 +680,7 @@ EXPORT_SYMBOL_GPL(fw_card_release);
35048
35049 void fw_core_remove_card(struct fw_card *card)
35050 {
35051- struct fw_card_driver dummy_driver = dummy_driver_template;
35052+ fw_card_driver_no_const dummy_driver = dummy_driver_template;
35053
35054 card->driver->update_phy_reg(card, 4,
35055 PHY_LINK_ACTIVE | PHY_CONTENDER, 0);
35056diff --git a/drivers/firewire/core-device.c b/drivers/firewire/core-device.c
35057index 664a6ff..af13580 100644
35058--- a/drivers/firewire/core-device.c
35059+++ b/drivers/firewire/core-device.c
35060@@ -232,7 +232,7 @@ EXPORT_SYMBOL(fw_device_enable_phys_dma);
35061 struct config_rom_attribute {
35062 struct device_attribute attr;
35063 u32 key;
35064-};
35065+} __do_const;
35066
35067 static ssize_t show_immediate(struct device *dev,
35068 struct device_attribute *dattr, char *buf)
35069diff --git a/drivers/firewire/core-transaction.c b/drivers/firewire/core-transaction.c
35070index 28a94c7..58da63a 100644
35071--- a/drivers/firewire/core-transaction.c
35072+++ b/drivers/firewire/core-transaction.c
35073@@ -38,6 +38,7 @@
35074 #include <linux/timer.h>
35075 #include <linux/types.h>
35076 #include <linux/workqueue.h>
35077+#include <linux/sched.h>
35078
35079 #include <asm/byteorder.h>
35080
35081diff --git a/drivers/firewire/core.h b/drivers/firewire/core.h
35082index 515a42c..5ecf3ba 100644
35083--- a/drivers/firewire/core.h
35084+++ b/drivers/firewire/core.h
35085@@ -111,6 +111,7 @@ struct fw_card_driver {
35086
35087 int (*stop_iso)(struct fw_iso_context *ctx);
35088 };
35089+typedef struct fw_card_driver __no_const fw_card_driver_no_const;
35090
35091 void fw_card_initialize(struct fw_card *card,
35092 const struct fw_card_driver *driver, struct device *device);
35093diff --git a/drivers/firmware/dmi-id.c b/drivers/firmware/dmi-id.c
35094index 94a58a0..f5eba42 100644
35095--- a/drivers/firmware/dmi-id.c
35096+++ b/drivers/firmware/dmi-id.c
35097@@ -16,7 +16,7 @@
35098 struct dmi_device_attribute{
35099 struct device_attribute dev_attr;
35100 int field;
35101-};
35102+} __do_const;
35103 #define to_dmi_dev_attr(_dev_attr) \
35104 container_of(_dev_attr, struct dmi_device_attribute, dev_attr)
35105
35106diff --git a/drivers/firmware/dmi_scan.c b/drivers/firmware/dmi_scan.c
35107index b95159b..841ae55 100644
35108--- a/drivers/firmware/dmi_scan.c
35109+++ b/drivers/firmware/dmi_scan.c
35110@@ -497,11 +497,6 @@ void __init dmi_scan_machine(void)
35111 }
35112 }
35113 else {
35114- /*
35115- * no iounmap() for that ioremap(); it would be a no-op, but
35116- * it's so early in setup that sucker gets confused into doing
35117- * what it shouldn't if we actually call it.
35118- */
35119 p = dmi_ioremap(0xF0000, 0x10000);
35120 if (p == NULL)
35121 goto error;
35122@@ -786,7 +781,7 @@ int dmi_walk(void (*decode)(const struct dmi_header *, void *),
35123 if (buf == NULL)
35124 return -1;
35125
35126- dmi_table(buf, dmi_len, dmi_num, decode, private_data);
35127+ dmi_table((char __force_kernel *)buf, dmi_len, dmi_num, decode, private_data);
35128
35129 iounmap(buf);
35130 return 0;
35131diff --git a/drivers/firmware/efi/efi.c b/drivers/firmware/efi/efi.c
35132index 5145fa3..0d3babd 100644
35133--- a/drivers/firmware/efi/efi.c
35134+++ b/drivers/firmware/efi/efi.c
35135@@ -65,14 +65,16 @@ static struct attribute_group efi_subsys_attr_group = {
35136 };
35137
35138 static struct efivars generic_efivars;
35139-static struct efivar_operations generic_ops;
35140+static efivar_operations_no_const generic_ops __read_only;
35141
35142 static int generic_ops_register(void)
35143 {
35144- generic_ops.get_variable = efi.get_variable;
35145- generic_ops.set_variable = efi.set_variable;
35146- generic_ops.get_next_variable = efi.get_next_variable;
35147- generic_ops.query_variable_store = efi_query_variable_store;
35148+ pax_open_kernel();
35149+ *(void **)&generic_ops.get_variable = efi.get_variable;
35150+ *(void **)&generic_ops.set_variable = efi.set_variable;
35151+ *(void **)&generic_ops.get_next_variable = efi.get_next_variable;
35152+ *(void **)&generic_ops.query_variable_store = efi_query_variable_store;
35153+ pax_close_kernel();
35154
35155 return efivars_register(&generic_efivars, &generic_ops, efi_kobj);
35156 }
35157diff --git a/drivers/firmware/efi/efivars.c b/drivers/firmware/efi/efivars.c
35158index 8bd1bb6..c48b0c6 100644
35159--- a/drivers/firmware/efi/efivars.c
35160+++ b/drivers/firmware/efi/efivars.c
35161@@ -452,7 +452,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
35162 static int
35163 create_efivars_bin_attributes(void)
35164 {
35165- struct bin_attribute *attr;
35166+ bin_attribute_no_const *attr;
35167 int error;
35168
35169 /* new_var */
35170diff --git a/drivers/firmware/google/memconsole.c b/drivers/firmware/google/memconsole.c
35171index 2a90ba6..07f3733 100644
35172--- a/drivers/firmware/google/memconsole.c
35173+++ b/drivers/firmware/google/memconsole.c
35174@@ -147,7 +147,9 @@ static int __init memconsole_init(void)
35175 if (!found_memconsole())
35176 return -ENODEV;
35177
35178- memconsole_bin_attr.size = memconsole_length;
35179+ pax_open_kernel();
35180+ *(size_t *)&memconsole_bin_attr.size = memconsole_length;
35181+ pax_close_kernel();
35182
35183 ret = sysfs_create_bin_file(firmware_kobj, &memconsole_bin_attr);
35184
35185diff --git a/drivers/gpio/gpio-ich.c b/drivers/gpio/gpio-ich.c
35186index e16d932..f0206ef 100644
35187--- a/drivers/gpio/gpio-ich.c
35188+++ b/drivers/gpio/gpio-ich.c
35189@@ -69,7 +69,7 @@ struct ichx_desc {
35190 /* Some chipsets have quirks, let these use their own request/get */
35191 int (*request)(struct gpio_chip *chip, unsigned offset);
35192 int (*get)(struct gpio_chip *chip, unsigned offset);
35193-};
35194+} __do_const;
35195
35196 static struct {
35197 spinlock_t lock;
35198diff --git a/drivers/gpio/gpio-vr41xx.c b/drivers/gpio/gpio-vr41xx.c
35199index 9902732..64b62dd 100644
35200--- a/drivers/gpio/gpio-vr41xx.c
35201+++ b/drivers/gpio/gpio-vr41xx.c
35202@@ -204,7 +204,7 @@ static int giu_get_irq(unsigned int irq)
35203 printk(KERN_ERR "spurious GIU interrupt: %04x(%04x),%04x(%04x)\n",
35204 maskl, pendl, maskh, pendh);
35205
35206- atomic_inc(&irq_err_count);
35207+ atomic_inc_unchecked(&irq_err_count);
35208
35209 return -EINVAL;
35210 }
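This is the first of many `atomic_t` to `atomic_unchecked_t` conversions in this section. Under PaX REFCOUNT, ordinary `atomic_t` operations detect overflow so reference counts cannot be wrapped by an attacker; counters that are pure statistics (IRQ tallies, vblank counts, DRM event counts) may legitimately wrap, so they move to the `_unchecked` variants to avoid false positives. Sketch of the split:

	static atomic_t obj_refcount = ATOMIC_INIT(1);		/* overflow-checked */
	static atomic_unchecked_t irq_stats = ATOMIC_INIT(0);	/* free to wrap */

	static void on_irq(void)
	{
		atomic_inc_unchecked(&irq_stats);	/* statistic: wrap is harmless */
	}

	static void get_obj(void)
	{
		atomic_inc(&obj_refcount);	/* refcount: wrap would be a bug */
	}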
35211diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
35212index ed1334e..ee0dd42 100644
35213--- a/drivers/gpu/drm/drm_crtc_helper.c
35214+++ b/drivers/gpu/drm/drm_crtc_helper.c
35215@@ -321,7 +321,7 @@ static bool drm_encoder_crtc_ok(struct drm_encoder *encoder,
35216 struct drm_crtc *tmp;
35217 int crtc_mask = 1;
35218
35219- WARN(!crtc, "checking null crtc?\n");
35220+ BUG_ON(!crtc);
35221
35222 dev = crtc->dev;
35223
35224diff --git a/drivers/gpu/drm/drm_drv.c b/drivers/gpu/drm/drm_drv.c
35225index 9cc247f..36aa285 100644
35226--- a/drivers/gpu/drm/drm_drv.c
35227+++ b/drivers/gpu/drm/drm_drv.c
35228@@ -306,7 +306,7 @@ module_exit(drm_core_exit);
35229 /**
35230 * Copy and IOCTL return string to user space
35231 */
35232-static int drm_copy_field(char *buf, size_t *buf_len, const char *value)
35233+static int drm_copy_field(char __user *buf, size_t *buf_len, const char *value)
35234 {
35235 int len;
35236
35237@@ -376,7 +376,7 @@ long drm_ioctl(struct file *filp,
35238 struct drm_file *file_priv = filp->private_data;
35239 struct drm_device *dev;
35240 const struct drm_ioctl_desc *ioctl = NULL;
35241- drm_ioctl_t *func;
35242+ drm_ioctl_no_const_t func;
35243 unsigned int nr = DRM_IOCTL_NR(cmd);
35244 int retcode = -EINVAL;
35245 char stack_kdata[128];
35246@@ -389,7 +389,7 @@ long drm_ioctl(struct file *filp,
35247 return -ENODEV;
35248
35249 atomic_inc(&dev->ioctl_count);
35250- atomic_inc(&dev->counts[_DRM_STAT_IOCTLS]);
35251+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_IOCTLS]);
35252 ++file_priv->ioctl_count;
35253
35254 if ((nr >= DRM_CORE_IOCTL_COUNT) &&
35255diff --git a/drivers/gpu/drm/drm_fops.c b/drivers/gpu/drm/drm_fops.c
35256index 429e07d..e681a2c 100644
35257--- a/drivers/gpu/drm/drm_fops.c
35258+++ b/drivers/gpu/drm/drm_fops.c
35259@@ -71,7 +71,7 @@ static int drm_setup(struct drm_device * dev)
35260 }
35261
35262 for (i = 0; i < ARRAY_SIZE(dev->counts); i++)
35263- atomic_set(&dev->counts[i], 0);
35264+ atomic_set_unchecked(&dev->counts[i], 0);
35265
35266 dev->sigdata.lock = NULL;
35267
35268@@ -135,7 +135,7 @@ int drm_open(struct inode *inode, struct file *filp)
35269 if (drm_device_is_unplugged(dev))
35270 return -ENODEV;
35271
35272- if (!dev->open_count++)
35273+ if (local_inc_return(&dev->open_count) == 1)
35274 need_setup = 1;
35275 mutex_lock(&dev->struct_mutex);
35276 old_imapping = inode->i_mapping;
35277@@ -151,7 +151,7 @@ int drm_open(struct inode *inode, struct file *filp)
35278 retcode = drm_open_helper(inode, filp, dev);
35279 if (retcode)
35280 goto err_undo;
35281- atomic_inc(&dev->counts[_DRM_STAT_OPENS]);
35282+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_OPENS]);
35283 if (need_setup) {
35284 retcode = drm_setup(dev);
35285 if (retcode)
35286@@ -166,7 +166,7 @@ err_undo:
35287 iput(container_of(dev->dev_mapping, struct inode, i_data));
35288 dev->dev_mapping = old_mapping;
35289 mutex_unlock(&dev->struct_mutex);
35290- dev->open_count--;
35291+ local_dec(&dev->open_count);
35292 return retcode;
35293 }
35294 EXPORT_SYMBOL(drm_open);
35295@@ -441,7 +441,7 @@ int drm_release(struct inode *inode, struct file *filp)
35296
35297 mutex_lock(&drm_global_mutex);
35298
35299- DRM_DEBUG("open_count = %d\n", dev->open_count);
35300+ DRM_DEBUG("open_count = %ld\n", local_read(&dev->open_count));
35301
35302 if (dev->driver->preclose)
35303 dev->driver->preclose(dev, file_priv);
35304@@ -450,10 +450,10 @@ int drm_release(struct inode *inode, struct file *filp)
35305 * Begin inline drm_release
35306 */
35307
35308- DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %d\n",
35309+ DRM_DEBUG("pid = %d, device = 0x%lx, open_count = %ld\n",
35310 task_pid_nr(current),
35311 (long)old_encode_dev(file_priv->minor->device),
35312- dev->open_count);
35313+ local_read(&dev->open_count));
35314
35315 /* Release any auth tokens that might point to this file_priv,
35316 (do that under the drm_global_mutex) */
35317@@ -550,8 +550,8 @@ int drm_release(struct inode *inode, struct file *filp)
35318 * End inline drm_release
35319 */
35320
35321- atomic_inc(&dev->counts[_DRM_STAT_CLOSES]);
35322- if (!--dev->open_count) {
35323+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_CLOSES]);
35324+ if (local_dec_and_test(&dev->open_count)) {
35325 if (atomic_read(&dev->ioctl_count)) {
35326 DRM_ERROR("Device busy: %d\n",
35327 atomic_read(&dev->ioctl_count));
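`dev->open_count` changes from a plain `int` to a `local_t`, as the `local_inc_return()`/`local_dec_and_test()`/`local_read()` calls show, so the open/close accounting is done with atomic read-modify-write operations rather than bare increments and decrements; the format strings move from `%d` to `%ld` to match, since `local_read()` returns `long`. A sketch of the counting pattern for a hypothetical device:

	#include <asm/local.h>

	static local_t open_count;

	static int dev_open(void)
	{
		/* first opener (0 -> 1) performs one-time setup */
		return local_inc_return(&open_count) == 1;
	}

	static void dev_release(void)
	{
		if (local_dec_and_test(&open_count))
			/* last closer (1 -> 0): tear down */;
	}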
35328diff --git a/drivers/gpu/drm/drm_global.c b/drivers/gpu/drm/drm_global.c
35329index f731116..629842c 100644
35330--- a/drivers/gpu/drm/drm_global.c
35331+++ b/drivers/gpu/drm/drm_global.c
35332@@ -36,7 +36,7 @@
35333 struct drm_global_item {
35334 struct mutex mutex;
35335 void *object;
35336- int refcount;
35337+ atomic_t refcount;
35338 };
35339
35340 static struct drm_global_item glob[DRM_GLOBAL_NUM];
35341@@ -49,7 +49,7 @@ void drm_global_init(void)
35342 struct drm_global_item *item = &glob[i];
35343 mutex_init(&item->mutex);
35344 item->object = NULL;
35345- item->refcount = 0;
35346+ atomic_set(&item->refcount, 0);
35347 }
35348 }
35349
35350@@ -59,7 +59,7 @@ void drm_global_release(void)
35351 for (i = 0; i < DRM_GLOBAL_NUM; ++i) {
35352 struct drm_global_item *item = &glob[i];
35353 BUG_ON(item->object != NULL);
35354- BUG_ON(item->refcount != 0);
35355+ BUG_ON(atomic_read(&item->refcount) != 0);
35356 }
35357 }
35358
35359@@ -70,7 +70,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
35360 void *object;
35361
35362 mutex_lock(&item->mutex);
35363- if (item->refcount == 0) {
35364+ if (atomic_read(&item->refcount) == 0) {
35365 item->object = kzalloc(ref->size, GFP_KERNEL);
35366 if (unlikely(item->object == NULL)) {
35367 ret = -ENOMEM;
35368@@ -83,7 +83,7 @@ int drm_global_item_ref(struct drm_global_reference *ref)
35369 goto out_err;
35370
35371 }
35372- ++item->refcount;
35373+ atomic_inc(&item->refcount);
35374 ref->object = item->object;
35375 object = item->object;
35376 mutex_unlock(&item->mutex);
35377@@ -100,9 +100,9 @@ void drm_global_item_unref(struct drm_global_reference *ref)
35378 struct drm_global_item *item = &glob[ref->global_type];
35379
35380 mutex_lock(&item->mutex);
35381- BUG_ON(item->refcount == 0);
35382+ BUG_ON(atomic_read(&item->refcount) == 0);
35383 BUG_ON(ref->object != item->object);
35384- if (--item->refcount == 0) {
35385+ if (atomic_dec_and_test(&item->refcount)) {
35386 ref->release(ref);
35387 item->object = NULL;
35388 }
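Here the conversion runs the other way: the mutex-protected `int refcount` in `drm_global_item` becomes a genuine `atomic_t`, which both documents its role and places it under REFCOUNT overflow checking. The ref/unref pair keeps its mutex; only the counter representation changes. In isolation:

	static atomic_t item_refcount = ATOMIC_INIT(0);

	static void item_ref(void)		/* called under item->mutex */
	{
		if (atomic_read(&item_refcount) == 0)
			/* first user: allocate the shared object */;
		atomic_inc(&item_refcount);	/* overflow-checked under PaX */
	}

	static void item_unref(void)		/* called under item->mutex */
	{
		BUG_ON(atomic_read(&item_refcount) == 0);
		if (atomic_dec_and_test(&item_refcount))
			/* last user: release the shared object */;
	}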
35389diff --git a/drivers/gpu/drm/drm_info.c b/drivers/gpu/drm/drm_info.c
35390index d4b20ce..77a8d41 100644
35391--- a/drivers/gpu/drm/drm_info.c
35392+++ b/drivers/gpu/drm/drm_info.c
35393@@ -75,10 +75,14 @@ int drm_vm_info(struct seq_file *m, void *data)
35394 struct drm_local_map *map;
35395 struct drm_map_list *r_list;
35396
35397- /* Hardcoded from _DRM_FRAME_BUFFER,
35398- _DRM_REGISTERS, _DRM_SHM, _DRM_AGP, and
35399- _DRM_SCATTER_GATHER and _DRM_CONSISTENT */
35400- const char *types[] = { "FB", "REG", "SHM", "AGP", "SG", "PCI" };
35401+ static const char * const types[] = {
35402+ [_DRM_FRAME_BUFFER] = "FB",
35403+ [_DRM_REGISTERS] = "REG",
35404+ [_DRM_SHM] = "SHM",
35405+ [_DRM_AGP] = "AGP",
35406+ [_DRM_SCATTER_GATHER] = "SG",
35407+ [_DRM_CONSISTENT] = "PCI",
35408+ [_DRM_GEM] = "GEM" };
35409 const char *type;
35410 int i;
35411
35412@@ -89,7 +93,7 @@ int drm_vm_info(struct seq_file *m, void *data)
35413 map = r_list->map;
35414 if (!map)
35415 continue;
35416- if (map->type < 0 || map->type > 5)
35417+ if (map->type >= ARRAY_SIZE(types))
35418 type = "??";
35419 else
35420 type = types[map->type];
35421@@ -253,7 +257,11 @@ int drm_vma_info(struct seq_file *m, void *data)
35422 vma->vm_flags & VM_MAYSHARE ? 's' : 'p',
35423 vma->vm_flags & VM_LOCKED ? 'l' : '-',
35424 vma->vm_flags & VM_IO ? 'i' : '-',
35425+#ifdef CONFIG_GRKERNSEC_HIDESYM
35426+ 0);
35427+#else
35428 vma->vm_pgoff);
35429+#endif
35430
35431 #if defined(__i386__)
35432 pgprot = pgprot_val(vma->vm_page_prot);
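Two distinct fixes above. First, the map-type name table gains designated initializers and an `ARRAY_SIZE()` bound, replacing the hard-coded `type > 5` check that had gone stale when `_DRM_GEM` was added to the enum. Second, under `CONFIG_GRKERNSEC_HIDESYM` the `vm_pgoff` field is printed as 0, since for some mappings it encodes an address that would otherwise leak to unprivileged readers. The table idiom in isolation, with an illustrative enum:

	enum map_type { T_FB, T_REG, T_SHM };

	static const char * const type_names[] = {
		[T_FB]  = "FB",
		[T_REG] = "REG",
		[T_SHM] = "SHM",
	};

	static const char *name_of(unsigned int t)
	{
		if (t >= ARRAY_SIZE(type_names) || !type_names[t])
			return "??";	/* bound tracks the array itself */
		return type_names[t];
	}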
35433diff --git a/drivers/gpu/drm/drm_ioc32.c b/drivers/gpu/drm/drm_ioc32.c
35434index 2f4c434..dd12cd2 100644
35435--- a/drivers/gpu/drm/drm_ioc32.c
35436+++ b/drivers/gpu/drm/drm_ioc32.c
35437@@ -457,7 +457,7 @@ static int compat_drm_infobufs(struct file *file, unsigned int cmd,
35438 request = compat_alloc_user_space(nbytes);
35439 if (!access_ok(VERIFY_WRITE, request, nbytes))
35440 return -EFAULT;
35441- list = (struct drm_buf_desc *) (request + 1);
35442+ list = (struct drm_buf_desc __user *) (request + 1);
35443
35444 if (__put_user(count, &request->count)
35445 || __put_user(list, &request->list))
35446@@ -518,7 +518,7 @@ static int compat_drm_mapbufs(struct file *file, unsigned int cmd,
35447 request = compat_alloc_user_space(nbytes);
35448 if (!access_ok(VERIFY_WRITE, request, nbytes))
35449 return -EFAULT;
35450- list = (struct drm_buf_pub *) (request + 1);
35451+ list = (struct drm_buf_pub __user *) (request + 1);
35452
35453 if (__put_user(count, &request->count)
35454 || __put_user(list, &request->list))
35455@@ -1016,7 +1016,7 @@ static int compat_drm_wait_vblank(struct file *file, unsigned int cmd,
35456 return 0;
35457 }
35458
35459-drm_ioctl_compat_t *drm_compat_ioctls[] = {
35460+drm_ioctl_compat_t drm_compat_ioctls[] = {
35461 [DRM_IOCTL_NR(DRM_IOCTL_VERSION32)] = compat_drm_version,
35462 [DRM_IOCTL_NR(DRM_IOCTL_GET_UNIQUE32)] = compat_drm_getunique,
35463 [DRM_IOCTL_NR(DRM_IOCTL_GET_MAP32)] = compat_drm_getmap,
35464@@ -1062,7 +1062,6 @@ drm_ioctl_compat_t *drm_compat_ioctls[] = {
35465 long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
35466 {
35467 unsigned int nr = DRM_IOCTL_NR(cmd);
35468- drm_ioctl_compat_t *fn;
35469 int ret;
35470
35471 /* Assume that ioctls without an explicit compat routine will just
35472@@ -1072,10 +1071,8 @@ long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
35473 if (nr >= ARRAY_SIZE(drm_compat_ioctls))
35474 return drm_ioctl(filp, cmd, arg);
35475
35476- fn = drm_compat_ioctls[nr];
35477-
35478- if (fn != NULL)
35479- ret = (*fn) (filp, cmd, arg);
35480+ if (drm_compat_ioctls[nr] != NULL)
35481+ ret = (*drm_compat_ioctls[nr]) (filp, cmd, arg);
35482 else
35483 ret = drm_ioctl(filp, cmd, arg);
35484
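The compat tables here and in the driver-specific `*_ioc32.c` files below drop a level of indirection: `drm_ioctl_compat_t *drm_compat_ioctls[]` becomes `drm_ioctl_compat_t drm_compat_ioctls[]`. That only compiles if the typedef itself was changed from a function type to a function-pointer type elsewhere in this patch, which appears to be the intent; the arrays still hold the same pointers, but the single pointer typedef gives them a uniform type. The two spellings, with hypothetical names:

	/* Function-type typedef: the pointer is spelled at each use. */
	typedef long compat_fn_t(unsigned int cmd, unsigned long arg);
	static compat_fn_t *table_old[2];

	/* Pointer-type typedef: the array element is already a pointer. */
	typedef long (*compat_ptr_t)(unsigned int cmd, unsigned long arg);
	static compat_ptr_t table_new[2];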
35485diff --git a/drivers/gpu/drm/drm_ioctl.c b/drivers/gpu/drm/drm_ioctl.c
35486index e77bd8b..1571b85 100644
35487--- a/drivers/gpu/drm/drm_ioctl.c
35488+++ b/drivers/gpu/drm/drm_ioctl.c
35489@@ -252,7 +252,7 @@ int drm_getstats(struct drm_device *dev, void *data,
35490 stats->data[i].value =
35491 (file_priv->master->lock.hw_lock ? file_priv->master->lock.hw_lock->lock : 0);
35492 else
35493- stats->data[i].value = atomic_read(&dev->counts[i]);
35494+ stats->data[i].value = atomic_read_unchecked(&dev->counts[i]);
35495 stats->data[i].type = dev->types[i];
35496 }
35497
35498diff --git a/drivers/gpu/drm/drm_lock.c b/drivers/gpu/drm/drm_lock.c
35499index d752c96..fe08455 100644
35500--- a/drivers/gpu/drm/drm_lock.c
35501+++ b/drivers/gpu/drm/drm_lock.c
35502@@ -86,7 +86,7 @@ int drm_lock(struct drm_device *dev, void *data, struct drm_file *file_priv)
35503 if (drm_lock_take(&master->lock, lock->context)) {
35504 master->lock.file_priv = file_priv;
35505 master->lock.lock_time = jiffies;
35506- atomic_inc(&dev->counts[_DRM_STAT_LOCKS]);
35507+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_LOCKS]);
35508 break; /* Got lock */
35509 }
35510
35511@@ -157,7 +157,7 @@ int drm_unlock(struct drm_device *dev, void *data, struct drm_file *file_priv)
35512 return -EINVAL;
35513 }
35514
35515- atomic_inc(&dev->counts[_DRM_STAT_UNLOCKS]);
35516+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_UNLOCKS]);
35517
35518 if (drm_lock_free(&master->lock, lock->context)) {
35519 /* FIXME: Should really bail out here. */
35520diff --git a/drivers/gpu/drm/drm_stub.c b/drivers/gpu/drm/drm_stub.c
35521index 16f3ec5..b28f9ca 100644
35522--- a/drivers/gpu/drm/drm_stub.c
35523+++ b/drivers/gpu/drm/drm_stub.c
35524@@ -501,7 +501,7 @@ void drm_unplug_dev(struct drm_device *dev)
35525
35526 drm_device_set_unplugged(dev);
35527
35528- if (dev->open_count == 0) {
35529+ if (local_read(&dev->open_count) == 0) {
35530 drm_put_dev(dev);
35531 }
35532 mutex_unlock(&drm_global_mutex);
35533diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c
35534index 0229665..f61329c 100644
35535--- a/drivers/gpu/drm/drm_sysfs.c
35536+++ b/drivers/gpu/drm/drm_sysfs.c
35537@@ -499,7 +499,7 @@ EXPORT_SYMBOL(drm_sysfs_hotplug_event);
35538 int drm_sysfs_device_add(struct drm_minor *minor)
35539 {
35540 int err;
35541- char *minor_str;
35542+ const char *minor_str;
35543
35544 minor->kdev.parent = minor->dev->dev;
35545
35546diff --git a/drivers/gpu/drm/i810/i810_dma.c b/drivers/gpu/drm/i810/i810_dma.c
35547index 004ecdf..db1f6e0 100644
35548--- a/drivers/gpu/drm/i810/i810_dma.c
35549+++ b/drivers/gpu/drm/i810/i810_dma.c
35550@@ -945,8 +945,8 @@ static int i810_dma_vertex(struct drm_device *dev, void *data,
35551 dma->buflist[vertex->idx],
35552 vertex->discard, vertex->used);
35553
35554- atomic_add(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
35555- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
35556+ atomic_add_unchecked(vertex->used, &dev->counts[_DRM_STAT_SECONDARY]);
35557+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
35558 sarea_priv->last_enqueue = dev_priv->counter - 1;
35559 sarea_priv->last_dispatch = (int)hw_status[5];
35560
35561@@ -1106,8 +1106,8 @@ static int i810_dma_mc(struct drm_device *dev, void *data,
35562 i810_dma_dispatch_mc(dev, dma->buflist[mc->idx], mc->used,
35563 mc->last_render);
35564
35565- atomic_add(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
35566- atomic_inc(&dev->counts[_DRM_STAT_DMA]);
35567+ atomic_add_unchecked(mc->used, &dev->counts[_DRM_STAT_SECONDARY]);
35568+ atomic_inc_unchecked(&dev->counts[_DRM_STAT_DMA]);
35569 sarea_priv->last_enqueue = dev_priv->counter - 1;
35570 sarea_priv->last_dispatch = (int)hw_status[5];
35571
35572diff --git a/drivers/gpu/drm/i810/i810_drv.h b/drivers/gpu/drm/i810/i810_drv.h
35573index 6e0acad..93c8289 100644
35574--- a/drivers/gpu/drm/i810/i810_drv.h
35575+++ b/drivers/gpu/drm/i810/i810_drv.h
35576@@ -108,8 +108,8 @@ typedef struct drm_i810_private {
35577 int page_flipping;
35578
35579 wait_queue_head_t irq_queue;
35580- atomic_t irq_received;
35581- atomic_t irq_emitted;
35582+ atomic_unchecked_t irq_received;
35583+ atomic_unchecked_t irq_emitted;
35584
35585 int front_offset;
35586 } drm_i810_private_t;
35587diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c
35588index e913d32..4d9b351 100644
35589--- a/drivers/gpu/drm/i915/i915_debugfs.c
35590+++ b/drivers/gpu/drm/i915/i915_debugfs.c
35591@@ -499,7 +499,7 @@ static int i915_interrupt_info(struct seq_file *m, void *data)
35592 I915_READ(GTIMR));
35593 }
35594 seq_printf(m, "Interrupts received: %d\n",
35595- atomic_read(&dev_priv->irq_received));
35596+ atomic_read_unchecked(&dev_priv->irq_received));
35597 for_each_ring(ring, dev_priv, i) {
35598 if (IS_GEN6(dev) || IS_GEN7(dev)) {
35599 seq_printf(m,
35600diff --git a/drivers/gpu/drm/i915/i915_dma.c b/drivers/gpu/drm/i915/i915_dma.c
35601index 3b315ba..aac280f 100644
35602--- a/drivers/gpu/drm/i915/i915_dma.c
35603+++ b/drivers/gpu/drm/i915/i915_dma.c
35604@@ -1259,7 +1259,7 @@ static bool i915_switcheroo_can_switch(struct pci_dev *pdev)
35605 bool can_switch;
35606
35607 spin_lock(&dev->count_lock);
35608- can_switch = (dev->open_count == 0);
35609+ can_switch = (local_read(&dev->open_count) == 0);
35610 spin_unlock(&dev->count_lock);
35611 return can_switch;
35612 }
35613diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h
35614index 9669a0b..bb65176 100644
35615--- a/drivers/gpu/drm/i915/i915_drv.h
35616+++ b/drivers/gpu/drm/i915/i915_drv.h
35617@@ -915,7 +915,7 @@ typedef struct drm_i915_private {
35618 drm_dma_handle_t *status_page_dmah;
35619 struct resource mch_res;
35620
35621- atomic_t irq_received;
35622+ atomic_unchecked_t irq_received;
35623
35624 /* protects the irq masks */
35625 spinlock_t irq_lock;
35626@@ -1811,7 +1811,7 @@ extern struct i2c_adapter *intel_gmbus_get_adapter(
35627 struct drm_i915_private *dev_priv, unsigned port);
35628 extern void intel_gmbus_set_speed(struct i2c_adapter *adapter, int speed);
35629 extern void intel_gmbus_force_bit(struct i2c_adapter *adapter, bool force_bit);
35630-extern inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
35631+static inline bool intel_gmbus_is_forced_bit(struct i2c_adapter *adapter)
35632 {
35633 return container_of(adapter, struct intel_gmbus, adapter)->force_bit;
35634 }
35635diff --git a/drivers/gpu/drm/i915/i915_gem_execbuffer.c b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
35636index 117ce38..eefd237 100644
35637--- a/drivers/gpu/drm/i915/i915_gem_execbuffer.c
35638+++ b/drivers/gpu/drm/i915/i915_gem_execbuffer.c
35639@@ -727,9 +727,9 @@ i915_gem_check_execbuffer(struct drm_i915_gem_execbuffer2 *exec)
35640
35641 static int
35642 validate_exec_list(struct drm_i915_gem_exec_object2 *exec,
35643- int count)
35644+ unsigned int count)
35645 {
35646- int i;
35647+ unsigned int i;
35648 int relocs_total = 0;
35649 int relocs_max = INT_MAX / sizeof(struct drm_i915_gem_relocation_entry);
35650
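`validate_exec_list()` now takes `count` (and its loop index) as `unsigned int` rather than `int`. A huge user-controlled buffer count interpreted as a signed value can turn negative, skipping `i < count` loops while still being used in later size arithmetic; keeping the value unsigned removes that mismatch between validation and use. A sketch of the hazard:

	/* count originates in userspace (sketch) */
	static void walk_signed(int count)
	{
		int i;
		for (i = 0; i < count; i++)	/* count < 0: body never runs, */
			;			/* yet count feeds later math  */
	}

	static void walk_unsigned(unsigned int count)
	{
		unsigned int i;
		for (i = 0; i < count; i++)	/* "negative" is now just large, */
			;			/* and can be range-checked      */
	}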
35651diff --git a/drivers/gpu/drm/i915/i915_ioc32.c b/drivers/gpu/drm/i915/i915_ioc32.c
35652index 3c59584..500f2e9 100644
35653--- a/drivers/gpu/drm/i915/i915_ioc32.c
35654+++ b/drivers/gpu/drm/i915/i915_ioc32.c
35655@@ -181,7 +181,7 @@ static int compat_i915_alloc(struct file *file, unsigned int cmd,
35656 (unsigned long)request);
35657 }
35658
35659-static drm_ioctl_compat_t *i915_compat_ioctls[] = {
35660+static drm_ioctl_compat_t i915_compat_ioctls[] = {
35661 [DRM_I915_BATCHBUFFER] = compat_i915_batchbuffer,
35662 [DRM_I915_CMDBUFFER] = compat_i915_cmdbuffer,
35663 [DRM_I915_GETPARAM] = compat_i915_getparam,
35664@@ -202,18 +202,15 @@ static drm_ioctl_compat_t *i915_compat_ioctls[] = {
35665 long i915_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
35666 {
35667 unsigned int nr = DRM_IOCTL_NR(cmd);
35668- drm_ioctl_compat_t *fn = NULL;
35669 int ret;
35670
35671 if (nr < DRM_COMMAND_BASE)
35672 return drm_compat_ioctl(filp, cmd, arg);
35673
35674- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls))
35675- fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
35676-
35677- if (fn != NULL)
35678+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(i915_compat_ioctls)) {
35679+ drm_ioctl_compat_t fn = i915_compat_ioctls[nr - DRM_COMMAND_BASE];
35680 ret = (*fn) (filp, cmd, arg);
35681- else
35682+ } else
35683 ret = drm_ioctl(filp, cmd, arg);
35684
35685 return ret;
35686diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c
35687index e5e32869..1678f36 100644
35688--- a/drivers/gpu/drm/i915/i915_irq.c
35689+++ b/drivers/gpu/drm/i915/i915_irq.c
35690@@ -670,7 +670,7 @@ static irqreturn_t valleyview_irq_handler(int irq, void *arg)
35691 int pipe;
35692 u32 pipe_stats[I915_MAX_PIPES];
35693
35694- atomic_inc(&dev_priv->irq_received);
35695+ atomic_inc_unchecked(&dev_priv->irq_received);
35696
35697 while (true) {
35698 iir = I915_READ(VLV_IIR);
35699@@ -835,7 +835,7 @@ static irqreturn_t ivybridge_irq_handler(int irq, void *arg)
35700 irqreturn_t ret = IRQ_NONE;
35701 int i;
35702
35703- atomic_inc(&dev_priv->irq_received);
35704+ atomic_inc_unchecked(&dev_priv->irq_received);
35705
35706 /* disable master interrupt before clearing iir */
35707 de_ier = I915_READ(DEIER);
35708@@ -925,7 +925,7 @@ static irqreturn_t ironlake_irq_handler(int irq, void *arg)
35709 int ret = IRQ_NONE;
35710 u32 de_iir, gt_iir, de_ier, pm_iir, sde_ier;
35711
35712- atomic_inc(&dev_priv->irq_received);
35713+ atomic_inc_unchecked(&dev_priv->irq_received);
35714
35715 /* disable master interrupt before clearing iir */
35716 de_ier = I915_READ(DEIER);
35717@@ -2089,7 +2089,7 @@ static void ironlake_irq_preinstall(struct drm_device *dev)
35718 {
35719 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
35720
35721- atomic_set(&dev_priv->irq_received, 0);
35722+ atomic_set_unchecked(&dev_priv->irq_received, 0);
35723
35724 I915_WRITE(HWSTAM, 0xeffe);
35725
35726@@ -2124,7 +2124,7 @@ static void valleyview_irq_preinstall(struct drm_device *dev)
35727 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
35728 int pipe;
35729
35730- atomic_set(&dev_priv->irq_received, 0);
35731+ atomic_set_unchecked(&dev_priv->irq_received, 0);
35732
35733 /* VLV magic */
35734 I915_WRITE(VLV_IMR, 0);
35735@@ -2411,7 +2411,7 @@ static void i8xx_irq_preinstall(struct drm_device * dev)
35736 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
35737 int pipe;
35738
35739- atomic_set(&dev_priv->irq_received, 0);
35740+ atomic_set_unchecked(&dev_priv->irq_received, 0);
35741
35742 for_each_pipe(pipe)
35743 I915_WRITE(PIPESTAT(pipe), 0);
35744@@ -2490,7 +2490,7 @@ static irqreturn_t i8xx_irq_handler(int irq, void *arg)
35745 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
35746 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
35747
35748- atomic_inc(&dev_priv->irq_received);
35749+ atomic_inc_unchecked(&dev_priv->irq_received);
35750
35751 iir = I915_READ16(IIR);
35752 if (iir == 0)
35753@@ -2565,7 +2565,7 @@ static void i915_irq_preinstall(struct drm_device * dev)
35754 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
35755 int pipe;
35756
35757- atomic_set(&dev_priv->irq_received, 0);
35758+ atomic_set_unchecked(&dev_priv->irq_received, 0);
35759
35760 if (I915_HAS_HOTPLUG(dev)) {
35761 I915_WRITE(PORT_HOTPLUG_EN, 0);
35762@@ -2664,7 +2664,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg)
35763 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
35764 int pipe, ret = IRQ_NONE;
35765
35766- atomic_inc(&dev_priv->irq_received);
35767+ atomic_inc_unchecked(&dev_priv->irq_received);
35768
35769 iir = I915_READ(IIR);
35770 do {
35771@@ -2791,7 +2791,7 @@ static void i965_irq_preinstall(struct drm_device * dev)
35772 drm_i915_private_t *dev_priv = (drm_i915_private_t *) dev->dev_private;
35773 int pipe;
35774
35775- atomic_set(&dev_priv->irq_received, 0);
35776+ atomic_set_unchecked(&dev_priv->irq_received, 0);
35777
35778 I915_WRITE(PORT_HOTPLUG_EN, 0);
35779 I915_WRITE(PORT_HOTPLUG_STAT, I915_READ(PORT_HOTPLUG_STAT));
35780@@ -2898,7 +2898,7 @@ static irqreturn_t i965_irq_handler(int irq, void *arg)
35781 I915_DISPLAY_PLANE_A_FLIP_PENDING_INTERRUPT |
35782 I915_DISPLAY_PLANE_B_FLIP_PENDING_INTERRUPT;
35783
35784- atomic_inc(&dev_priv->irq_received);
35785+ atomic_inc_unchecked(&dev_priv->irq_received);
35786
35787 iir = I915_READ(IIR);
35788
35789diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
35790index 56746dc..b5a214f 100644
35791--- a/drivers/gpu/drm/i915/intel_display.c
35792+++ b/drivers/gpu/drm/i915/intel_display.c
35793@@ -8919,13 +8919,13 @@ struct intel_quirk {
35794 int subsystem_vendor;
35795 int subsystem_device;
35796 void (*hook)(struct drm_device *dev);
35797-};
35798+} __do_const;
35799
35800 /* For systems that don't have a meaningful PCI subdevice/subvendor ID */
35801 struct intel_dmi_quirk {
35802 void (*hook)(struct drm_device *dev);
35803 const struct dmi_system_id (*dmi_id_list)[];
35804-};
35805+} __do_const;
35806
35807 static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
35808 {
35809@@ -8933,18 +8933,20 @@ static int intel_dmi_reverse_brightness(const struct dmi_system_id *id)
35810 return 1;
35811 }
35812
35813-static const struct intel_dmi_quirk intel_dmi_quirks[] = {
35814+static const struct dmi_system_id intel_dmi_quirks_table[] = {
35815 {
35816- .dmi_id_list = &(const struct dmi_system_id[]) {
35817- {
35818- .callback = intel_dmi_reverse_brightness,
35819- .ident = "NCR Corporation",
35820- .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
35821- DMI_MATCH(DMI_PRODUCT_NAME, ""),
35822- },
35823- },
35824- { } /* terminating entry */
35825+ .callback = intel_dmi_reverse_brightness,
35826+ .ident = "NCR Corporation",
35827+ .matches = {DMI_MATCH(DMI_SYS_VENDOR, "NCR Corporation"),
35828+ DMI_MATCH(DMI_PRODUCT_NAME, ""),
35829 },
35830+ },
35831+ { } /* terminating entry */
35832+};
35833+
35834+static const struct intel_dmi_quirk intel_dmi_quirks[] = {
35835+ {
35836+ .dmi_id_list = &intel_dmi_quirks_table,
35837 .hook = quirk_invert_brightness,
35838 },
35839 };
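The anonymous compound-literal DMI list embedded in `intel_dmi_quirks[]` is hoisted into a named `static const struct dmi_system_id intel_dmi_quirks_table[]`, presumably so the initializer is an ordinary const object that the constify machinery and section placement can reason about, rather than a compound literal reached through a pointer. The hoisting pattern, with a hypothetical wrapper struct:

	static const struct dmi_system_id quirks_table[] = {
		{
			.callback = some_callback,	/* hypothetical */
			.ident    = "Some Vendor",
		},
		{ }	/* terminating entry */
	};

	static const struct quirk_list {	/* hypothetical */
		const struct dmi_system_id (*dmi_id_list)[];
	} quirks[] = {
		{ .dmi_id_list = &quirks_table },
	};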
35840diff --git a/drivers/gpu/drm/mga/mga_drv.h b/drivers/gpu/drm/mga/mga_drv.h
35841index 54558a0..2d97005 100644
35842--- a/drivers/gpu/drm/mga/mga_drv.h
35843+++ b/drivers/gpu/drm/mga/mga_drv.h
35844@@ -120,9 +120,9 @@ typedef struct drm_mga_private {
35845 u32 clear_cmd;
35846 u32 maccess;
35847
35848- atomic_t vbl_received; /**< Number of vblanks received. */
35849+ atomic_unchecked_t vbl_received; /**< Number of vblanks received. */
35850 wait_queue_head_t fence_queue;
35851- atomic_t last_fence_retired;
35852+ atomic_unchecked_t last_fence_retired;
35853 u32 next_fence_to_post;
35854
35855 unsigned int fb_cpp;
35856diff --git a/drivers/gpu/drm/mga/mga_ioc32.c b/drivers/gpu/drm/mga/mga_ioc32.c
35857index 709e90d..89a1c0d 100644
35858--- a/drivers/gpu/drm/mga/mga_ioc32.c
35859+++ b/drivers/gpu/drm/mga/mga_ioc32.c
35860@@ -189,7 +189,7 @@ static int compat_mga_dma_bootstrap(struct file *file, unsigned int cmd,
35861 return 0;
35862 }
35863
35864-drm_ioctl_compat_t *mga_compat_ioctls[] = {
35865+drm_ioctl_compat_t mga_compat_ioctls[] = {
35866 [DRM_MGA_INIT] = compat_mga_init,
35867 [DRM_MGA_GETPARAM] = compat_mga_getparam,
35868 [DRM_MGA_DMA_BOOTSTRAP] = compat_mga_dma_bootstrap,
35869@@ -207,18 +207,15 @@ drm_ioctl_compat_t *mga_compat_ioctls[] = {
35870 long mga_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
35871 {
35872 unsigned int nr = DRM_IOCTL_NR(cmd);
35873- drm_ioctl_compat_t *fn = NULL;
35874 int ret;
35875
35876 if (nr < DRM_COMMAND_BASE)
35877 return drm_compat_ioctl(filp, cmd, arg);
35878
35879- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls))
35880- fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
35881-
35882- if (fn != NULL)
35883+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(mga_compat_ioctls)) {
35884+ drm_ioctl_compat_t fn = mga_compat_ioctls[nr - DRM_COMMAND_BASE];
35885 ret = (*fn) (filp, cmd, arg);
35886- else
35887+ } else
35888 ret = drm_ioctl(filp, cmd, arg);
35889
35890 return ret;
35891diff --git a/drivers/gpu/drm/mga/mga_irq.c b/drivers/gpu/drm/mga/mga_irq.c
35892index 598c281..60d590e 100644
35893--- a/drivers/gpu/drm/mga/mga_irq.c
35894+++ b/drivers/gpu/drm/mga/mga_irq.c
35895@@ -43,7 +43,7 @@ u32 mga_get_vblank_counter(struct drm_device *dev, int crtc)
35896 if (crtc != 0)
35897 return 0;
35898
35899- return atomic_read(&dev_priv->vbl_received);
35900+ return atomic_read_unchecked(&dev_priv->vbl_received);
35901 }
35902
35903
35904@@ -59,7 +59,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
35905 /* VBLANK interrupt */
35906 if (status & MGA_VLINEPEN) {
35907 MGA_WRITE(MGA_ICLEAR, MGA_VLINEICLR);
35908- atomic_inc(&dev_priv->vbl_received);
35909+ atomic_inc_unchecked(&dev_priv->vbl_received);
35910 drm_handle_vblank(dev, 0);
35911 handled = 1;
35912 }
35913@@ -78,7 +78,7 @@ irqreturn_t mga_driver_irq_handler(DRM_IRQ_ARGS)
35914 if ((prim_start & ~0x03) != (prim_end & ~0x03))
35915 MGA_WRITE(MGA_PRIMEND, prim_end);
35916
35917- atomic_inc(&dev_priv->last_fence_retired);
35918+ atomic_inc_unchecked(&dev_priv->last_fence_retired);
35919 DRM_WAKEUP(&dev_priv->fence_queue);
35920 handled = 1;
35921 }
35922@@ -129,7 +129,7 @@ int mga_driver_fence_wait(struct drm_device *dev, unsigned int *sequence)
35923 * using fences.
35924 */
35925 DRM_WAIT_ON(ret, dev_priv->fence_queue, 3 * DRM_HZ,
35926- (((cur_fence = atomic_read(&dev_priv->last_fence_retired))
35927+ (((cur_fence = atomic_read_unchecked(&dev_priv->last_fence_retired))
35928 - *sequence) <= (1 << 23)));
35929
35930 *sequence = cur_fence;
35931diff --git a/drivers/gpu/drm/nouveau/nouveau_bios.c b/drivers/gpu/drm/nouveau/nouveau_bios.c
35932index 6aa2137..fe8dc55 100644
35933--- a/drivers/gpu/drm/nouveau/nouveau_bios.c
35934+++ b/drivers/gpu/drm/nouveau/nouveau_bios.c
35935@@ -965,7 +965,7 @@ static int parse_bit_tmds_tbl_entry(struct drm_device *dev, struct nvbios *bios,
35936 struct bit_table {
35937 const char id;
35938 int (* const parse_fn)(struct drm_device *, struct nvbios *, struct bit_entry *);
35939-};
35940+} __no_const;
35941
35942 #define BIT_TABLE(id, funcid) ((struct bit_table){ id, parse_bit_##funcid##_tbl_entry })
35943
35944diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.h b/drivers/gpu/drm/nouveau/nouveau_drm.h
35945index f2b30f8..d0f9a95 100644
35946--- a/drivers/gpu/drm/nouveau/nouveau_drm.h
35947+++ b/drivers/gpu/drm/nouveau/nouveau_drm.h
35948@@ -92,7 +92,7 @@ struct nouveau_drm {
35949 struct drm_global_reference mem_global_ref;
35950 struct ttm_bo_global_ref bo_global_ref;
35951 struct ttm_bo_device bdev;
35952- atomic_t validate_sequence;
35953+ atomic_unchecked_t validate_sequence;
35954 int (*move)(struct nouveau_channel *,
35955 struct ttm_buffer_object *,
35956 struct ttm_mem_reg *, struct ttm_mem_reg *);
35957diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
35958index b4b4d0c..b7edc15 100644
35959--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
35960+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
35961@@ -322,7 +322,7 @@ validate_init(struct nouveau_channel *chan, struct drm_file *file_priv,
35962 int ret, i;
35963 struct nouveau_bo *res_bo = NULL;
35964
35965- sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
35966+ sequence = atomic_add_return_unchecked(1, &drm->ttm.validate_sequence);
35967 retry:
35968 if (++trycnt > 100000) {
35969 NV_ERROR(cli, "%s failed and gave up.\n", __func__);
35970@@ -359,7 +359,7 @@ retry:
35971 if (ret) {
35972 validate_fini(op, NULL);
35973 if (unlikely(ret == -EAGAIN)) {
35974- sequence = atomic_add_return(1, &drm->ttm.validate_sequence);
35975+ sequence = atomic_add_return_unchecked(1, &drm->ttm.validate_sequence);
35976 ret = ttm_bo_reserve_slowpath(&nvbo->bo, true,
35977 sequence);
35978 if (!ret)
35979diff --git a/drivers/gpu/drm/nouveau/nouveau_ioc32.c b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
35980index 08214bc..9208577 100644
35981--- a/drivers/gpu/drm/nouveau/nouveau_ioc32.c
35982+++ b/drivers/gpu/drm/nouveau/nouveau_ioc32.c
35983@@ -50,7 +50,7 @@ long nouveau_compat_ioctl(struct file *filp, unsigned int cmd,
35984 unsigned long arg)
35985 {
35986 unsigned int nr = DRM_IOCTL_NR(cmd);
35987- drm_ioctl_compat_t *fn = NULL;
35988+ drm_ioctl_compat_t fn = NULL;
35989 int ret;
35990
35991 if (nr < DRM_COMMAND_BASE)
35992diff --git a/drivers/gpu/drm/nouveau/nouveau_vga.c b/drivers/gpu/drm/nouveau/nouveau_vga.c
35993index 25d3495..d81aaf6 100644
35994--- a/drivers/gpu/drm/nouveau/nouveau_vga.c
35995+++ b/drivers/gpu/drm/nouveau/nouveau_vga.c
35996@@ -62,7 +62,7 @@ nouveau_switcheroo_can_switch(struct pci_dev *pdev)
35997 bool can_switch;
35998
35999 spin_lock(&dev->count_lock);
36000- can_switch = (dev->open_count == 0);
36001+ can_switch = (local_read(&dev->open_count) == 0);
36002 spin_unlock(&dev->count_lock);
36003 return can_switch;
36004 }
36005diff --git a/drivers/gpu/drm/qxl/qxl_ttm.c b/drivers/gpu/drm/qxl/qxl_ttm.c
36006index 489cb8c..0b8d0d3 100644
36007--- a/drivers/gpu/drm/qxl/qxl_ttm.c
36008+++ b/drivers/gpu/drm/qxl/qxl_ttm.c
36009@@ -103,7 +103,7 @@ static void qxl_ttm_global_fini(struct qxl_device *qdev)
36010 }
36011 }
36012
36013-static struct vm_operations_struct qxl_ttm_vm_ops;
36014+static vm_operations_struct_no_const qxl_ttm_vm_ops __read_only;
36015 static const struct vm_operations_struct *ttm_vm_ops;
36016
36017 static int qxl_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
36018@@ -147,8 +147,10 @@ int qxl_mmap(struct file *filp, struct vm_area_struct *vma)
36019 return r;
36020 if (unlikely(ttm_vm_ops == NULL)) {
36021 ttm_vm_ops = vma->vm_ops;
36022+ pax_open_kernel();
36023 qxl_ttm_vm_ops = *ttm_vm_ops;
36024 qxl_ttm_vm_ops.fault = &qxl_ttm_fault;
36025+ pax_close_kernel();
36026 }
36027 vma->vm_ops = &qxl_ttm_vm_ops;
36028 return 0;
36029@@ -556,25 +558,23 @@ static int qxl_mm_dump_table(struct seq_file *m, void *data)
36030 static int qxl_ttm_debugfs_init(struct qxl_device *qdev)
36031 {
36032 #if defined(CONFIG_DEBUG_FS)
36033- static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES];
36034- static char qxl_mem_types_names[QXL_DEBUGFS_MEM_TYPES][32];
36035- unsigned i;
36036+ static struct drm_info_list qxl_mem_types_list[QXL_DEBUGFS_MEM_TYPES] = {
36037+ {
36038+ .name = "qxl_mem_mm",
36039+ .show = &qxl_mm_dump_table,
36040+ },
36041+ {
36042+ .name = "qxl_surf_mm",
36043+ .show = &qxl_mm_dump_table,
36044+ }
36045+ };
36046
36047- for (i = 0; i < QXL_DEBUGFS_MEM_TYPES; i++) {
36048- if (i == 0)
36049- sprintf(qxl_mem_types_names[i], "qxl_mem_mm");
36050- else
36051- sprintf(qxl_mem_types_names[i], "qxl_surf_mm");
36052- qxl_mem_types_list[i].name = qxl_mem_types_names[i];
36053- qxl_mem_types_list[i].show = &qxl_mm_dump_table;
36054- qxl_mem_types_list[i].driver_features = 0;
36055- if (i == 0)
36056- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
36057- else
36058- qxl_mem_types_list[i].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
36059+ pax_open_kernel();
36060+ *(void **)&qxl_mem_types_list[0].data = qdev->mman.bdev.man[TTM_PL_VRAM].priv;
36061+ *(void **)&qxl_mem_types_list[1].data = qdev->mman.bdev.man[TTM_PL_PRIV0].priv;
36062+ pax_close_kernel();
36063
36064- }
36065- return qxl_debugfs_add_files(qdev, qxl_mem_types_list, i);
36066+ return qxl_debugfs_add_files(qdev, qxl_mem_types_list, QXL_DEBUGFS_MEM_TYPES);
36067 #else
36068 return 0;
36069 #endif
36070diff --git a/drivers/gpu/drm/r128/r128_cce.c b/drivers/gpu/drm/r128/r128_cce.c
36071index d4660cf..70dbe65 100644
36072--- a/drivers/gpu/drm/r128/r128_cce.c
36073+++ b/drivers/gpu/drm/r128/r128_cce.c
36074@@ -377,7 +377,7 @@ static int r128_do_init_cce(struct drm_device *dev, drm_r128_init_t *init)
36075
36076 /* GH: Simple idle check.
36077 */
36078- atomic_set(&dev_priv->idle_count, 0);
36079+ atomic_set_unchecked(&dev_priv->idle_count, 0);
36080
36081 /* We don't support anything other than bus-mastering ring mode,
36082 * but the ring can be in either AGP or PCI space for the ring
36083diff --git a/drivers/gpu/drm/r128/r128_drv.h b/drivers/gpu/drm/r128/r128_drv.h
36084index 930c71b..499aded 100644
36085--- a/drivers/gpu/drm/r128/r128_drv.h
36086+++ b/drivers/gpu/drm/r128/r128_drv.h
36087@@ -90,14 +90,14 @@ typedef struct drm_r128_private {
36088 int is_pci;
36089 unsigned long cce_buffers_offset;
36090
36091- atomic_t idle_count;
36092+ atomic_unchecked_t idle_count;
36093
36094 int page_flipping;
36095 int current_page;
36096 u32 crtc_offset;
36097 u32 crtc_offset_cntl;
36098
36099- atomic_t vbl_received;
36100+ atomic_unchecked_t vbl_received;
36101
36102 u32 color_fmt;
36103 unsigned int front_offset;
36104diff --git a/drivers/gpu/drm/r128/r128_ioc32.c b/drivers/gpu/drm/r128/r128_ioc32.c
36105index a954c54..9cc595c 100644
36106--- a/drivers/gpu/drm/r128/r128_ioc32.c
36107+++ b/drivers/gpu/drm/r128/r128_ioc32.c
36108@@ -177,7 +177,7 @@ static int compat_r128_getparam(struct file *file, unsigned int cmd,
36109 return drm_ioctl(file, DRM_IOCTL_R128_GETPARAM, (unsigned long)getparam);
36110 }
36111
36112-drm_ioctl_compat_t *r128_compat_ioctls[] = {
36113+drm_ioctl_compat_t r128_compat_ioctls[] = {
36114 [DRM_R128_INIT] = compat_r128_init,
36115 [DRM_R128_DEPTH] = compat_r128_depth,
36116 [DRM_R128_STIPPLE] = compat_r128_stipple,
36117@@ -196,18 +196,15 @@ drm_ioctl_compat_t *r128_compat_ioctls[] = {
36118 long r128_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
36119 {
36120 unsigned int nr = DRM_IOCTL_NR(cmd);
36121- drm_ioctl_compat_t *fn = NULL;
36122 int ret;
36123
36124 if (nr < DRM_COMMAND_BASE)
36125 return drm_compat_ioctl(filp, cmd, arg);
36126
36127- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls))
36128- fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
36129-
36130- if (fn != NULL)
36131+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(r128_compat_ioctls)) {
36132+ drm_ioctl_compat_t fn = r128_compat_ioctls[nr - DRM_COMMAND_BASE];
36133 ret = (*fn) (filp, cmd, arg);
36134- else
36135+ } else
36136 ret = drm_ioctl(filp, cmd, arg);
36137
36138 return ret;
36139diff --git a/drivers/gpu/drm/r128/r128_irq.c b/drivers/gpu/drm/r128/r128_irq.c
36140index 2ea4f09..d391371 100644
36141--- a/drivers/gpu/drm/r128/r128_irq.c
36142+++ b/drivers/gpu/drm/r128/r128_irq.c
36143@@ -41,7 +41,7 @@ u32 r128_get_vblank_counter(struct drm_device *dev, int crtc)
36144 if (crtc != 0)
36145 return 0;
36146
36147- return atomic_read(&dev_priv->vbl_received);
36148+ return atomic_read_unchecked(&dev_priv->vbl_received);
36149 }
36150
36151 irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
36152@@ -55,7 +55,7 @@ irqreturn_t r128_driver_irq_handler(DRM_IRQ_ARGS)
36153 /* VBLANK interrupt */
36154 if (status & R128_CRTC_VBLANK_INT) {
36155 R128_WRITE(R128_GEN_INT_STATUS, R128_CRTC_VBLANK_INT_AK);
36156- atomic_inc(&dev_priv->vbl_received);
36157+ atomic_inc_unchecked(&dev_priv->vbl_received);
36158 drm_handle_vblank(dev, 0);
36159 return IRQ_HANDLED;
36160 }
36161diff --git a/drivers/gpu/drm/r128/r128_state.c b/drivers/gpu/drm/r128/r128_state.c
36162index 19bb7e6..de7e2a2 100644
36163--- a/drivers/gpu/drm/r128/r128_state.c
36164+++ b/drivers/gpu/drm/r128/r128_state.c
36165@@ -320,10 +320,10 @@ static void r128_clear_box(drm_r128_private_t *dev_priv,
36166
36167 static void r128_cce_performance_boxes(drm_r128_private_t *dev_priv)
36168 {
36169- if (atomic_read(&dev_priv->idle_count) == 0)
36170+ if (atomic_read_unchecked(&dev_priv->idle_count) == 0)
36171 r128_clear_box(dev_priv, 64, 4, 8, 8, 0, 255, 0);
36172 else
36173- atomic_set(&dev_priv->idle_count, 0);
36174+ atomic_set_unchecked(&dev_priv->idle_count, 0);
36175 }
36176
36177 #endif
36178diff --git a/drivers/gpu/drm/radeon/mkregtable.c b/drivers/gpu/drm/radeon/mkregtable.c
36179index 5a82b6b..9e69c73 100644
36180--- a/drivers/gpu/drm/radeon/mkregtable.c
36181+++ b/drivers/gpu/drm/radeon/mkregtable.c
36182@@ -637,14 +637,14 @@ static int parser_auth(struct table *t, const char *filename)
36183 regex_t mask_rex;
36184 regmatch_t match[4];
36185 char buf[1024];
36186- size_t end;
36187+ long end;
36188 int len;
36189 int done = 0;
36190 int r;
36191 unsigned o;
36192 struct offset *offset;
36193 char last_reg_s[10];
36194- int last_reg;
36195+ unsigned long last_reg;
36196
36197 if (regcomp
36198 (&mask_rex, "(0x[0-9a-fA-F]*) *([_a-zA-Z0-9]*)", REG_EXTENDED)) {
36199diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
36200index b0dc0b6..a9bfe9c 100644
36201--- a/drivers/gpu/drm/radeon/radeon_device.c
36202+++ b/drivers/gpu/drm/radeon/radeon_device.c
36203@@ -1014,7 +1014,7 @@ static bool radeon_switcheroo_can_switch(struct pci_dev *pdev)
36204 bool can_switch;
36205
36206 spin_lock(&dev->count_lock);
36207- can_switch = (dev->open_count == 0);
36208+ can_switch = (local_read(&dev->open_count) == 0);
36209 spin_unlock(&dev->count_lock);
36210 return can_switch;
36211 }
36212diff --git a/drivers/gpu/drm/radeon/radeon_drv.h b/drivers/gpu/drm/radeon/radeon_drv.h
36213index b369d42..8dd04eb 100644
36214--- a/drivers/gpu/drm/radeon/radeon_drv.h
36215+++ b/drivers/gpu/drm/radeon/radeon_drv.h
36216@@ -258,7 +258,7 @@ typedef struct drm_radeon_private {
36217
36218 /* SW interrupt */
36219 wait_queue_head_t swi_queue;
36220- atomic_t swi_emitted;
36221+ atomic_unchecked_t swi_emitted;
36222 int vblank_crtc;
36223 uint32_t irq_enable_reg;
36224 uint32_t r500_disp_irq_reg;
36225diff --git a/drivers/gpu/drm/radeon/radeon_ioc32.c b/drivers/gpu/drm/radeon/radeon_ioc32.c
36226index c180df8..5fd8186 100644
36227--- a/drivers/gpu/drm/radeon/radeon_ioc32.c
36228+++ b/drivers/gpu/drm/radeon/radeon_ioc32.c
36229@@ -358,7 +358,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
36230 request = compat_alloc_user_space(sizeof(*request));
36231 if (!access_ok(VERIFY_WRITE, request, sizeof(*request))
36232 || __put_user(req32.param, &request->param)
36233- || __put_user((void __user *)(unsigned long)req32.value,
36234+ || __put_user((unsigned long)req32.value,
36235 &request->value))
36236 return -EFAULT;
36237
36238@@ -368,7 +368,7 @@ static int compat_radeon_cp_setparam(struct file *file, unsigned int cmd,
36239 #define compat_radeon_cp_setparam NULL
36240 #endif /* X86_64 || IA64 */
36241
36242-static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
36243+static drm_ioctl_compat_t radeon_compat_ioctls[] = {
36244 [DRM_RADEON_CP_INIT] = compat_radeon_cp_init,
36245 [DRM_RADEON_CLEAR] = compat_radeon_cp_clear,
36246 [DRM_RADEON_STIPPLE] = compat_radeon_cp_stipple,
36247@@ -393,18 +393,15 @@ static drm_ioctl_compat_t *radeon_compat_ioctls[] = {
36248 long radeon_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
36249 {
36250 unsigned int nr = DRM_IOCTL_NR(cmd);
36251- drm_ioctl_compat_t *fn = NULL;
36252 int ret;
36253
36254 if (nr < DRM_COMMAND_BASE)
36255 return drm_compat_ioctl(filp, cmd, arg);
36256
36257- if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls))
36258- fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
36259-
36260- if (fn != NULL)
36261+ if (nr < DRM_COMMAND_BASE + DRM_ARRAY_SIZE(radeon_compat_ioctls)) {
36262+ drm_ioctl_compat_t fn = radeon_compat_ioctls[nr - DRM_COMMAND_BASE];
36263 ret = (*fn) (filp, cmd, arg);
36264- else
36265+ } else
36266 ret = drm_ioctl(filp, cmd, arg);
36267
36268 return ret;
36269diff --git a/drivers/gpu/drm/radeon/radeon_irq.c b/drivers/gpu/drm/radeon/radeon_irq.c
36270index 8d68e97..9dcfed8 100644
36271--- a/drivers/gpu/drm/radeon/radeon_irq.c
36272+++ b/drivers/gpu/drm/radeon/radeon_irq.c
36273@@ -226,8 +226,8 @@ static int radeon_emit_irq(struct drm_device * dev)
36274 unsigned int ret;
36275 RING_LOCALS;
36276
36277- atomic_inc(&dev_priv->swi_emitted);
36278- ret = atomic_read(&dev_priv->swi_emitted);
36279+ atomic_inc_unchecked(&dev_priv->swi_emitted);
36280+ ret = atomic_read_unchecked(&dev_priv->swi_emitted);
36281
36282 BEGIN_RING(4);
36283 OUT_RING_REG(RADEON_LAST_SWI_REG, ret);
36284@@ -353,7 +353,7 @@ int radeon_driver_irq_postinstall(struct drm_device *dev)
36285 drm_radeon_private_t *dev_priv =
36286 (drm_radeon_private_t *) dev->dev_private;
36287
36288- atomic_set(&dev_priv->swi_emitted, 0);
36289+ atomic_set_unchecked(&dev_priv->swi_emitted, 0);
36290 DRM_INIT_WAITQUEUE(&dev_priv->swi_queue);
36291
36292 dev->max_vblank_count = 0x001fffff;
36293diff --git a/drivers/gpu/drm/radeon/radeon_state.c b/drivers/gpu/drm/radeon/radeon_state.c
36294index 4d20910..6726b6d 100644
36295--- a/drivers/gpu/drm/radeon/radeon_state.c
36296+++ b/drivers/gpu/drm/radeon/radeon_state.c
36297@@ -2168,7 +2168,7 @@ static int radeon_cp_clear(struct drm_device *dev, void *data, struct drm_file *
36298 if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS)
36299 sarea_priv->nbox = RADEON_NR_SAREA_CLIPRECTS;
36300
36301- if (DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
36302+ if (sarea_priv->nbox > RADEON_NR_SAREA_CLIPRECTS || DRM_COPY_FROM_USER(&depth_boxes, clear->depth_boxes,
36303 sarea_priv->nbox * sizeof(depth_boxes[0])))
36304 return -EFAULT;
36305
36306@@ -3031,7 +3031,7 @@ static int radeon_cp_getparam(struct drm_device *dev, void *data, struct drm_fil
36307 {
36308 drm_radeon_private_t *dev_priv = dev->dev_private;
36309 drm_radeon_getparam_t *param = data;
36310- int value;
36311+ int value = 0;
36312
36313 DRM_DEBUG("pid=%d\n", DRM_CURRENTPID);
36314
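Two hardening tweaks above. The clear ioctl clamps `sarea_priv->nbox`, but the SAREA is a shared mapping that userspace can rewrite (and the compiler can re-read) between the clamp and the copy, so the bound is re-tested in the same expression as `DRM_COPY_FROM_USER()`. And `value` in `radeon_cp_getparam()` is zero-initialized so an unrecognized parameter cannot copy uninitialized stack back to userspace. The cleaner general shape is to snapshot the shared field once into a local and validate that; a sketch with illustrative types:

	#include <linux/uaccess.h>

	#define MAX_BOXES 12
	struct box { int x1, y1, x2, y2; };

	static int fetch_boxes(const struct box __user *src,
			       const volatile unsigned int *nbox_shared,
			       struct box *dst)
	{
		unsigned int n = *nbox_shared;	/* read the shared value once */

		if (n > MAX_BOXES)
			return -EINVAL;
		if (copy_from_user(dst, src, n * sizeof(*dst)))
			return -EFAULT;
		return 0;
	}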
36315diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
36316index 6c0ce89..57a2529 100644
36317--- a/drivers/gpu/drm/radeon/radeon_ttm.c
36318+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
36319@@ -782,7 +782,7 @@ void radeon_ttm_set_active_vram_size(struct radeon_device *rdev, u64 size)
36320 man->size = size >> PAGE_SHIFT;
36321 }
36322
36323-static struct vm_operations_struct radeon_ttm_vm_ops;
36324+static vm_operations_struct_no_const radeon_ttm_vm_ops __read_only;
36325 static const struct vm_operations_struct *ttm_vm_ops = NULL;
36326
36327 static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
36328@@ -823,8 +823,10 @@ int radeon_mmap(struct file *filp, struct vm_area_struct *vma)
36329 }
36330 if (unlikely(ttm_vm_ops == NULL)) {
36331 ttm_vm_ops = vma->vm_ops;
36332+ pax_open_kernel();
36333 radeon_ttm_vm_ops = *ttm_vm_ops;
36334 radeon_ttm_vm_ops.fault = &radeon_ttm_fault;
36335+ pax_close_kernel();
36336 }
36337 vma->vm_ops = &radeon_ttm_vm_ops;
36338 return 0;
36339@@ -853,38 +855,33 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
36340 static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
36341 {
36342 #if defined(CONFIG_DEBUG_FS)
36343- static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2];
36344- static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES+2][32];
36345+ static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES+2] = {
36346+ {
36347+ .name = "radeon_vram_mm",
36348+ .show = &radeon_mm_dump_table,
36349+ },
36350+ {
36351+ .name = "radeon_gtt_mm",
36352+ .show = &radeon_mm_dump_table,
36353+ },
36354+ {
36355+ .name = "ttm_page_pool",
36356+ .show = &ttm_page_alloc_debugfs,
36357+ },
36358+ {
36359+ .name = "ttm_dma_page_pool",
36360+ .show = &ttm_dma_page_alloc_debugfs,
36361+ },
36362+ };
36363 unsigned i;
36364
36365- for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
36366- if (i == 0)
36367- sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
36368- else
36369- sprintf(radeon_mem_types_names[i], "radeon_gtt_mm");
36370- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
36371- radeon_mem_types_list[i].show = &radeon_mm_dump_table;
36372- radeon_mem_types_list[i].driver_features = 0;
36373- if (i == 0)
36374- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
36375- else
36376- radeon_mem_types_list[i].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
36377-
36378- }
36379- /* Add ttm page pool to debugfs */
36380- sprintf(radeon_mem_types_names[i], "ttm_page_pool");
36381- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
36382- radeon_mem_types_list[i].show = &ttm_page_alloc_debugfs;
36383- radeon_mem_types_list[i].driver_features = 0;
36384- radeon_mem_types_list[i++].data = NULL;
36385+ pax_open_kernel();
36386+ *(void **)&radeon_mem_types_list[0].data = rdev->mman.bdev.man[TTM_PL_VRAM].priv;
36387+ *(void **)&radeon_mem_types_list[1].data = rdev->mman.bdev.man[TTM_PL_TT].priv;
36388+ pax_close_kernel();
36389 #ifdef CONFIG_SWIOTLB
36390- if (swiotlb_nr_tbl()) {
36391- sprintf(radeon_mem_types_names[i], "ttm_dma_page_pool");
36392- radeon_mem_types_list[i].name = radeon_mem_types_names[i];
36393- radeon_mem_types_list[i].show = &ttm_dma_page_alloc_debugfs;
36394- radeon_mem_types_list[i].driver_features = 0;
36395- radeon_mem_types_list[i++].data = NULL;
36396- }
36397+ if (swiotlb_nr_tbl())
36398+ i++;
36399 #endif
36400 return radeon_debugfs_add_files(rdev, radeon_mem_types_list, i);
36401
36402diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
36403index 55880d5..9e95342 100644
36404--- a/drivers/gpu/drm/radeon/rs690.c
36405+++ b/drivers/gpu/drm/radeon/rs690.c
36406@@ -327,9 +327,11 @@ static void rs690_crtc_bandwidth_compute(struct radeon_device *rdev,
36407 if (rdev->pm.max_bandwidth.full > rdev->pm.sideport_bandwidth.full &&
36408 rdev->pm.sideport_bandwidth.full)
36409 rdev->pm.max_bandwidth = rdev->pm.sideport_bandwidth;
36410- read_delay_latency.full = dfixed_const(370 * 800 * 1000);
36411+ read_delay_latency.full = dfixed_const(800 * 1000);
36412 read_delay_latency.full = dfixed_div(read_delay_latency,
36413 rdev->pm.igp_sideport_mclk);
36414+ a.full = dfixed_const(370);
36415+ read_delay_latency.full = dfixed_mul(read_delay_latency, a);
36416 } else {
36417 if (rdev->pm.max_bandwidth.full > rdev->pm.k8_bandwidth.full &&
36418 rdev->pm.k8_bandwidth.full)
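radeon's `fixed20_12` type keeps 12 fractional bits in a 32-bit word, so `dfixed_const(x)` is roughly `x << 12`. The old constant overflowed: `370 * 800 * 1000 = 296,000,000`, which needs about 40 bits once shifted, while `800 * 1000` shifted is about 3.28e9 and still fits. The fix therefore builds the latency from the smaller constant, divides by the memory clock, then multiplies by the remaining factor 370 in fixed point. The arithmetic in isolation:

	#include <stdint.h>

	static inline uint32_t fx_const(uint32_t a)	/* mirrors dfixed_const */
	{
		return a << 12;		/* 20.12 fixed point */
	}

	/* fx_const(370 * 800 * 1000): 296000000 << 12 ~ 1.21e12  -> overflows u32
	 * fx_const(800 * 1000):         800000 << 12 = 3276800000 -> fits in u32
	 * so: latency = fx_const(800 * 1000) / mclk, then latency *= 370 (fixed) */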
36419diff --git a/drivers/gpu/drm/ttm/ttm_memory.c b/drivers/gpu/drm/ttm/ttm_memory.c
36420index dbc2def..0a9f710 100644
36421--- a/drivers/gpu/drm/ttm/ttm_memory.c
36422+++ b/drivers/gpu/drm/ttm/ttm_memory.c
36423@@ -264,7 +264,7 @@ static int ttm_mem_init_kernel_zone(struct ttm_mem_global *glob,
36424 zone->glob = glob;
36425 glob->zone_kernel = zone;
36426 ret = kobject_init_and_add(
36427- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
36428+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
36429 if (unlikely(ret != 0)) {
36430 kobject_put(&zone->kobj);
36431 return ret;
36432@@ -347,7 +347,7 @@ static int ttm_mem_init_dma32_zone(struct ttm_mem_global *glob,
36433 zone->glob = glob;
36434 glob->zone_dma32 = zone;
36435 ret = kobject_init_and_add(
36436- &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, zone->name);
36437+ &zone->kobj, &ttm_mem_zone_kobj_type, &glob->kobj, "%s", zone->name);
36438 if (unlikely(ret != 0)) {
36439 kobject_put(&zone->kobj);
36440 return ret;
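A classic format-string fix: `kobject_init_and_add()` takes a printf-style format, and `zone->name` was being passed in the format slot, where any `%` it contained would be interpreted. The zone names here are fixed strings, so this is defensive rather than an active hole, but routing the name through `"%s"` makes it unambiguously data:

	/* assuming kobj, ktype, parent, name and ret are in scope */
	ret = kobject_init_and_add(&kobj, &ktype, parent, name);	/* bad: name is the format */
	ret = kobject_init_and_add(&kobj, &ktype, parent, "%s", name);	/* good: name is data */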
36441diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
36442index bd2a3b4..122d9ad 100644
36443--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
36444+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
36445@@ -394,9 +394,9 @@ static int ttm_pool_get_num_unused_pages(void)
36446 static int ttm_pool_mm_shrink(struct shrinker *shrink,
36447 struct shrink_control *sc)
36448 {
36449- static atomic_t start_pool = ATOMIC_INIT(0);
36450+ static atomic_unchecked_t start_pool = ATOMIC_INIT(0);
36451 unsigned i;
36452- unsigned pool_offset = atomic_add_return(1, &start_pool);
36453+ unsigned pool_offset = atomic_add_return_unchecked(1, &start_pool);
36454 struct ttm_page_pool *pool;
36455 int shrink_pages = sc->nr_to_scan;
36456
36457diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
36458index dc0c065..58a0782 100644
36459--- a/drivers/gpu/drm/udl/udl_fb.c
36460+++ b/drivers/gpu/drm/udl/udl_fb.c
36461@@ -367,7 +367,6 @@ static int udl_fb_release(struct fb_info *info, int user)
36462 fb_deferred_io_cleanup(info);
36463 kfree(info->fbdefio);
36464 info->fbdefio = NULL;
36465- info->fbops->fb_mmap = udl_fb_mmap;
36466 }
36467
36468 pr_warn("released /dev/fb%d user=%d count=%d\n",
36469diff --git a/drivers/gpu/drm/via/via_drv.h b/drivers/gpu/drm/via/via_drv.h
36470index 893a650..6190d3b 100644
36471--- a/drivers/gpu/drm/via/via_drv.h
36472+++ b/drivers/gpu/drm/via/via_drv.h
36473@@ -51,7 +51,7 @@ typedef struct drm_via_ring_buffer {
36474 typedef uint32_t maskarray_t[5];
36475
36476 typedef struct drm_via_irq {
36477- atomic_t irq_received;
36478+ atomic_unchecked_t irq_received;
36479 uint32_t pending_mask;
36480 uint32_t enable_mask;
36481 wait_queue_head_t irq_queue;
36482@@ -75,7 +75,7 @@ typedef struct drm_via_private {
36483 struct timeval last_vblank;
36484 int last_vblank_valid;
36485 unsigned usec_per_vblank;
36486- atomic_t vbl_received;
36487+ atomic_unchecked_t vbl_received;
36488 drm_via_state_t hc_state;
36489 char pci_buf[VIA_PCI_BUF_SIZE];
36490 const uint32_t *fire_offsets[VIA_FIRE_BUF_SIZE];
36491diff --git a/drivers/gpu/drm/via/via_irq.c b/drivers/gpu/drm/via/via_irq.c
36492index ac98964..5dbf512 100644
36493--- a/drivers/gpu/drm/via/via_irq.c
36494+++ b/drivers/gpu/drm/via/via_irq.c
36495@@ -101,7 +101,7 @@ u32 via_get_vblank_counter(struct drm_device *dev, int crtc)
36496 if (crtc != 0)
36497 return 0;
36498
36499- return atomic_read(&dev_priv->vbl_received);
36500+ return atomic_read_unchecked(&dev_priv->vbl_received);
36501 }
36502
36503 irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
36504@@ -116,8 +116,8 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
36505
36506 status = VIA_READ(VIA_REG_INTERRUPT);
36507 if (status & VIA_IRQ_VBLANK_PENDING) {
36508- atomic_inc(&dev_priv->vbl_received);
36509- if (!(atomic_read(&dev_priv->vbl_received) & 0x0F)) {
36510+ atomic_inc_unchecked(&dev_priv->vbl_received);
36511+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0x0F)) {
36512 do_gettimeofday(&cur_vblank);
36513 if (dev_priv->last_vblank_valid) {
36514 dev_priv->usec_per_vblank =
36515@@ -127,7 +127,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
36516 dev_priv->last_vblank = cur_vblank;
36517 dev_priv->last_vblank_valid = 1;
36518 }
36519- if (!(atomic_read(&dev_priv->vbl_received) & 0xFF)) {
36520+ if (!(atomic_read_unchecked(&dev_priv->vbl_received) & 0xFF)) {
36521 DRM_DEBUG("US per vblank is: %u\n",
36522 dev_priv->usec_per_vblank);
36523 }
36524@@ -137,7 +137,7 @@ irqreturn_t via_driver_irq_handler(DRM_IRQ_ARGS)
36525
36526 for (i = 0; i < dev_priv->num_irqs; ++i) {
36527 if (status & cur_irq->pending_mask) {
36528- atomic_inc(&cur_irq->irq_received);
36529+ atomic_inc_unchecked(&cur_irq->irq_received);
36530 DRM_WAKEUP(&cur_irq->irq_queue);
36531 handled = 1;
36532 if (dev_priv->irq_map[drm_via_irq_dma0_td] == i)
36533@@ -242,11 +242,11 @@ via_driver_irq_wait(struct drm_device *dev, unsigned int irq, int force_sequence
36534 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
36535 ((VIA_READ(masks[irq][2]) & masks[irq][3]) ==
36536 masks[irq][4]));
36537- cur_irq_sequence = atomic_read(&cur_irq->irq_received);
36538+ cur_irq_sequence = atomic_read_unchecked(&cur_irq->irq_received);
36539 } else {
36540 DRM_WAIT_ON(ret, cur_irq->irq_queue, 3 * DRM_HZ,
36541 (((cur_irq_sequence =
36542- atomic_read(&cur_irq->irq_received)) -
36543+ atomic_read_unchecked(&cur_irq->irq_received)) -
36544 *sequence) <= (1 << 23)));
36545 }
36546 *sequence = cur_irq_sequence;
36547@@ -284,7 +284,7 @@ void via_driver_irq_preinstall(struct drm_device *dev)
36548 }
36549
36550 for (i = 0; i < dev_priv->num_irqs; ++i) {
36551- atomic_set(&cur_irq->irq_received, 0);
36552+ atomic_set_unchecked(&cur_irq->irq_received, 0);
36553 cur_irq->enable_mask = dev_priv->irq_masks[i][0];
36554 cur_irq->pending_mask = dev_priv->irq_masks[i][1];
36555 DRM_INIT_WAITQUEUE(&cur_irq->irq_queue);
36556@@ -366,7 +366,7 @@ int via_wait_irq(struct drm_device *dev, void *data, struct drm_file *file_priv)
36557 switch (irqwait->request.type & ~VIA_IRQ_FLAGS_MASK) {
36558 case VIA_IRQ_RELATIVE:
36559 irqwait->request.sequence +=
36560- atomic_read(&cur_irq->irq_received);
36561+ atomic_read_unchecked(&cur_irq->irq_received);
36562 irqwait->request.type &= ~_DRM_VBLANK_RELATIVE;
36563 case VIA_IRQ_ABSOLUTE:
36564 break;
36565diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
36566index 13aeda7..4a952d1 100644
36567--- a/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
36568+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_drv.h
36569@@ -290,7 +290,7 @@ struct vmw_private {
36570 * Fencing and IRQs.
36571 */
36572
36573- atomic_t marker_seq;
36574+ atomic_unchecked_t marker_seq;
36575 wait_queue_head_t fence_queue;
36576 wait_queue_head_t fifo_queue;
36577 int fence_queue_waiters; /* Protected by hw_mutex */
36578diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
36579index 3eb1486..0a47ee9 100644
36580--- a/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
36581+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_fifo.c
36582@@ -137,7 +137,7 @@ int vmw_fifo_init(struct vmw_private *dev_priv, struct vmw_fifo_state *fifo)
36583 (unsigned int) min,
36584 (unsigned int) fifo->capabilities);
36585
36586- atomic_set(&dev_priv->marker_seq, dev_priv->last_read_seqno);
36587+ atomic_set_unchecked(&dev_priv->marker_seq, dev_priv->last_read_seqno);
36588 iowrite32(dev_priv->last_read_seqno, fifo_mem + SVGA_FIFO_FENCE);
36589 vmw_marker_queue_init(&fifo->marker_queue);
36590 return vmw_fifo_send_fence(dev_priv, &dummy);
36591@@ -355,7 +355,7 @@ void *vmw_fifo_reserve(struct vmw_private *dev_priv, uint32_t bytes)
36592 if (reserveable)
36593 iowrite32(bytes, fifo_mem +
36594 SVGA_FIFO_RESERVED);
36595- return fifo_mem + (next_cmd >> 2);
36596+ return (__le32 __force_kernel *)fifo_mem + (next_cmd >> 2);
36597 } else {
36598 need_bounce = true;
36599 }
36600@@ -475,7 +475,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
36601
36602 fm = vmw_fifo_reserve(dev_priv, bytes);
36603 if (unlikely(fm == NULL)) {
36604- *seqno = atomic_read(&dev_priv->marker_seq);
36605+ *seqno = atomic_read_unchecked(&dev_priv->marker_seq);
36606 ret = -ENOMEM;
36607 (void)vmw_fallback_wait(dev_priv, false, true, *seqno,
36608 false, 3*HZ);
36609@@ -483,7 +483,7 @@ int vmw_fifo_send_fence(struct vmw_private *dev_priv, uint32_t *seqno)
36610 }
36611
36612 do {
36613- *seqno = atomic_add_return(1, &dev_priv->marker_seq);
36614+ *seqno = atomic_add_return_unchecked(1, &dev_priv->marker_seq);
36615 } while (*seqno == 0);
36616
36617 if (!(fifo_state->capabilities & SVGA_FIFO_CAP_FENCE)) {
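
The marker_seq hunks above show why this counter must be unchecked: fence sequence numbers are expected to wrap, and zero is reserved to mean "no fence", so generation simply skips it. A sketch of the idiom, again assuming the unchecked atomics defined elsewhere in this patch:

    #include <linux/atomic.h>
    #include <linux/types.h>

    static atomic_unchecked_t marker_seq = ATOMIC_INIT(0);

    static u32 next_seqno(void)
    {
            u32 seqno;

            do {
                    seqno = atomic_add_return_unchecked(1, &marker_seq);
            } while (seqno == 0);   /* 0 is reserved for "no fence" */

            return seqno;
    }
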
36618diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
36619index c509d40..3b640c3 100644
36620--- a/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
36621+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_ioctl.c
36622@@ -138,7 +138,7 @@ int vmw_present_ioctl(struct drm_device *dev, void *data,
36623 int ret;
36624
36625 num_clips = arg->num_clips;
36626- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
36627+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
36628
36629 if (unlikely(num_clips == 0))
36630 return 0;
36631@@ -222,7 +222,7 @@ int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
36632 int ret;
36633
36634 num_clips = arg->num_clips;
36635- clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;
36636+ clips_ptr = (struct drm_vmw_rect __user *)(unsigned long)arg->clips_ptr;
36637
36638 if (unlikely(num_clips == 0))
36639 return 0;
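
The clips_ptr changes above are sparse address-space fixes rather than behavioral ones: the ioctl argument carries a user pointer packed into a 64-bit field, and casting it into the __user space lets sparse type-check the later copy_from_user(). Roughly, with illustrative struct and field names:

    #include <linux/uaccess.h>
    #include <linux/types.h>

    struct clip { int x, y, w, h; };    /* stand-in for drm_vmw_rect */

    static int fetch_clips(u64 user_addr, struct clip *dst, unsigned int n)
    {
            struct clip __user *uclips =
                    (struct clip __user *)(unsigned long)user_addr;

            /* caller is assumed to have bounded n */
            if (copy_from_user(dst, uclips, n * sizeof(*dst)))
                    return -EFAULT;
            return 0;
    }
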
36640diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
36641index 4640adb..e1384ed 100644
36642--- a/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
36643+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_irq.c
36644@@ -107,7 +107,7 @@ bool vmw_seqno_passed(struct vmw_private *dev_priv,
36645 * emitted. Then the fence is stale and signaled.
36646 */
36647
36648- ret = ((atomic_read(&dev_priv->marker_seq) - seqno)
36649+ ret = ((atomic_read_unchecked(&dev_priv->marker_seq) - seqno)
36650 > VMW_FENCE_WRAP);
36651
36652 return ret;
36653@@ -138,7 +138,7 @@ int vmw_fallback_wait(struct vmw_private *dev_priv,
36654
36655 if (fifo_idle)
36656 down_read(&fifo_state->rwsem);
36657- signal_seq = atomic_read(&dev_priv->marker_seq);
36658+ signal_seq = atomic_read_unchecked(&dev_priv->marker_seq);
36659 ret = 0;
36660
36661 for (;;) {
36662diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
36663index 8a8725c2..afed796 100644
36664--- a/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
36665+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_marker.c
36666@@ -151,7 +151,7 @@ int vmw_wait_lag(struct vmw_private *dev_priv,
36667 while (!vmw_lag_lt(queue, us)) {
36668 spin_lock(&queue->lock);
36669 if (list_empty(&queue->head))
36670- seqno = atomic_read(&dev_priv->marker_seq);
36671+ seqno = atomic_read_unchecked(&dev_priv->marker_seq);
36672 else {
36673 marker = list_first_entry(&queue->head,
36674 struct vmw_marker, head);
36675diff --git a/drivers/gpu/host1x/drm/dc.c b/drivers/gpu/host1x/drm/dc.c
36676index 8c04943..4370ed9 100644
36677--- a/drivers/gpu/host1x/drm/dc.c
36678+++ b/drivers/gpu/host1x/drm/dc.c
36679@@ -999,7 +999,7 @@ static int tegra_dc_debugfs_init(struct tegra_dc *dc, struct drm_minor *minor)
36680 }
36681
36682 for (i = 0; i < ARRAY_SIZE(debugfs_files); i++)
36683- dc->debugfs_files[i].data = dc;
36684+ *(void **)&dc->debugfs_files[i].data = dc;
36685
36686 err = drm_debugfs_create_files(dc->debugfs_files,
36687 ARRAY_SIZE(debugfs_files),
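
The odd-looking cast above is a consequence of constification: the hardening presumably makes the debugfs table's data field const, so the per-device copy is filled in by casting the field's address instead of assigning through the now-const lvalue. The idiom, with illustrative names:

    struct info_entry {
            const char *name;
            void * const data;      /* constified by the hardening */
    };

    static void bind_private(struct info_entry *tbl, size_t n, void *priv)
    {
            size_t i;

            for (i = 0; i < n; i++)
                    *(void **)&tbl[i].data = priv;  /* write past the const */
    }
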
36688diff --git a/drivers/hid/hid-core.c b/drivers/hid/hid-core.c
36689index 402f486..f862d7e 100644
36690--- a/drivers/hid/hid-core.c
36691+++ b/drivers/hid/hid-core.c
36692@@ -2275,7 +2275,7 @@ EXPORT_SYMBOL_GPL(hid_ignore);
36693
36694 int hid_add_device(struct hid_device *hdev)
36695 {
36696- static atomic_t id = ATOMIC_INIT(0);
36697+ static atomic_unchecked_t id = ATOMIC_INIT(0);
36698 int ret;
36699
36700 if (WARN_ON(hdev->status & HID_STAT_ADDED))
36701@@ -2309,7 +2309,7 @@ int hid_add_device(struct hid_device *hdev)
36702 /* XXX hack, any other cleaner solution after the driver core
36703 * is converted to allow more than 20 bytes as the device name? */
36704 dev_set_name(&hdev->dev, "%04X:%04X:%04X.%04X", hdev->bus,
36705- hdev->vendor, hdev->product, atomic_inc_return(&id));
36706+ hdev->vendor, hdev->product, atomic_inc_return_unchecked(&id));
36707
36708 hid_debug_register(hdev, dev_name(&hdev->dev));
36709 ret = device_add(&hdev->dev);
36710diff --git a/drivers/hid/hid-wiimote-debug.c b/drivers/hid/hid-wiimote-debug.c
36711index 90124ff..3761764 100644
36712--- a/drivers/hid/hid-wiimote-debug.c
36713+++ b/drivers/hid/hid-wiimote-debug.c
36714@@ -66,7 +66,7 @@ static ssize_t wiidebug_eeprom_read(struct file *f, char __user *u, size_t s,
36715 else if (size == 0)
36716 return -EIO;
36717
36718- if (copy_to_user(u, buf, size))
36719+ if (size > sizeof(buf) || copy_to_user(u, buf, size))
36720 return -EFAULT;
36721
36722 *off += size;
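
The added size > sizeof(buf) guard is a defensive clamp grsecurity applies to read handlers: even where the producer of size looks trusted, bounding the copy by the stack buffer's real size turns a future logic bug into -EFAULT rather than a kernel stack infoleak. The shape of the check (fill_debug_buf() is a hypothetical producer):

    #include <linux/uaccess.h>

    static ssize_t dbg_read(char __user *u)
    {
            char buf[32];
            size_t size = fill_debug_buf(buf);      /* hypothetical */

            if (size > sizeof(buf) || copy_to_user(u, buf, size))
                    return -EFAULT;
            return size;
    }
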
36723diff --git a/drivers/hv/channel.c b/drivers/hv/channel.c
36724index 0b122f8..b1d8160 100644
36725--- a/drivers/hv/channel.c
36726+++ b/drivers/hv/channel.c
36727@@ -394,8 +394,8 @@ int vmbus_establish_gpadl(struct vmbus_channel *channel, void *kbuffer,
36728 int ret = 0;
36729 int t;
36730
36731- next_gpadl_handle = atomic_read(&vmbus_connection.next_gpadl_handle);
36732- atomic_inc(&vmbus_connection.next_gpadl_handle);
36733+ next_gpadl_handle = atomic_read_unchecked(&vmbus_connection.next_gpadl_handle);
36734+ atomic_inc_unchecked(&vmbus_connection.next_gpadl_handle);
36735
36736 ret = create_gpadl_header(kbuffer, size, &msginfo, &msgcount);
36737 if (ret)
36738diff --git a/drivers/hv/hv.c b/drivers/hv/hv.c
36739index ae49237..380d4c9 100644
36740--- a/drivers/hv/hv.c
36741+++ b/drivers/hv/hv.c
36742@@ -112,7 +112,7 @@ static u64 do_hypercall(u64 control, void *input, void *output)
36743 u64 output_address = (output) ? virt_to_phys(output) : 0;
36744 u32 output_address_hi = output_address >> 32;
36745 u32 output_address_lo = output_address & 0xFFFFFFFF;
36746- void *hypercall_page = hv_context.hypercall_page;
36747+ void *hypercall_page = ktva_ktla(hv_context.hypercall_page);
36748
36749 __asm__ __volatile__ ("call *%8" : "=d"(hv_status_hi),
36750 "=a"(hv_status_lo) : "d" (control_hi),
36751diff --git a/drivers/hv/hyperv_vmbus.h b/drivers/hv/hyperv_vmbus.h
36752index 12f2f9e..679603c 100644
36753--- a/drivers/hv/hyperv_vmbus.h
36754+++ b/drivers/hv/hyperv_vmbus.h
36755@@ -591,7 +591,7 @@ enum vmbus_connect_state {
36756 struct vmbus_connection {
36757 enum vmbus_connect_state conn_state;
36758
36759- atomic_t next_gpadl_handle;
36760+ atomic_unchecked_t next_gpadl_handle;
36761
36762 /*
36763 * Represents channel interrupts. Each bit position represents a
36764diff --git a/drivers/hv/vmbus_drv.c b/drivers/hv/vmbus_drv.c
36765index 4004e54..c2de226 100644
36766--- a/drivers/hv/vmbus_drv.c
36767+++ b/drivers/hv/vmbus_drv.c
36768@@ -668,10 +668,10 @@ int vmbus_device_register(struct hv_device *child_device_obj)
36769 {
36770 int ret = 0;
36771
36772- static atomic_t device_num = ATOMIC_INIT(0);
36773+ static atomic_unchecked_t device_num = ATOMIC_INIT(0);
36774
36775 dev_set_name(&child_device_obj->device, "vmbus_0_%d",
36776- atomic_inc_return(&device_num));
36777+ atomic_inc_return_unchecked(&device_num));
36778
36779 child_device_obj->device.bus = &hv_bus;
36780 child_device_obj->device.parent = &hv_acpi_dev->dev;
36781diff --git a/drivers/hwmon/acpi_power_meter.c b/drivers/hwmon/acpi_power_meter.c
36782index 6351aba..dc4aaf4 100644
36783--- a/drivers/hwmon/acpi_power_meter.c
36784+++ b/drivers/hwmon/acpi_power_meter.c
36785@@ -117,7 +117,7 @@ struct sensor_template {
36786 struct device_attribute *devattr,
36787 const char *buf, size_t count);
36788 int index;
36789-};
36790+} __do_const;
36791
36792 /* Averaging interval */
36793 static int update_avg_interval(struct acpi_power_meter_resource *resource)
36794@@ -629,7 +629,7 @@ static int register_attrs(struct acpi_power_meter_resource *resource,
36795 struct sensor_template *attrs)
36796 {
36797 struct device *dev = &resource->acpi_dev->dev;
36798- struct sensor_device_attribute *sensors =
36799+ sensor_device_attribute_no_const *sensors =
36800 &resource->sensors[resource->num_sensors];
36801 int res = 0;
36802
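
__do_const and the *_no_const spellings that begin here come from the constify GCC plugin: structures consisting (mostly) of function pointers are forced into read-only memory, __do_const opts a struct in explicitly, and a companion typedef opts individual objects out when they must be built at runtime. The typedefs are defined elsewhere in this patch; illustratively:

    /* opt-out typedef, as this patch defines it elsewhere (illustrative) */
    typedef struct device_attribute __no_const device_attribute_no_const;

    typedef ssize_t (*show_fn)(struct device *dev,
                               struct device_attribute *attr, char *buf);

    /* a runtime-built attribute must use the mutable spelling */
    static void init_attr(device_attribute_no_const *attr,
                          const char *name, show_fn show)
    {
            sysfs_attr_init(&attr->attr);
            attr->attr.name = name;
            attr->attr.mode = 0444;
            attr->show = show;
    }
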
36803diff --git a/drivers/hwmon/applesmc.c b/drivers/hwmon/applesmc.c
36804index 62c2e32..8f2859a 100644
36805--- a/drivers/hwmon/applesmc.c
36806+++ b/drivers/hwmon/applesmc.c
36807@@ -1084,7 +1084,7 @@ static int applesmc_create_nodes(struct applesmc_node_group *groups, int num)
36808 {
36809 struct applesmc_node_group *grp;
36810 struct applesmc_dev_attr *node;
36811- struct attribute *attr;
36812+ attribute_no_const *attr;
36813 int ret, i;
36814
36815 for (grp = groups; grp->format; grp++) {
36816diff --git a/drivers/hwmon/asus_atk0110.c b/drivers/hwmon/asus_atk0110.c
36817index b25c643..a13460d 100644
36818--- a/drivers/hwmon/asus_atk0110.c
36819+++ b/drivers/hwmon/asus_atk0110.c
36820@@ -152,10 +152,10 @@ MODULE_DEVICE_TABLE(acpi, atk_ids);
36821 struct atk_sensor_data {
36822 struct list_head list;
36823 struct atk_data *data;
36824- struct device_attribute label_attr;
36825- struct device_attribute input_attr;
36826- struct device_attribute limit1_attr;
36827- struct device_attribute limit2_attr;
36828+ device_attribute_no_const label_attr;
36829+ device_attribute_no_const input_attr;
36830+ device_attribute_no_const limit1_attr;
36831+ device_attribute_no_const limit2_attr;
36832 char label_attr_name[ATTR_NAME_SIZE];
36833 char input_attr_name[ATTR_NAME_SIZE];
36834 char limit1_attr_name[ATTR_NAME_SIZE];
36835@@ -275,7 +275,7 @@ static ssize_t atk_name_show(struct device *dev,
36836 static struct device_attribute atk_name_attr =
36837 __ATTR(name, 0444, atk_name_show, NULL);
36838
36839-static void atk_init_attribute(struct device_attribute *attr, char *name,
36840+static void atk_init_attribute(device_attribute_no_const *attr, char *name,
36841 sysfs_show_func show)
36842 {
36843 sysfs_attr_init(&attr->attr);
36844diff --git a/drivers/hwmon/coretemp.c b/drivers/hwmon/coretemp.c
36845index 658ce3a..0d0c2f3 100644
36846--- a/drivers/hwmon/coretemp.c
36847+++ b/drivers/hwmon/coretemp.c
36848@@ -790,7 +790,7 @@ static int __cpuinit coretemp_cpu_callback(struct notifier_block *nfb,
36849 return NOTIFY_OK;
36850 }
36851
36852-static struct notifier_block coretemp_cpu_notifier __refdata = {
36853+static struct notifier_block coretemp_cpu_notifier = {
36854 .notifier_call = coretemp_cpu_callback,
36855 };
36856
36857diff --git a/drivers/hwmon/ibmaem.c b/drivers/hwmon/ibmaem.c
36858index 1429f6e..ee03d59 100644
36859--- a/drivers/hwmon/ibmaem.c
36860+++ b/drivers/hwmon/ibmaem.c
36861@@ -926,7 +926,7 @@ static int aem_register_sensors(struct aem_data *data,
36862 struct aem_rw_sensor_template *rw)
36863 {
36864 struct device *dev = &data->pdev->dev;
36865- struct sensor_device_attribute *sensors = data->sensors;
36866+ sensor_device_attribute_no_const *sensors = data->sensors;
36867 int err;
36868
36869 /* Set up read-only sensors */
36870diff --git a/drivers/hwmon/iio_hwmon.c b/drivers/hwmon/iio_hwmon.c
36871index 52b77af..aed1ddf 100644
36872--- a/drivers/hwmon/iio_hwmon.c
36873+++ b/drivers/hwmon/iio_hwmon.c
36874@@ -73,7 +73,7 @@ static int iio_hwmon_probe(struct platform_device *pdev)
36875 {
36876 struct device *dev = &pdev->dev;
36877 struct iio_hwmon_state *st;
36878- struct sensor_device_attribute *a;
36879+ sensor_device_attribute_no_const *a;
36880 int ret, i;
36881 int in_i = 1, temp_i = 1, curr_i = 1;
36882 enum iio_chan_type type;
36883diff --git a/drivers/hwmon/pmbus/pmbus_core.c b/drivers/hwmon/pmbus/pmbus_core.c
36884index 9add6092..ee7ba3f 100644
36885--- a/drivers/hwmon/pmbus/pmbus_core.c
36886+++ b/drivers/hwmon/pmbus/pmbus_core.c
36887@@ -781,7 +781,7 @@ static int pmbus_add_attribute(struct pmbus_data *data, struct attribute *attr)
36888 return 0;
36889 }
36890
36891-static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
36892+static void pmbus_dev_attr_init(device_attribute_no_const *dev_attr,
36893 const char *name,
36894 umode_t mode,
36895 ssize_t (*show)(struct device *dev,
36896@@ -798,7 +798,7 @@ static void pmbus_dev_attr_init(struct device_attribute *dev_attr,
36897 dev_attr->store = store;
36898 }
36899
36900-static void pmbus_attr_init(struct sensor_device_attribute *a,
36901+static void pmbus_attr_init(sensor_device_attribute_no_const *a,
36902 const char *name,
36903 umode_t mode,
36904 ssize_t (*show)(struct device *dev,
36905@@ -820,7 +820,7 @@ static int pmbus_add_boolean(struct pmbus_data *data,
36906 u16 reg, u8 mask)
36907 {
36908 struct pmbus_boolean *boolean;
36909- struct sensor_device_attribute *a;
36910+ sensor_device_attribute_no_const *a;
36911
36912 boolean = devm_kzalloc(data->dev, sizeof(*boolean), GFP_KERNEL);
36913 if (!boolean)
36914@@ -845,7 +845,7 @@ static struct pmbus_sensor *pmbus_add_sensor(struct pmbus_data *data,
36915 bool update, bool readonly)
36916 {
36917 struct pmbus_sensor *sensor;
36918- struct device_attribute *a;
36919+ device_attribute_no_const *a;
36920
36921 sensor = devm_kzalloc(data->dev, sizeof(*sensor), GFP_KERNEL);
36922 if (!sensor)
36923@@ -876,7 +876,7 @@ static int pmbus_add_label(struct pmbus_data *data,
36924 const char *lstring, int index)
36925 {
36926 struct pmbus_label *label;
36927- struct device_attribute *a;
36928+ device_attribute_no_const *a;
36929
36930 label = devm_kzalloc(data->dev, sizeof(*label), GFP_KERNEL);
36931 if (!label)
36932diff --git a/drivers/hwmon/sht15.c b/drivers/hwmon/sht15.c
36933index 2507f90..1645765 100644
36934--- a/drivers/hwmon/sht15.c
36935+++ b/drivers/hwmon/sht15.c
36936@@ -169,7 +169,7 @@ struct sht15_data {
36937 int supply_uv;
36938 bool supply_uv_valid;
36939 struct work_struct update_supply_work;
36940- atomic_t interrupt_handled;
36941+ atomic_unchecked_t interrupt_handled;
36942 };
36943
36944 /**
36945@@ -542,13 +542,13 @@ static int sht15_measurement(struct sht15_data *data,
36946 ret = gpio_direction_input(data->pdata->gpio_data);
36947 if (ret)
36948 return ret;
36949- atomic_set(&data->interrupt_handled, 0);
36950+ atomic_set_unchecked(&data->interrupt_handled, 0);
36951
36952 enable_irq(gpio_to_irq(data->pdata->gpio_data));
36953 if (gpio_get_value(data->pdata->gpio_data) == 0) {
36954 disable_irq_nosync(gpio_to_irq(data->pdata->gpio_data));
36955 /* Only relevant if the interrupt hasn't occurred. */
36956- if (!atomic_read(&data->interrupt_handled))
36957+ if (!atomic_read_unchecked(&data->interrupt_handled))
36958 schedule_work(&data->read_work);
36959 }
36960 ret = wait_event_timeout(data->wait_queue,
36961@@ -820,7 +820,7 @@ static irqreturn_t sht15_interrupt_fired(int irq, void *d)
36962
36963 /* First disable the interrupt */
36964 disable_irq_nosync(irq);
36965- atomic_inc(&data->interrupt_handled);
36966+ atomic_inc_unchecked(&data->interrupt_handled);
36967 /* Then schedule a reading work struct */
36968 if (data->state != SHT15_READING_NOTHING)
36969 schedule_work(&data->read_work);
36970@@ -842,11 +842,11 @@ static void sht15_bh_read_data(struct work_struct *work_s)
36971 * If not, then start the interrupt again - care here as could
36972 * have gone low in meantime so verify it hasn't!
36973 */
36974- atomic_set(&data->interrupt_handled, 0);
36975+ atomic_set_unchecked(&data->interrupt_handled, 0);
36976 enable_irq(gpio_to_irq(data->pdata->gpio_data));
36977 /* If still not occurred or another handler was scheduled */
36978 if (gpio_get_value(data->pdata->gpio_data)
36979- || atomic_read(&data->interrupt_handled))
36980+ || atomic_read_unchecked(&data->interrupt_handled))
36981 return;
36982 }
36983
36984diff --git a/drivers/hwmon/via-cputemp.c b/drivers/hwmon/via-cputemp.c
36985index 76f157b..9c0db1b 100644
36986--- a/drivers/hwmon/via-cputemp.c
36987+++ b/drivers/hwmon/via-cputemp.c
36988@@ -296,7 +296,7 @@ static int __cpuinit via_cputemp_cpu_callback(struct notifier_block *nfb,
36989 return NOTIFY_OK;
36990 }
36991
36992-static struct notifier_block via_cputemp_cpu_notifier __refdata = {
36993+static struct notifier_block via_cputemp_cpu_notifier = {
36994 .notifier_call = via_cputemp_cpu_callback,
36995 };
36996
36997diff --git a/drivers/i2c/busses/i2c-amd756-s4882.c b/drivers/i2c/busses/i2c-amd756-s4882.c
36998index 07f01ac..d79ad3d 100644
36999--- a/drivers/i2c/busses/i2c-amd756-s4882.c
37000+++ b/drivers/i2c/busses/i2c-amd756-s4882.c
37001@@ -43,7 +43,7 @@
37002 extern struct i2c_adapter amd756_smbus;
37003
37004 static struct i2c_adapter *s4882_adapter;
37005-static struct i2c_algorithm *s4882_algo;
37006+static i2c_algorithm_no_const *s4882_algo;
37007
37008 /* Wrapper access functions for multiplexed SMBus */
37009 static DEFINE_MUTEX(amd756_lock);
37010diff --git a/drivers/i2c/busses/i2c-nforce2-s4985.c b/drivers/i2c/busses/i2c-nforce2-s4985.c
37011index 2ca268d..c6acbdf 100644
37012--- a/drivers/i2c/busses/i2c-nforce2-s4985.c
37013+++ b/drivers/i2c/busses/i2c-nforce2-s4985.c
37014@@ -41,7 +41,7 @@
37015 extern struct i2c_adapter *nforce2_smbus;
37016
37017 static struct i2c_adapter *s4985_adapter;
37018-static struct i2c_algorithm *s4985_algo;
37019+static i2c_algorithm_no_const *s4985_algo;
37020
37021 /* Wrapper access functions for multiplexed SMBus */
37022 static DEFINE_MUTEX(nforce2_lock);
37023diff --git a/drivers/i2c/i2c-dev.c b/drivers/i2c/i2c-dev.c
37024index c3ccdea..5b3dc1a 100644
37025--- a/drivers/i2c/i2c-dev.c
37026+++ b/drivers/i2c/i2c-dev.c
37027@@ -271,7 +271,7 @@ static noinline int i2cdev_ioctl_rdrw(struct i2c_client *client,
37028 break;
37029 }
37030
37031- data_ptrs[i] = (u8 __user *)rdwr_pa[i].buf;
37032+ data_ptrs[i] = (u8 __force_user *)rdwr_pa[i].buf;
37033 rdwr_pa[i].buf = memdup_user(data_ptrs[i], rdwr_pa[i].len);
37034 if (IS_ERR(rdwr_pa[i].buf)) {
37035 res = PTR_ERR(rdwr_pa[i].buf);
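
__force_user above is this patch's sparse helper for deliberate address-space reinterpretation: rdwr_pa[i].buf is declared as a kernel pointer but at this point still holds a user address, and the cast makes that explicit so sparse keeps checking everything else. A sketch of the same step:

    #include <linux/i2c.h>
    #include <linux/string.h>
    #include <linux/err.h>

    static int dup_user_buf(struct i2c_msg *msg)
    {
            /* msg->buf currently carries a user address */
            u8 __user *uptr = (u8 __force_user *)msg->buf;
            void *kbuf = memdup_user(uptr, msg->len);

            if (IS_ERR(kbuf))
                    return PTR_ERR(kbuf);
            msg->buf = kbuf;
            return 0;
    }
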
37036diff --git a/drivers/ide/ide-cd.c b/drivers/ide/ide-cd.c
37037index 2ff6204..218c16e 100644
37038--- a/drivers/ide/ide-cd.c
37039+++ b/drivers/ide/ide-cd.c
37040@@ -768,7 +768,7 @@ static void cdrom_do_block_pc(ide_drive_t *drive, struct request *rq)
37041 alignment = queue_dma_alignment(q) | q->dma_pad_mask;
37042 if ((unsigned long)buf & alignment
37043 || blk_rq_bytes(rq) & q->dma_pad_mask
37044- || object_is_on_stack(buf))
37045+ || object_starts_on_stack(buf))
37046 drive->dma = 0;
37047 }
37048 }
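
object_starts_on_stack() is the hardened spelling of object_is_on_stack(): what matters when deciding whether a buffer may be handed to DMA is where the object begins on the current task's stack. The call site's logic, roughly:

    #include <linux/blkdev.h>
    #include <linux/sched.h>

    static bool may_use_dma(struct request_queue *q, void *buf,
                            unsigned int bytes)
    {
            unsigned long mask = queue_dma_alignment(q) | q->dma_pad_mask;

            /* misaligned or stack-resident buffers fall back to PIO */
            if (((unsigned long)buf & mask) || (bytes & q->dma_pad_mask) ||
                object_starts_on_stack(buf))
                    return false;
            return true;
    }
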
37049diff --git a/drivers/iio/industrialio-core.c b/drivers/iio/industrialio-core.c
37050index e145931..08bfc59 100644
37051--- a/drivers/iio/industrialio-core.c
37052+++ b/drivers/iio/industrialio-core.c
37053@@ -506,7 +506,7 @@ static ssize_t iio_write_channel_info(struct device *dev,
37054 }
37055
37056 static
37057-int __iio_device_attr_init(struct device_attribute *dev_attr,
37058+int __iio_device_attr_init(device_attribute_no_const *dev_attr,
37059 const char *postfix,
37060 struct iio_chan_spec const *chan,
37061 ssize_t (*readfunc)(struct device *dev,
37062diff --git a/drivers/infiniband/core/cm.c b/drivers/infiniband/core/cm.c
37063index 784b97c..c9ceadf 100644
37064--- a/drivers/infiniband/core/cm.c
37065+++ b/drivers/infiniband/core/cm.c
37066@@ -114,7 +114,7 @@ static char const counter_group_names[CM_COUNTER_GROUPS]
37067
37068 struct cm_counter_group {
37069 struct kobject obj;
37070- atomic_long_t counter[CM_ATTR_COUNT];
37071+ atomic_long_unchecked_t counter[CM_ATTR_COUNT];
37072 };
37073
37074 struct cm_counter_attribute {
37075@@ -1395,7 +1395,7 @@ static void cm_dup_req_handler(struct cm_work *work,
37076 struct ib_mad_send_buf *msg = NULL;
37077 int ret;
37078
37079- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
37080+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
37081 counter[CM_REQ_COUNTER]);
37082
37083 /* Quick state check to discard duplicate REQs. */
37084@@ -1779,7 +1779,7 @@ static void cm_dup_rep_handler(struct cm_work *work)
37085 if (!cm_id_priv)
37086 return;
37087
37088- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
37089+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
37090 counter[CM_REP_COUNTER]);
37091 ret = cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg);
37092 if (ret)
37093@@ -1946,7 +1946,7 @@ static int cm_rtu_handler(struct cm_work *work)
37094 if (cm_id_priv->id.state != IB_CM_REP_SENT &&
37095 cm_id_priv->id.state != IB_CM_MRA_REP_RCVD) {
37096 spin_unlock_irq(&cm_id_priv->lock);
37097- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
37098+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
37099 counter[CM_RTU_COUNTER]);
37100 goto out;
37101 }
37102@@ -2129,7 +2129,7 @@ static int cm_dreq_handler(struct cm_work *work)
37103 cm_id_priv = cm_acquire_id(dreq_msg->remote_comm_id,
37104 dreq_msg->local_comm_id);
37105 if (!cm_id_priv) {
37106- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
37107+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
37108 counter[CM_DREQ_COUNTER]);
37109 cm_issue_drep(work->port, work->mad_recv_wc);
37110 return -EINVAL;
37111@@ -2154,7 +2154,7 @@ static int cm_dreq_handler(struct cm_work *work)
37112 case IB_CM_MRA_REP_RCVD:
37113 break;
37114 case IB_CM_TIMEWAIT:
37115- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
37116+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
37117 counter[CM_DREQ_COUNTER]);
37118 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
37119 goto unlock;
37120@@ -2168,7 +2168,7 @@ static int cm_dreq_handler(struct cm_work *work)
37121 cm_free_msg(msg);
37122 goto deref;
37123 case IB_CM_DREQ_RCVD:
37124- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
37125+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
37126 counter[CM_DREQ_COUNTER]);
37127 goto unlock;
37128 default:
37129@@ -2535,7 +2535,7 @@ static int cm_mra_handler(struct cm_work *work)
37130 ib_modify_mad(cm_id_priv->av.port->mad_agent,
37131 cm_id_priv->msg, timeout)) {
37132 if (cm_id_priv->id.lap_state == IB_CM_MRA_LAP_RCVD)
37133- atomic_long_inc(&work->port->
37134+ atomic_long_inc_unchecked(&work->port->
37135 counter_group[CM_RECV_DUPLICATES].
37136 counter[CM_MRA_COUNTER]);
37137 goto out;
37138@@ -2544,7 +2544,7 @@ static int cm_mra_handler(struct cm_work *work)
37139 break;
37140 case IB_CM_MRA_REQ_RCVD:
37141 case IB_CM_MRA_REP_RCVD:
37142- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
37143+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
37144 counter[CM_MRA_COUNTER]);
37145 /* fall through */
37146 default:
37147@@ -2706,7 +2706,7 @@ static int cm_lap_handler(struct cm_work *work)
37148 case IB_CM_LAP_IDLE:
37149 break;
37150 case IB_CM_MRA_LAP_SENT:
37151- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
37152+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
37153 counter[CM_LAP_COUNTER]);
37154 if (cm_alloc_response_msg(work->port, work->mad_recv_wc, &msg))
37155 goto unlock;
37156@@ -2722,7 +2722,7 @@ static int cm_lap_handler(struct cm_work *work)
37157 cm_free_msg(msg);
37158 goto deref;
37159 case IB_CM_LAP_RCVD:
37160- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
37161+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
37162 counter[CM_LAP_COUNTER]);
37163 goto unlock;
37164 default:
37165@@ -3006,7 +3006,7 @@ static int cm_sidr_req_handler(struct cm_work *work)
37166 cur_cm_id_priv = cm_insert_remote_sidr(cm_id_priv);
37167 if (cur_cm_id_priv) {
37168 spin_unlock_irq(&cm.lock);
37169- atomic_long_inc(&work->port->counter_group[CM_RECV_DUPLICATES].
37170+ atomic_long_inc_unchecked(&work->port->counter_group[CM_RECV_DUPLICATES].
37171 counter[CM_SIDR_REQ_COUNTER]);
37172 goto out; /* Duplicate message. */
37173 }
37174@@ -3218,10 +3218,10 @@ static void cm_send_handler(struct ib_mad_agent *mad_agent,
37175 if (!msg->context[0] && (attr_index != CM_REJ_COUNTER))
37176 msg->retries = 1;
37177
37178- atomic_long_add(1 + msg->retries,
37179+ atomic_long_add_unchecked(1 + msg->retries,
37180 &port->counter_group[CM_XMIT].counter[attr_index]);
37181 if (msg->retries)
37182- atomic_long_add(msg->retries,
37183+ atomic_long_add_unchecked(msg->retries,
37184 &port->counter_group[CM_XMIT_RETRIES].
37185 counter[attr_index]);
37186
37187@@ -3431,7 +3431,7 @@ static void cm_recv_handler(struct ib_mad_agent *mad_agent,
37188 }
37189
37190 attr_id = be16_to_cpu(mad_recv_wc->recv_buf.mad->mad_hdr.attr_id);
37191- atomic_long_inc(&port->counter_group[CM_RECV].
37192+ atomic_long_inc_unchecked(&port->counter_group[CM_RECV].
37193 counter[attr_id - CM_ATTR_ID_OFFSET]);
37194
37195 work = kmalloc(sizeof *work + sizeof(struct ib_sa_path_rec) * paths,
37196@@ -3636,7 +3636,7 @@ static ssize_t cm_show_counter(struct kobject *obj, struct attribute *attr,
37197 cm_attr = container_of(attr, struct cm_counter_attribute, attr);
37198
37199 return sprintf(buf, "%ld\n",
37200- atomic_long_read(&group->counter[cm_attr->index]));
37201+ atomic_long_read_unchecked(&group->counter[cm_attr->index]));
37202 }
37203
37204 static const struct sysfs_ops cm_counter_ops = {
37205diff --git a/drivers/infiniband/core/fmr_pool.c b/drivers/infiniband/core/fmr_pool.c
37206index 9f5ad7c..588cd84 100644
37207--- a/drivers/infiniband/core/fmr_pool.c
37208+++ b/drivers/infiniband/core/fmr_pool.c
37209@@ -98,8 +98,8 @@ struct ib_fmr_pool {
37210
37211 struct task_struct *thread;
37212
37213- atomic_t req_ser;
37214- atomic_t flush_ser;
37215+ atomic_unchecked_t req_ser;
37216+ atomic_unchecked_t flush_ser;
37217
37218 wait_queue_head_t force_wait;
37219 };
37220@@ -179,10 +179,10 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
37221 struct ib_fmr_pool *pool = pool_ptr;
37222
37223 do {
37224- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) < 0) {
37225+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) < 0) {
37226 ib_fmr_batch_release(pool);
37227
37228- atomic_inc(&pool->flush_ser);
37229+ atomic_inc_unchecked(&pool->flush_ser);
37230 wake_up_interruptible(&pool->force_wait);
37231
37232 if (pool->flush_function)
37233@@ -190,7 +190,7 @@ static int ib_fmr_cleanup_thread(void *pool_ptr)
37234 }
37235
37236 set_current_state(TASK_INTERRUPTIBLE);
37237- if (atomic_read(&pool->flush_ser) - atomic_read(&pool->req_ser) >= 0 &&
37238+ if (atomic_read_unchecked(&pool->flush_ser) - atomic_read_unchecked(&pool->req_ser) >= 0 &&
37239 !kthread_should_stop())
37240 schedule();
37241 __set_current_state(TASK_RUNNING);
37242@@ -282,8 +282,8 @@ struct ib_fmr_pool *ib_create_fmr_pool(struct ib_pd *pd,
37243 pool->dirty_watermark = params->dirty_watermark;
37244 pool->dirty_len = 0;
37245 spin_lock_init(&pool->pool_lock);
37246- atomic_set(&pool->req_ser, 0);
37247- atomic_set(&pool->flush_ser, 0);
37248+ atomic_set_unchecked(&pool->req_ser, 0);
37249+ atomic_set_unchecked(&pool->flush_ser, 0);
37250 init_waitqueue_head(&pool->force_wait);
37251
37252 pool->thread = kthread_run(ib_fmr_cleanup_thread,
37253@@ -411,11 +411,11 @@ int ib_flush_fmr_pool(struct ib_fmr_pool *pool)
37254 }
37255 spin_unlock_irq(&pool->pool_lock);
37256
37257- serial = atomic_inc_return(&pool->req_ser);
37258+ serial = atomic_inc_return_unchecked(&pool->req_ser);
37259 wake_up_process(pool->thread);
37260
37261 if (wait_event_interruptible(pool->force_wait,
37262- atomic_read(&pool->flush_ser) - serial >= 0))
37263+ atomic_read_unchecked(&pool->flush_ser) - serial >= 0))
37264 return -EINTR;
37265
37266 return 0;
37267@@ -525,7 +525,7 @@ int ib_fmr_pool_unmap(struct ib_pool_fmr *fmr)
37268 } else {
37269 list_add_tail(&fmr->list, &pool->dirty_list);
37270 if (++pool->dirty_len >= pool->dirty_watermark) {
37271- atomic_inc(&pool->req_ser);
37272+ atomic_inc_unchecked(&pool->req_ser);
37273 wake_up_process(pool->thread);
37274 }
37275 }
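
The req_ser/flush_ser pair above is a good example of counters that must be allowed to overflow: "flush_ser - serial >= 0" is a wraparound-safe ordering test, so REFCOUNT's overflow trap would break the protocol — hence _unchecked. A condensed sketch of the handshake, assuming the unchecked helpers from this patch:

    #include <linux/atomic.h>
    #include <linux/wait.h>
    #include <linux/sched.h>

    static atomic_unchecked_t req_ser, flush_ser;
    static DECLARE_WAIT_QUEUE_HEAD(force_wait);

    /* requester: bump the request serial, then wait until the flusher's
     * serial has caught up (the comparison is wrap-safe) */
    static int request_flush(struct task_struct *flusher)
    {
            int serial = atomic_inc_return_unchecked(&req_ser);

            wake_up_process(flusher);
            if (wait_event_interruptible(force_wait,
                atomic_read_unchecked(&flush_ser) - serial >= 0))
                    return -EINTR;
            return 0;
    }
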
37276diff --git a/drivers/infiniband/hw/cxgb4/mem.c b/drivers/infiniband/hw/cxgb4/mem.c
37277index 4cb8eb2..146bf60 100644
37278--- a/drivers/infiniband/hw/cxgb4/mem.c
37279+++ b/drivers/infiniband/hw/cxgb4/mem.c
37280@@ -249,7 +249,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
37281 int err;
37282 struct fw_ri_tpte tpt;
37283 u32 stag_idx;
37284- static atomic_t key;
37285+ static atomic_unchecked_t key;
37286
37287 if (c4iw_fatal_error(rdev))
37288 return -EIO;
37289@@ -266,7 +266,7 @@ static int write_tpt_entry(struct c4iw_rdev *rdev, u32 reset_tpt_entry,
37290 if (rdev->stats.stag.cur > rdev->stats.stag.max)
37291 rdev->stats.stag.max = rdev->stats.stag.cur;
37292 mutex_unlock(&rdev->stats.lock);
37293- *stag = (stag_idx << 8) | (atomic_inc_return(&key) & 0xff);
37294+ *stag = (stag_idx << 8) | (atomic_inc_return_unchecked(&key) & 0xff);
37295 }
37296 PDBG("%s stag_state 0x%0x type 0x%0x pdid 0x%0x, stag_idx 0x%x\n",
37297 __func__, stag_state, type, pdid, stag_idx);
37298diff --git a/drivers/infiniband/hw/ipath/ipath_rc.c b/drivers/infiniband/hw/ipath/ipath_rc.c
37299index 79b3dbc..96e5fcc 100644
37300--- a/drivers/infiniband/hw/ipath/ipath_rc.c
37301+++ b/drivers/infiniband/hw/ipath/ipath_rc.c
37302@@ -1868,7 +1868,7 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
37303 struct ib_atomic_eth *ateth;
37304 struct ipath_ack_entry *e;
37305 u64 vaddr;
37306- atomic64_t *maddr;
37307+ atomic64_unchecked_t *maddr;
37308 u64 sdata;
37309 u32 rkey;
37310 u8 next;
37311@@ -1903,11 +1903,11 @@ void ipath_rc_rcv(struct ipath_ibdev *dev, struct ipath_ib_header *hdr,
37312 IB_ACCESS_REMOTE_ATOMIC)))
37313 goto nack_acc_unlck;
37314 /* Perform atomic OP and save result. */
37315- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
37316+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
37317 sdata = be64_to_cpu(ateth->swap_data);
37318 e = &qp->s_ack_queue[qp->r_head_ack_queue];
37319 e->atomic_data = (opcode == OP(FETCH_ADD)) ?
37320- (u64) atomic64_add_return(sdata, maddr) - sdata :
37321+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
37322 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
37323 be64_to_cpu(ateth->compare_data),
37324 sdata);
37325diff --git a/drivers/infiniband/hw/ipath/ipath_ruc.c b/drivers/infiniband/hw/ipath/ipath_ruc.c
37326index 1f95bba..9530f87 100644
37327--- a/drivers/infiniband/hw/ipath/ipath_ruc.c
37328+++ b/drivers/infiniband/hw/ipath/ipath_ruc.c
37329@@ -266,7 +266,7 @@ static void ipath_ruc_loopback(struct ipath_qp *sqp)
37330 unsigned long flags;
37331 struct ib_wc wc;
37332 u64 sdata;
37333- atomic64_t *maddr;
37334+ atomic64_unchecked_t *maddr;
37335 enum ib_wc_status send_status;
37336
37337 /*
37338@@ -382,11 +382,11 @@ again:
37339 IB_ACCESS_REMOTE_ATOMIC)))
37340 goto acc_err;
37341 /* Perform atomic OP and save result. */
37342- maddr = (atomic64_t *) qp->r_sge.sge.vaddr;
37343+ maddr = (atomic64_unchecked_t *) qp->r_sge.sge.vaddr;
37344 sdata = wqe->wr.wr.atomic.compare_add;
37345 *(u64 *) sqp->s_sge.sge.vaddr =
37346 (wqe->wr.opcode == IB_WR_ATOMIC_FETCH_AND_ADD) ?
37347- (u64) atomic64_add_return(sdata, maddr) - sdata :
37348+ (u64) atomic64_add_return_unchecked(sdata, maddr) - sdata :
37349 (u64) cmpxchg((u64 *) qp->r_sge.sge.vaddr,
37350 sdata, wqe->wr.wr.atomic.swap);
37351 goto send_comp;
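
In both ipath hunks the 64-bit cell being updated belongs to the RDMA wire protocol (FETCH_ADD on remotely registered memory), so its arithmetic must wrap exactly as the peer expects — another case where overflow-checked atomics would be wrong. The emulation step, in isolation:

    #include <linux/atomic.h>
    #include <linux/types.h>

    static u64 loopback_fetch_add(void *vaddr, u64 add)
    {
            atomic64_unchecked_t *maddr = (atomic64_unchecked_t *)vaddr;

            /* FETCH_ADD returns the pre-add value */
            return (u64)atomic64_add_return_unchecked(add, maddr) - add;
    }
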
37352diff --git a/drivers/infiniband/hw/mthca/mthca_cmd.c b/drivers/infiniband/hw/mthca/mthca_cmd.c
37353index 9d3e5c1..d9afe4a 100644
37354--- a/drivers/infiniband/hw/mthca/mthca_cmd.c
37355+++ b/drivers/infiniband/hw/mthca/mthca_cmd.c
37356@@ -772,7 +772,7 @@ static void mthca_setup_cmd_doorbells(struct mthca_dev *dev, u64 base)
37357 mthca_dbg(dev, "Mapped doorbell page for posting FW commands\n");
37358 }
37359
37360-int mthca_QUERY_FW(struct mthca_dev *dev)
37361+int __intentional_overflow(-1) mthca_QUERY_FW(struct mthca_dev *dev)
37362 {
37363 struct mthca_mailbox *mailbox;
37364 u32 *outbox;
37365diff --git a/drivers/infiniband/hw/mthca/mthca_mr.c b/drivers/infiniband/hw/mthca/mthca_mr.c
37366index ed9a989..e0c5871 100644
37367--- a/drivers/infiniband/hw/mthca/mthca_mr.c
37368+++ b/drivers/infiniband/hw/mthca/mthca_mr.c
37369@@ -426,7 +426,7 @@ static inline u32 adjust_key(struct mthca_dev *dev, u32 key)
37370 return key;
37371 }
37372
37373-int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
37374+int __intentional_overflow(-1) mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
37375 u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
37376 {
37377 struct mthca_mailbox *mailbox;
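
__intentional_overflow(-1) is an annotation for the size_overflow GCC plugin, which instruments integer expressions that can flow into allocation or copy sizes; the marker whitelists functions whose arithmetic wraps by design (-1 appears to cover the return value and all arguments). A hypothetical use:

    /* hypothetical helper whose wrap-around is deliberate */
    static u32 __intentional_overflow(-1) next_probe(u32 h)
    {
            return h * 2654435761U + 1;     /* hash step; wrapping expected */
    }
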
37378diff --git a/drivers/infiniband/hw/nes/nes.c b/drivers/infiniband/hw/nes/nes.c
37379index 4291410..d2ab1fb 100644
37380--- a/drivers/infiniband/hw/nes/nes.c
37381+++ b/drivers/infiniband/hw/nes/nes.c
37382@@ -98,7 +98,7 @@ MODULE_PARM_DESC(limit_maxrdreqsz, "Limit max read request size to 256 Bytes");
37383 LIST_HEAD(nes_adapter_list);
37384 static LIST_HEAD(nes_dev_list);
37385
37386-atomic_t qps_destroyed;
37387+atomic_unchecked_t qps_destroyed;
37388
37389 static unsigned int ee_flsh_adapter;
37390 static unsigned int sysfs_nonidx_addr;
37391@@ -269,7 +269,7 @@ static void nes_cqp_rem_ref_callback(struct nes_device *nesdev, struct nes_cqp_r
37392 struct nes_qp *nesqp = cqp_request->cqp_callback_pointer;
37393 struct nes_adapter *nesadapter = nesdev->nesadapter;
37394
37395- atomic_inc(&qps_destroyed);
37396+ atomic_inc_unchecked(&qps_destroyed);
37397
37398 /* Free the control structures */
37399
37400diff --git a/drivers/infiniband/hw/nes/nes.h b/drivers/infiniband/hw/nes/nes.h
37401index 33cc589..3bd6538 100644
37402--- a/drivers/infiniband/hw/nes/nes.h
37403+++ b/drivers/infiniband/hw/nes/nes.h
37404@@ -177,17 +177,17 @@ extern unsigned int nes_debug_level;
37405 extern unsigned int wqm_quanta;
37406 extern struct list_head nes_adapter_list;
37407
37408-extern atomic_t cm_connects;
37409-extern atomic_t cm_accepts;
37410-extern atomic_t cm_disconnects;
37411-extern atomic_t cm_closes;
37412-extern atomic_t cm_connecteds;
37413-extern atomic_t cm_connect_reqs;
37414-extern atomic_t cm_rejects;
37415-extern atomic_t mod_qp_timouts;
37416-extern atomic_t qps_created;
37417-extern atomic_t qps_destroyed;
37418-extern atomic_t sw_qps_destroyed;
37419+extern atomic_unchecked_t cm_connects;
37420+extern atomic_unchecked_t cm_accepts;
37421+extern atomic_unchecked_t cm_disconnects;
37422+extern atomic_unchecked_t cm_closes;
37423+extern atomic_unchecked_t cm_connecteds;
37424+extern atomic_unchecked_t cm_connect_reqs;
37425+extern atomic_unchecked_t cm_rejects;
37426+extern atomic_unchecked_t mod_qp_timouts;
37427+extern atomic_unchecked_t qps_created;
37428+extern atomic_unchecked_t qps_destroyed;
37429+extern atomic_unchecked_t sw_qps_destroyed;
37430 extern u32 mh_detected;
37431 extern u32 mh_pauses_sent;
37432 extern u32 cm_packets_sent;
37433@@ -196,16 +196,16 @@ extern u32 cm_packets_created;
37434 extern u32 cm_packets_received;
37435 extern u32 cm_packets_dropped;
37436 extern u32 cm_packets_retrans;
37437-extern atomic_t cm_listens_created;
37438-extern atomic_t cm_listens_destroyed;
37439+extern atomic_unchecked_t cm_listens_created;
37440+extern atomic_unchecked_t cm_listens_destroyed;
37441 extern u32 cm_backlog_drops;
37442-extern atomic_t cm_loopbacks;
37443-extern atomic_t cm_nodes_created;
37444-extern atomic_t cm_nodes_destroyed;
37445-extern atomic_t cm_accel_dropped_pkts;
37446-extern atomic_t cm_resets_recvd;
37447-extern atomic_t pau_qps_created;
37448-extern atomic_t pau_qps_destroyed;
37449+extern atomic_unchecked_t cm_loopbacks;
37450+extern atomic_unchecked_t cm_nodes_created;
37451+extern atomic_unchecked_t cm_nodes_destroyed;
37452+extern atomic_unchecked_t cm_accel_dropped_pkts;
37453+extern atomic_unchecked_t cm_resets_recvd;
37454+extern atomic_unchecked_t pau_qps_created;
37455+extern atomic_unchecked_t pau_qps_destroyed;
37456
37457 extern u32 int_mod_timer_init;
37458 extern u32 int_mod_cq_depth_256;
37459diff --git a/drivers/infiniband/hw/nes/nes_cm.c b/drivers/infiniband/hw/nes/nes_cm.c
37460index 24b9f1a..00fd004 100644
37461--- a/drivers/infiniband/hw/nes/nes_cm.c
37462+++ b/drivers/infiniband/hw/nes/nes_cm.c
37463@@ -68,14 +68,14 @@ u32 cm_packets_dropped;
37464 u32 cm_packets_retrans;
37465 u32 cm_packets_created;
37466 u32 cm_packets_received;
37467-atomic_t cm_listens_created;
37468-atomic_t cm_listens_destroyed;
37469+atomic_unchecked_t cm_listens_created;
37470+atomic_unchecked_t cm_listens_destroyed;
37471 u32 cm_backlog_drops;
37472-atomic_t cm_loopbacks;
37473-atomic_t cm_nodes_created;
37474-atomic_t cm_nodes_destroyed;
37475-atomic_t cm_accel_dropped_pkts;
37476-atomic_t cm_resets_recvd;
37477+atomic_unchecked_t cm_loopbacks;
37478+atomic_unchecked_t cm_nodes_created;
37479+atomic_unchecked_t cm_nodes_destroyed;
37480+atomic_unchecked_t cm_accel_dropped_pkts;
37481+atomic_unchecked_t cm_resets_recvd;
37482
37483 static inline int mini_cm_accelerated(struct nes_cm_core *, struct nes_cm_node *);
37484 static struct nes_cm_listener *mini_cm_listen(struct nes_cm_core *, struct nes_vnic *, struct nes_cm_info *);
37485@@ -148,13 +148,13 @@ static struct nes_cm_ops nes_cm_api = {
37486
37487 static struct nes_cm_core *g_cm_core;
37488
37489-atomic_t cm_connects;
37490-atomic_t cm_accepts;
37491-atomic_t cm_disconnects;
37492-atomic_t cm_closes;
37493-atomic_t cm_connecteds;
37494-atomic_t cm_connect_reqs;
37495-atomic_t cm_rejects;
37496+atomic_unchecked_t cm_connects;
37497+atomic_unchecked_t cm_accepts;
37498+atomic_unchecked_t cm_disconnects;
37499+atomic_unchecked_t cm_closes;
37500+atomic_unchecked_t cm_connecteds;
37501+atomic_unchecked_t cm_connect_reqs;
37502+atomic_unchecked_t cm_rejects;
37503
37504 int nes_add_ref_cm_node(struct nes_cm_node *cm_node)
37505 {
37506@@ -1272,7 +1272,7 @@ static int mini_cm_dec_refcnt_listen(struct nes_cm_core *cm_core,
37507 kfree(listener);
37508 listener = NULL;
37509 ret = 0;
37510- atomic_inc(&cm_listens_destroyed);
37511+ atomic_inc_unchecked(&cm_listens_destroyed);
37512 } else {
37513 spin_unlock_irqrestore(&cm_core->listen_list_lock, flags);
37514 }
37515@@ -1466,7 +1466,7 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
37516 cm_node->rem_mac);
37517
37518 add_hte_node(cm_core, cm_node);
37519- atomic_inc(&cm_nodes_created);
37520+ atomic_inc_unchecked(&cm_nodes_created);
37521
37522 return cm_node;
37523 }
37524@@ -1524,7 +1524,7 @@ static int rem_ref_cm_node(struct nes_cm_core *cm_core,
37525 }
37526
37527 atomic_dec(&cm_core->node_cnt);
37528- atomic_inc(&cm_nodes_destroyed);
37529+ atomic_inc_unchecked(&cm_nodes_destroyed);
37530 nesqp = cm_node->nesqp;
37531 if (nesqp) {
37532 nesqp->cm_node = NULL;
37533@@ -1588,7 +1588,7 @@ static int process_options(struct nes_cm_node *cm_node, u8 *optionsloc,
37534
37535 static void drop_packet(struct sk_buff *skb)
37536 {
37537- atomic_inc(&cm_accel_dropped_pkts);
37538+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
37539 dev_kfree_skb_any(skb);
37540 }
37541
37542@@ -1651,7 +1651,7 @@ static void handle_rst_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
37543 {
37544
37545 int reset = 0; /* whether to send reset in case of err.. */
37546- atomic_inc(&cm_resets_recvd);
37547+ atomic_inc_unchecked(&cm_resets_recvd);
37548 nes_debug(NES_DBG_CM, "Received Reset, cm_node = %p, state = %u."
37549 " refcnt=%d\n", cm_node, cm_node->state,
37550 atomic_read(&cm_node->ref_count));
37551@@ -2292,7 +2292,7 @@ static struct nes_cm_node *mini_cm_connect(struct nes_cm_core *cm_core,
37552 rem_ref_cm_node(cm_node->cm_core, cm_node);
37553 return NULL;
37554 }
37555- atomic_inc(&cm_loopbacks);
37556+ atomic_inc_unchecked(&cm_loopbacks);
37557 loopbackremotenode->loopbackpartner = cm_node;
37558 loopbackremotenode->tcp_cntxt.rcv_wscale =
37559 NES_CM_DEFAULT_RCV_WND_SCALE;
37560@@ -2567,7 +2567,7 @@ static int mini_cm_recv_pkt(struct nes_cm_core *cm_core,
37561 nes_queue_mgt_skbs(skb, nesvnic, cm_node->nesqp);
37562 else {
37563 rem_ref_cm_node(cm_core, cm_node);
37564- atomic_inc(&cm_accel_dropped_pkts);
37565+ atomic_inc_unchecked(&cm_accel_dropped_pkts);
37566 dev_kfree_skb_any(skb);
37567 }
37568 break;
37569@@ -2875,7 +2875,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
37570
37571 if ((cm_id) && (cm_id->event_handler)) {
37572 if (issue_disconn) {
37573- atomic_inc(&cm_disconnects);
37574+ atomic_inc_unchecked(&cm_disconnects);
37575 cm_event.event = IW_CM_EVENT_DISCONNECT;
37576 cm_event.status = disconn_status;
37577 cm_event.local_addr = cm_id->local_addr;
37578@@ -2897,7 +2897,7 @@ static int nes_cm_disconn_true(struct nes_qp *nesqp)
37579 }
37580
37581 if (issue_close) {
37582- atomic_inc(&cm_closes);
37583+ atomic_inc_unchecked(&cm_closes);
37584 nes_disconnect(nesqp, 1);
37585
37586 cm_id->provider_data = nesqp;
37587@@ -3033,7 +3033,7 @@ int nes_accept(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
37588
37589 nes_debug(NES_DBG_CM, "QP%u, cm_node=%p, jiffies = %lu listener = %p\n",
37590 nesqp->hwqp.qp_id, cm_node, jiffies, cm_node->listener);
37591- atomic_inc(&cm_accepts);
37592+ atomic_inc_unchecked(&cm_accepts);
37593
37594 nes_debug(NES_DBG_CM, "netdev refcnt = %u.\n",
37595 netdev_refcnt_read(nesvnic->netdev));
37596@@ -3228,7 +3228,7 @@ int nes_reject(struct iw_cm_id *cm_id, const void *pdata, u8 pdata_len)
37597 struct nes_cm_core *cm_core;
37598 u8 *start_buff;
37599
37600- atomic_inc(&cm_rejects);
37601+ atomic_inc_unchecked(&cm_rejects);
37602 cm_node = (struct nes_cm_node *)cm_id->provider_data;
37603 loopback = cm_node->loopbackpartner;
37604 cm_core = cm_node->cm_core;
37605@@ -3288,7 +3288,7 @@ int nes_connect(struct iw_cm_id *cm_id, struct iw_cm_conn_param *conn_param)
37606 ntohl(cm_id->local_addr.sin_addr.s_addr),
37607 ntohs(cm_id->local_addr.sin_port));
37608
37609- atomic_inc(&cm_connects);
37610+ atomic_inc_unchecked(&cm_connects);
37611 nesqp->active_conn = 1;
37612
37613 /* cache the cm_id in the qp */
37614@@ -3398,7 +3398,7 @@ int nes_create_listen(struct iw_cm_id *cm_id, int backlog)
37615 g_cm_core->api->stop_listener(g_cm_core, (void *)cm_node);
37616 return err;
37617 }
37618- atomic_inc(&cm_listens_created);
37619+ atomic_inc_unchecked(&cm_listens_created);
37620 }
37621
37622 cm_id->add_ref(cm_id);
37623@@ -3499,7 +3499,7 @@ static void cm_event_connected(struct nes_cm_event *event)
37624
37625 if (nesqp->destroyed)
37626 return;
37627- atomic_inc(&cm_connecteds);
37628+ atomic_inc_unchecked(&cm_connecteds);
37629 nes_debug(NES_DBG_CM, "QP%u attempting to connect to 0x%08X:0x%04X on"
37630 " local port 0x%04X. jiffies = %lu.\n",
37631 nesqp->hwqp.qp_id,
37632@@ -3679,7 +3679,7 @@ static void cm_event_reset(struct nes_cm_event *event)
37633
37634 cm_id->add_ref(cm_id);
37635 ret = cm_id->event_handler(cm_id, &cm_event);
37636- atomic_inc(&cm_closes);
37637+ atomic_inc_unchecked(&cm_closes);
37638 cm_event.event = IW_CM_EVENT_CLOSE;
37639 cm_event.status = 0;
37640 cm_event.provider_data = cm_id->provider_data;
37641@@ -3715,7 +3715,7 @@ static void cm_event_mpa_req(struct nes_cm_event *event)
37642 return;
37643 cm_id = cm_node->cm_id;
37644
37645- atomic_inc(&cm_connect_reqs);
37646+ atomic_inc_unchecked(&cm_connect_reqs);
37647 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
37648 cm_node, cm_id, jiffies);
37649
37650@@ -3755,7 +3755,7 @@ static void cm_event_mpa_reject(struct nes_cm_event *event)
37651 return;
37652 cm_id = cm_node->cm_id;
37653
37654- atomic_inc(&cm_connect_reqs);
37655+ atomic_inc_unchecked(&cm_connect_reqs);
37656 nes_debug(NES_DBG_CM, "cm_node = %p - cm_id = %p, jiffies = %lu\n",
37657 cm_node, cm_id, jiffies);
37658
37659diff --git a/drivers/infiniband/hw/nes/nes_mgt.c b/drivers/infiniband/hw/nes/nes_mgt.c
37660index 4166452..fc952c3 100644
37661--- a/drivers/infiniband/hw/nes/nes_mgt.c
37662+++ b/drivers/infiniband/hw/nes/nes_mgt.c
37663@@ -40,8 +40,8 @@
37664 #include "nes.h"
37665 #include "nes_mgt.h"
37666
37667-atomic_t pau_qps_created;
37668-atomic_t pau_qps_destroyed;
37669+atomic_unchecked_t pau_qps_created;
37670+atomic_unchecked_t pau_qps_destroyed;
37671
37672 static void nes_replenish_mgt_rq(struct nes_vnic_mgt *mgtvnic)
37673 {
37674@@ -621,7 +621,7 @@ void nes_destroy_pau_qp(struct nes_device *nesdev, struct nes_qp *nesqp)
37675 {
37676 struct sk_buff *skb;
37677 unsigned long flags;
37678- atomic_inc(&pau_qps_destroyed);
37679+ atomic_inc_unchecked(&pau_qps_destroyed);
37680
37681 /* Free packets that have not yet been forwarded */
37682 /* Lock is acquired by skb_dequeue when removing the skb */
37683@@ -810,7 +810,7 @@ static void nes_mgt_ce_handler(struct nes_device *nesdev, struct nes_hw_nic_cq *
37684 cq->cq_vbase[head].cqe_words[NES_NIC_CQE_HASH_RCVNXT]);
37685 skb_queue_head_init(&nesqp->pau_list);
37686 spin_lock_init(&nesqp->pau_lock);
37687- atomic_inc(&pau_qps_created);
37688+ atomic_inc_unchecked(&pau_qps_created);
37689 nes_change_quad_hash(nesdev, mgtvnic->nesvnic, nesqp);
37690 }
37691
37692diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
37693index 49eb511..a774366 100644
37694--- a/drivers/infiniband/hw/nes/nes_nic.c
37695+++ b/drivers/infiniband/hw/nes/nes_nic.c
37696@@ -1273,39 +1273,39 @@ static void nes_netdev_get_ethtool_stats(struct net_device *netdev,
37697 target_stat_values[++index] = mh_detected;
37698 target_stat_values[++index] = mh_pauses_sent;
37699 target_stat_values[++index] = nesvnic->endnode_ipv4_tcp_retransmits;
37700- target_stat_values[++index] = atomic_read(&cm_connects);
37701- target_stat_values[++index] = atomic_read(&cm_accepts);
37702- target_stat_values[++index] = atomic_read(&cm_disconnects);
37703- target_stat_values[++index] = atomic_read(&cm_connecteds);
37704- target_stat_values[++index] = atomic_read(&cm_connect_reqs);
37705- target_stat_values[++index] = atomic_read(&cm_rejects);
37706- target_stat_values[++index] = atomic_read(&mod_qp_timouts);
37707- target_stat_values[++index] = atomic_read(&qps_created);
37708- target_stat_values[++index] = atomic_read(&sw_qps_destroyed);
37709- target_stat_values[++index] = atomic_read(&qps_destroyed);
37710- target_stat_values[++index] = atomic_read(&cm_closes);
37711+ target_stat_values[++index] = atomic_read_unchecked(&cm_connects);
37712+ target_stat_values[++index] = atomic_read_unchecked(&cm_accepts);
37713+ target_stat_values[++index] = atomic_read_unchecked(&cm_disconnects);
37714+ target_stat_values[++index] = atomic_read_unchecked(&cm_connecteds);
37715+ target_stat_values[++index] = atomic_read_unchecked(&cm_connect_reqs);
37716+ target_stat_values[++index] = atomic_read_unchecked(&cm_rejects);
37717+ target_stat_values[++index] = atomic_read_unchecked(&mod_qp_timouts);
37718+ target_stat_values[++index] = atomic_read_unchecked(&qps_created);
37719+ target_stat_values[++index] = atomic_read_unchecked(&sw_qps_destroyed);
37720+ target_stat_values[++index] = atomic_read_unchecked(&qps_destroyed);
37721+ target_stat_values[++index] = atomic_read_unchecked(&cm_closes);
37722 target_stat_values[++index] = cm_packets_sent;
37723 target_stat_values[++index] = cm_packets_bounced;
37724 target_stat_values[++index] = cm_packets_created;
37725 target_stat_values[++index] = cm_packets_received;
37726 target_stat_values[++index] = cm_packets_dropped;
37727 target_stat_values[++index] = cm_packets_retrans;
37728- target_stat_values[++index] = atomic_read(&cm_listens_created);
37729- target_stat_values[++index] = atomic_read(&cm_listens_destroyed);
37730+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_created);
37731+ target_stat_values[++index] = atomic_read_unchecked(&cm_listens_destroyed);
37732 target_stat_values[++index] = cm_backlog_drops;
37733- target_stat_values[++index] = atomic_read(&cm_loopbacks);
37734- target_stat_values[++index] = atomic_read(&cm_nodes_created);
37735- target_stat_values[++index] = atomic_read(&cm_nodes_destroyed);
37736- target_stat_values[++index] = atomic_read(&cm_accel_dropped_pkts);
37737- target_stat_values[++index] = atomic_read(&cm_resets_recvd);
37738+ target_stat_values[++index] = atomic_read_unchecked(&cm_loopbacks);
37739+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_created);
37740+ target_stat_values[++index] = atomic_read_unchecked(&cm_nodes_destroyed);
37741+ target_stat_values[++index] = atomic_read_unchecked(&cm_accel_dropped_pkts);
37742+ target_stat_values[++index] = atomic_read_unchecked(&cm_resets_recvd);
37743 target_stat_values[++index] = nesadapter->free_4kpbl;
37744 target_stat_values[++index] = nesadapter->free_256pbl;
37745 target_stat_values[++index] = int_mod_timer_init;
37746 target_stat_values[++index] = nesvnic->lro_mgr.stats.aggregated;
37747 target_stat_values[++index] = nesvnic->lro_mgr.stats.flushed;
37748 target_stat_values[++index] = nesvnic->lro_mgr.stats.no_desc;
37749- target_stat_values[++index] = atomic_read(&pau_qps_created);
37750- target_stat_values[++index] = atomic_read(&pau_qps_destroyed);
37751+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_created);
37752+ target_stat_values[++index] = atomic_read_unchecked(&pau_qps_destroyed);
37753 }
37754
37755 /**
37756diff --git a/drivers/infiniband/hw/nes/nes_verbs.c b/drivers/infiniband/hw/nes/nes_verbs.c
37757index 8f67fe2..8960859 100644
37758--- a/drivers/infiniband/hw/nes/nes_verbs.c
37759+++ b/drivers/infiniband/hw/nes/nes_verbs.c
37760@@ -46,9 +46,9 @@
37761
37762 #include <rdma/ib_umem.h>
37763
37764-atomic_t mod_qp_timouts;
37765-atomic_t qps_created;
37766-atomic_t sw_qps_destroyed;
37767+atomic_unchecked_t mod_qp_timouts;
37768+atomic_unchecked_t qps_created;
37769+atomic_unchecked_t sw_qps_destroyed;
37770
37771 static void nes_unregister_ofa_device(struct nes_ib_device *nesibdev);
37772
37773@@ -1134,7 +1134,7 @@ static struct ib_qp *nes_create_qp(struct ib_pd *ibpd,
37774 if (init_attr->create_flags)
37775 return ERR_PTR(-EINVAL);
37776
37777- atomic_inc(&qps_created);
37778+ atomic_inc_unchecked(&qps_created);
37779 switch (init_attr->qp_type) {
37780 case IB_QPT_RC:
37781 if (nes_drv_opt & NES_DRV_OPT_NO_INLINE_DATA) {
37782@@ -1465,7 +1465,7 @@ static int nes_destroy_qp(struct ib_qp *ibqp)
37783 struct iw_cm_event cm_event;
37784 int ret = 0;
37785
37786- atomic_inc(&sw_qps_destroyed);
37787+ atomic_inc_unchecked(&sw_qps_destroyed);
37788 nesqp->destroyed = 1;
37789
37790 /* Blow away the connection if it exists. */
37791diff --git a/drivers/infiniband/hw/qib/qib.h b/drivers/infiniband/hw/qib/qib.h
37792index 4d11575..3e890e5 100644
37793--- a/drivers/infiniband/hw/qib/qib.h
37794+++ b/drivers/infiniband/hw/qib/qib.h
37795@@ -51,6 +51,7 @@
37796 #include <linux/completion.h>
37797 #include <linux/kref.h>
37798 #include <linux/sched.h>
37799+#include <linux/slab.h>
37800
37801 #include "qib_common.h"
37802 #include "qib_verbs.h"
37803diff --git a/drivers/input/gameport/gameport.c b/drivers/input/gameport/gameport.c
37804index da739d9..da1c7f4 100644
37805--- a/drivers/input/gameport/gameport.c
37806+++ b/drivers/input/gameport/gameport.c
37807@@ -487,14 +487,14 @@ EXPORT_SYMBOL(gameport_set_phys);
37808 */
37809 static void gameport_init_port(struct gameport *gameport)
37810 {
37811- static atomic_t gameport_no = ATOMIC_INIT(0);
37812+ static atomic_unchecked_t gameport_no = ATOMIC_INIT(0);
37813
37814 __module_get(THIS_MODULE);
37815
37816 mutex_init(&gameport->drv_mutex);
37817 device_initialize(&gameport->dev);
37818 dev_set_name(&gameport->dev, "gameport%lu",
37819- (unsigned long)atomic_inc_return(&gameport_no) - 1);
37820+ (unsigned long)atomic_inc_return_unchecked(&gameport_no) - 1);
37821 gameport->dev.bus = &gameport_bus;
37822 gameport->dev.release = gameport_release_port;
37823 if (gameport->parent)
37824diff --git a/drivers/input/input.c b/drivers/input/input.c
37825index c044699..174d71a 100644
37826--- a/drivers/input/input.c
37827+++ b/drivers/input/input.c
37828@@ -2019,7 +2019,7 @@ static void devm_input_device_unregister(struct device *dev, void *res)
37829 */
37830 int input_register_device(struct input_dev *dev)
37831 {
37832- static atomic_t input_no = ATOMIC_INIT(0);
37833+ static atomic_unchecked_t input_no = ATOMIC_INIT(0);
37834 struct input_devres *devres = NULL;
37835 struct input_handler *handler;
37836 unsigned int packet_size;
37837@@ -2074,7 +2074,7 @@ int input_register_device(struct input_dev *dev)
37838 dev->setkeycode = input_default_setkeycode;
37839
37840 dev_set_name(&dev->dev, "input%ld",
37841- (unsigned long) atomic_inc_return(&input_no) - 1);
37842+ (unsigned long) atomic_inc_return_unchecked(&input_no) - 1);
37843
37844 error = device_add(&dev->dev);
37845 if (error)
37846diff --git a/drivers/input/joystick/sidewinder.c b/drivers/input/joystick/sidewinder.c
37847index 04c69af..5f92d00 100644
37848--- a/drivers/input/joystick/sidewinder.c
37849+++ b/drivers/input/joystick/sidewinder.c
37850@@ -30,6 +30,7 @@
37851 #include <linux/kernel.h>
37852 #include <linux/module.h>
37853 #include <linux/slab.h>
37854+#include <linux/sched.h>
37855 #include <linux/init.h>
37856 #include <linux/input.h>
37857 #include <linux/gameport.h>
37858diff --git a/drivers/input/joystick/xpad.c b/drivers/input/joystick/xpad.c
37859index fa061d4..4a6957c 100644
37860--- a/drivers/input/joystick/xpad.c
37861+++ b/drivers/input/joystick/xpad.c
37862@@ -735,7 +735,7 @@ static void xpad_led_set(struct led_classdev *led_cdev,
37863
37864 static int xpad_led_probe(struct usb_xpad *xpad)
37865 {
37866- static atomic_t led_seq = ATOMIC_INIT(0);
37867+ static atomic_unchecked_t led_seq = ATOMIC_INIT(0);
37868 long led_no;
37869 struct xpad_led *led;
37870 struct led_classdev *led_cdev;
37871@@ -748,7 +748,7 @@ static int xpad_led_probe(struct usb_xpad *xpad)
37872 if (!led)
37873 return -ENOMEM;
37874
37875- led_no = (long)atomic_inc_return(&led_seq) - 1;
37876+ led_no = (long)atomic_inc_return_unchecked(&led_seq) - 1;
37877
37878 snprintf(led->name, sizeof(led->name), "xpad%ld", led_no);
37879 led->xpad = xpad;
37880diff --git a/drivers/input/mouse/psmouse.h b/drivers/input/mouse/psmouse.h
37881index 2f0b39d..7370f13 100644
37882--- a/drivers/input/mouse/psmouse.h
37883+++ b/drivers/input/mouse/psmouse.h
37884@@ -116,7 +116,7 @@ struct psmouse_attribute {
37885 ssize_t (*set)(struct psmouse *psmouse, void *data,
37886 const char *buf, size_t count);
37887 bool protect;
37888-};
37889+} __do_const;
37890 #define to_psmouse_attr(a) container_of((a), struct psmouse_attribute, dattr)
37891
37892 ssize_t psmouse_attr_show_helper(struct device *dev, struct device_attribute *attr,
37893diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
37894index 4c842c3..590b0bf 100644
37895--- a/drivers/input/mousedev.c
37896+++ b/drivers/input/mousedev.c
37897@@ -738,7 +738,7 @@ static ssize_t mousedev_read(struct file *file, char __user *buffer,
37898
37899 spin_unlock_irq(&client->packet_lock);
37900
37901- if (copy_to_user(buffer, data, count))
37902+ if (count > sizeof(data) || copy_to_user(buffer, data, count))
37903 return -EFAULT;
37904
37905 return count;
37906diff --git a/drivers/input/serio/serio.c b/drivers/input/serio/serio.c
37907index 25fc597..558bf3b3 100644
37908--- a/drivers/input/serio/serio.c
37909+++ b/drivers/input/serio/serio.c
37910@@ -496,7 +496,7 @@ static void serio_release_port(struct device *dev)
37911 */
37912 static void serio_init_port(struct serio *serio)
37913 {
37914- static atomic_t serio_no = ATOMIC_INIT(0);
37915+ static atomic_unchecked_t serio_no = ATOMIC_INIT(0);
37916
37917 __module_get(THIS_MODULE);
37918
37919@@ -507,7 +507,7 @@ static void serio_init_port(struct serio *serio)
37920 mutex_init(&serio->drv_mutex);
37921 device_initialize(&serio->dev);
37922 dev_set_name(&serio->dev, "serio%ld",
37923- (long)atomic_inc_return(&serio_no) - 1);
37924+ (long)atomic_inc_return_unchecked(&serio_no) - 1);
37925 serio->dev.bus = &serio_bus;
37926 serio->dev.release = serio_release_port;
37927 serio->dev.groups = serio_device_attr_groups;
37928diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
37929index d8f98b1..f62a640 100644
37930--- a/drivers/iommu/iommu.c
37931+++ b/drivers/iommu/iommu.c
37932@@ -583,7 +583,7 @@ static struct notifier_block iommu_bus_nb = {
37933 static void iommu_bus_init(struct bus_type *bus, struct iommu_ops *ops)
37934 {
37935 bus_register_notifier(bus, &iommu_bus_nb);
37936- bus_for_each_dev(bus, NULL, ops, add_iommu_group);
37937+ bus_for_each_dev(bus, NULL, (void *)ops, add_iommu_group);
37938 }
37939
37940 /**
37941diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
37942index dcfea4e..f4226b2 100644
37943--- a/drivers/iommu/irq_remapping.c
37944+++ b/drivers/iommu/irq_remapping.c
37945@@ -354,7 +354,7 @@ int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
37946 void panic_if_irq_remap(const char *msg)
37947 {
37948 if (irq_remapping_enabled)
37949- panic(msg);
37950+ panic("%s", msg);
37951 }
37952
37953 static void ir_ack_apic_edge(struct irq_data *data)
37954@@ -375,10 +375,12 @@ static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
37955
37956 void irq_remap_modify_chip_defaults(struct irq_chip *chip)
37957 {
37958- chip->irq_print_chip = ir_print_prefix;
37959- chip->irq_ack = ir_ack_apic_edge;
37960- chip->irq_eoi = ir_ack_apic_level;
37961- chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
37962+ pax_open_kernel();
37963+ *(void **)&chip->irq_print_chip = ir_print_prefix;
37964+ *(void **)&chip->irq_ack = ir_ack_apic_edge;
37965+ *(void **)&chip->irq_eoi = ir_ack_apic_level;
37966+ *(void **)&chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
37967+ pax_close_kernel();
37968 }
37969
37970 bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
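
Two things happen in the irq_remapping.c hunks: panic(msg) becomes panic("%s", msg) so a message containing % sequences can never be interpreted as a format string, and the irq_chip updates are bracketed by pax_open_kernel()/pax_close_kernel() because KERNEXEC keeps such function-pointer structures read-only (the *(void **)& casts strip the constified type). A rough userspace model of the write-window idea, with mprotect() playing the role of the CR0.WP toggle; every name here is invented:

    /* Model: a function-pointer table lives on a read-only page and is
     * only writable inside a short, explicit window. Sketch only; error
     * handling omitted. */
    #include <stddef.h>
    #include <sys/mman.h>

    struct chip_ops { void (*ack)(void); };

    static struct chip_ops *table;

    static void setup(void)
    {
            /* give the table its own page so protection flips are safe */
            table = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            mprotect(table, 4096, PROT_READ);
    }

    static void install_ack(void (*fn)(void))
    {
            mprotect(table, 4096, PROT_READ | PROT_WRITE);  /* pax_open_kernel()  */
            table->ack = fn;
            mprotect(table, 4096, PROT_READ);               /* pax_close_kernel() */
    }
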
37971diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
37972index 19ceaa6..3625818 100644
37973--- a/drivers/irqchip/irq-gic.c
37974+++ b/drivers/irqchip/irq-gic.c
37975@@ -84,7 +84,7 @@ static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
37976 * Supported arch specific GIC irq extension.
37977 * Default make them NULL.
37978 */
37979-struct irq_chip gic_arch_extn = {
37980+irq_chip_no_const gic_arch_extn = {
37981 .irq_eoi = NULL,
37982 .irq_mask = NULL,
37983 .irq_unmask = NULL,
37984@@ -333,7 +333,7 @@ static void gic_handle_cascade_irq(unsigned int irq, struct irq_desc *desc)
37985 chained_irq_exit(chip, desc);
37986 }
37987
37988-static struct irq_chip gic_chip = {
37989+static irq_chip_no_const gic_chip __read_only = {
37990 .name = "GIC",
37991 .irq_mask = gic_mask_irq,
37992 .irq_unmask = gic_unmask_irq,
37993diff --git a/drivers/isdn/capi/capi.c b/drivers/isdn/capi/capi.c
37994index ac6f72b..81150f2 100644
37995--- a/drivers/isdn/capi/capi.c
37996+++ b/drivers/isdn/capi/capi.c
37997@@ -81,8 +81,8 @@ struct capiminor {
37998
37999 struct capi20_appl *ap;
38000 u32 ncci;
38001- atomic_t datahandle;
38002- atomic_t msgid;
38003+ atomic_unchecked_t datahandle;
38004+ atomic_unchecked_t msgid;
38005
38006 struct tty_port port;
38007 int ttyinstop;
38008@@ -391,7 +391,7 @@ gen_data_b3_resp_for(struct capiminor *mp, struct sk_buff *skb)
38009 capimsg_setu16(s, 2, mp->ap->applid);
38010 capimsg_setu8 (s, 4, CAPI_DATA_B3);
38011 capimsg_setu8 (s, 5, CAPI_RESP);
38012- capimsg_setu16(s, 6, atomic_inc_return(&mp->msgid));
38013+ capimsg_setu16(s, 6, atomic_inc_return_unchecked(&mp->msgid));
38014 capimsg_setu32(s, 8, mp->ncci);
38015 capimsg_setu16(s, 12, datahandle);
38016 }
38017@@ -512,14 +512,14 @@ static void handle_minor_send(struct capiminor *mp)
38018 mp->outbytes -= len;
38019 spin_unlock_bh(&mp->outlock);
38020
38021- datahandle = atomic_inc_return(&mp->datahandle);
38022+ datahandle = atomic_inc_return_unchecked(&mp->datahandle);
38023 skb_push(skb, CAPI_DATA_B3_REQ_LEN);
38024 memset(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
38025 capimsg_setu16(skb->data, 0, CAPI_DATA_B3_REQ_LEN);
38026 capimsg_setu16(skb->data, 2, mp->ap->applid);
38027 capimsg_setu8 (skb->data, 4, CAPI_DATA_B3);
38028 capimsg_setu8 (skb->data, 5, CAPI_REQ);
38029- capimsg_setu16(skb->data, 6, atomic_inc_return(&mp->msgid));
38030+ capimsg_setu16(skb->data, 6, atomic_inc_return_unchecked(&mp->msgid));
38031 capimsg_setu32(skb->data, 8, mp->ncci); /* NCCI */
38032 capimsg_setu32(skb->data, 12, (u32)(long)skb->data);/* Data32 */
38033 capimsg_setu16(skb->data, 16, len); /* Data length */
38034diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
38035index 600c79b..3752bab 100644
38036--- a/drivers/isdn/gigaset/interface.c
38037+++ b/drivers/isdn/gigaset/interface.c
38038@@ -130,9 +130,9 @@ static int if_open(struct tty_struct *tty, struct file *filp)
38039 }
38040 tty->driver_data = cs;
38041
38042- ++cs->port.count;
38043+ atomic_inc(&cs->port.count);
38044
38045- if (cs->port.count == 1) {
38046+ if (atomic_read(&cs->port.count) == 1) {
38047 tty_port_tty_set(&cs->port, tty);
38048 cs->port.low_latency = 1;
38049 }
38050@@ -156,9 +156,9 @@ static void if_close(struct tty_struct *tty, struct file *filp)
38051
38052 if (!cs->connected)
38053 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
38054- else if (!cs->port.count)
38055+ else if (!atomic_read(&cs->port.count))
38056 dev_warn(cs->dev, "%s: device not opened\n", __func__);
38057- else if (!--cs->port.count)
38058+ else if (!atomic_dec_return(&cs->port.count))
38059 tty_port_tty_set(&cs->port, NULL);
38060
38061 mutex_unlock(&cs->mutex);
38062diff --git a/drivers/isdn/hardware/avm/b1.c b/drivers/isdn/hardware/avm/b1.c
38063index 4d9b195..455075c 100644
38064--- a/drivers/isdn/hardware/avm/b1.c
38065+++ b/drivers/isdn/hardware/avm/b1.c
38066@@ -176,7 +176,7 @@ int b1_load_t4file(avmcard *card, capiloaddatapart *t4file)
38067 }
38068 if (left) {
38069 if (t4file->user) {
38070- if (copy_from_user(buf, dp, left))
38071+ if (left > sizeof buf || copy_from_user(buf, dp, left))
38072 return -EFAULT;
38073 } else {
38074 memcpy(buf, dp, left);
38075@@ -224,7 +224,7 @@ int b1_load_config(avmcard *card, capiloaddatapart *config)
38076 }
38077 if (left) {
38078 if (config->user) {
38079- if (copy_from_user(buf, dp, left))
38080+ if (left > sizeof buf || copy_from_user(buf, dp, left))
38081 return -EFAULT;
38082 } else {
38083 memcpy(buf, dp, left);
38084diff --git a/drivers/isdn/i4l/isdn_tty.c b/drivers/isdn/i4l/isdn_tty.c
38085index 3c5f249..5fac4d0 100644
38086--- a/drivers/isdn/i4l/isdn_tty.c
38087+++ b/drivers/isdn/i4l/isdn_tty.c
38088@@ -1508,9 +1508,9 @@ isdn_tty_open(struct tty_struct *tty, struct file *filp)
38089
38090 #ifdef ISDN_DEBUG_MODEM_OPEN
38091 printk(KERN_DEBUG "isdn_tty_open %s, count = %d\n", tty->name,
38092- port->count);
38093+ atomic_read(&port->count));
38094 #endif
38095- port->count++;
38096+ atomic_inc(&port->count);
38097 port->tty = tty;
38098 /*
38099 * Start up serial port
38100@@ -1554,7 +1554,7 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
38101 #endif
38102 return;
38103 }
38104- if ((tty->count == 1) && (port->count != 1)) {
38105+ if ((tty->count == 1) && (atomic_read(&port->count) != 1)) {
38106 /*
38107 * Uh, oh. tty->count is 1, which means that the tty
38108 * structure will be freed. Info->count should always
38109@@ -1563,15 +1563,15 @@ isdn_tty_close(struct tty_struct *tty, struct file *filp)
38110 * serial port won't be shutdown.
38111 */
38112 printk(KERN_ERR "isdn_tty_close: bad port count; tty->count is 1, "
38113- "info->count is %d\n", port->count);
38114- port->count = 1;
38115+ "info->count is %d\n", atomic_read(&port->count));
38116+ atomic_set(&port->count, 1);
38117 }
38118- if (--port->count < 0) {
38119+ if (atomic_dec_return(&port->count) < 0) {
38120 printk(KERN_ERR "isdn_tty_close: bad port count for ttyi%d: %d\n",
38121- info->line, port->count);
38122- port->count = 0;
38123+ info->line, atomic_read(&port->count));
38124+ atomic_set(&port->count, 0);
38125 }
38126- if (port->count) {
38127+ if (atomic_read(&port->count)) {
38128 #ifdef ISDN_DEBUG_MODEM_OPEN
38129 printk(KERN_DEBUG "isdn_tty_close after info->count != 0\n");
38130 #endif
38131@@ -1625,7 +1625,7 @@ isdn_tty_hangup(struct tty_struct *tty)
38132 if (isdn_tty_paranoia_check(info, tty->name, "isdn_tty_hangup"))
38133 return;
38134 isdn_tty_shutdown(info);
38135- port->count = 0;
38136+ atomic_set(&port->count, 0);
38137 port->flags &= ~ASYNC_NORMAL_ACTIVE;
38138 port->tty = NULL;
38139 wake_up_interruptible(&port->open_wait);
38140@@ -1970,7 +1970,7 @@ isdn_tty_find_icall(int di, int ch, setup_parm *setup)
38141 for (i = 0; i < ISDN_MAX_CHANNELS; i++) {
38142 modem_info *info = &dev->mdm.info[i];
38143
38144- if (info->port.count == 0)
38145+ if (atomic_read(&info->port.count) == 0)
38146 continue;
38147 if ((info->emu.mdmreg[REG_SI1] & si2bit[si1]) && /* SI1 is matching */
38148 (info->emu.mdmreg[REG_SI2] == si2)) { /* SI2 is matching */
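
The gigaset and isdn_tty hunks are one mechanical conversion: tty_port.count becomes an atomic counter (the struct itself is changed elsewhere in this patch), so the ++/-- and bare reads spread across the open, close and hangup paths stop being racy read-modify-write sequences. The transformation in C11 terms, as a sketch (the kernel spells these atomic_inc, atomic_dec_return and atomic_read):

    /* C11 sketch of the port->count conversion; names invented. */
    #include <stdatomic.h>
    #include <stdbool.h>

    struct port_model { atomic_int count; };

    static void port_open(struct port_model *p)
    {
            atomic_fetch_add(&p->count, 1);          /* was: ++port->count */
    }

    static bool port_last_close(struct port_model *p)
    {
            /* was: if (!--port->count); fetch_sub returns the old value */
            return atomic_fetch_sub(&p->count, 1) == 1;
    }

    static int port_users(struct port_model *p)
    {
            return atomic_load(&p->count);           /* was: port->count */
    }
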
38149diff --git a/drivers/isdn/icn/icn.c b/drivers/isdn/icn/icn.c
38150index e74df7c..03a03ba 100644
38151--- a/drivers/isdn/icn/icn.c
38152+++ b/drivers/isdn/icn/icn.c
38153@@ -1045,7 +1045,7 @@ icn_writecmd(const u_char *buf, int len, int user, icn_card *card)
38154 if (count > len)
38155 count = len;
38156 if (user) {
38157- if (copy_from_user(msg, buf, count))
38158+ if (count > sizeof msg || copy_from_user(msg, buf, count))
38159 return -EFAULT;
38160 } else
38161 memcpy(msg, buf, count);
38162diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
38163index 6a8405d..0bd1c7e 100644
38164--- a/drivers/leds/leds-clevo-mail.c
38165+++ b/drivers/leds/leds-clevo-mail.c
38166@@ -40,7 +40,7 @@ static int __init clevo_mail_led_dmi_callback(const struct dmi_system_id *id)
38167 * detected as working, but in reality it is not) as low as
38168 * possible.
38169 */
38170-static struct dmi_system_id __initdata clevo_mail_led_dmi_table[] = {
38171+static const struct dmi_system_id __initconst clevo_mail_led_dmi_table[] = {
38172 {
38173 .callback = clevo_mail_led_dmi_callback,
38174 .ident = "Clevo D410J",
38175diff --git a/drivers/leds/leds-ss4200.c b/drivers/leds/leds-ss4200.c
38176index 64e204e..c6bf189 100644
38177--- a/drivers/leds/leds-ss4200.c
38178+++ b/drivers/leds/leds-ss4200.c
38179@@ -91,7 +91,7 @@ MODULE_PARM_DESC(nodetect, "Skip DMI-based hardware detection");
38180 * detected as working, but in reality it is not) as low as
38181 * possible.
38182 */
38183-static struct dmi_system_id __initdata nas_led_whitelist[] = {
38184+static const struct dmi_system_id __initconst nas_led_whitelist[] = {
38185 {
38186 .callback = ss4200_led_dmi_callback,
38187 .ident = "Intel SS4200-E",
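
Both LED drivers get the same one-line treatment: a DMI match table is never written after init, so it gains const and moves from __initdata to __initconst, landing in a read-only init section. The shape of the change, with a stand-in macro and struct so the sketch parses outside the kernel tree:

    /* Stand-ins for the kernel's section attribute and struct. */
    #define __initconst /* kernel: placed in .init.rodata */

    struct dmi_system_id_model { const char *ident; };

    static const struct dmi_system_id_model __initconst example_dmi_table[] = {
            { .ident = "Example Board" },
            { 0 }                           /* terminator, as DMI tables require */
    };
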
38188diff --git a/drivers/lguest/core.c b/drivers/lguest/core.c
38189index 0bf1e4e..b4bf44e 100644
38190--- a/drivers/lguest/core.c
38191+++ b/drivers/lguest/core.c
38192@@ -97,9 +97,17 @@ static __init int map_switcher(void)
38193 * The end address needs +1 because __get_vm_area allocates an
38194 * extra guard page, so we need space for that.
38195 */
38196+
38197+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
38198+ switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
38199+ VM_ALLOC | VM_KERNEXEC, switcher_addr, switcher_addr
38200+ + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
38201+#else
38202 switcher_vma = __get_vm_area(TOTAL_SWITCHER_PAGES * PAGE_SIZE,
38203 VM_ALLOC, switcher_addr, switcher_addr
38204 + (TOTAL_SWITCHER_PAGES+1) * PAGE_SIZE);
38205+#endif
38206+
38207 if (!switcher_vma) {
38208 err = -ENOMEM;
38209 printk("lguest: could not map switcher pages high\n");
38210@@ -124,7 +132,7 @@ static __init int map_switcher(void)
38211 * Now the Switcher is mapped at the right address, we can't fail!
38212 * Copy in the compiled-in Switcher code (from x86/switcher_32.S).
38213 */
38214- memcpy(switcher_vma->addr, start_switcher_text,
38215+ memcpy(switcher_vma->addr, ktla_ktva(start_switcher_text),
38216 end_switcher_text - start_switcher_text);
38217
38218 printk(KERN_INFO "lguest: mapped switcher at %p\n",
38219diff --git a/drivers/lguest/page_tables.c b/drivers/lguest/page_tables.c
38220index 5b9ac32..2ef4f26 100644
38221--- a/drivers/lguest/page_tables.c
38222+++ b/drivers/lguest/page_tables.c
38223@@ -559,7 +559,7 @@ void pin_page(struct lg_cpu *cpu, unsigned long vaddr)
38224 /*:*/
38225
38226 #ifdef CONFIG_X86_PAE
38227-static void release_pmd(pmd_t *spmd)
38228+static void __intentional_overflow(-1) release_pmd(pmd_t *spmd)
38229 {
38230 /* If the entry's not present, there's nothing to release. */
38231 if (pmd_flags(*spmd) & _PAGE_PRESENT) {
38232diff --git a/drivers/lguest/x86/core.c b/drivers/lguest/x86/core.c
38233index f0a3347..f6608b2 100644
38234--- a/drivers/lguest/x86/core.c
38235+++ b/drivers/lguest/x86/core.c
38236@@ -59,7 +59,7 @@ static struct {
38237 /* Offset from where switcher.S was compiled to where we've copied it */
38238 static unsigned long switcher_offset(void)
38239 {
38240- return switcher_addr - (unsigned long)start_switcher_text;
38241+ return switcher_addr - (unsigned long)ktla_ktva(start_switcher_text);
38242 }
38243
38244 /* This cpu's struct lguest_pages (after the Switcher text page) */
38245@@ -99,7 +99,13 @@ static void copy_in_guest_info(struct lg_cpu *cpu, struct lguest_pages *pages)
38246 * These copies are pretty cheap, so we do them unconditionally: */
38247 /* Save the current Host top-level page directory.
38248 */
38249+
38250+#ifdef CONFIG_PAX_PER_CPU_PGD
38251+ pages->state.host_cr3 = read_cr3();
38252+#else
38253 pages->state.host_cr3 = __pa(current->mm->pgd);
38254+#endif
38255+
38256 /*
38257 * Set up the Guest's page tables to see this CPU's pages (and no
38258 * other CPU's pages).
38259@@ -475,7 +481,7 @@ void __init lguest_arch_host_init(void)
38260 * compiled-in switcher code and the high-mapped copy we just made.
38261 */
38262 for (i = 0; i < IDT_ENTRIES; i++)
38263- default_idt_entries[i] += switcher_offset();
38264+ default_idt_entries[i] = ktla_ktva(default_idt_entries[i]) + switcher_offset();
38265
38266 /*
38267 * Set up the Switcher's per-cpu areas.
38268@@ -558,7 +564,7 @@ void __init lguest_arch_host_init(void)
38269 * it will be undisturbed when we switch. To change %cs and jump we
38270 * need this structure to feed to Intel's "lcall" instruction.
38271 */
38272- lguest_entry.offset = (long)switch_to_guest + switcher_offset();
38273+ lguest_entry.offset = (long)ktla_ktva(switch_to_guest) + switcher_offset();
38274 lguest_entry.segment = LGUEST_CS;
38275
38276 /*
38277diff --git a/drivers/lguest/x86/switcher_32.S b/drivers/lguest/x86/switcher_32.S
38278index 40634b0..4f5855e 100644
38279--- a/drivers/lguest/x86/switcher_32.S
38280+++ b/drivers/lguest/x86/switcher_32.S
38281@@ -87,6 +87,7 @@
38282 #include <asm/page.h>
38283 #include <asm/segment.h>
38284 #include <asm/lguest.h>
38285+#include <asm/processor-flags.h>
38286
38287 // We mark the start of the code to copy
38288 // It's placed in .text tho it's never run here
38289@@ -149,6 +150,13 @@ ENTRY(switch_to_guest)
38290 // Changes type when we load it: damn Intel!
38291 // For after we switch over our page tables
38292 // That entry will be read-only: we'd crash.
38293+
38294+#ifdef CONFIG_PAX_KERNEXEC
38295+ mov %cr0, %edx
38296+ xor $X86_CR0_WP, %edx
38297+ mov %edx, %cr0
38298+#endif
38299+
38300 movl $(GDT_ENTRY_TSS*8), %edx
38301 ltr %dx
38302
38303@@ -157,9 +165,15 @@ ENTRY(switch_to_guest)
38304 // Let's clear it again for our return.
38305 // The GDT descriptor of the Host
38306 // Points to the table after two "size" bytes
38307- movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %edx
38308+ movl (LGUEST_PAGES_host_gdt_desc+2)(%eax), %eax
38309 // Clear "used" from type field (byte 5, bit 2)
38310- andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%edx)
38311+ andb $0xFD, (GDT_ENTRY_TSS*8 + 5)(%eax)
38312+
38313+#ifdef CONFIG_PAX_KERNEXEC
38314+ mov %cr0, %eax
38315+ xor $X86_CR0_WP, %eax
38316+ mov %eax, %cr0
38317+#endif
38318
38319 // Once our page table's switched, the Guest is live!
38320 // The Host fades as we run this final step.
38321@@ -295,13 +309,12 @@ deliver_to_host:
38322 // I consulted gcc, and it gave
38323 // These instructions, which I gladly credit:
38324 leal (%edx,%ebx,8), %eax
38325- movzwl (%eax),%edx
38326- movl 4(%eax), %eax
38327- xorw %ax, %ax
38328- orl %eax, %edx
38329+ movl 4(%eax), %edx
38330+ movw (%eax), %dx
38331 // Now the address of the handler's in %edx
38332 // We call it now: its "iret" drops us home.
38333- jmp *%edx
38334+ ljmp $__KERNEL_CS, $1f
38335+1: jmp *%edx
38336
38337 // Every interrupt can come to us here
38338 // But we must truly tell each apart.
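
The lguest edits are all driven by PaX KERNEXEC on 32-bit x86: the kernel's text is remapped so its runtime virtual addresses differ from the linked addresses, hence ktla_ktva() rebiases symbols such as start_switcher_text and switch_to_guest before they are copied or offset, and the assembly briefly clears CR0.WP around writes (TSS busy bit, GDT) that now target read-only memory. Conceptually the macro pair is just a constant rebase; a model with an invented offset variable (the real PaX macros are per-architecture):

    /* Conceptual model of ktla_ktva()/ktva_ktla(): translate between
     * kernel-text linked addresses and runtime virtual addresses. */
    #include <stdint.h>

    static uintptr_t text_rebase;   /* runtime base minus linked base */

    #define ktla_ktva(addr) ((void *)((uintptr_t)(addr) + text_rebase))
    #define ktva_ktla(addr) ((void *)((uintptr_t)(addr) - text_rebase))

    /* e.g. memcpy(dst, ktla_ktva(start_switcher_text), len); */
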
38339diff --git a/drivers/md/bcache/closure.h b/drivers/md/bcache/closure.h
38340index 0003992..854bbce 100644
38341--- a/drivers/md/bcache/closure.h
38342+++ b/drivers/md/bcache/closure.h
38343@@ -622,7 +622,7 @@ static inline void closure_wake_up(struct closure_waitlist *list)
38344 static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
38345 struct workqueue_struct *wq)
38346 {
38347- BUG_ON(object_is_on_stack(cl));
38348+ BUG_ON(object_starts_on_stack(cl));
38349 closure_set_ip(cl);
38350 cl->fn = fn;
38351 cl->wq = wq;
38352diff --git a/drivers/md/bitmap.c b/drivers/md/bitmap.c
38353index 5a2c754..0fa55db 100644
38354--- a/drivers/md/bitmap.c
38355+++ b/drivers/md/bitmap.c
38356@@ -1779,7 +1779,7 @@ void bitmap_status(struct seq_file *seq, struct bitmap *bitmap)
38357 chunk_kb ? "KB" : "B");
38358 if (bitmap->storage.file) {
38359 seq_printf(seq, ", file: ");
38360- seq_path(seq, &bitmap->storage.file->f_path, " \t\n");
38361+ seq_path(seq, &bitmap->storage.file->f_path, " \t\n\\");
38362 }
38363
38364 seq_printf(seq, "\n");
38365diff --git a/drivers/md/dm-ioctl.c b/drivers/md/dm-ioctl.c
38366index aa04f02..2a1309e 100644
38367--- a/drivers/md/dm-ioctl.c
38368+++ b/drivers/md/dm-ioctl.c
38369@@ -1694,7 +1694,7 @@ static int validate_params(uint cmd, struct dm_ioctl *param)
38370 cmd == DM_LIST_VERSIONS_CMD)
38371 return 0;
38372
38373- if ((cmd == DM_DEV_CREATE_CMD)) {
38374+ if (cmd == DM_DEV_CREATE_CMD) {
38375 if (!*param->name) {
38376 DMWARN("name not supplied when creating device");
38377 return -EINVAL;
38378diff --git a/drivers/md/dm-raid1.c b/drivers/md/dm-raid1.c
38379index 699b5be..eac0a15 100644
38380--- a/drivers/md/dm-raid1.c
38381+++ b/drivers/md/dm-raid1.c
38382@@ -40,7 +40,7 @@ enum dm_raid1_error {
38383
38384 struct mirror {
38385 struct mirror_set *ms;
38386- atomic_t error_count;
38387+ atomic_unchecked_t error_count;
38388 unsigned long error_type;
38389 struct dm_dev *dev;
38390 sector_t offset;
38391@@ -186,7 +186,7 @@ static struct mirror *get_valid_mirror(struct mirror_set *ms)
38392 struct mirror *m;
38393
38394 for (m = ms->mirror; m < ms->mirror + ms->nr_mirrors; m++)
38395- if (!atomic_read(&m->error_count))
38396+ if (!atomic_read_unchecked(&m->error_count))
38397 return m;
38398
38399 return NULL;
38400@@ -218,7 +218,7 @@ static void fail_mirror(struct mirror *m, enum dm_raid1_error error_type)
38401 * simple way to tell if a device has encountered
38402 * errors.
38403 */
38404- atomic_inc(&m->error_count);
38405+ atomic_inc_unchecked(&m->error_count);
38406
38407 if (test_and_set_bit(error_type, &m->error_type))
38408 return;
38409@@ -409,7 +409,7 @@ static struct mirror *choose_mirror(struct mirror_set *ms, sector_t sector)
38410 struct mirror *m = get_default_mirror(ms);
38411
38412 do {
38413- if (likely(!atomic_read(&m->error_count)))
38414+ if (likely(!atomic_read_unchecked(&m->error_count)))
38415 return m;
38416
38417 if (m-- == ms->mirror)
38418@@ -423,7 +423,7 @@ static int default_ok(struct mirror *m)
38419 {
38420 struct mirror *default_mirror = get_default_mirror(m->ms);
38421
38422- return !atomic_read(&default_mirror->error_count);
38423+ return !atomic_read_unchecked(&default_mirror->error_count);
38424 }
38425
38426 static int mirror_available(struct mirror_set *ms, struct bio *bio)
38427@@ -560,7 +560,7 @@ static void do_reads(struct mirror_set *ms, struct bio_list *reads)
38428 */
38429 if (likely(region_in_sync(ms, region, 1)))
38430 m = choose_mirror(ms, bio->bi_sector);
38431- else if (m && atomic_read(&m->error_count))
38432+ else if (m && atomic_read_unchecked(&m->error_count))
38433 m = NULL;
38434
38435 if (likely(m))
38436@@ -927,7 +927,7 @@ static int get_mirror(struct mirror_set *ms, struct dm_target *ti,
38437 }
38438
38439 ms->mirror[mirror].ms = ms;
38440- atomic_set(&(ms->mirror[mirror].error_count), 0);
38441+ atomic_set_unchecked(&(ms->mirror[mirror].error_count), 0);
38442 ms->mirror[mirror].error_type = 0;
38443 ms->mirror[mirror].offset = offset;
38444
38445@@ -1340,7 +1340,7 @@ static void mirror_resume(struct dm_target *ti)
38446 */
38447 static char device_status_char(struct mirror *m)
38448 {
38449- if (!atomic_read(&(m->error_count)))
38450+ if (!atomic_read_unchecked(&(m->error_count)))
38451 return 'A';
38452
38453 return (test_bit(DM_RAID1_FLUSH_ERROR, &(m->error_type))) ? 'F' :
38454diff --git a/drivers/md/dm-stripe.c b/drivers/md/dm-stripe.c
38455index d907ca6..cfb8384 100644
38456--- a/drivers/md/dm-stripe.c
38457+++ b/drivers/md/dm-stripe.c
38458@@ -20,7 +20,7 @@ struct stripe {
38459 struct dm_dev *dev;
38460 sector_t physical_start;
38461
38462- atomic_t error_count;
38463+ atomic_unchecked_t error_count;
38464 };
38465
38466 struct stripe_c {
38467@@ -185,7 +185,7 @@ static int stripe_ctr(struct dm_target *ti, unsigned int argc, char **argv)
38468 kfree(sc);
38469 return r;
38470 }
38471- atomic_set(&(sc->stripe[i].error_count), 0);
38472+ atomic_set_unchecked(&(sc->stripe[i].error_count), 0);
38473 }
38474
38475 ti->private = sc;
38476@@ -326,7 +326,7 @@ static void stripe_status(struct dm_target *ti, status_type_t type,
38477 DMEMIT("%d ", sc->stripes);
38478 for (i = 0; i < sc->stripes; i++) {
38479 DMEMIT("%s ", sc->stripe[i].dev->name);
38480- buffer[i] = atomic_read(&(sc->stripe[i].error_count)) ?
38481+ buffer[i] = atomic_read_unchecked(&(sc->stripe[i].error_count)) ?
38482 'D' : 'A';
38483 }
38484 buffer[i] = '\0';
38485@@ -371,8 +371,8 @@ static int stripe_end_io(struct dm_target *ti, struct bio *bio, int error)
38486 */
38487 for (i = 0; i < sc->stripes; i++)
38488 if (!strcmp(sc->stripe[i].dev->name, major_minor)) {
38489- atomic_inc(&(sc->stripe[i].error_count));
38490- if (atomic_read(&(sc->stripe[i].error_count)) <
38491+ atomic_inc_unchecked(&(sc->stripe[i].error_count));
38492+ if (atomic_read_unchecked(&(sc->stripe[i].error_count)) <
38493 DM_IO_ERROR_THRESHOLD)
38494 schedule_work(&sc->trigger_event);
38495 }
38496diff --git a/drivers/md/dm-table.c b/drivers/md/dm-table.c
38497index 1ff252a..ee384c1 100644
38498--- a/drivers/md/dm-table.c
38499+++ b/drivers/md/dm-table.c
38500@@ -389,7 +389,7 @@ static int device_area_is_invalid(struct dm_target *ti, struct dm_dev *dev,
38501 if (!dev_size)
38502 return 0;
38503
38504- if ((start >= dev_size) || (start + len > dev_size)) {
38505+ if ((start >= dev_size) || (len > dev_size - start)) {
38506 DMWARN("%s: %s too small for target: "
38507 "start=%llu, len=%llu, dev_size=%llu",
38508 dm_device_name(ti->table->md), bdevname(bdev, b),
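
The dm-table hunk is a textbook integer-overflow fix rather than a PaX conversion: with start + len > dev_size, a hostile len can wrap sector_t and make the sum compare small, whereas len > dev_size - start cannot wrap once start < dev_size has been established by the first clause. A runnable demonstration:

    /* Why the bounds check is rewritten; compiles and runs standalone. */
    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t sector_t;

    int main(void)
    {
            sector_t dev_size = 1000, start = 10;
            sector_t len = UINT64_MAX - 5;          /* hostile length */

            /* old form: start + len wraps around to 4 and "fits" */
            printf("old: %s\n", (start >= dev_size || start + len > dev_size)
                                ? "rejected" : "ACCEPTED (bug)");

            /* new form: no wraparound, correctly rejected */
            printf("new: %s\n", (start >= dev_size || len > dev_size - start)
                                ? "rejected" : "accepted");
            return 0;
    }
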
38509diff --git a/drivers/md/dm-thin-metadata.c b/drivers/md/dm-thin-metadata.c
38510index 60bce43..9b997d0 100644
38511--- a/drivers/md/dm-thin-metadata.c
38512+++ b/drivers/md/dm-thin-metadata.c
38513@@ -397,7 +397,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
38514 {
38515 pmd->info.tm = pmd->tm;
38516 pmd->info.levels = 2;
38517- pmd->info.value_type.context = pmd->data_sm;
38518+ pmd->info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
38519 pmd->info.value_type.size = sizeof(__le64);
38520 pmd->info.value_type.inc = data_block_inc;
38521 pmd->info.value_type.dec = data_block_dec;
38522@@ -416,7 +416,7 @@ static void __setup_btree_details(struct dm_pool_metadata *pmd)
38523
38524 pmd->bl_info.tm = pmd->tm;
38525 pmd->bl_info.levels = 1;
38526- pmd->bl_info.value_type.context = pmd->data_sm;
38527+ pmd->bl_info.value_type.context = (dm_space_map_no_const *)pmd->data_sm;
38528 pmd->bl_info.value_type.size = sizeof(__le64);
38529 pmd->bl_info.value_type.inc = data_block_inc;
38530 pmd->bl_info.value_type.dec = data_block_dec;
38531diff --git a/drivers/md/dm.c b/drivers/md/dm.c
38532index d5370a9..8761bbc 100644
38533--- a/drivers/md/dm.c
38534+++ b/drivers/md/dm.c
38535@@ -169,9 +169,9 @@ struct mapped_device {
38536 /*
38537 * Event handling.
38538 */
38539- atomic_t event_nr;
38540+ atomic_unchecked_t event_nr;
38541 wait_queue_head_t eventq;
38542- atomic_t uevent_seq;
38543+ atomic_unchecked_t uevent_seq;
38544 struct list_head uevent_list;
38545 spinlock_t uevent_lock; /* Protect access to uevent_list */
38546
38547@@ -1877,8 +1877,8 @@ static struct mapped_device *alloc_dev(int minor)
38548 rwlock_init(&md->map_lock);
38549 atomic_set(&md->holders, 1);
38550 atomic_set(&md->open_count, 0);
38551- atomic_set(&md->event_nr, 0);
38552- atomic_set(&md->uevent_seq, 0);
38553+ atomic_set_unchecked(&md->event_nr, 0);
38554+ atomic_set_unchecked(&md->uevent_seq, 0);
38555 INIT_LIST_HEAD(&md->uevent_list);
38556 spin_lock_init(&md->uevent_lock);
38557
38558@@ -2026,7 +2026,7 @@ static void event_callback(void *context)
38559
38560 dm_send_uevents(&uevents, &disk_to_dev(md->disk)->kobj);
38561
38562- atomic_inc(&md->event_nr);
38563+ atomic_inc_unchecked(&md->event_nr);
38564 wake_up(&md->eventq);
38565 }
38566
38567@@ -2683,18 +2683,18 @@ int dm_kobject_uevent(struct mapped_device *md, enum kobject_action action,
38568
38569 uint32_t dm_next_uevent_seq(struct mapped_device *md)
38570 {
38571- return atomic_add_return(1, &md->uevent_seq);
38572+ return atomic_add_return_unchecked(1, &md->uevent_seq);
38573 }
38574
38575 uint32_t dm_get_event_nr(struct mapped_device *md)
38576 {
38577- return atomic_read(&md->event_nr);
38578+ return atomic_read_unchecked(&md->event_nr);
38579 }
38580
38581 int dm_wait_event(struct mapped_device *md, int event_nr)
38582 {
38583 return wait_event_interruptible(md->eventq,
38584- (event_nr != atomic_read(&md->event_nr)));
38585+ (event_nr != atomic_read_unchecked(&md->event_nr)));
38586 }
38587
38588 void dm_uevent_add(struct mapped_device *md, struct list_head *elist)
38589diff --git a/drivers/md/md.c b/drivers/md/md.c
38590index 9b82377..6b6922d 100644
38591--- a/drivers/md/md.c
38592+++ b/drivers/md/md.c
38593@@ -234,10 +234,10 @@ EXPORT_SYMBOL_GPL(md_trim_bio);
38594 * start build, activate spare
38595 */
38596 static DECLARE_WAIT_QUEUE_HEAD(md_event_waiters);
38597-static atomic_t md_event_count;
38598+static atomic_unchecked_t md_event_count;
38599 void md_new_event(struct mddev *mddev)
38600 {
38601- atomic_inc(&md_event_count);
38602+ atomic_inc_unchecked(&md_event_count);
38603 wake_up(&md_event_waiters);
38604 }
38605 EXPORT_SYMBOL_GPL(md_new_event);
38606@@ -247,7 +247,7 @@ EXPORT_SYMBOL_GPL(md_new_event);
38607 */
38608 static void md_new_event_inintr(struct mddev *mddev)
38609 {
38610- atomic_inc(&md_event_count);
38611+ atomic_inc_unchecked(&md_event_count);
38612 wake_up(&md_event_waiters);
38613 }
38614
38615@@ -1501,7 +1501,7 @@ static int super_1_load(struct md_rdev *rdev, struct md_rdev *refdev, int minor_
38616 if ((le32_to_cpu(sb->feature_map) & MD_FEATURE_RESHAPE_ACTIVE) &&
38617 (le32_to_cpu(sb->feature_map) & MD_FEATURE_NEW_OFFSET))
38618 rdev->new_data_offset += (s32)le32_to_cpu(sb->new_offset);
38619- atomic_set(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
38620+ atomic_set_unchecked(&rdev->corrected_errors, le32_to_cpu(sb->cnt_corrected_read));
38621
38622 rdev->sb_size = le32_to_cpu(sb->max_dev) * 2 + 256;
38623 bmask = queue_logical_block_size(rdev->bdev->bd_disk->queue)-1;
38624@@ -1745,7 +1745,7 @@ static void super_1_sync(struct mddev *mddev, struct md_rdev *rdev)
38625 else
38626 sb->resync_offset = cpu_to_le64(0);
38627
38628- sb->cnt_corrected_read = cpu_to_le32(atomic_read(&rdev->corrected_errors));
38629+ sb->cnt_corrected_read = cpu_to_le32(atomic_read_unchecked(&rdev->corrected_errors));
38630
38631 sb->raid_disks = cpu_to_le32(mddev->raid_disks);
38632 sb->size = cpu_to_le64(mddev->dev_sectors);
38633@@ -2750,7 +2750,7 @@ __ATTR(state, S_IRUGO|S_IWUSR, state_show, state_store);
38634 static ssize_t
38635 errors_show(struct md_rdev *rdev, char *page)
38636 {
38637- return sprintf(page, "%d\n", atomic_read(&rdev->corrected_errors));
38638+ return sprintf(page, "%d\n", atomic_read_unchecked(&rdev->corrected_errors));
38639 }
38640
38641 static ssize_t
38642@@ -2759,7 +2759,7 @@ errors_store(struct md_rdev *rdev, const char *buf, size_t len)
38643 char *e;
38644 unsigned long n = simple_strtoul(buf, &e, 10);
38645 if (*buf && (*e == 0 || *e == '\n')) {
38646- atomic_set(&rdev->corrected_errors, n);
38647+ atomic_set_unchecked(&rdev->corrected_errors, n);
38648 return len;
38649 }
38650 return -EINVAL;
38651@@ -3207,8 +3207,8 @@ int md_rdev_init(struct md_rdev *rdev)
38652 rdev->sb_loaded = 0;
38653 rdev->bb_page = NULL;
38654 atomic_set(&rdev->nr_pending, 0);
38655- atomic_set(&rdev->read_errors, 0);
38656- atomic_set(&rdev->corrected_errors, 0);
38657+ atomic_set_unchecked(&rdev->read_errors, 0);
38658+ atomic_set_unchecked(&rdev->corrected_errors, 0);
38659
38660 INIT_LIST_HEAD(&rdev->same_set);
38661 init_waitqueue_head(&rdev->blocked_wait);
38662@@ -7009,7 +7009,7 @@ static int md_seq_show(struct seq_file *seq, void *v)
38663
38664 spin_unlock(&pers_lock);
38665 seq_printf(seq, "\n");
38666- seq->poll_event = atomic_read(&md_event_count);
38667+ seq->poll_event = atomic_read_unchecked(&md_event_count);
38668 return 0;
38669 }
38670 if (v == (void*)2) {
38671@@ -7112,7 +7112,7 @@ static int md_seq_open(struct inode *inode, struct file *file)
38672 return error;
38673
38674 seq = file->private_data;
38675- seq->poll_event = atomic_read(&md_event_count);
38676+ seq->poll_event = atomic_read_unchecked(&md_event_count);
38677 return error;
38678 }
38679
38680@@ -7126,7 +7126,7 @@ static unsigned int mdstat_poll(struct file *filp, poll_table *wait)
38681 /* always allow read */
38682 mask = POLLIN | POLLRDNORM;
38683
38684- if (seq->poll_event != atomic_read(&md_event_count))
38685+ if (seq->poll_event != atomic_read_unchecked(&md_event_count))
38686 mask |= POLLERR | POLLPRI;
38687 return mask;
38688 }
38689@@ -7170,7 +7170,7 @@ static int is_mddev_idle(struct mddev *mddev, int init)
38690 struct gendisk *disk = rdev->bdev->bd_contains->bd_disk;
38691 curr_events = (int)part_stat_read(&disk->part0, sectors[0]) +
38692 (int)part_stat_read(&disk->part0, sectors[1]) -
38693- atomic_read(&disk->sync_io);
38694+ atomic_read_unchecked(&disk->sync_io);
38695 /* sync IO will cause sync_io to increase before the disk_stats
38696 * as sync_io is counted when a request starts, and
38697 * disk_stats is counted when it completes.
38698diff --git a/drivers/md/md.h b/drivers/md/md.h
38699index 653f992b6..6af6c40 100644
38700--- a/drivers/md/md.h
38701+++ b/drivers/md/md.h
38702@@ -94,13 +94,13 @@ struct md_rdev {
38703 * only maintained for arrays that
38704 * support hot removal
38705 */
38706- atomic_t read_errors; /* number of consecutive read errors that
38707+ atomic_unchecked_t read_errors; /* number of consecutive read errors that
38708 * we have tried to ignore.
38709 */
38710 struct timespec last_read_error; /* monotonic time since our
38711 * last read error
38712 */
38713- atomic_t corrected_errors; /* number of corrected read errors,
38714+ atomic_unchecked_t corrected_errors; /* number of corrected read errors,
38715 * for reporting to userspace and storing
38716 * in superblock.
38717 */
38718@@ -434,7 +434,7 @@ static inline void rdev_dec_pending(struct md_rdev *rdev, struct mddev *mddev)
38719
38720 static inline void md_sync_acct(struct block_device *bdev, unsigned long nr_sectors)
38721 {
38722- atomic_add(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
38723+ atomic_add_unchecked(nr_sectors, &bdev->bd_contains->bd_disk->sync_io);
38724 }
38725
38726 struct md_personality
38727diff --git a/drivers/md/persistent-data/dm-space-map.h b/drivers/md/persistent-data/dm-space-map.h
38728index 3e6d115..ffecdeb 100644
38729--- a/drivers/md/persistent-data/dm-space-map.h
38730+++ b/drivers/md/persistent-data/dm-space-map.h
38731@@ -71,6 +71,7 @@ struct dm_space_map {
38732 dm_sm_threshold_fn fn,
38733 void *context);
38734 };
38735+typedef struct dm_space_map __no_const dm_space_map_no_const;
38736
38737 /*----------------------------------------------------------------*/
38738
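
This typedef supports the constify GCC plugin shipped with the patch: structures made up purely of function pointers, dm_space_map among them, are force-qualified const, and the __no_const typedef gives the few fields that must still hold such a pointer through a writable lvalue (the value_type.context assignments in dm-thin-metadata.c above) an escape hatch. The pattern, with a stand-in attribute macro and invented names:

    /* Stand-in for the plugin-provided attribute; illustrative only. */
    #define __no_const /* kernel: escapes the constify plugin */

    struct space_map_model {
            int (*get_count)(void *sm);     /* all-function-pointer struct:
                                             * constified by the plugin    */
    };
    typedef struct space_map_model __no_const space_map_no_const;

    struct value_type_model {
            space_map_no_const *context;    /* field stays assignable */
    };
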
38739diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c
38740index 6e17f81..140f717 100644
38741--- a/drivers/md/raid1.c
38742+++ b/drivers/md/raid1.c
38743@@ -1822,7 +1822,7 @@ static int fix_sync_read_error(struct r1bio *r1_bio)
38744 if (r1_sync_page_io(rdev, sect, s,
38745 bio->bi_io_vec[idx].bv_page,
38746 READ) != 0)
38747- atomic_add(s, &rdev->corrected_errors);
38748+ atomic_add_unchecked(s, &rdev->corrected_errors);
38749 }
38750 sectors -= s;
38751 sect += s;
38752@@ -2042,7 +2042,7 @@ static void fix_read_error(struct r1conf *conf, int read_disk,
38753 test_bit(In_sync, &rdev->flags)) {
38754 if (r1_sync_page_io(rdev, sect, s,
38755 conf->tmppage, READ)) {
38756- atomic_add(s, &rdev->corrected_errors);
38757+ atomic_add_unchecked(s, &rdev->corrected_errors);
38758 printk(KERN_INFO
38759 "md/raid1:%s: read error corrected "
38760 "(%d sectors at %llu on %s)\n",
38761diff --git a/drivers/md/raid10.c b/drivers/md/raid10.c
38762index d61eb7e..adfd00a 100644
38763--- a/drivers/md/raid10.c
38764+++ b/drivers/md/raid10.c
38765@@ -1940,7 +1940,7 @@ static void end_sync_read(struct bio *bio, int error)
38766 /* The write handler will notice the lack of
38767 * R10BIO_Uptodate and record any errors etc
38768 */
38769- atomic_add(r10_bio->sectors,
38770+ atomic_add_unchecked(r10_bio->sectors,
38771 &conf->mirrors[d].rdev->corrected_errors);
38772
38773 /* for reconstruct, we always reschedule after a read.
38774@@ -2292,7 +2292,7 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
38775 {
38776 struct timespec cur_time_mon;
38777 unsigned long hours_since_last;
38778- unsigned int read_errors = atomic_read(&rdev->read_errors);
38779+ unsigned int read_errors = atomic_read_unchecked(&rdev->read_errors);
38780
38781 ktime_get_ts(&cur_time_mon);
38782
38783@@ -2314,9 +2314,9 @@ static void check_decay_read_errors(struct mddev *mddev, struct md_rdev *rdev)
38784 * overflowing the shift of read_errors by hours_since_last.
38785 */
38786 if (hours_since_last >= 8 * sizeof(read_errors))
38787- atomic_set(&rdev->read_errors, 0);
38788+ atomic_set_unchecked(&rdev->read_errors, 0);
38789 else
38790- atomic_set(&rdev->read_errors, read_errors >> hours_since_last);
38791+ atomic_set_unchecked(&rdev->read_errors, read_errors >> hours_since_last);
38792 }
38793
38794 static int r10_sync_page_io(struct md_rdev *rdev, sector_t sector,
38795@@ -2370,8 +2370,8 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
38796 return;
38797
38798 check_decay_read_errors(mddev, rdev);
38799- atomic_inc(&rdev->read_errors);
38800- if (atomic_read(&rdev->read_errors) > max_read_errors) {
38801+ atomic_inc_unchecked(&rdev->read_errors);
38802+ if (atomic_read_unchecked(&rdev->read_errors) > max_read_errors) {
38803 char b[BDEVNAME_SIZE];
38804 bdevname(rdev->bdev, b);
38805
38806@@ -2379,7 +2379,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
38807 "md/raid10:%s: %s: Raid device exceeded "
38808 "read_error threshold [cur %d:max %d]\n",
38809 mdname(mddev), b,
38810- atomic_read(&rdev->read_errors), max_read_errors);
38811+ atomic_read_unchecked(&rdev->read_errors), max_read_errors);
38812 printk(KERN_NOTICE
38813 "md/raid10:%s: %s: Failing raid device\n",
38814 mdname(mddev), b);
38815@@ -2534,7 +2534,7 @@ static void fix_read_error(struct r10conf *conf, struct mddev *mddev, struct r10
38816 sect +
38817 choose_data_offset(r10_bio, rdev)),
38818 bdevname(rdev->bdev, b));
38819- atomic_add(s, &rdev->corrected_errors);
38820+ atomic_add_unchecked(s, &rdev->corrected_errors);
38821 }
38822
38823 rdev_dec_pending(rdev, mddev);
38824diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
38825index 05e4a10..48fbe37 100644
38826--- a/drivers/md/raid5.c
38827+++ b/drivers/md/raid5.c
38828@@ -1764,21 +1764,21 @@ static void raid5_end_read_request(struct bio * bi, int error)
38829 mdname(conf->mddev), STRIPE_SECTORS,
38830 (unsigned long long)s,
38831 bdevname(rdev->bdev, b));
38832- atomic_add(STRIPE_SECTORS, &rdev->corrected_errors);
38833+ atomic_add_unchecked(STRIPE_SECTORS, &rdev->corrected_errors);
38834 clear_bit(R5_ReadError, &sh->dev[i].flags);
38835 clear_bit(R5_ReWrite, &sh->dev[i].flags);
38836 } else if (test_bit(R5_ReadNoMerge, &sh->dev[i].flags))
38837 clear_bit(R5_ReadNoMerge, &sh->dev[i].flags);
38838
38839- if (atomic_read(&rdev->read_errors))
38840- atomic_set(&rdev->read_errors, 0);
38841+ if (atomic_read_unchecked(&rdev->read_errors))
38842+ atomic_set_unchecked(&rdev->read_errors, 0);
38843 } else {
38844 const char *bdn = bdevname(rdev->bdev, b);
38845 int retry = 0;
38846 int set_bad = 0;
38847
38848 clear_bit(R5_UPTODATE, &sh->dev[i].flags);
38849- atomic_inc(&rdev->read_errors);
38850+ atomic_inc_unchecked(&rdev->read_errors);
38851 if (test_bit(R5_ReadRepl, &sh->dev[i].flags))
38852 printk_ratelimited(
38853 KERN_WARNING
38854@@ -1806,7 +1806,7 @@ static void raid5_end_read_request(struct bio * bi, int error)
38855 mdname(conf->mddev),
38856 (unsigned long long)s,
38857 bdn);
38858- } else if (atomic_read(&rdev->read_errors)
38859+ } else if (atomic_read_unchecked(&rdev->read_errors)
38860 > conf->max_nr_stripes)
38861 printk(KERN_WARNING
38862 "md/raid:%s: Too many read errors, failing device %s.\n",
38863diff --git a/drivers/media/dvb-core/dvbdev.c b/drivers/media/dvb-core/dvbdev.c
38864index 401ef64..836e563 100644
38865--- a/drivers/media/dvb-core/dvbdev.c
38866+++ b/drivers/media/dvb-core/dvbdev.c
38867@@ -192,7 +192,7 @@ int dvb_register_device(struct dvb_adapter *adap, struct dvb_device **pdvbdev,
38868 const struct dvb_device *template, void *priv, int type)
38869 {
38870 struct dvb_device *dvbdev;
38871- struct file_operations *dvbdevfops;
38872+ file_operations_no_const *dvbdevfops;
38873 struct device *clsdev;
38874 int minor;
38875 int id;
38876diff --git a/drivers/media/dvb-frontends/dib3000.h b/drivers/media/dvb-frontends/dib3000.h
38877index 9b6c3bb..baeb5c7 100644
38878--- a/drivers/media/dvb-frontends/dib3000.h
38879+++ b/drivers/media/dvb-frontends/dib3000.h
38880@@ -39,7 +39,7 @@ struct dib_fe_xfer_ops
38881 int (*fifo_ctrl)(struct dvb_frontend *fe, int onoff);
38882 int (*pid_ctrl)(struct dvb_frontend *fe, int index, int pid, int onoff);
38883 int (*tuner_pass_ctrl)(struct dvb_frontend *fe, int onoff, u8 pll_ctrl);
38884-};
38885+} __no_const;
38886
38887 #if IS_ENABLED(CONFIG_DVB_DIB3000MB)
38888 extern struct dvb_frontend* dib3000mb_attach(const struct dib3000_config* config,
38889diff --git a/drivers/media/pci/cx88/cx88-video.c b/drivers/media/pci/cx88/cx88-video.c
38890index c7a9be1..683f6f8 100644
38891--- a/drivers/media/pci/cx88/cx88-video.c
38892+++ b/drivers/media/pci/cx88/cx88-video.c
38893@@ -50,9 +50,9 @@ MODULE_VERSION(CX88_VERSION);
38894
38895 /* ------------------------------------------------------------------ */
38896
38897-static unsigned int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
38898-static unsigned int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
38899-static unsigned int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
38900+static int video_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
38901+static int vbi_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
38902+static int radio_nr[] = {[0 ... (CX88_MAXBOARDS - 1)] = UNSET };
38903
38904 module_param_array(video_nr, int, NULL, 0444);
38905 module_param_array(vbi_nr, int, NULL, 0444);
38906diff --git a/drivers/media/platform/omap/omap_vout.c b/drivers/media/platform/omap/omap_vout.c
38907index d338b19..aae4f9e 100644
38908--- a/drivers/media/platform/omap/omap_vout.c
38909+++ b/drivers/media/platform/omap/omap_vout.c
38910@@ -63,7 +63,6 @@ enum omap_vout_channels {
38911 OMAP_VIDEO2,
38912 };
38913
38914-static struct videobuf_queue_ops video_vbq_ops;
38915 /* Variables configurable through module params*/
38916 static u32 video1_numbuffers = 3;
38917 static u32 video2_numbuffers = 3;
38918@@ -1015,6 +1014,12 @@ static int omap_vout_open(struct file *file)
38919 {
38920 struct videobuf_queue *q;
38921 struct omap_vout_device *vout = NULL;
38922+ static struct videobuf_queue_ops video_vbq_ops = {
38923+ .buf_setup = omap_vout_buffer_setup,
38924+ .buf_prepare = omap_vout_buffer_prepare,
38925+ .buf_release = omap_vout_buffer_release,
38926+ .buf_queue = omap_vout_buffer_queue,
38927+ };
38928
38929 vout = video_drvdata(file);
38930 v4l2_dbg(1, debug, &vout->vid_dev->v4l2_dev, "Entering %s\n", __func__);
38931@@ -1032,10 +1037,6 @@ static int omap_vout_open(struct file *file)
38932 vout->type = V4L2_BUF_TYPE_VIDEO_OUTPUT;
38933
38934 q = &vout->vbq;
38935- video_vbq_ops.buf_setup = omap_vout_buffer_setup;
38936- video_vbq_ops.buf_prepare = omap_vout_buffer_prepare;
38937- video_vbq_ops.buf_release = omap_vout_buffer_release;
38938- video_vbq_ops.buf_queue = omap_vout_buffer_queue;
38939 spin_lock_init(&vout->vbq_lock);
38940
38941 videobuf_queue_dma_contig_init(q, &video_vbq_ops, q->dev,
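
The omap_vout change removes a file-scope, writable videobuf_queue_ops that was re-populated on every open(); instead the table is a function-local static initialized at its definition, so the function pointers are written exactly once, by the loader rather than at runtime. In miniature, with invented names:

    /* Miniature of the refactor: initialize the ops table at definition
     * instead of assigning its members each time the device is opened. */
    struct queue_ops_model { int (*setup)(void); void (*release)(void); };

    static int  q_setup(void)   { return 0; }
    static void q_release(void) { }

    static void device_open(void)
    {
            static struct queue_ops_model ops = {   /* one-time init */
                    .setup   = q_setup,
                    .release = q_release,
            };
            /* ... hand &ops to the queue core, as omap_vout_open() does ... */
            (void)ops;
    }
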
38942diff --git a/drivers/media/platform/s5p-tv/mixer.h b/drivers/media/platform/s5p-tv/mixer.h
38943index 04e6490..2df65bf 100644
38944--- a/drivers/media/platform/s5p-tv/mixer.h
38945+++ b/drivers/media/platform/s5p-tv/mixer.h
38946@@ -156,7 +156,7 @@ struct mxr_layer {
38947 /** layer index (unique identifier) */
38948 int idx;
38949 /** callbacks for layer methods */
38950- struct mxr_layer_ops ops;
38951+ struct mxr_layer_ops *ops;
38952 /** format array */
38953 const struct mxr_format **fmt_array;
38954 /** size of format array */
38955diff --git a/drivers/media/platform/s5p-tv/mixer_grp_layer.c b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
38956index b93a21f..2535195 100644
38957--- a/drivers/media/platform/s5p-tv/mixer_grp_layer.c
38958+++ b/drivers/media/platform/s5p-tv/mixer_grp_layer.c
38959@@ -235,7 +235,7 @@ struct mxr_layer *mxr_graph_layer_create(struct mxr_device *mdev, int idx)
38960 {
38961 struct mxr_layer *layer;
38962 int ret;
38963- struct mxr_layer_ops ops = {
38964+ static struct mxr_layer_ops ops = {
38965 .release = mxr_graph_layer_release,
38966 .buffer_set = mxr_graph_buffer_set,
38967 .stream_set = mxr_graph_stream_set,
38968diff --git a/drivers/media/platform/s5p-tv/mixer_reg.c b/drivers/media/platform/s5p-tv/mixer_reg.c
38969index b713403..53cb5ad 100644
38970--- a/drivers/media/platform/s5p-tv/mixer_reg.c
38971+++ b/drivers/media/platform/s5p-tv/mixer_reg.c
38972@@ -276,7 +276,7 @@ static void mxr_irq_layer_handle(struct mxr_layer *layer)
38973 layer->update_buf = next;
38974 }
38975
38976- layer->ops.buffer_set(layer, layer->update_buf);
38977+ layer->ops->buffer_set(layer, layer->update_buf);
38978
38979 if (done && done != layer->shadow_buf)
38980 vb2_buffer_done(&done->vb, VB2_BUF_STATE_DONE);
38981diff --git a/drivers/media/platform/s5p-tv/mixer_video.c b/drivers/media/platform/s5p-tv/mixer_video.c
38982index ef0efdf..8c78eb6 100644
38983--- a/drivers/media/platform/s5p-tv/mixer_video.c
38984+++ b/drivers/media/platform/s5p-tv/mixer_video.c
38985@@ -209,7 +209,7 @@ static void mxr_layer_default_geo(struct mxr_layer *layer)
38986 layer->geo.src.height = layer->geo.src.full_height;
38987
38988 mxr_geometry_dump(mdev, &layer->geo);
38989- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
38990+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
38991 mxr_geometry_dump(mdev, &layer->geo);
38992 }
38993
38994@@ -227,7 +227,7 @@ static void mxr_layer_update_output(struct mxr_layer *layer)
38995 layer->geo.dst.full_width = mbus_fmt.width;
38996 layer->geo.dst.full_height = mbus_fmt.height;
38997 layer->geo.dst.field = mbus_fmt.field;
38998- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
38999+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SINK, 0);
39000
39001 mxr_geometry_dump(mdev, &layer->geo);
39002 }
39003@@ -333,7 +333,7 @@ static int mxr_s_fmt(struct file *file, void *priv,
39004 /* set source size to highest accepted value */
39005 geo->src.full_width = max(geo->dst.full_width, pix->width);
39006 geo->src.full_height = max(geo->dst.full_height, pix->height);
39007- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
39008+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
39009 mxr_geometry_dump(mdev, &layer->geo);
39010 /* set cropping to total visible screen */
39011 geo->src.width = pix->width;
39012@@ -341,12 +341,12 @@ static int mxr_s_fmt(struct file *file, void *priv,
39013 geo->src.x_offset = 0;
39014 geo->src.y_offset = 0;
39015 /* assure consistency of geometry */
39016- layer->ops.fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
39017+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_CROP, MXR_NO_OFFSET);
39018 mxr_geometry_dump(mdev, &layer->geo);
39019 /* set full size to lowest possible value */
39020 geo->src.full_width = 0;
39021 geo->src.full_height = 0;
39022- layer->ops.fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
39023+ layer->ops->fix_geometry(layer, MXR_GEOMETRY_SOURCE, 0);
39024 mxr_geometry_dump(mdev, &layer->geo);
39025
39026 /* returning results */
39027@@ -473,7 +473,7 @@ static int mxr_s_selection(struct file *file, void *fh,
39028 target->width = s->r.width;
39029 target->height = s->r.height;
39030
39031- layer->ops.fix_geometry(layer, stage, s->flags);
39032+ layer->ops->fix_geometry(layer, stage, s->flags);
39033
39034 /* retrieve update selection rectangle */
39035 res.left = target->x_offset;
39036@@ -954,13 +954,13 @@ static int start_streaming(struct vb2_queue *vq, unsigned int count)
39037 mxr_output_get(mdev);
39038
39039 mxr_layer_update_output(layer);
39040- layer->ops.format_set(layer);
39041+ layer->ops->format_set(layer);
39042 /* enabling layer in hardware */
39043 spin_lock_irqsave(&layer->enq_slock, flags);
39044 layer->state = MXR_LAYER_STREAMING;
39045 spin_unlock_irqrestore(&layer->enq_slock, flags);
39046
39047- layer->ops.stream_set(layer, MXR_ENABLE);
39048+ layer->ops->stream_set(layer, MXR_ENABLE);
39049 mxr_streamer_get(mdev);
39050
39051 return 0;
39052@@ -1030,7 +1030,7 @@ static int stop_streaming(struct vb2_queue *vq)
39053 spin_unlock_irqrestore(&layer->enq_slock, flags);
39054
39055 /* disabling layer in hardware */
39056- layer->ops.stream_set(layer, MXR_DISABLE);
39057+ layer->ops->stream_set(layer, MXR_DISABLE);
39058 /* remove one streamer */
39059 mxr_streamer_put(mdev);
39060 /* allow changes in output configuration */
39061@@ -1069,8 +1069,8 @@ void mxr_base_layer_unregister(struct mxr_layer *layer)
39062
39063 void mxr_layer_release(struct mxr_layer *layer)
39064 {
39065- if (layer->ops.release)
39066- layer->ops.release(layer);
39067+ if (layer->ops->release)
39068+ layer->ops->release(layer);
39069 }
39070
39071 void mxr_base_layer_release(struct mxr_layer *layer)
39072@@ -1096,7 +1096,7 @@ struct mxr_layer *mxr_base_layer_create(struct mxr_device *mdev,
39073
39074 layer->mdev = mdev;
39075 layer->idx = idx;
39076- layer->ops = *ops;
39077+ layer->ops = ops;
39078
39079 spin_lock_init(&layer->enq_slock);
39080 INIT_LIST_HEAD(&layer->enq_list);
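
All of the s5p-tv mixer hunks implement a single refactor: mxr_layer used to embed a copy of mxr_layer_ops (layer->ops = *ops), leaving writable function pointers inside every layer object, and now stores a pointer to one static table per layer type, so call sites change from layer->ops.fn() to layer->ops->fn(). In outline, with invented names:

    /* Outline of the embedded-copy -> shared-static-table refactor. */
    struct layer_ops_model { void (*stream_set)(int on); };

    struct layer_model {
            struct layer_ops_model *ops;    /* was: struct layer_ops_model ops; */
    };

    static void graph_stream_set(int on) { (void)on; }

    static struct layer_ops_model graph_ops = {
            .stream_set = graph_stream_set,
    };

    static void layer_create(struct layer_model *layer)
    {
            layer->ops = &graph_ops;        /* was: layer->ops = *ops;          */
    }

    static void layer_start(struct layer_model *layer)
    {
            layer->ops->stream_set(1);      /* was: layer->ops.stream_set(1);   */
    }
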
39081diff --git a/drivers/media/platform/s5p-tv/mixer_vp_layer.c b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
39082index 3d13a63..da31bf1 100644
39083--- a/drivers/media/platform/s5p-tv/mixer_vp_layer.c
39084+++ b/drivers/media/platform/s5p-tv/mixer_vp_layer.c
39085@@ -206,7 +206,7 @@ struct mxr_layer *mxr_vp_layer_create(struct mxr_device *mdev, int idx)
39086 {
39087 struct mxr_layer *layer;
39088 int ret;
39089- struct mxr_layer_ops ops = {
39090+ static struct mxr_layer_ops ops = {
39091 .release = mxr_vp_layer_release,
39092 .buffer_set = mxr_vp_buffer_set,
39093 .stream_set = mxr_vp_stream_set,
39094diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
39095index 545c04c..a14bded 100644
39096--- a/drivers/media/radio/radio-cadet.c
39097+++ b/drivers/media/radio/radio-cadet.c
39098@@ -324,6 +324,8 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
39099 unsigned char readbuf[RDS_BUFFER];
39100 int i = 0;
39101
39102+ if (count > RDS_BUFFER)
39103+ return -EFAULT;
39104 mutex_lock(&dev->lock);
39105 if (dev->rdsstat == 0)
39106 cadet_start_rds(dev);
39107@@ -339,7 +341,7 @@ static ssize_t cadet_read(struct file *file, char __user *data, size_t count, lo
39108 while (i < count && dev->rdsin != dev->rdsout)
39109 readbuf[i++] = dev->rdsbuf[dev->rdsout++];
39110
39111- if (i && copy_to_user(data, readbuf, i))
39112+ if (i > sizeof(readbuf) || copy_to_user(data, readbuf, i))
39113 i = -EFAULT;
39114 unlock:
39115 mutex_unlock(&dev->lock);
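
radio-cadet gets the same staging-buffer treatment as mousedev and the AVM loader earlier: the requested count is rejected up front if it exceeds the RDS buffer, and the copy re-checks the fill level i against the buffer as a second, defensive line (after the cap, i can in fact never exceed it). A compact sketch of the belt-and-braces idiom, with memcpy standing in for copy_to_user() and invented names:

    /* Belt-and-braces sketch: cap the request, then re-check at the copy. */
    #include <string.h>

    #define RDS_BUF_MODEL 256

    static long rds_read_model(char *dst, const char *ring,
                               size_t fill, size_t count)
    {
            char tmp[RDS_BUF_MODEL];
            size_t i;

            if (count > sizeof(tmp))
                    return -1;              /* oversized request */
            for (i = 0; i < count && i < fill; i++)
                    tmp[i] = ring[i];
            if (i > sizeof(tmp))
                    return -1;              /* defensive re-check */
            memcpy(dst, tmp, i);            /* kernel: copy_to_user() */
            return (long)i;
    }
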
39116diff --git a/drivers/media/usb/dvb-usb/cxusb.c b/drivers/media/usb/dvb-usb/cxusb.c
39117index 3940bb0..fb3952a 100644
39118--- a/drivers/media/usb/dvb-usb/cxusb.c
39119+++ b/drivers/media/usb/dvb-usb/cxusb.c
39120@@ -1068,7 +1068,7 @@ static struct dib0070_config dib7070p_dib0070_config = {
39121
39122 struct dib0700_adapter_state {
39123 int (*set_param_save) (struct dvb_frontend *);
39124-};
39125+} __no_const;
39126
39127 static int dib7070_set_param_override(struct dvb_frontend *fe)
39128 {
39129diff --git a/drivers/media/usb/dvb-usb/dw2102.c b/drivers/media/usb/dvb-usb/dw2102.c
39130index 6e237b6..dc25556 100644
39131--- a/drivers/media/usb/dvb-usb/dw2102.c
39132+++ b/drivers/media/usb/dvb-usb/dw2102.c
39133@@ -118,7 +118,7 @@ struct su3000_state {
39134
39135 struct s6x0_state {
39136 int (*old_set_voltage)(struct dvb_frontend *f, fe_sec_voltage_t v);
39137-};
39138+} __no_const;
39139
39140 /* debug */
39141 static int dvb_usb_dw2102_debug;
39142diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
39143index f129551..ecf6514 100644
39144--- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
39145+++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c
39146@@ -326,7 +326,7 @@ struct v4l2_buffer32 {
39147 __u32 reserved;
39148 };
39149
39150-static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
39151+static int get_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
39152 enum v4l2_memory memory)
39153 {
39154 void __user *up_pln;
39155@@ -355,7 +355,7 @@ static int get_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
39156 return 0;
39157 }
39158
39159-static int put_v4l2_plane32(struct v4l2_plane *up, struct v4l2_plane32 *up32,
39160+static int put_v4l2_plane32(struct v4l2_plane __user *up, struct v4l2_plane32 __user *up32,
39161 enum v4l2_memory memory)
39162 {
39163 if (copy_in_user(up32, up, 2 * sizeof(__u32)) ||
39164@@ -772,7 +772,7 @@ static int put_v4l2_subdev_edid32(struct v4l2_subdev_edid *kp, struct v4l2_subde
39165 put_user(kp->start_block, &up->start_block) ||
39166 put_user(kp->blocks, &up->blocks) ||
39167 put_user(tmp, &up->edid) ||
39168- copy_to_user(kp->reserved, up->reserved, sizeof(kp->reserved)))
39169+ copy_to_user(up->reserved, kp->reserved, sizeof(kp->reserved)))
39170 return -EFAULT;
39171 return 0;
39172 }
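
Unlike most hunks here, the v4l2_subdev_edid32 fix corrects an outright bug: copy_to_user() follows memcpy's (destination, source, length) order, with the destination in userspace, and the original call passed the kernel-side kp->reserved as the user destination. The mainline prototypes make the ordering and the address-space annotations explicit (__user stubbed so the lines parse standalone):

    /* The real prototypes; the return value is the number of bytes NOT
     * copied, and __user is sparse's address-space marker. */
    #define __user

    unsigned long copy_to_user(void __user *to, const void *from, unsigned long n);
    unsigned long copy_from_user(void *to, const void __user *from, unsigned long n);
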
39173diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c
39174index 7658586..1079260 100644
39175--- a/drivers/media/v4l2-core/v4l2-ioctl.c
39176+++ b/drivers/media/v4l2-core/v4l2-ioctl.c
39177@@ -1995,7 +1995,8 @@ struct v4l2_ioctl_info {
39178 struct file *file, void *fh, void *p);
39179 } u;
39180 void (*debug)(const void *arg, bool write_only);
39181-};
39182+} __do_const;
39183+typedef struct v4l2_ioctl_info __no_const v4l2_ioctl_info_no_const;
39184
39185 /* This control needs a priority check */
39186 #define INFO_FL_PRIO (1 << 0)
39187@@ -2177,7 +2178,7 @@ static long __video_do_ioctl(struct file *file,
39188 struct video_device *vfd = video_devdata(file);
39189 const struct v4l2_ioctl_ops *ops = vfd->ioctl_ops;
39190 bool write_only = false;
39191- struct v4l2_ioctl_info default_info;
39192+ v4l2_ioctl_info_no_const default_info;
39193 const struct v4l2_ioctl_info *info;
39194 void *fh = file->private_data;
39195 struct v4l2_fh *vfh = NULL;
39196@@ -2251,7 +2252,7 @@ done:
39197 }
39198
39199 static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
39200- void * __user *user_ptr, void ***kernel_ptr)
39201+ void __user **user_ptr, void ***kernel_ptr)
39202 {
39203 int ret = 0;
39204
39205@@ -2267,7 +2268,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
39206 ret = -EINVAL;
39207 break;
39208 }
39209- *user_ptr = (void __user *)buf->m.planes;
39210+ *user_ptr = (void __force_user *)buf->m.planes;
39211 *kernel_ptr = (void *)&buf->m.planes;
39212 *array_size = sizeof(struct v4l2_plane) * buf->length;
39213 ret = 1;
39214@@ -2302,7 +2303,7 @@ static int check_array_args(unsigned int cmd, void *parg, size_t *array_size,
39215 ret = -EINVAL;
39216 break;
39217 }
39218- *user_ptr = (void __user *)ctrls->controls;
39219+ *user_ptr = (void __force_user *)ctrls->controls;
39220 *kernel_ptr = (void *)&ctrls->controls;
39221 *array_size = sizeof(struct v4l2_ext_control)
39222 * ctrls->count;
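
The check_array_args() edits change no generated code; they are for sparse. void * __user * declares a user pointer to kernel pointers, while the intent is a kernel pointer into which user pointers are stored, i.e. void __user **, and the __force_user casts mark the m.planes/controls reinterpretations as deliberate rather than silencing the checker wholesale. Where the qualifier binds, with a stub macro so the snippet parses:

    /* Qualifier placement is what sparse checks. */
    #define __user /* kernel: __attribute__((address_space(1))) under sparse */

    void __user **user_ptr;    /* kernel pointer to an array of user pointers */
    void * __user *mistake;    /* user pointer to an array of kernel pointers */
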
39223diff --git a/drivers/message/fusion/mptbase.c b/drivers/message/fusion/mptbase.c
39224index 767ff4d..c69d259 100644
39225--- a/drivers/message/fusion/mptbase.c
39226+++ b/drivers/message/fusion/mptbase.c
39227@@ -6755,8 +6755,13 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
39228 seq_printf(m, " MaxChainDepth = 0x%02x frames\n", ioc->facts.MaxChainDepth);
39229 seq_printf(m, " MinBlockSize = 0x%02x bytes\n", 4*ioc->facts.BlockSize);
39230
39231+#ifdef CONFIG_GRKERNSEC_HIDESYM
39232+ seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n", NULL, NULL);
39233+#else
39234 seq_printf(m, " RequestFrames @ 0x%p (Dma @ 0x%p)\n",
39235 (void *)ioc->req_frames, (void *)(ulong)ioc->req_frames_dma);
39236+#endif
39237+
39238 /*
39239 * Rounding UP to nearest 4-kB boundary here...
39240 */
39241@@ -6769,7 +6774,11 @@ static int mpt_iocinfo_proc_show(struct seq_file *m, void *v)
39242 ioc->facts.GlobalCredits);
39243
39244 seq_printf(m, " Frames @ 0x%p (Dma @ 0x%p)\n",
39245+#ifdef CONFIG_GRKERNSEC_HIDESYM
39246+ NULL, NULL);
39247+#else
39248 (void *)ioc->alloc, (void *)(ulong)ioc->alloc_dma);
39249+#endif
39250 sz = (ioc->reply_sz * ioc->reply_depth) + 128;
39251 seq_printf(m, " {CurRepSz=%d} x {CurRepDepth=%d} = %d bytes ^= 0x%x\n",
39252 ioc->reply_sz, ioc->reply_depth, ioc->reply_sz*ioc->reply_depth, sz);
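
GRKERNSEC_HIDESYM compiles the kernel addresses out of this world-readable /proc output entirely, printing NULL instead, since leaked pointers undermine randomization-based defenses. Mainline's softer equivalent is the %pK printk format plus the kptr_restrict sysctl, which zeroes the value for unprivileged readers; modeled in userspace with invented names:

    /* Userspace model of the %pK / kptr_restrict idea. */
    #include <stdio.h>

    static void show_frames(FILE *m, const void *frames, int privileged)
    {
            if (privileged)
                    fprintf(m, "  RequestFrames @ %p\n", frames);
            else
                    fprintf(m, "  RequestFrames @ 0x0\n"); /* hide the address */
    }
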
39253diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
39254index dd239bd..689c4f7 100644
39255--- a/drivers/message/fusion/mptsas.c
39256+++ b/drivers/message/fusion/mptsas.c
39257@@ -446,6 +446,23 @@ mptsas_is_end_device(struct mptsas_devinfo * attached)
39258 return 0;
39259 }
39260
39261+static inline void
39262+mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
39263+{
39264+ if (phy_info->port_details) {
39265+ phy_info->port_details->rphy = rphy;
39266+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
39267+ ioc->name, rphy));
39268+ }
39269+
39270+ if (rphy) {
39271+ dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
39272+ &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
39273+ dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
39274+ ioc->name, rphy, rphy->dev.release));
39275+ }
39276+}
39277+
39278 /* no mutex */
39279 static void
39280 mptsas_port_delete(MPT_ADAPTER *ioc, struct mptsas_portinfo_details * port_details)
39281@@ -484,23 +501,6 @@ mptsas_get_rphy(struct mptsas_phyinfo *phy_info)
39282 return NULL;
39283 }
39284
39285-static inline void
39286-mptsas_set_rphy(MPT_ADAPTER *ioc, struct mptsas_phyinfo *phy_info, struct sas_rphy *rphy)
39287-{
39288- if (phy_info->port_details) {
39289- phy_info->port_details->rphy = rphy;
39290- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "sas_rphy_add: rphy=%p\n",
39291- ioc->name, rphy));
39292- }
39293-
39294- if (rphy) {
39295- dsaswideprintk(ioc, dev_printk(KERN_DEBUG,
39296- &rphy->dev, MYIOC_s_FMT "add:", ioc->name));
39297- dsaswideprintk(ioc, printk(MYIOC_s_DEBUG_FMT "rphy=%p release=%p\n",
39298- ioc->name, rphy, rphy->dev.release));
39299- }
39300-}
39301-
39302 static inline struct sas_port *
39303 mptsas_get_port(struct mptsas_phyinfo *phy_info)
39304 {
39305diff --git a/drivers/message/fusion/mptscsih.c b/drivers/message/fusion/mptscsih.c
39306index 727819c..ad74694 100644
39307--- a/drivers/message/fusion/mptscsih.c
39308+++ b/drivers/message/fusion/mptscsih.c
39309@@ -1271,15 +1271,16 @@ mptscsih_info(struct Scsi_Host *SChost)
39310
39311 h = shost_priv(SChost);
39312
39313- if (h) {
39314- if (h->info_kbuf == NULL)
39315- if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
39316- return h->info_kbuf;
39317- h->info_kbuf[0] = '\0';
39318+ if (!h)
39319+ return NULL;
39320
39321- mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
39322- h->info_kbuf[size-1] = '\0';
39323- }
39324+ if (h->info_kbuf == NULL)
39325+ if ((h->info_kbuf = kmalloc(0x1000 /* 4Kb */, GFP_KERNEL)) == NULL)
39326+ return h->info_kbuf;
39327+ h->info_kbuf[0] = '\0';
39328+
39329+ mpt_print_ioc_summary(h->ioc, h->info_kbuf, &size, 0, 0);
39330+ h->info_kbuf[size-1] = '\0';
39331
39332 return h->info_kbuf;
39333 }
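
The mptscsih_info() change is a pure control-flow refactor: the enclosing `if (h) { ... }` becomes an early `return NULL`, so the allocation and formatting logic loses one indentation level. A short sketch of the same guard-clause shape (all names here are illustrative only):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static char *info_string(void *priv)
{
    static char *buf;

    if (!priv)                  /* guard clause replaces the wrapping if */
        return NULL;

    if (buf == NULL && (buf = malloc(4096)) == NULL)
        return buf;             /* NULL on allocation failure */
    buf[0] = '\0';

    strncat(buf, "adapter summary", 4095);
    return buf;
}

int main(void)
{
    int token;

    printf("%s\n", info_string(&token));
    return 0;
}
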
39334diff --git a/drivers/message/i2o/i2o_proc.c b/drivers/message/i2o/i2o_proc.c
39335index b7d87cd..9890039 100644
39336--- a/drivers/message/i2o/i2o_proc.c
39337+++ b/drivers/message/i2o/i2o_proc.c
39338@@ -255,12 +255,6 @@ static char *scsi_devices[] = {
39339 "Array Controller Device"
39340 };
39341
39342-static char *chtostr(char *tmp, u8 *chars, int n)
39343-{
39344- tmp[0] = 0;
39345- return strncat(tmp, (char *)chars, n);
39346-}
39347-
39348 static int i2o_report_query_status(struct seq_file *seq, int block_status,
39349 char *group)
39350 {
39351@@ -790,7 +784,6 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
39352 } *result;
39353
39354 i2o_exec_execute_ddm_table ddm_table;
39355- char tmp[28 + 1];
39356
39357 result = kmalloc(sizeof(*result), GFP_KERNEL);
39358 if (!result)
39359@@ -825,8 +818,7 @@ static int i2o_seq_show_ddm_table(struct seq_file *seq, void *v)
39360
39361 seq_printf(seq, "%-#7x", ddm_table.i2o_vendor_id);
39362 seq_printf(seq, "%-#8x", ddm_table.module_id);
39363- seq_printf(seq, "%-29s",
39364- chtostr(tmp, ddm_table.module_name_version, 28));
39365+ seq_printf(seq, "%-.28s", ddm_table.module_name_version);
39366 seq_printf(seq, "%9d ", ddm_table.data_size);
39367 seq_printf(seq, "%8d", ddm_table.code_size);
39368
39369@@ -893,7 +885,6 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
39370
39371 i2o_driver_result_table *result;
39372 i2o_driver_store_table *dst;
39373- char tmp[28 + 1];
39374
39375 result = kmalloc(sizeof(i2o_driver_result_table), GFP_KERNEL);
39376 if (result == NULL)
39377@@ -928,9 +919,8 @@ static int i2o_seq_show_drivers_stored(struct seq_file *seq, void *v)
39378
39379 seq_printf(seq, "%-#7x", dst->i2o_vendor_id);
39380 seq_printf(seq, "%-#8x", dst->module_id);
39381- seq_printf(seq, "%-29s",
39382- chtostr(tmp, dst->module_name_version, 28));
39383- seq_printf(seq, "%-9s", chtostr(tmp, dst->date, 8));
39384+ seq_printf(seq, "%-.28s", dst->module_name_version);
39385+ seq_printf(seq, "%-.8s", dst->date);
39386 seq_printf(seq, "%8d ", dst->module_size);
39387 seq_printf(seq, "%8d ", dst->mpb_size);
39388 seq_printf(seq, "0x%04x", dst->module_flags);
39389@@ -1250,7 +1240,6 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
39390 // == (allow) 512d bytes (max)
39391 static u16 *work16 = (u16 *) work32;
39392 int token;
39393- char tmp[16 + 1];
39394
39395 token = i2o_parm_field_get(d, 0xF100, -1, &work32, sizeof(work32));
39396
39397@@ -1262,14 +1251,10 @@ static int i2o_seq_show_dev_identity(struct seq_file *seq, void *v)
39398 seq_printf(seq, "Device Class : %s\n", i2o_get_class_name(work16[0]));
39399 seq_printf(seq, "Owner TID : %0#5x\n", work16[2]);
39400 seq_printf(seq, "Parent TID : %0#5x\n", work16[3]);
39401- seq_printf(seq, "Vendor info : %s\n",
39402- chtostr(tmp, (u8 *) (work32 + 2), 16));
39403- seq_printf(seq, "Product info : %s\n",
39404- chtostr(tmp, (u8 *) (work32 + 6), 16));
39405- seq_printf(seq, "Description : %s\n",
39406- chtostr(tmp, (u8 *) (work32 + 10), 16));
39407- seq_printf(seq, "Product rev. : %s\n",
39408- chtostr(tmp, (u8 *) (work32 + 14), 8));
39409+ seq_printf(seq, "Vendor info : %.16s\n", (u8 *) (work32 + 2));
39410+ seq_printf(seq, "Product info : %.16s\n", (u8 *) (work32 + 6));
39411+ seq_printf(seq, "Description : %.16s\n", (u8 *) (work32 + 10));
39412+ seq_printf(seq, "Product rev. : %.8s\n", (u8 *) (work32 + 14));
39413
39414 seq_printf(seq, "Serial number : ");
39415 print_serial_number(seq, (u8 *) (work32 + 16),
39416@@ -1306,8 +1291,6 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
39417 u8 pad[256]; // allow up to 256 byte (max) serial number
39418 } result;
39419
39420- char tmp[24 + 1];
39421-
39422 token = i2o_parm_field_get(d, 0xF101, -1, &result, sizeof(result));
39423
39424 if (token < 0) {
39425@@ -1316,10 +1299,8 @@ static int i2o_seq_show_ddm_identity(struct seq_file *seq, void *v)
39426 }
39427
39428 seq_printf(seq, "Registering DDM TID : 0x%03x\n", result.ddm_tid);
39429- seq_printf(seq, "Module name : %s\n",
39430- chtostr(tmp, result.module_name, 24));
39431- seq_printf(seq, "Module revision : %s\n",
39432- chtostr(tmp, result.module_rev, 8));
39433+ seq_printf(seq, "Module name : %.24s\n", result.module_name);
39434+ seq_printf(seq, "Module revision : %.8s\n", result.module_rev);
39435
39436 seq_printf(seq, "Serial number : ");
39437 print_serial_number(seq, result.serial_number, sizeof(result) - 36);
39438@@ -1343,8 +1324,6 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
39439 u8 instance_number[4];
39440 } result;
39441
39442- char tmp[64 + 1];
39443-
39444 token = i2o_parm_field_get(d, 0xF102, -1, &result, sizeof(result));
39445
39446 if (token < 0) {
39447@@ -1352,14 +1331,10 @@ static int i2o_seq_show_uinfo(struct seq_file *seq, void *v)
39448 return 0;
39449 }
39450
39451- seq_printf(seq, "Device name : %s\n",
39452- chtostr(tmp, result.device_name, 64));
39453- seq_printf(seq, "Service name : %s\n",
39454- chtostr(tmp, result.service_name, 64));
39455- seq_printf(seq, "Physical name : %s\n",
39456- chtostr(tmp, result.physical_location, 64));
39457- seq_printf(seq, "Instance number : %s\n",
39458- chtostr(tmp, result.instance_number, 4));
39459+ seq_printf(seq, "Device name : %.64s\n", result.device_name);
39460+ seq_printf(seq, "Service name : %.64s\n", result.service_name);
39461+ seq_printf(seq, "Physical name : %.64s\n", result.physical_location);
39462+ seq_printf(seq, "Instance number : %.4s\n", result.instance_number);
39463
39464 return 0;
39465 }
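
All the chtostr() removals above lean on one printf feature: a precision on %s caps how many bytes are read, so a fixed-size, possibly non-NUL-terminated field can be printed directly instead of being staged through a temporary buffer with strncat(). A self-contained demonstration:

#include <stdio.h>

int main(void)
{
    /* a 4-byte field with no terminating NUL, like the I2O result structs */
    char field[4] = { 'A', 'B', 'C', 'D' };

    printf("fixed   : %.4s\n", field);      /* reads at most 4 bytes */
    printf("dynamic : %.*s\n", 4, field);   /* same, with a runtime bound */
    return 0;
}

Without the precision, %s would keep reading past the array until it happened to hit a NUL byte.
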
39466diff --git a/drivers/message/i2o/iop.c b/drivers/message/i2o/iop.c
39467index a8c08f3..155fe3d 100644
39468--- a/drivers/message/i2o/iop.c
39469+++ b/drivers/message/i2o/iop.c
39470@@ -111,10 +111,10 @@ u32 i2o_cntxt_list_add(struct i2o_controller * c, void *ptr)
39471
39472 spin_lock_irqsave(&c->context_list_lock, flags);
39473
39474- if (unlikely(atomic_inc_and_test(&c->context_list_counter)))
39475- atomic_inc(&c->context_list_counter);
39476+ if (unlikely(atomic_inc_and_test_unchecked(&c->context_list_counter)))
39477+ atomic_inc_unchecked(&c->context_list_counter);
39478
39479- entry->context = atomic_read(&c->context_list_counter);
39480+ entry->context = atomic_read_unchecked(&c->context_list_counter);
39481
39482 list_add(&entry->list, &c->context_list);
39483
39484@@ -1077,7 +1077,7 @@ struct i2o_controller *i2o_iop_alloc(void)
39485
39486 #if BITS_PER_LONG == 64
39487 spin_lock_init(&c->context_list_lock);
39488- atomic_set(&c->context_list_counter, 0);
39489+ atomic_set_unchecked(&c->context_list_counter, 0);
39490 INIT_LIST_HEAD(&c->context_list);
39491 #endif
39492
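
The iop.c hunk switches the context-ID counter to the *_unchecked atomic variants. Under PaX's REFCOUNT hardening, plain atomic_t operations trap on overflow; counters whose wraparound is harmless by design, like this ID allocator, are converted so they keep wrapping silently. A C11 stand-in (alloc_id() is invented here; unsigned wraparound is well defined in C):

#include <stdatomic.h>
#include <stdio.h>

static atomic_uint next_id;

static unsigned int alloc_id(void)
{
    unsigned int id = atomic_fetch_add_explicit(&next_id, 1,
                                                memory_order_relaxed) + 1;
    if (id == 0)    /* skip 0 on wrap, as the driver skips the zero context */
        id = atomic_fetch_add_explicit(&next_id, 1,
                                       memory_order_relaxed) + 1;
    return id;
}

int main(void)
{
    printf("%u %u\n", alloc_id(), alloc_id());
    return 0;
}
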
39493diff --git a/drivers/mfd/janz-cmodio.c b/drivers/mfd/janz-cmodio.c
39494index 45ece11..8efa218 100644
39495--- a/drivers/mfd/janz-cmodio.c
39496+++ b/drivers/mfd/janz-cmodio.c
39497@@ -13,6 +13,7 @@
39498
39499 #include <linux/kernel.h>
39500 #include <linux/module.h>
39501+#include <linux/slab.h>
39502 #include <linux/init.h>
39503 #include <linux/pci.h>
39504 #include <linux/interrupt.h>
39505diff --git a/drivers/mfd/twl4030-irq.c b/drivers/mfd/twl4030-irq.c
39506index a5f9888..1c0ed56 100644
39507--- a/drivers/mfd/twl4030-irq.c
39508+++ b/drivers/mfd/twl4030-irq.c
39509@@ -35,6 +35,7 @@
39510 #include <linux/of.h>
39511 #include <linux/irqdomain.h>
39512 #include <linux/i2c/twl.h>
39513+#include <asm/pgtable.h>
39514
39515 #include "twl-core.h"
39516
39517@@ -728,10 +729,12 @@ int twl4030_init_irq(struct device *dev, int irq_num)
39518 * Install an irq handler for each of the SIH modules;
39519 * clone dummy irq_chip since PIH can't *do* anything
39520 */
39521- twl4030_irq_chip = dummy_irq_chip;
39522- twl4030_irq_chip.name = "twl4030";
39523+ pax_open_kernel();
39524+ memcpy((void *)&twl4030_irq_chip, &dummy_irq_chip, sizeof twl4030_irq_chip);
39525+ *(const char **)&twl4030_irq_chip.name = "twl4030";
39526
39527- twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
39528+ *(void **)&twl4030_sih_irq_chip.irq_ack = dummy_irq_chip.irq_ack;
39529+ pax_close_kernel();
39530
39531 for (i = irq_base; i < irq_end; i++) {
39532 irq_set_chip_and_handler(i, &twl4030_irq_chip,
39533diff --git a/drivers/mfd/twl6030-irq.c b/drivers/mfd/twl6030-irq.c
39534index 277a8db..0e0b754 100644
39535--- a/drivers/mfd/twl6030-irq.c
39536+++ b/drivers/mfd/twl6030-irq.c
39537@@ -387,10 +387,12 @@ int twl6030_init_irq(struct device *dev, int irq_num)
39538 * install an irq handler for each of the modules;
39539 * clone dummy irq_chip since PIH can't *do* anything
39540 */
39541- twl6030_irq_chip = dummy_irq_chip;
39542- twl6030_irq_chip.name = "twl6030";
39543- twl6030_irq_chip.irq_set_type = NULL;
39544- twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;
39545+ pax_open_kernel();
39546+ memcpy((void *)&twl6030_irq_chip, &dummy_irq_chip, sizeof twl6030_irq_chip);
39547+ *(const char **)&twl6030_irq_chip.name = "twl6030";
39548+ *(void **)&twl6030_irq_chip.irq_set_type = NULL;
39549+ *(void **)&twl6030_irq_chip.irq_set_wake = twl6030_irq_set_wake;
39550+ pax_close_kernel();
39551
39552 for (i = irq_base; i < irq_end; i++) {
39553 irq_set_chip_and_handler(i, &twl6030_irq_chip,
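
Both twl hunks wrap one-time writes to otherwise read-only irq_chip structures in pax_open_kernel()/pax_close_kernel(), which briefly lift the write protection that the constification machinery puts on such objects. A rough userspace analogue, with mprotect() standing in for the open/close pair (error handling trimmed for brevity):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long pagesz = sysconf(_SC_PAGESIZE);
    char *chip = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (chip == MAP_FAILED)
        return 1;
    mprotect(chip, pagesz, PROT_READ);              /* normally read-only */

    mprotect(chip, pagesz, PROT_READ | PROT_WRITE); /* pax_open_kernel() */
    strcpy(chip, "twl4030");                        /* one-time init write */
    mprotect(chip, pagesz, PROT_READ);              /* pax_close_kernel() */

    printf("%s\n", chip);
    return munmap(chip, pagesz);
}
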
39554diff --git a/drivers/misc/c2port/core.c b/drivers/misc/c2port/core.c
39555index f32550a..e3e52a2 100644
39556--- a/drivers/misc/c2port/core.c
39557+++ b/drivers/misc/c2port/core.c
39558@@ -920,7 +920,9 @@ struct c2port_device *c2port_device_register(char *name,
39559 mutex_init(&c2dev->mutex);
39560
39561 /* Create binary file */
39562- c2port_bin_attrs.size = ops->blocks_num * ops->block_size;
39563+ pax_open_kernel();
39564+ *(size_t *)&c2port_bin_attrs.size = ops->blocks_num * ops->block_size;
39565+ pax_close_kernel();
39566 ret = device_create_bin_file(c2dev->dev, &c2port_bin_attrs);
39567 if (unlikely(ret))
39568 goto error_device_create_bin_file;
39569diff --git a/drivers/misc/kgdbts.c b/drivers/misc/kgdbts.c
39570index 36f5d52..32311c3 100644
39571--- a/drivers/misc/kgdbts.c
39572+++ b/drivers/misc/kgdbts.c
39573@@ -834,7 +834,7 @@ static void run_plant_and_detach_test(int is_early)
39574 char before[BREAK_INSTR_SIZE];
39575 char after[BREAK_INSTR_SIZE];
39576
39577- probe_kernel_read(before, (char *)kgdbts_break_test,
39578+ probe_kernel_read(before, ktla_ktva((char *)kgdbts_break_test),
39579 BREAK_INSTR_SIZE);
39580 init_simple_test();
39581 ts.tst = plant_and_detach_test;
39582@@ -842,7 +842,7 @@ static void run_plant_and_detach_test(int is_early)
39583 /* Activate test with initial breakpoint */
39584 if (!is_early)
39585 kgdb_breakpoint();
39586- probe_kernel_read(after, (char *)kgdbts_break_test,
39587+ probe_kernel_read(after, ktla_ktva((char *)kgdbts_break_test),
39588 BREAK_INSTR_SIZE);
39589 if (memcmp(before, after, BREAK_INSTR_SIZE)) {
39590 printk(KERN_CRIT "kgdbts: ERROR kgdb corrupted memory\n");
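
ktla_ktva() converts a kernel-text address into the alias through which the bytes can actually be read when KERNEXEC keeps the primary mapping locked down. A loose userspace model, mapping one buffer twice with memfd_create() (glibc 2.27+) so the two views differ by a constant delta; all names are illustrative and error checks are omitted:

#define _GNU_SOURCE
#include <stddef.h>
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    int fd = memfd_create("text", 0);
    ftruncate(fd, 4096);

    unsigned char *text  = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);
    unsigned char *alias = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                                MAP_SHARED, fd, 0);
    ptrdiff_t delta = alias - text;        /* what ktla_ktva() would add */

    alias[0] = 0xcc;                       /* plant a byte via the RW view */
    /* probe_kernel_read(before, ktla_ktva(addr), n) becomes roughly: */
    printf("first byte via alias: %#x (delta %td)\n", *(text + delta), delta);

    close(fd);
    return 0;
}
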
39591diff --git a/drivers/misc/lis3lv02d/lis3lv02d.c b/drivers/misc/lis3lv02d/lis3lv02d.c
39592index 4cd4a3d..b48cbc7 100644
39593--- a/drivers/misc/lis3lv02d/lis3lv02d.c
39594+++ b/drivers/misc/lis3lv02d/lis3lv02d.c
39595@@ -498,7 +498,7 @@ static irqreturn_t lis302dl_interrupt(int irq, void *data)
39596 * the lid is closed. This leads to interrupts as soon as a little move
39597 * is done.
39598 */
39599- atomic_inc(&lis3->count);
39600+ atomic_inc_unchecked(&lis3->count);
39601
39602 wake_up_interruptible(&lis3->misc_wait);
39603 kill_fasync(&lis3->async_queue, SIGIO, POLL_IN);
39604@@ -584,7 +584,7 @@ static int lis3lv02d_misc_open(struct inode *inode, struct file *file)
39605 if (lis3->pm_dev)
39606 pm_runtime_get_sync(lis3->pm_dev);
39607
39608- atomic_set(&lis3->count, 0);
39609+ atomic_set_unchecked(&lis3->count, 0);
39610 return 0;
39611 }
39612
39613@@ -616,7 +616,7 @@ static ssize_t lis3lv02d_misc_read(struct file *file, char __user *buf,
39614 add_wait_queue(&lis3->misc_wait, &wait);
39615 while (true) {
39616 set_current_state(TASK_INTERRUPTIBLE);
39617- data = atomic_xchg(&lis3->count, 0);
39618+ data = atomic_xchg_unchecked(&lis3->count, 0);
39619 if (data)
39620 break;
39621
39622@@ -657,7 +657,7 @@ static unsigned int lis3lv02d_misc_poll(struct file *file, poll_table *wait)
39623 struct lis3lv02d, miscdev);
39624
39625 poll_wait(file, &lis3->misc_wait, wait);
39626- if (atomic_read(&lis3->count))
39627+ if (atomic_read_unchecked(&lis3->count))
39628 return POLLIN | POLLRDNORM;
39629 return 0;
39630 }
39631diff --git a/drivers/misc/lis3lv02d/lis3lv02d.h b/drivers/misc/lis3lv02d/lis3lv02d.h
39632index c439c82..1f20f57 100644
39633--- a/drivers/misc/lis3lv02d/lis3lv02d.h
39634+++ b/drivers/misc/lis3lv02d/lis3lv02d.h
39635@@ -297,7 +297,7 @@ struct lis3lv02d {
39636 struct input_polled_dev *idev; /* input device */
39637 struct platform_device *pdev; /* platform device */
39638 struct regulator_bulk_data regulators[2];
39639- atomic_t count; /* interrupt count after last read */
39640+ atomic_unchecked_t count; /* interrupt count after last read */
39641 union axis_conversion ac; /* hw -> logical axis */
39642 int mapped_btns[3];
39643
39644diff --git a/drivers/misc/sgi-gru/gruhandles.c b/drivers/misc/sgi-gru/gruhandles.c
39645index 2f30bad..c4c13d0 100644
39646--- a/drivers/misc/sgi-gru/gruhandles.c
39647+++ b/drivers/misc/sgi-gru/gruhandles.c
39648@@ -44,8 +44,8 @@ static void update_mcs_stats(enum mcs_op op, unsigned long clks)
39649 unsigned long nsec;
39650
39651 nsec = CLKS2NSEC(clks);
39652- atomic_long_inc(&mcs_op_statistics[op].count);
39653- atomic_long_add(nsec, &mcs_op_statistics[op].total);
39654+ atomic_long_inc_unchecked(&mcs_op_statistics[op].count);
39655+ atomic_long_add_unchecked(nsec, &mcs_op_statistics[op].total);
39656 if (mcs_op_statistics[op].max < nsec)
39657 mcs_op_statistics[op].max = nsec;
39658 }
39659diff --git a/drivers/misc/sgi-gru/gruprocfs.c b/drivers/misc/sgi-gru/gruprocfs.c
39660index 797d796..ae8f01e 100644
39661--- a/drivers/misc/sgi-gru/gruprocfs.c
39662+++ b/drivers/misc/sgi-gru/gruprocfs.c
39663@@ -32,9 +32,9 @@
39664
39665 #define printstat(s, f) printstat_val(s, &gru_stats.f, #f)
39666
39667-static void printstat_val(struct seq_file *s, atomic_long_t *v, char *id)
39668+static void printstat_val(struct seq_file *s, atomic_long_unchecked_t *v, char *id)
39669 {
39670- unsigned long val = atomic_long_read(v);
39671+ unsigned long val = atomic_long_read_unchecked(v);
39672
39673 seq_printf(s, "%16lu %s\n", val, id);
39674 }
39675@@ -134,8 +134,8 @@ static int mcs_statistics_show(struct seq_file *s, void *p)
39676
39677 seq_printf(s, "%-20s%12s%12s%12s\n", "#id", "count", "aver-clks", "max-clks");
39678 for (op = 0; op < mcsop_last; op++) {
39679- count = atomic_long_read(&mcs_op_statistics[op].count);
39680- total = atomic_long_read(&mcs_op_statistics[op].total);
39681+ count = atomic_long_read_unchecked(&mcs_op_statistics[op].count);
39682+ total = atomic_long_read_unchecked(&mcs_op_statistics[op].total);
39683 max = mcs_op_statistics[op].max;
39684 seq_printf(s, "%-20s%12ld%12ld%12ld\n", id[op], count,
39685 count ? total / count : 0, max);
39686diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
39687index 5c3ce24..4915ccb 100644
39688--- a/drivers/misc/sgi-gru/grutables.h
39689+++ b/drivers/misc/sgi-gru/grutables.h
39690@@ -167,82 +167,82 @@ extern unsigned int gru_max_gids;
39691 * GRU statistics.
39692 */
39693 struct gru_stats_s {
39694- atomic_long_t vdata_alloc;
39695- atomic_long_t vdata_free;
39696- atomic_long_t gts_alloc;
39697- atomic_long_t gts_free;
39698- atomic_long_t gms_alloc;
39699- atomic_long_t gms_free;
39700- atomic_long_t gts_double_allocate;
39701- atomic_long_t assign_context;
39702- atomic_long_t assign_context_failed;
39703- atomic_long_t free_context;
39704- atomic_long_t load_user_context;
39705- atomic_long_t load_kernel_context;
39706- atomic_long_t lock_kernel_context;
39707- atomic_long_t unlock_kernel_context;
39708- atomic_long_t steal_user_context;
39709- atomic_long_t steal_kernel_context;
39710- atomic_long_t steal_context_failed;
39711- atomic_long_t nopfn;
39712- atomic_long_t asid_new;
39713- atomic_long_t asid_next;
39714- atomic_long_t asid_wrap;
39715- atomic_long_t asid_reuse;
39716- atomic_long_t intr;
39717- atomic_long_t intr_cbr;
39718- atomic_long_t intr_tfh;
39719- atomic_long_t intr_spurious;
39720- atomic_long_t intr_mm_lock_failed;
39721- atomic_long_t call_os;
39722- atomic_long_t call_os_wait_queue;
39723- atomic_long_t user_flush_tlb;
39724- atomic_long_t user_unload_context;
39725- atomic_long_t user_exception;
39726- atomic_long_t set_context_option;
39727- atomic_long_t check_context_retarget_intr;
39728- atomic_long_t check_context_unload;
39729- atomic_long_t tlb_dropin;
39730- atomic_long_t tlb_preload_page;
39731- atomic_long_t tlb_dropin_fail_no_asid;
39732- atomic_long_t tlb_dropin_fail_upm;
39733- atomic_long_t tlb_dropin_fail_invalid;
39734- atomic_long_t tlb_dropin_fail_range_active;
39735- atomic_long_t tlb_dropin_fail_idle;
39736- atomic_long_t tlb_dropin_fail_fmm;
39737- atomic_long_t tlb_dropin_fail_no_exception;
39738- atomic_long_t tfh_stale_on_fault;
39739- atomic_long_t mmu_invalidate_range;
39740- atomic_long_t mmu_invalidate_page;
39741- atomic_long_t flush_tlb;
39742- atomic_long_t flush_tlb_gru;
39743- atomic_long_t flush_tlb_gru_tgh;
39744- atomic_long_t flush_tlb_gru_zero_asid;
39745+ atomic_long_unchecked_t vdata_alloc;
39746+ atomic_long_unchecked_t vdata_free;
39747+ atomic_long_unchecked_t gts_alloc;
39748+ atomic_long_unchecked_t gts_free;
39749+ atomic_long_unchecked_t gms_alloc;
39750+ atomic_long_unchecked_t gms_free;
39751+ atomic_long_unchecked_t gts_double_allocate;
39752+ atomic_long_unchecked_t assign_context;
39753+ atomic_long_unchecked_t assign_context_failed;
39754+ atomic_long_unchecked_t free_context;
39755+ atomic_long_unchecked_t load_user_context;
39756+ atomic_long_unchecked_t load_kernel_context;
39757+ atomic_long_unchecked_t lock_kernel_context;
39758+ atomic_long_unchecked_t unlock_kernel_context;
39759+ atomic_long_unchecked_t steal_user_context;
39760+ atomic_long_unchecked_t steal_kernel_context;
39761+ atomic_long_unchecked_t steal_context_failed;
39762+ atomic_long_unchecked_t nopfn;
39763+ atomic_long_unchecked_t asid_new;
39764+ atomic_long_unchecked_t asid_next;
39765+ atomic_long_unchecked_t asid_wrap;
39766+ atomic_long_unchecked_t asid_reuse;
39767+ atomic_long_unchecked_t intr;
39768+ atomic_long_unchecked_t intr_cbr;
39769+ atomic_long_unchecked_t intr_tfh;
39770+ atomic_long_unchecked_t intr_spurious;
39771+ atomic_long_unchecked_t intr_mm_lock_failed;
39772+ atomic_long_unchecked_t call_os;
39773+ atomic_long_unchecked_t call_os_wait_queue;
39774+ atomic_long_unchecked_t user_flush_tlb;
39775+ atomic_long_unchecked_t user_unload_context;
39776+ atomic_long_unchecked_t user_exception;
39777+ atomic_long_unchecked_t set_context_option;
39778+ atomic_long_unchecked_t check_context_retarget_intr;
39779+ atomic_long_unchecked_t check_context_unload;
39780+ atomic_long_unchecked_t tlb_dropin;
39781+ atomic_long_unchecked_t tlb_preload_page;
39782+ atomic_long_unchecked_t tlb_dropin_fail_no_asid;
39783+ atomic_long_unchecked_t tlb_dropin_fail_upm;
39784+ atomic_long_unchecked_t tlb_dropin_fail_invalid;
39785+ atomic_long_unchecked_t tlb_dropin_fail_range_active;
39786+ atomic_long_unchecked_t tlb_dropin_fail_idle;
39787+ atomic_long_unchecked_t tlb_dropin_fail_fmm;
39788+ atomic_long_unchecked_t tlb_dropin_fail_no_exception;
39789+ atomic_long_unchecked_t tfh_stale_on_fault;
39790+ atomic_long_unchecked_t mmu_invalidate_range;
39791+ atomic_long_unchecked_t mmu_invalidate_page;
39792+ atomic_long_unchecked_t flush_tlb;
39793+ atomic_long_unchecked_t flush_tlb_gru;
39794+ atomic_long_unchecked_t flush_tlb_gru_tgh;
39795+ atomic_long_unchecked_t flush_tlb_gru_zero_asid;
39796
39797- atomic_long_t copy_gpa;
39798- atomic_long_t read_gpa;
39799+ atomic_long_unchecked_t copy_gpa;
39800+ atomic_long_unchecked_t read_gpa;
39801
39802- atomic_long_t mesq_receive;
39803- atomic_long_t mesq_receive_none;
39804- atomic_long_t mesq_send;
39805- atomic_long_t mesq_send_failed;
39806- atomic_long_t mesq_noop;
39807- atomic_long_t mesq_send_unexpected_error;
39808- atomic_long_t mesq_send_lb_overflow;
39809- atomic_long_t mesq_send_qlimit_reached;
39810- atomic_long_t mesq_send_amo_nacked;
39811- atomic_long_t mesq_send_put_nacked;
39812- atomic_long_t mesq_page_overflow;
39813- atomic_long_t mesq_qf_locked;
39814- atomic_long_t mesq_qf_noop_not_full;
39815- atomic_long_t mesq_qf_switch_head_failed;
39816- atomic_long_t mesq_qf_unexpected_error;
39817- atomic_long_t mesq_noop_unexpected_error;
39818- atomic_long_t mesq_noop_lb_overflow;
39819- atomic_long_t mesq_noop_qlimit_reached;
39820- atomic_long_t mesq_noop_amo_nacked;
39821- atomic_long_t mesq_noop_put_nacked;
39822- atomic_long_t mesq_noop_page_overflow;
39823+ atomic_long_unchecked_t mesq_receive;
39824+ atomic_long_unchecked_t mesq_receive_none;
39825+ atomic_long_unchecked_t mesq_send;
39826+ atomic_long_unchecked_t mesq_send_failed;
39827+ atomic_long_unchecked_t mesq_noop;
39828+ atomic_long_unchecked_t mesq_send_unexpected_error;
39829+ atomic_long_unchecked_t mesq_send_lb_overflow;
39830+ atomic_long_unchecked_t mesq_send_qlimit_reached;
39831+ atomic_long_unchecked_t mesq_send_amo_nacked;
39832+ atomic_long_unchecked_t mesq_send_put_nacked;
39833+ atomic_long_unchecked_t mesq_page_overflow;
39834+ atomic_long_unchecked_t mesq_qf_locked;
39835+ atomic_long_unchecked_t mesq_qf_noop_not_full;
39836+ atomic_long_unchecked_t mesq_qf_switch_head_failed;
39837+ atomic_long_unchecked_t mesq_qf_unexpected_error;
39838+ atomic_long_unchecked_t mesq_noop_unexpected_error;
39839+ atomic_long_unchecked_t mesq_noop_lb_overflow;
39840+ atomic_long_unchecked_t mesq_noop_qlimit_reached;
39841+ atomic_long_unchecked_t mesq_noop_amo_nacked;
39842+ atomic_long_unchecked_t mesq_noop_put_nacked;
39843+ atomic_long_unchecked_t mesq_noop_page_overflow;
39844
39845 };
39846
39847@@ -251,8 +251,8 @@ enum mcs_op {cchop_allocate, cchop_start, cchop_interrupt, cchop_interrupt_sync,
39848 tghop_invalidate, mcsop_last};
39849
39850 struct mcs_op_statistic {
39851- atomic_long_t count;
39852- atomic_long_t total;
39853+ atomic_long_unchecked_t count;
39854+ atomic_long_unchecked_t total;
39855 unsigned long max;
39856 };
39857
39858@@ -275,7 +275,7 @@ extern struct mcs_op_statistic mcs_op_statistics[mcsop_last];
39859
39860 #define STAT(id) do { \
39861 if (gru_options & OPT_STATS) \
39862- atomic_long_inc(&gru_stats.id); \
39863+ atomic_long_inc_unchecked(&gru_stats.id); \
39864 } while (0)
39865
39866 #ifdef CONFIG_SGI_GRU_DEBUG
39867diff --git a/drivers/misc/sgi-xp/xp.h b/drivers/misc/sgi-xp/xp.h
39868index c862cd4..0d176fe 100644
39869--- a/drivers/misc/sgi-xp/xp.h
39870+++ b/drivers/misc/sgi-xp/xp.h
39871@@ -288,7 +288,7 @@ struct xpc_interface {
39872 xpc_notify_func, void *);
39873 void (*received) (short, int, void *);
39874 enum xp_retval (*partid_to_nasids) (short, void *);
39875-};
39876+} __no_const;
39877
39878 extern struct xpc_interface xpc_interface;
39879
39880diff --git a/drivers/misc/sgi-xp/xpc.h b/drivers/misc/sgi-xp/xpc.h
39881index b94d5f7..7f494c5 100644
39882--- a/drivers/misc/sgi-xp/xpc.h
39883+++ b/drivers/misc/sgi-xp/xpc.h
39884@@ -835,6 +835,7 @@ struct xpc_arch_operations {
39885 void (*received_payload) (struct xpc_channel *, void *);
39886 void (*notify_senders_of_disconnect) (struct xpc_channel *);
39887 };
39888+typedef struct xpc_arch_operations __no_const xpc_arch_operations_no_const;
39889
39890 /* struct xpc_partition act_state values (for XPC HB) */
39891
39892@@ -876,7 +877,7 @@ extern struct xpc_registration xpc_registrations[];
39893 /* found in xpc_main.c */
39894 extern struct device *xpc_part;
39895 extern struct device *xpc_chan;
39896-extern struct xpc_arch_operations xpc_arch_ops;
39897+extern xpc_arch_operations_no_const xpc_arch_ops;
39898 extern int xpc_disengage_timelimit;
39899 extern int xpc_disengage_timedout;
39900 extern int xpc_activate_IRQ_rcvd;
39901diff --git a/drivers/misc/sgi-xp/xpc_main.c b/drivers/misc/sgi-xp/xpc_main.c
39902index d971817..33bdca5 100644
39903--- a/drivers/misc/sgi-xp/xpc_main.c
39904+++ b/drivers/misc/sgi-xp/xpc_main.c
39905@@ -166,7 +166,7 @@ static struct notifier_block xpc_die_notifier = {
39906 .notifier_call = xpc_system_die,
39907 };
39908
39909-struct xpc_arch_operations xpc_arch_ops;
39910+xpc_arch_operations_no_const xpc_arch_ops;
39911
39912 /*
39913 * Timer function to enforce the timelimit on the partition disengage.
39914@@ -1210,7 +1210,7 @@ xpc_system_die(struct notifier_block *nb, unsigned long event, void *_die_args)
39915
39916 if (((die_args->trapnr == X86_TRAP_MF) ||
39917 (die_args->trapnr == X86_TRAP_XF)) &&
39918- !user_mode_vm(die_args->regs))
39919+ !user_mode(die_args->regs))
39920 xpc_die_deactivate();
39921
39922 break;
39923diff --git a/drivers/mmc/core/mmc_ops.c b/drivers/mmc/core/mmc_ops.c
39924index 49f04bc..65660c2 100644
39925--- a/drivers/mmc/core/mmc_ops.c
39926+++ b/drivers/mmc/core/mmc_ops.c
39927@@ -247,7 +247,7 @@ mmc_send_cxd_data(struct mmc_card *card, struct mmc_host *host,
39928 void *data_buf;
39929 int is_on_stack;
39930
39931- is_on_stack = object_is_on_stack(buf);
39932+ is_on_stack = object_starts_on_stack(buf);
39933 if (is_on_stack) {
39934 /*
39935 * dma onto stack is unsafe/nonportable, but callers to this
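
object_starts_on_stack() tightens the DMA-safety test: instead of asking whether the buffer merely overlaps the task stack, it asks whether the object begins there. A crude userspace cousin using the pthread stack bounds (compile with -pthread; pthread_getattr_np() is a GNU extension and this only checks the calling thread):

#define _GNU_SOURCE
#include <pthread.h>
#include <stdio.h>

static int starts_on_stack(const void *obj)
{
    pthread_attr_t attr;
    void *stack;
    size_t size;
    int on = 0;

    if (pthread_getattr_np(pthread_self(), &attr) == 0) {
        if (pthread_attr_getstack(&attr, &stack, &size) == 0)
            on = (const char *)obj >= (const char *)stack &&
                 (const char *)obj < (const char *)stack + size;
        pthread_attr_destroy(&attr);
    }
    return on;
}

int main(void)
{
    char local;
    static char global;

    printf("local  on stack: %d\n", starts_on_stack(&local));   /* 1 */
    printf("global on stack: %d\n", starts_on_stack(&global));  /* 0 */
    return 0;
}
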
39936diff --git a/drivers/mmc/host/dw_mmc.h b/drivers/mmc/host/dw_mmc.h
39937index 0b74189..818358f 100644
39938--- a/drivers/mmc/host/dw_mmc.h
39939+++ b/drivers/mmc/host/dw_mmc.h
39940@@ -202,5 +202,5 @@ struct dw_mci_drv_data {
39941 void (*prepare_command)(struct dw_mci *host, u32 *cmdr);
39942 void (*set_ios)(struct dw_mci *host, struct mmc_ios *ios);
39943 int (*parse_dt)(struct dw_mci *host);
39944-};
39945+} __do_const;
39946 #endif /* _DW_MMC_H_ */
39947diff --git a/drivers/mmc/host/sdhci-s3c.c b/drivers/mmc/host/sdhci-s3c.c
39948index c6f6246..60760a8 100644
39949--- a/drivers/mmc/host/sdhci-s3c.c
39950+++ b/drivers/mmc/host/sdhci-s3c.c
39951@@ -664,9 +664,11 @@ static int sdhci_s3c_probe(struct platform_device *pdev)
39952 * we can use overriding functions instead of default.
39953 */
39954 if (host->quirks & SDHCI_QUIRK_NONSTANDARD_CLOCK) {
39955- sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
39956- sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
39957- sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
39958+ pax_open_kernel();
39959+ *(void **)&sdhci_s3c_ops.set_clock = sdhci_cmu_set_clock;
39960+ *(void **)&sdhci_s3c_ops.get_min_clock = sdhci_cmu_get_min_clock;
39961+ *(void **)&sdhci_s3c_ops.get_max_clock = sdhci_cmu_get_max_clock;
39962+ pax_close_kernel();
39963 }
39964
39965 /* It supports additional host capabilities if needed */
39966diff --git a/drivers/mtd/nand/denali.c b/drivers/mtd/nand/denali.c
39967index 0c8bb6b..6f35deb 100644
39968--- a/drivers/mtd/nand/denali.c
39969+++ b/drivers/mtd/nand/denali.c
39970@@ -24,6 +24,7 @@
39971 #include <linux/slab.h>
39972 #include <linux/mtd/mtd.h>
39973 #include <linux/module.h>
39974+#include <linux/slab.h>
39975
39976 #include "denali.h"
39977
39978diff --git a/drivers/mtd/nftlmount.c b/drivers/mtd/nftlmount.c
39979index 51b9d6a..52af9a7 100644
39980--- a/drivers/mtd/nftlmount.c
39981+++ b/drivers/mtd/nftlmount.c
39982@@ -24,6 +24,7 @@
39983 #include <asm/errno.h>
39984 #include <linux/delay.h>
39985 #include <linux/slab.h>
39986+#include <linux/sched.h>
39987 #include <linux/mtd/mtd.h>
39988 #include <linux/mtd/nand.h>
39989 #include <linux/mtd/nftl.h>
39990diff --git a/drivers/mtd/sm_ftl.c b/drivers/mtd/sm_ftl.c
39991index f9d5615..99dd95f 100644
39992--- a/drivers/mtd/sm_ftl.c
39993+++ b/drivers/mtd/sm_ftl.c
39994@@ -56,7 +56,7 @@ ssize_t sm_attr_show(struct device *dev, struct device_attribute *attr,
39995 #define SM_CIS_VENDOR_OFFSET 0x59
39996 struct attribute_group *sm_create_sysfs_attributes(struct sm_ftl *ftl)
39997 {
39998- struct attribute_group *attr_group;
39999+ attribute_group_no_const *attr_group;
40000 struct attribute **attributes;
40001 struct sm_sysfs_attribute *vendor_attribute;
40002
40003diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
40004index f975696..4597e21 100644
40005--- a/drivers/net/bonding/bond_main.c
40006+++ b/drivers/net/bonding/bond_main.c
40007@@ -4870,7 +4870,7 @@ static unsigned int bond_get_num_tx_queues(void)
40008 return tx_queues;
40009 }
40010
40011-static struct rtnl_link_ops bond_link_ops __read_mostly = {
40012+static struct rtnl_link_ops bond_link_ops = {
40013 .kind = "bond",
40014 .priv_size = sizeof(struct bonding),
40015 .setup = bond_setup,
40016@@ -4995,8 +4995,8 @@ static void __exit bonding_exit(void)
40017
40018 bond_destroy_debugfs();
40019
40020- rtnl_link_unregister(&bond_link_ops);
40021 unregister_pernet_subsys(&bond_net_ops);
40022+ rtnl_link_unregister(&bond_link_ops);
40023
40024 #ifdef CONFIG_NET_POLL_CONTROLLER
40025 /*
40026diff --git a/drivers/net/ethernet/8390/ax88796.c b/drivers/net/ethernet/8390/ax88796.c
40027index e1d2643..7f4133b 100644
40028--- a/drivers/net/ethernet/8390/ax88796.c
40029+++ b/drivers/net/ethernet/8390/ax88796.c
40030@@ -872,9 +872,11 @@ static int ax_probe(struct platform_device *pdev)
40031 if (ax->plat->reg_offsets)
40032 ei_local->reg_offset = ax->plat->reg_offsets;
40033 else {
40034+ resource_size_t _mem_size = mem_size;
40035+ do_div(_mem_size, 0x18);
40036 ei_local->reg_offset = ax->reg_offsets;
40037 for (ret = 0; ret < 0x18; ret++)
40038- ax->reg_offsets[ret] = (mem_size / 0x18) * ret;
40039+ ax->reg_offsets[ret] = _mem_size * ret;
40040 }
40041
40042 if (!request_mem_region(mem->start, mem_size, pdev->name)) {
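
The ax88796 hunk replaces a plain `/` on a resource_size_t, which is 64-bit on many 32-bit configurations and would pull in a libgcc division helper, with do_div(), the kernel's divide-in-place primitive. A stand-in showing its contract, namely that the dividend is updated in place and the remainder is returned (my_do_div is invented here):

#include <stdint.h>
#include <stdio.h>

static uint32_t my_do_div(uint64_t *n, uint32_t base)
{
    uint32_t rem = (uint32_t)(*n % base);
    *n /= base;
    return rem;
}

int main(void)
{
    uint64_t mem_size = 0x1000;          /* the mapped window size */

    my_do_div(&mem_size, 0x18);          /* mem_size becomes size / 0x18 */
    for (int ret = 0; ret < 4; ret++)
        printf("reg_offset[%d] = %llu\n", ret,
               (unsigned long long)(mem_size * ret));
    return 0;
}
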
40043diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
40044index 151675d..0139a9d 100644
40045--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
40046+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.h
40047@@ -1112,7 +1112,7 @@ static inline u8 bnx2x_get_path_func_num(struct bnx2x *bp)
40048 static inline void bnx2x_init_bp_objs(struct bnx2x *bp)
40049 {
40050 /* RX_MODE controlling object */
40051- bnx2x_init_rx_mode_obj(bp, &bp->rx_mode_obj);
40052+ bnx2x_init_rx_mode_obj(bp);
40053
40054 /* multicast configuration controlling object */
40055 bnx2x_init_mcast_obj(bp, &bp->mcast_obj, bp->fp->cl_id, bp->fp->cid,
40056diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
40057index ce1a916..10b52b0 100644
40058--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
40059+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_ethtool.c
40060@@ -960,6 +960,9 @@ static int bnx2x_set_dump(struct net_device *dev, struct ethtool_dump *val)
40061 struct bnx2x *bp = netdev_priv(dev);
40062
40063 /* Use the ethtool_dump "flag" field as the dump preset index */
40064+ if (val->flag < 1 || val->flag > DUMP_MAX_PRESETS)
40065+ return -EINVAL;
40066+
40067 bp->dump_preset_idx = val->flag;
40068 return 0;
40069 }
40070@@ -986,8 +989,6 @@ static int bnx2x_get_dump_data(struct net_device *dev,
40071 struct bnx2x *bp = netdev_priv(dev);
40072 struct dump_header dump_hdr = {0};
40073
40074- memset(p, 0, dump->len);
40075-
40076 /* Disable parity attentions as long as following dump may
40077 * cause false alarms by reading never written registers. We
40078 * will re-enable parity attentions right after the dump.
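
bnx2x_set_dump() now validates the user-supplied preset index before caching it, since the value is consumed later as a preset selector. The generic shape of the check, with DUMP_MAX_PRESETS assumed to mirror the driver's limit:

#include <errno.h>
#include <stdio.h>

#define DUMP_MAX_PRESETS 5     /* assumption: stands in for the driver macro */

static int set_dump_preset(unsigned int *slot, unsigned int flag)
{
    if (flag < 1 || flag > DUMP_MAX_PRESETS)
        return -EINVAL;        /* reject before the value is ever stored */
    *slot = flag;
    return 0;
}

int main(void)
{
    unsigned int idx = 1;

    printf("%d\n", set_dump_preset(&idx, 7));          /* -22: out of range */
    printf("%d %u\n", set_dump_preset(&idx, 3), idx);  /* 0 3: accepted */
    return 0;
}
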
40079diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
40080index b4c9dea..2a9927f 100644
40081--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
40082+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
40083@@ -11497,6 +11497,8 @@ static int bnx2x_init_bp(struct bnx2x *bp)
40084 bp->min_msix_vec_cnt = 2;
40085 BNX2X_DEV_INFO("bp->min_msix_vec_cnt %d", bp->min_msix_vec_cnt);
40086
40087+ bp->dump_preset_idx = 1;
40088+
40089 return rc;
40090 }
40091
40092diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
40093index 32a9609..0b1c53a 100644
40094--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
40095+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.c
40096@@ -2387,15 +2387,14 @@ int bnx2x_config_rx_mode(struct bnx2x *bp,
40097 return rc;
40098 }
40099
40100-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
40101- struct bnx2x_rx_mode_obj *o)
40102+void bnx2x_init_rx_mode_obj(struct bnx2x *bp)
40103 {
40104 if (CHIP_IS_E1x(bp)) {
40105- o->wait_comp = bnx2x_empty_rx_mode_wait;
40106- o->config_rx_mode = bnx2x_set_rx_mode_e1x;
40107+ bp->rx_mode_obj.wait_comp = bnx2x_empty_rx_mode_wait;
40108+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e1x;
40109 } else {
40110- o->wait_comp = bnx2x_wait_rx_mode_comp_e2;
40111- o->config_rx_mode = bnx2x_set_rx_mode_e2;
40112+ bp->rx_mode_obj.wait_comp = bnx2x_wait_rx_mode_comp_e2;
40113+ bp->rx_mode_obj.config_rx_mode = bnx2x_set_rx_mode_e2;
40114 }
40115 }
40116
40117diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
40118index 43c00bc..dd1d03d 100644
40119--- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
40120+++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_sp.h
40121@@ -1321,8 +1321,7 @@ int bnx2x_vlan_mac_move(struct bnx2x *bp,
40122
40123 /********************* RX MODE ****************/
40124
40125-void bnx2x_init_rx_mode_obj(struct bnx2x *bp,
40126- struct bnx2x_rx_mode_obj *o);
40127+void bnx2x_init_rx_mode_obj(struct bnx2x *bp);
40128
40129 /**
40130 * bnx2x_config_rx_mode - Send and RX_MODE ramrod according to the provided parameters.
40131diff --git a/drivers/net/ethernet/broadcom/tg3.h b/drivers/net/ethernet/broadcom/tg3.h
40132index ff6e30e..87e8452 100644
40133--- a/drivers/net/ethernet/broadcom/tg3.h
40134+++ b/drivers/net/ethernet/broadcom/tg3.h
40135@@ -147,6 +147,7 @@
40136 #define CHIPREV_ID_5750_A0 0x4000
40137 #define CHIPREV_ID_5750_A1 0x4001
40138 #define CHIPREV_ID_5750_A3 0x4003
40139+#define CHIPREV_ID_5750_C1 0x4201
40140 #define CHIPREV_ID_5750_C2 0x4202
40141 #define CHIPREV_ID_5752_A0_HW 0x5000
40142 #define CHIPREV_ID_5752_A0 0x6000
40143diff --git a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
40144index 71497e8..b650951 100644
40145--- a/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
40146+++ b/drivers/net/ethernet/chelsio/cxgb3/cxgb3_main.c
40147@@ -3037,7 +3037,9 @@ static void t3_io_resume(struct pci_dev *pdev)
40148 CH_ALERT(adapter, "adapter recovering, PEX ERR 0x%x\n",
40149 t3_read_reg(adapter, A_PCIE_PEX_ERR));
40150
40151+ rtnl_lock();
40152 t3_resume_ports(adapter);
40153+ rtnl_unlock();
40154 }
40155
40156 static const struct pci_error_handlers t3_err_handler = {
40157diff --git a/drivers/net/ethernet/chelsio/cxgb3/l2t.h b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
40158index 8cffcdf..aadf043 100644
40159--- a/drivers/net/ethernet/chelsio/cxgb3/l2t.h
40160+++ b/drivers/net/ethernet/chelsio/cxgb3/l2t.h
40161@@ -87,7 +87,7 @@ typedef void (*arp_failure_handler_func)(struct t3cdev * dev,
40162 */
40163 struct l2t_skb_cb {
40164 arp_failure_handler_func arp_failure_handler;
40165-};
40166+} __no_const;
40167
40168 #define L2T_SKB_CB(skb) ((struct l2t_skb_cb *)(skb)->cb)
40169
40170diff --git a/drivers/net/ethernet/dec/tulip/de4x5.c b/drivers/net/ethernet/dec/tulip/de4x5.c
40171index 4c83003..2a2a5b9 100644
40172--- a/drivers/net/ethernet/dec/tulip/de4x5.c
40173+++ b/drivers/net/ethernet/dec/tulip/de4x5.c
40174@@ -5388,7 +5388,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
40175 for (i=0; i<ETH_ALEN; i++) {
40176 tmp.addr[i] = dev->dev_addr[i];
40177 }
40178- if (copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
40179+ if (ioc->len > sizeof tmp.addr || copy_to_user(ioc->data, tmp.addr, ioc->len)) return -EFAULT;
40180 break;
40181
40182 case DE4X5_SET_HWADDR: /* Set the hardware address */
40183@@ -5428,7 +5428,7 @@ de4x5_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
40184 spin_lock_irqsave(&lp->lock, flags);
40185 memcpy(&statbuf, &lp->pktStats, ioc->len);
40186 spin_unlock_irqrestore(&lp->lock, flags);
40187- if (copy_to_user(ioc->data, &statbuf, ioc->len))
40188+ if (ioc->len > sizeof statbuf || copy_to_user(ioc->data, &statbuf, ioc->len))
40189 return -EFAULT;
40190 break;
40191 }
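
Both de4x5 ioctl fixes close the same hole: ioc->len comes from userspace, but the source objects (tmp.addr, statbuf) are fixed-size kernel buffers, so an oversized length let copy_to_user() leak adjacent stack memory. A sketch of the pattern, with memcpy() standing in for copy_to_user():

#include <stdio.h>
#include <string.h>

static int copy_out(void *dst, size_t len, const unsigned char src[6])
{
    if (len > 6)               /* the fix: refuse user-chosen over-reads */
        return -1;             /* -EFAULT in the driver */
    memcpy(dst, src, len);
    return 0;
}

int main(void)
{
    unsigned char mac[6] = { 0xde, 0xad, 0xbe, 0xef, 0x00, 0x01 };
    unsigned char out[64];

    printf("%d\n", copy_out(out, sizeof(out), mac));   /* rejected */
    printf("%d\n", copy_out(out, sizeof(mac), mac));   /* accepted */
    return 0;
}
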
40192diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c
40193index a0b4be5..d8cb082 100644
40194--- a/drivers/net/ethernet/emulex/benet/be_main.c
40195+++ b/drivers/net/ethernet/emulex/benet/be_main.c
40196@@ -469,7 +469,7 @@ static void accumulate_16bit_val(u32 *acc, u16 val)
40197
40198 if (wrapped)
40199 newacc += 65536;
40200- ACCESS_ONCE(*acc) = newacc;
40201+ ACCESS_ONCE_RW(*acc) = newacc;
40202 }
40203
40204 void populate_erx_stats(struct be_adapter *adapter,
40205diff --git a/drivers/net/ethernet/faraday/ftgmac100.c b/drivers/net/ethernet/faraday/ftgmac100.c
40206index 21b85fb..b49e5fc 100644
40207--- a/drivers/net/ethernet/faraday/ftgmac100.c
40208+++ b/drivers/net/ethernet/faraday/ftgmac100.c
40209@@ -31,6 +31,8 @@
40210 #include <linux/netdevice.h>
40211 #include <linux/phy.h>
40212 #include <linux/platform_device.h>
40213+#include <linux/interrupt.h>
40214+#include <linux/irqreturn.h>
40215 #include <net/ip.h>
40216
40217 #include "ftgmac100.h"
40218diff --git a/drivers/net/ethernet/faraday/ftmac100.c b/drivers/net/ethernet/faraday/ftmac100.c
40219index a6eda8d..935d273 100644
40220--- a/drivers/net/ethernet/faraday/ftmac100.c
40221+++ b/drivers/net/ethernet/faraday/ftmac100.c
40222@@ -31,6 +31,8 @@
40223 #include <linux/module.h>
40224 #include <linux/netdevice.h>
40225 #include <linux/platform_device.h>
40226+#include <linux/interrupt.h>
40227+#include <linux/irqreturn.h>
40228
40229 #include "ftmac100.h"
40230
40231diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
40232index 331987d..3be1135 100644
40233--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
40234+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c
40235@@ -776,7 +776,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter)
40236 }
40237
40238 /* update the base incval used to calculate frequency adjustment */
40239- ACCESS_ONCE(adapter->base_incval) = incval;
40240+ ACCESS_ONCE_RW(adapter->base_incval) = incval;
40241 smp_mb();
40242
40243 /* need lock to prevent incorrect read while modifying cyclecounter */
40244diff --git a/drivers/net/ethernet/neterion/vxge/vxge-config.c b/drivers/net/ethernet/neterion/vxge/vxge-config.c
40245index fbe5363..266b4e3 100644
40246--- a/drivers/net/ethernet/neterion/vxge/vxge-config.c
40247+++ b/drivers/net/ethernet/neterion/vxge/vxge-config.c
40248@@ -3461,7 +3461,10 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
40249 struct __vxge_hw_fifo *fifo;
40250 struct vxge_hw_fifo_config *config;
40251 u32 txdl_size, txdl_per_memblock;
40252- struct vxge_hw_mempool_cbs fifo_mp_callback;
40253+ static struct vxge_hw_mempool_cbs fifo_mp_callback = {
40254+ .item_func_alloc = __vxge_hw_fifo_mempool_item_alloc,
40255+ };
40256+
40257 struct __vxge_hw_virtualpath *vpath;
40258
40259 if ((vp == NULL) || (attr == NULL)) {
40260@@ -3544,8 +3547,6 @@ __vxge_hw_fifo_create(struct __vxge_hw_vpath_handle *vp,
40261 goto exit;
40262 }
40263
40264- fifo_mp_callback.item_func_alloc = __vxge_hw_fifo_mempool_item_alloc;
40265-
40266 fifo->mempool =
40267 __vxge_hw_mempool_create(vpath->hldev,
40268 fifo->config->memblock_size,
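
The vxge change turns an uninitialized on-stack callback table into a static object with a designated initializer. That removes a field assignment at runtime, gives the structure a stable address, and lets the constification machinery move it out of writable memory. The idiom in isolation:

#include <stdio.h>

struct mempool_cbs {
    void (*item_func_alloc)(int item);
};

static void fifo_item_alloc(int item)
{
    printf("allocated item %d\n", item);
}

/* fully initialized at build time; const, so it can live in .rodata */
static const struct mempool_cbs fifo_mp_callback = {
    .item_func_alloc = fifo_item_alloc,
};

int main(void)
{
    fifo_mp_callback.item_func_alloc(3);
    return 0;
}
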
40269diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
40270index 5e7fb1d..f8d1810 100644
40271--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
40272+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_init.c
40273@@ -1948,7 +1948,9 @@ int qlcnic_83xx_config_default_opmode(struct qlcnic_adapter *adapter)
40274 op_mode = QLC_83XX_DEFAULT_OPMODE;
40275
40276 if (op_mode == QLC_83XX_DEFAULT_OPMODE) {
40277- adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
40278+ pax_open_kernel();
40279+ *(void **)&adapter->nic_ops->init_driver = qlcnic_83xx_init_default_driver;
40280+ pax_close_kernel();
40281 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
40282 } else {
40283 return -EIO;
40284diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
40285index b0c3de9..fc5857e 100644
40286--- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
40287+++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_83xx_vnic.c
40288@@ -200,15 +200,21 @@ int qlcnic_83xx_config_vnic_opmode(struct qlcnic_adapter *adapter)
40289 if (priv_level == QLCNIC_NON_PRIV_FUNC) {
40290 ahw->op_mode = QLCNIC_NON_PRIV_FUNC;
40291 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
40292- nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
40293+ pax_open_kernel();
40294+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_non_privileged_vnic;
40295+ pax_close_kernel();
40296 } else if (priv_level == QLCNIC_PRIV_FUNC) {
40297 ahw->op_mode = QLCNIC_PRIV_FUNC;
40298 ahw->idc.state_entry = qlcnic_83xx_idc_vnic_pf_entry;
40299- nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
40300+ pax_open_kernel();
40301+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_privileged_vnic;
40302+ pax_close_kernel();
40303 } else if (priv_level == QLCNIC_MGMT_FUNC) {
40304 ahw->op_mode = QLCNIC_MGMT_FUNC;
40305 ahw->idc.state_entry = qlcnic_83xx_idc_ready_state_entry;
40306- nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
40307+ pax_open_kernel();
40308+ *(void **)&nic_ops->init_driver = qlcnic_83xx_init_mgmt_vnic;
40309+ pax_close_kernel();
40310 } else {
40311 return -EIO;
40312 }
40313diff --git a/drivers/net/ethernet/realtek/r8169.c b/drivers/net/ethernet/realtek/r8169.c
40314index 393f961..d343034 100644
40315--- a/drivers/net/ethernet/realtek/r8169.c
40316+++ b/drivers/net/ethernet/realtek/r8169.c
40317@@ -753,22 +753,22 @@ struct rtl8169_private {
40318 struct mdio_ops {
40319 void (*write)(struct rtl8169_private *, int, int);
40320 int (*read)(struct rtl8169_private *, int);
40321- } mdio_ops;
40322+ } __no_const mdio_ops;
40323
40324 struct pll_power_ops {
40325 void (*down)(struct rtl8169_private *);
40326 void (*up)(struct rtl8169_private *);
40327- } pll_power_ops;
40328+ } __no_const pll_power_ops;
40329
40330 struct jumbo_ops {
40331 void (*enable)(struct rtl8169_private *);
40332 void (*disable)(struct rtl8169_private *);
40333- } jumbo_ops;
40334+ } __no_const jumbo_ops;
40335
40336 struct csi_ops {
40337 void (*write)(struct rtl8169_private *, int, int);
40338 u32 (*read)(struct rtl8169_private *, int);
40339- } csi_ops;
40340+ } __no_const csi_ops;
40341
40342 int (*set_speed)(struct net_device *, u8 aneg, u16 sp, u8 dpx, u32 adv);
40343 int (*get_settings)(struct net_device *, struct ethtool_cmd *);
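
The __no_const annotations opt the r8169 ops tables out of the constify plugin, which otherwise treats structures made of function pointers as read-only. These particular tables are wired up at probe time, and a const-qualified object placed in .rodata would fault on exactly that kind of late assignment. A plain-C illustration of why runtime-patched ops must stay writable (__no_const itself is plugin-provided and has no userspace equivalent):

#include <stdio.h>

struct mdio_ops {
    int (*read)(int reg);
};

static int chip_read(int reg)
{
    return reg * 2;
}

/* deliberately non-const: the driver fills this in at probe time, and a
 * const object in .rodata would fault on the assignment below */
static struct mdio_ops mdio_ops;

int main(void)
{
    mdio_ops.read = chip_read;     /* runtime wiring needs writable storage */
    printf("%d\n", mdio_ops.read(21));
    return 0;
}
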
40344diff --git a/drivers/net/ethernet/sfc/ptp.c b/drivers/net/ethernet/sfc/ptp.c
40345index 9a95abf..36df7f9 100644
40346--- a/drivers/net/ethernet/sfc/ptp.c
40347+++ b/drivers/net/ethernet/sfc/ptp.c
40348@@ -535,7 +535,7 @@ static int efx_ptp_synchronize(struct efx_nic *efx, unsigned int num_readings)
40349 (u32)((u64)ptp->start.dma_addr >> 32));
40350
40351 /* Clear flag that signals MC ready */
40352- ACCESS_ONCE(*start) = 0;
40353+ ACCESS_ONCE_RW(*start) = 0;
40354 efx_mcdi_rpc_start(efx, MC_CMD_PTP, synch_buf,
40355 MC_CMD_PTP_IN_SYNCHRONIZE_LEN);
40356
40357diff --git a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
40358index 50617c5..b13724c 100644
40359--- a/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
40360+++ b/drivers/net/ethernet/stmicro/stmmac/mmc_core.c
40361@@ -140,8 +140,8 @@ void dwmac_mmc_ctrl(void __iomem *ioaddr, unsigned int mode)
40362
40363 writel(value, ioaddr + MMC_CNTRL);
40364
40365- pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
40366- MMC_CNTRL, value);
40367+// pr_debug("stmmac: MMC ctrl register (offset 0x%x): 0x%08x\n",
40368+// MMC_CNTRL, value);
40369 }
40370
40371 /* To mask all all interrupts.*/
40372diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h
40373index e6fe0d8..2b7d752 100644
40374--- a/drivers/net/hyperv/hyperv_net.h
40375+++ b/drivers/net/hyperv/hyperv_net.h
40376@@ -101,7 +101,7 @@ struct rndis_device {
40377
40378 enum rndis_device_state state;
40379 bool link_state;
40380- atomic_t new_req_id;
40381+ atomic_unchecked_t new_req_id;
40382
40383 spinlock_t request_lock;
40384 struct list_head req_list;
40385diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c
40386index 0775f0a..d4fb316 100644
40387--- a/drivers/net/hyperv/rndis_filter.c
40388+++ b/drivers/net/hyperv/rndis_filter.c
40389@@ -104,7 +104,7 @@ static struct rndis_request *get_rndis_request(struct rndis_device *dev,
40390 * template
40391 */
40392 set = &rndis_msg->msg.set_req;
40393- set->req_id = atomic_inc_return(&dev->new_req_id);
40394+ set->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
40395
40396 /* Add to the request list */
40397 spin_lock_irqsave(&dev->request_lock, flags);
40398@@ -752,7 +752,7 @@ static void rndis_filter_halt_device(struct rndis_device *dev)
40399
40400 /* Setup the rndis set */
40401 halt = &request->request_msg.msg.halt_req;
40402- halt->req_id = atomic_inc_return(&dev->new_req_id);
40403+ halt->req_id = atomic_inc_return_unchecked(&dev->new_req_id);
40404
40405 /* Ignore return since this msg is optional. */
40406 rndis_filter_send_request(dev, request);
40407diff --git a/drivers/net/ieee802154/fakehard.c b/drivers/net/ieee802154/fakehard.c
40408index bf0d55e..82bcfbd1 100644
40409--- a/drivers/net/ieee802154/fakehard.c
40410+++ b/drivers/net/ieee802154/fakehard.c
40411@@ -364,7 +364,7 @@ static int ieee802154fake_probe(struct platform_device *pdev)
40412 phy->transmit_power = 0xbf;
40413
40414 dev->netdev_ops = &fake_ops;
40415- dev->ml_priv = &fake_mlme;
40416+ dev->ml_priv = (void *)&fake_mlme;
40417
40418 priv = netdev_priv(dev);
40419 priv->phy = phy;
40420diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
40421index 6e91931..2b0ebe7 100644
40422--- a/drivers/net/macvlan.c
40423+++ b/drivers/net/macvlan.c
40424@@ -905,13 +905,15 @@ static const struct nla_policy macvlan_policy[IFLA_MACVLAN_MAX + 1] = {
40425 int macvlan_link_register(struct rtnl_link_ops *ops)
40426 {
40427 /* common fields */
40428- ops->priv_size = sizeof(struct macvlan_dev);
40429- ops->validate = macvlan_validate;
40430- ops->maxtype = IFLA_MACVLAN_MAX;
40431- ops->policy = macvlan_policy;
40432- ops->changelink = macvlan_changelink;
40433- ops->get_size = macvlan_get_size;
40434- ops->fill_info = macvlan_fill_info;
40435+ pax_open_kernel();
40436+ *(size_t *)&ops->priv_size = sizeof(struct macvlan_dev);
40437+ *(void **)&ops->validate = macvlan_validate;
40438+ *(int *)&ops->maxtype = IFLA_MACVLAN_MAX;
40439+ *(const void **)&ops->policy = macvlan_policy;
40440+ *(void **)&ops->changelink = macvlan_changelink;
40441+ *(void **)&ops->get_size = macvlan_get_size;
40442+ *(void **)&ops->fill_info = macvlan_fill_info;
40443+ pax_close_kernel();
40444
40445 return rtnl_link_register(ops);
40446 };
40447@@ -967,7 +969,7 @@ static int macvlan_device_event(struct notifier_block *unused,
40448 return NOTIFY_DONE;
40449 }
40450
40451-static struct notifier_block macvlan_notifier_block __read_mostly = {
40452+static struct notifier_block macvlan_notifier_block = {
40453 .notifier_call = macvlan_device_event,
40454 };
40455
40456diff --git a/drivers/net/macvtap.c b/drivers/net/macvtap.c
40457index b6dd6a7..5c38a02 100644
40458--- a/drivers/net/macvtap.c
40459+++ b/drivers/net/macvtap.c
40460@@ -1094,7 +1094,7 @@ static int macvtap_device_event(struct notifier_block *unused,
40461 return NOTIFY_DONE;
40462 }
40463
40464-static struct notifier_block macvtap_notifier_block __read_mostly = {
40465+static struct notifier_block macvtap_notifier_block = {
40466 .notifier_call = macvtap_device_event,
40467 };
40468
40469diff --git a/drivers/net/phy/mdio-bitbang.c b/drivers/net/phy/mdio-bitbang.c
40470index daec9b0..6428fcb 100644
40471--- a/drivers/net/phy/mdio-bitbang.c
40472+++ b/drivers/net/phy/mdio-bitbang.c
40473@@ -234,6 +234,7 @@ void free_mdio_bitbang(struct mii_bus *bus)
40474 struct mdiobb_ctrl *ctrl = bus->priv;
40475
40476 module_put(ctrl->ops->owner);
40477+ mdiobus_unregister(bus);
40478 mdiobus_free(bus);
40479 }
40480 EXPORT_SYMBOL(free_mdio_bitbang);
40481diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
40482index 72ff14b..11d442d 100644
40483--- a/drivers/net/ppp/ppp_generic.c
40484+++ b/drivers/net/ppp/ppp_generic.c
40485@@ -999,7 +999,6 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
40486 void __user *addr = (void __user *) ifr->ifr_ifru.ifru_data;
40487 struct ppp_stats stats;
40488 struct ppp_comp_stats cstats;
40489- char *vers;
40490
40491 switch (cmd) {
40492 case SIOCGPPPSTATS:
40493@@ -1021,8 +1020,7 @@ ppp_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
40494 break;
40495
40496 case SIOCGPPPVER:
40497- vers = PPP_VERSION;
40498- if (copy_to_user(addr, vers, strlen(vers) + 1))
40499+ if (copy_to_user(addr, PPP_VERSION, sizeof(PPP_VERSION)))
40500 break;
40501 err = 0;
40502 break;
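
The SIOCGPPPVER hunk drops the non-const vers pointer and the runtime strlen(): PPP_VERSION is a string literal, so sizeof covers the text plus its NUL at compile time. The difference in miniature (the macro value below is an assumption for illustration):

#include <stdio.h>
#include <string.h>

#define PPP_VERSION "2.4.2"

int main(void)
{
    char user_buf[sizeof(PPP_VERSION)];

    /* sizeof is a compile-time constant and includes the trailing NUL,
     * so no strlen(vers) + 1 and no temporary pointer are needed */
    memcpy(user_buf, PPP_VERSION, sizeof(PPP_VERSION));
    printf("%s (%zu bytes)\n", user_buf, sizeof(PPP_VERSION));
    return 0;
}
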
40503diff --git a/drivers/net/slip/slhc.c b/drivers/net/slip/slhc.c
40504index 1252d9c..80e660b 100644
40505--- a/drivers/net/slip/slhc.c
40506+++ b/drivers/net/slip/slhc.c
40507@@ -488,7 +488,7 @@ slhc_uncompress(struct slcompress *comp, unsigned char *icp, int isize)
40508 register struct tcphdr *thp;
40509 register struct iphdr *ip;
40510 register struct cstate *cs;
40511- int len, hdrlen;
40512+ long len, hdrlen;
40513 unsigned char *cp = icp;
40514
40515 /* We've got a compressed packet; read the change byte */
40516diff --git a/drivers/net/team/team.c b/drivers/net/team/team.c
40517index b305105..8ead6df 100644
40518--- a/drivers/net/team/team.c
40519+++ b/drivers/net/team/team.c
40520@@ -2682,7 +2682,7 @@ static int team_device_event(struct notifier_block *unused,
40521 return NOTIFY_DONE;
40522 }
40523
40524-static struct notifier_block team_notifier_block __read_mostly = {
40525+static struct notifier_block team_notifier_block = {
40526 .notifier_call = team_device_event,
40527 };
40528
40529diff --git a/drivers/net/tun.c b/drivers/net/tun.c
40530index 9c61f87..213609e 100644
40531--- a/drivers/net/tun.c
40532+++ b/drivers/net/tun.c
40533@@ -1852,7 +1852,7 @@ unlock:
40534 }
40535
40536 static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
40537- unsigned long arg, int ifreq_len)
40538+ unsigned long arg, size_t ifreq_len)
40539 {
40540 struct tun_file *tfile = file->private_data;
40541 struct tun_struct *tun;
40542@@ -1864,6 +1864,9 @@ static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
40543 int vnet_hdr_sz;
40544 int ret;
40545
40546+ if (ifreq_len > sizeof ifr)
40547+ return -EFAULT;
40548+
40549 if (cmd == TUNSETIFF || cmd == TUNSETQUEUE || _IOC_TYPE(cmd) == 0x89) {
40550 if (copy_from_user(&ifr, argp, ifreq_len))
40551 return -EFAULT;
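
The tun hunk is the mirror image of the de4x5 fix further up: here the length flows into copy_from_user(), so an oversized ifreq_len would overflow the on-stack ifr instead of leaking it, and widening the parameter to size_t keeps a negative int from slipping past the comparison. In outline:

#include <stdio.h>
#include <string.h>

struct ifreq_like { char data[40]; };   /* stand-in for struct ifreq */

static int read_req(struct ifreq_like *ifr, const void *src, size_t len)
{
    if (len > sizeof(*ifr))
        return -1;                      /* -EFAULT in the driver */
    memcpy(ifr, src, len);              /* copy_from_user() stand-in */
    return 0;
}

int main(void)
{
    struct ifreq_like ifr;
    char request[64] = "oversized compat request";

    printf("%d\n", read_req(&ifr, request, sizeof(request)));  /* rejected */
    printf("%d\n", read_req(&ifr, request, 40));               /* accepted */
    return 0;
}
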
40552diff --git a/drivers/net/usb/hso.c b/drivers/net/usb/hso.c
40553index cba1d46..f703766 100644
40554--- a/drivers/net/usb/hso.c
40555+++ b/drivers/net/usb/hso.c
40556@@ -71,7 +71,7 @@
40557 #include <asm/byteorder.h>
40558 #include <linux/serial_core.h>
40559 #include <linux/serial.h>
40560-
40561+#include <asm/local.h>
40562
40563 #define MOD_AUTHOR "Option Wireless"
40564 #define MOD_DESCRIPTION "USB High Speed Option driver"
40565@@ -1180,7 +1180,7 @@ static void put_rxbuf_data_and_resubmit_ctrl_urb(struct hso_serial *serial)
40566 struct urb *urb;
40567
40568 urb = serial->rx_urb[0];
40569- if (serial->port.count > 0) {
40570+ if (atomic_read(&serial->port.count) > 0) {
40571 count = put_rxbuf_data(urb, serial);
40572 if (count == -1)
40573 return;
40574@@ -1216,7 +1216,7 @@ static void hso_std_serial_read_bulk_callback(struct urb *urb)
40575 DUMP1(urb->transfer_buffer, urb->actual_length);
40576
40577 /* Anyone listening? */
40578- if (serial->port.count == 0)
40579+ if (atomic_read(&serial->port.count) == 0)
40580 return;
40581
40582 if (status == 0) {
40583@@ -1298,8 +1298,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
40584 tty_port_tty_set(&serial->port, tty);
40585
40586 /* check for port already opened, if not set the termios */
40587- serial->port.count++;
40588- if (serial->port.count == 1) {
40589+ if (atomic_inc_return(&serial->port.count) == 1) {
40590 serial->rx_state = RX_IDLE;
40591 /* Force default termio settings */
40592 _hso_serial_set_termios(tty, NULL);
40593@@ -1311,7 +1310,7 @@ static int hso_serial_open(struct tty_struct *tty, struct file *filp)
40594 result = hso_start_serial_device(serial->parent, GFP_KERNEL);
40595 if (result) {
40596 hso_stop_serial_device(serial->parent);
40597- serial->port.count--;
40598+ atomic_dec(&serial->port.count);
40599 kref_put(&serial->parent->ref, hso_serial_ref_free);
40600 }
40601 } else {
40602@@ -1348,10 +1347,10 @@ static void hso_serial_close(struct tty_struct *tty, struct file *filp)
40603
40604 /* reset the rts and dtr */
40605 /* do the actual close */
40606- serial->port.count--;
40607+ atomic_dec(&serial->port.count);
40608
40609- if (serial->port.count <= 0) {
40610- serial->port.count = 0;
40611+ if (atomic_read(&serial->port.count) <= 0) {
40612+ atomic_set(&serial->port.count, 0);
40613 tty_port_tty_set(&serial->port, NULL);
40614 if (!usb_gone)
40615 hso_stop_serial_device(serial->parent);
40616@@ -1427,7 +1426,7 @@ static void hso_serial_set_termios(struct tty_struct *tty, struct ktermios *old)
40617
40618 /* the actual setup */
40619 spin_lock_irqsave(&serial->serial_lock, flags);
40620- if (serial->port.count)
40621+ if (atomic_read(&serial->port.count))
40622 _hso_serial_set_termios(tty, old);
40623 else
40624 tty->termios = *old;
40625@@ -1886,7 +1885,7 @@ static void intr_callback(struct urb *urb)
40626 D1("Pending read interrupt on port %d\n", i);
40627 spin_lock(&serial->serial_lock);
40628 if (serial->rx_state == RX_IDLE &&
40629- serial->port.count > 0) {
40630+ atomic_read(&serial->port.count) > 0) {
40631 /* Setup and send a ctrl req read on
40632 * port i */
40633 if (!serial->rx_urb_filled[0]) {
40634@@ -3057,7 +3056,7 @@ static int hso_resume(struct usb_interface *iface)
40635 /* Start all serial ports */
40636 for (i = 0; i < HSO_SERIAL_TTY_MINORS; i++) {
40637 if (serial_table[i] && (serial_table[i]->interface == iface)) {
40638- if (dev2ser(serial_table[i])->port.count) {
40639+ if (atomic_read(&dev2ser(serial_table[i])->port.count)) {
40640 result =
40641 hso_start_serial_device(serial_table[i], GFP_NOIO);
40642 hso_kick_transmit(dev2ser(serial_table[i]));
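
The hso conversion makes port.count atomic because it is read from URB-completion and interrupt paths without the serial lock, so the bare increments and tests raced. The open/close discipline the patched code follows, in a compact C11 sketch:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int port_count;

static void port_open(void)
{
    /* atomic_inc_return(&count) == 1 marks the first opener */
    if (atomic_fetch_add(&port_count, 1) + 1 == 1)
        printf("first open: start the device\n");
}

static void port_close(void)
{
    /* atomic_dec() followed by a <= 0 test marks the last closer */
    if (atomic_fetch_sub(&port_count, 1) - 1 <= 0) {
        atomic_store(&port_count, 0);
        printf("last close: stop the device\n");
    }
}

int main(void)
{
    port_open();
    port_open();    /* second opener: no device setup */
    port_close();
    port_close();   /* last close triggers teardown */
    return 0;
}
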
40643diff --git a/drivers/net/vxlan.c b/drivers/net/vxlan.c
40644index 57325f3..36b181f 100644
40645--- a/drivers/net/vxlan.c
40646+++ b/drivers/net/vxlan.c
40647@@ -1579,7 +1579,7 @@ nla_put_failure:
40648 return -EMSGSIZE;
40649 }
40650
40651-static struct rtnl_link_ops vxlan_link_ops __read_mostly = {
40652+static struct rtnl_link_ops vxlan_link_ops = {
40653 .kind = "vxlan",
40654 .maxtype = IFLA_VXLAN_MAX,
40655 .policy = vxlan_policy,
40656diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
40657index 34c8a33..3261fdc 100644
40658--- a/drivers/net/wireless/at76c50x-usb.c
40659+++ b/drivers/net/wireless/at76c50x-usb.c
40660@@ -353,7 +353,7 @@ static int at76_dfu_get_state(struct usb_device *udev, u8 *state)
40661 }
40662
40663 /* Convert timeout from the DFU status to jiffies */
40664-static inline unsigned long at76_get_timeout(struct dfu_status *s)
40665+static inline unsigned long __intentional_overflow(-1) at76_get_timeout(struct dfu_status *s)
40666 {
40667 return msecs_to_jiffies((s->poll_timeout[2] << 16)
40668 | (s->poll_timeout[1] << 8)
40669diff --git a/drivers/net/wireless/ath/ath9k/ar9002_mac.c b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
40670index 8d78253..bebbb68 100644
40671--- a/drivers/net/wireless/ath/ath9k/ar9002_mac.c
40672+++ b/drivers/net/wireless/ath/ath9k/ar9002_mac.c
40673@@ -184,8 +184,8 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
40674 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
40675 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
40676
40677- ACCESS_ONCE(ads->ds_link) = i->link;
40678- ACCESS_ONCE(ads->ds_data) = i->buf_addr[0];
40679+ ACCESS_ONCE_RW(ads->ds_link) = i->link;
40680+ ACCESS_ONCE_RW(ads->ds_data) = i->buf_addr[0];
40681
40682 ctl1 = i->buf_len[0] | (i->is_last ? 0 : AR_TxMore);
40683 ctl6 = SM(i->keytype, AR_EncrType);
40684@@ -199,26 +199,26 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
40685
40686 if ((i->is_first || i->is_last) &&
40687 i->aggr != AGGR_BUF_MIDDLE && i->aggr != AGGR_BUF_LAST) {
40688- ACCESS_ONCE(ads->ds_ctl2) = set11nTries(i->rates, 0)
40689+ ACCESS_ONCE_RW(ads->ds_ctl2) = set11nTries(i->rates, 0)
40690 | set11nTries(i->rates, 1)
40691 | set11nTries(i->rates, 2)
40692 | set11nTries(i->rates, 3)
40693 | (i->dur_update ? AR_DurUpdateEna : 0)
40694 | SM(0, AR_BurstDur);
40695
40696- ACCESS_ONCE(ads->ds_ctl3) = set11nRate(i->rates, 0)
40697+ ACCESS_ONCE_RW(ads->ds_ctl3) = set11nRate(i->rates, 0)
40698 | set11nRate(i->rates, 1)
40699 | set11nRate(i->rates, 2)
40700 | set11nRate(i->rates, 3);
40701 } else {
40702- ACCESS_ONCE(ads->ds_ctl2) = 0;
40703- ACCESS_ONCE(ads->ds_ctl3) = 0;
40704+ ACCESS_ONCE_RW(ads->ds_ctl2) = 0;
40705+ ACCESS_ONCE_RW(ads->ds_ctl3) = 0;
40706 }
40707
40708 if (!i->is_first) {
40709- ACCESS_ONCE(ads->ds_ctl0) = 0;
40710- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
40711- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
40712+ ACCESS_ONCE_RW(ads->ds_ctl0) = 0;
40713+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
40714+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
40715 return;
40716 }
40717
40718@@ -243,7 +243,7 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
40719 break;
40720 }
40721
40722- ACCESS_ONCE(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
40723+ ACCESS_ONCE_RW(ads->ds_ctl0) = (i->pkt_len & AR_FrameLen)
40724 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
40725 | SM(i->txpower, AR_XmitPower)
40726 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
40727@@ -253,19 +253,19 @@ ar9002_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
40728 | (i->flags & ATH9K_TXDESC_RTSENA ? AR_RTSEnable :
40729 (i->flags & ATH9K_TXDESC_CTSENA ? AR_CTSEnable : 0));
40730
40731- ACCESS_ONCE(ads->ds_ctl1) = ctl1;
40732- ACCESS_ONCE(ads->ds_ctl6) = ctl6;
40733+ ACCESS_ONCE_RW(ads->ds_ctl1) = ctl1;
40734+ ACCESS_ONCE_RW(ads->ds_ctl6) = ctl6;
40735
40736 if (i->aggr == AGGR_BUF_MIDDLE || i->aggr == AGGR_BUF_LAST)
40737 return;
40738
40739- ACCESS_ONCE(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
40740+ ACCESS_ONCE_RW(ads->ds_ctl4) = set11nPktDurRTSCTS(i->rates, 0)
40741 | set11nPktDurRTSCTS(i->rates, 1);
40742
40743- ACCESS_ONCE(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
40744+ ACCESS_ONCE_RW(ads->ds_ctl5) = set11nPktDurRTSCTS(i->rates, 2)
40745 | set11nPktDurRTSCTS(i->rates, 3);
40746
40747- ACCESS_ONCE(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
40748+ ACCESS_ONCE_RW(ads->ds_ctl7) = set11nRateFlags(i->rates, 0)
40749 | set11nRateFlags(i->rates, 1)
40750 | set11nRateFlags(i->rates, 2)
40751 | set11nRateFlags(i->rates, 3)
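
Under PaX's constification, ACCESS_ONCE() becomes const-qualified and can only read, while ACCESS_ONCE_RW() is the write-capable variant; the ath9k descriptor setup above writes through these fields, hence the wholesale conversion. A compilable userspace sketch of the distinction (the macro bodies are an assumption modeled on the patch's intent, not copied from it):

#include <stdio.h>

/* Assumed shapes: read-only vs read-write volatile access. */
#define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
#define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

int main(void)
{
    unsigned int ds_link = 0;

    ACCESS_ONCE_RW(ds_link) = 0xdeadbeefu;  /* writes must use the _RW form */
    printf("0x%x\n", ACCESS_ONCE(ds_link)); /* reads work through either    */
    /* ACCESS_ONCE(ds_link) = 0; would fail to compile: the lvalue is
     * const-qualified, which is exactly the protection being enforced.    */
    return 0;
}
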
40752diff --git a/drivers/net/wireless/ath/ath9k/ar9003_mac.c b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
40753index 301bf72..3f5654f 100644
40754--- a/drivers/net/wireless/ath/ath9k/ar9003_mac.c
40755+++ b/drivers/net/wireless/ath/ath9k/ar9003_mac.c
40756@@ -39,47 +39,47 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
40757 (i->qcu << AR_TxQcuNum_S) | desc_len;
40758
40759 checksum += val;
40760- ACCESS_ONCE(ads->info) = val;
40761+ ACCESS_ONCE_RW(ads->info) = val;
40762
40763 checksum += i->link;
40764- ACCESS_ONCE(ads->link) = i->link;
40765+ ACCESS_ONCE_RW(ads->link) = i->link;
40766
40767 checksum += i->buf_addr[0];
40768- ACCESS_ONCE(ads->data0) = i->buf_addr[0];
40769+ ACCESS_ONCE_RW(ads->data0) = i->buf_addr[0];
40770 checksum += i->buf_addr[1];
40771- ACCESS_ONCE(ads->data1) = i->buf_addr[1];
40772+ ACCESS_ONCE_RW(ads->data1) = i->buf_addr[1];
40773 checksum += i->buf_addr[2];
40774- ACCESS_ONCE(ads->data2) = i->buf_addr[2];
40775+ ACCESS_ONCE_RW(ads->data2) = i->buf_addr[2];
40776 checksum += i->buf_addr[3];
40777- ACCESS_ONCE(ads->data3) = i->buf_addr[3];
40778+ ACCESS_ONCE_RW(ads->data3) = i->buf_addr[3];
40779
40780 checksum += (val = (i->buf_len[0] << AR_BufLen_S) & AR_BufLen);
40781- ACCESS_ONCE(ads->ctl3) = val;
40782+ ACCESS_ONCE_RW(ads->ctl3) = val;
40783 checksum += (val = (i->buf_len[1] << AR_BufLen_S) & AR_BufLen);
40784- ACCESS_ONCE(ads->ctl5) = val;
40785+ ACCESS_ONCE_RW(ads->ctl5) = val;
40786 checksum += (val = (i->buf_len[2] << AR_BufLen_S) & AR_BufLen);
40787- ACCESS_ONCE(ads->ctl7) = val;
40788+ ACCESS_ONCE_RW(ads->ctl7) = val;
40789 checksum += (val = (i->buf_len[3] << AR_BufLen_S) & AR_BufLen);
40790- ACCESS_ONCE(ads->ctl9) = val;
40791+ ACCESS_ONCE_RW(ads->ctl9) = val;
40792
40793 checksum = (u16) (((checksum & 0xffff) + (checksum >> 16)) & 0xffff);
40794- ACCESS_ONCE(ads->ctl10) = checksum;
40795+ ACCESS_ONCE_RW(ads->ctl10) = checksum;
40796
40797 if (i->is_first || i->is_last) {
40798- ACCESS_ONCE(ads->ctl13) = set11nTries(i->rates, 0)
40799+ ACCESS_ONCE_RW(ads->ctl13) = set11nTries(i->rates, 0)
40800 | set11nTries(i->rates, 1)
40801 | set11nTries(i->rates, 2)
40802 | set11nTries(i->rates, 3)
40803 | (i->dur_update ? AR_DurUpdateEna : 0)
40804 | SM(0, AR_BurstDur);
40805
40806- ACCESS_ONCE(ads->ctl14) = set11nRate(i->rates, 0)
40807+ ACCESS_ONCE_RW(ads->ctl14) = set11nRate(i->rates, 0)
40808 | set11nRate(i->rates, 1)
40809 | set11nRate(i->rates, 2)
40810 | set11nRate(i->rates, 3);
40811 } else {
40812- ACCESS_ONCE(ads->ctl13) = 0;
40813- ACCESS_ONCE(ads->ctl14) = 0;
40814+ ACCESS_ONCE_RW(ads->ctl13) = 0;
40815+ ACCESS_ONCE_RW(ads->ctl14) = 0;
40816 }
40817
40818 ads->ctl20 = 0;
40819@@ -89,17 +89,17 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
40820
40821 ctl17 = SM(i->keytype, AR_EncrType);
40822 if (!i->is_first) {
40823- ACCESS_ONCE(ads->ctl11) = 0;
40824- ACCESS_ONCE(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
40825- ACCESS_ONCE(ads->ctl15) = 0;
40826- ACCESS_ONCE(ads->ctl16) = 0;
40827- ACCESS_ONCE(ads->ctl17) = ctl17;
40828- ACCESS_ONCE(ads->ctl18) = 0;
40829- ACCESS_ONCE(ads->ctl19) = 0;
40830+ ACCESS_ONCE_RW(ads->ctl11) = 0;
40831+ ACCESS_ONCE_RW(ads->ctl12) = i->is_last ? 0 : AR_TxMore;
40832+ ACCESS_ONCE_RW(ads->ctl15) = 0;
40833+ ACCESS_ONCE_RW(ads->ctl16) = 0;
40834+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
40835+ ACCESS_ONCE_RW(ads->ctl18) = 0;
40836+ ACCESS_ONCE_RW(ads->ctl19) = 0;
40837 return;
40838 }
40839
40840- ACCESS_ONCE(ads->ctl11) = (i->pkt_len & AR_FrameLen)
40841+ ACCESS_ONCE_RW(ads->ctl11) = (i->pkt_len & AR_FrameLen)
40842 | (i->flags & ATH9K_TXDESC_VMF ? AR_VirtMoreFrag : 0)
40843 | SM(i->txpower, AR_XmitPower)
40844 | (i->flags & ATH9K_TXDESC_VEOL ? AR_VEOL : 0)
40845@@ -135,22 +135,22 @@ ar9003_set_txdesc(struct ath_hw *ah, void *ds, struct ath_tx_info *i)
40846 val = (i->flags & ATH9K_TXDESC_PAPRD) >> ATH9K_TXDESC_PAPRD_S;
40847 ctl12 |= SM(val, AR_PAPRDChainMask);
40848
40849- ACCESS_ONCE(ads->ctl12) = ctl12;
40850- ACCESS_ONCE(ads->ctl17) = ctl17;
40851+ ACCESS_ONCE_RW(ads->ctl12) = ctl12;
40852+ ACCESS_ONCE_RW(ads->ctl17) = ctl17;
40853
40854- ACCESS_ONCE(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
40855+ ACCESS_ONCE_RW(ads->ctl15) = set11nPktDurRTSCTS(i->rates, 0)
40856 | set11nPktDurRTSCTS(i->rates, 1);
40857
40858- ACCESS_ONCE(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
40859+ ACCESS_ONCE_RW(ads->ctl16) = set11nPktDurRTSCTS(i->rates, 2)
40860 | set11nPktDurRTSCTS(i->rates, 3);
40861
40862- ACCESS_ONCE(ads->ctl18) = set11nRateFlags(i->rates, 0)
40863+ ACCESS_ONCE_RW(ads->ctl18) = set11nRateFlags(i->rates, 0)
40864 | set11nRateFlags(i->rates, 1)
40865 | set11nRateFlags(i->rates, 2)
40866 | set11nRateFlags(i->rates, 3)
40867 | SM(i->rtscts_rate, AR_RTSCTSRate);
40868
40869- ACCESS_ONCE(ads->ctl19) = AR_Not_Sounding;
40870+ ACCESS_ONCE_RW(ads->ctl19) = AR_Not_Sounding;
40871 }
40872
40873 static u16 ar9003_calc_ptr_chksum(struct ar9003_txc *ads)
40874diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
40875index ae30343..a117806 100644
40876--- a/drivers/net/wireless/ath/ath9k/hw.h
40877+++ b/drivers/net/wireless/ath/ath9k/hw.h
40878@@ -652,7 +652,7 @@ struct ath_hw_private_ops {
40879
40880 /* ANI */
40881 void (*ani_cache_ini_regs)(struct ath_hw *ah);
40882-};
40883+} __no_const;
40884
40885 /**
40886 * struct ath_spec_scan - parameters for Atheros spectral scan
40887@@ -721,7 +721,7 @@ struct ath_hw_ops {
40888 struct ath_spec_scan *param);
40889 void (*spectral_scan_trigger)(struct ath_hw *ah);
40890 void (*spectral_scan_wait)(struct ath_hw *ah);
40891-};
40892+} __no_const;
40893
40894 struct ath_nf_limits {
40895 s16 max;
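
The __no_const annotation exists because grsecurity's constify GCC plugin implicitly makes ops-style structures (those consisting of function pointers) const; structures such as ath_hw_private_ops, whose members are legitimately reassigned at runtime, must opt out. A small illustration of the trade-off, with the plugin's effect approximated by hand:

#include <stdio.h>

struct ops { void (*handler)(void); };

static void a(void) { puts("a"); }
static void b(void) { puts("b"); }

/* What constify would do by default: the table lands in .rodata. */
static const struct ops fixed_ops = { .handler = a };

/* What __no_const preserves: a writable table whose members the
 * driver swaps at runtime, as ath9k does with its ANI hooks.      */
static struct ops live_ops = { .handler = a };

int main(void)
{
    fixed_ops.handler();
    live_ops.handler = b;   /* legal only because live_ops is non-const */
    live_ops.handler();
    return 0;
}
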
40896diff --git a/drivers/net/wireless/iwlegacy/3945-mac.c b/drivers/net/wireless/iwlegacy/3945-mac.c
40897index b37a582..680835d 100644
40898--- a/drivers/net/wireless/iwlegacy/3945-mac.c
40899+++ b/drivers/net/wireless/iwlegacy/3945-mac.c
40900@@ -3639,7 +3639,9 @@ il3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
40901 */
40902 if (il3945_mod_params.disable_hw_scan) {
40903 D_INFO("Disabling hw_scan\n");
40904- il3945_mac_ops.hw_scan = NULL;
40905+ pax_open_kernel();
40906+ *(void **)&il3945_mac_ops.hw_scan = NULL;
40907+ pax_close_kernel();
40908 }
40909
40910 D_INFO("*** LOAD DRIVER ***\n");
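
pax_open_kernel()/pax_close_kernel() bracket writes to data that KERNEXEC keeps read-only; with il3945_mac_ops constified, clearing hw_scan needs the window opened first, and the *(void **)& cast strips the implied const. A userspace analogue of the bracketing, using mprotect() in place of the kernel mechanism (the analogy is ours, not the in-kernel implementation):

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
    long pagesz = sysconf(_SC_PAGESIZE);
    char *page = mmap(NULL, pagesz, PROT_READ | PROT_WRITE,
                      MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (page == MAP_FAILED)
        return 1;
    strcpy(page, "ops table");
    mprotect(page, pagesz, PROT_READ);              /* "constified" state */

    mprotect(page, pagesz, PROT_READ | PROT_WRITE); /* pax_open_kernel()  */
    page[0] = 'O';                                  /* the one-off write  */
    mprotect(page, pagesz, PROT_READ);              /* pax_close_kernel() */

    puts(page);
    return 0;
}
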
40911diff --git a/drivers/net/wireless/iwlwifi/dvm/debugfs.c b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
40912index d532948..a1cb592 100644
40913--- a/drivers/net/wireless/iwlwifi/dvm/debugfs.c
40914+++ b/drivers/net/wireless/iwlwifi/dvm/debugfs.c
40915@@ -203,7 +203,7 @@ static ssize_t iwl_dbgfs_sram_write(struct file *file,
40916 {
40917 struct iwl_priv *priv = file->private_data;
40918 char buf[64];
40919- int buf_size;
40920+ size_t buf_size;
40921 u32 offset, len;
40922
40923 memset(buf, 0, sizeof(buf));
40924@@ -473,7 +473,7 @@ static ssize_t iwl_dbgfs_rx_handlers_write(struct file *file,
40925 struct iwl_priv *priv = file->private_data;
40926
40927 char buf[8];
40928- int buf_size;
40929+ size_t buf_size;
40930 u32 reset_flag;
40931
40932 memset(buf, 0, sizeof(buf));
40933@@ -554,7 +554,7 @@ static ssize_t iwl_dbgfs_disable_ht40_write(struct file *file,
40934 {
40935 struct iwl_priv *priv = file->private_data;
40936 char buf[8];
40937- int buf_size;
40938+ size_t buf_size;
40939 int ht40;
40940
40941 memset(buf, 0, sizeof(buf));
40942@@ -606,7 +606,7 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
40943 {
40944 struct iwl_priv *priv = file->private_data;
40945 char buf[8];
40946- int buf_size;
40947+ size_t buf_size;
40948 int value;
40949
40950 memset(buf, 0, sizeof(buf));
40951@@ -1871,7 +1871,7 @@ static ssize_t iwl_dbgfs_clear_ucode_statistics_write(struct file *file,
40952 {
40953 struct iwl_priv *priv = file->private_data;
40954 char buf[8];
40955- int buf_size;
40956+ size_t buf_size;
40957 int clear;
40958
40959 memset(buf, 0, sizeof(buf));
40960@@ -1916,7 +1916,7 @@ static ssize_t iwl_dbgfs_ucode_tracing_write(struct file *file,
40961 {
40962 struct iwl_priv *priv = file->private_data;
40963 char buf[8];
40964- int buf_size;
40965+ size_t buf_size;
40966 int trace;
40967
40968 memset(buf, 0, sizeof(buf));
40969@@ -1987,7 +1987,7 @@ static ssize_t iwl_dbgfs_missed_beacon_write(struct file *file,
40970 {
40971 struct iwl_priv *priv = file->private_data;
40972 char buf[8];
40973- int buf_size;
40974+ size_t buf_size;
40975 int missed;
40976
40977 memset(buf, 0, sizeof(buf));
40978@@ -2028,7 +2028,7 @@ static ssize_t iwl_dbgfs_plcp_delta_write(struct file *file,
40979
40980 struct iwl_priv *priv = file->private_data;
40981 char buf[8];
40982- int buf_size;
40983+ size_t buf_size;
40984 int plcp;
40985
40986 memset(buf, 0, sizeof(buf));
40987@@ -2088,7 +2088,7 @@ static ssize_t iwl_dbgfs_txfifo_flush_write(struct file *file,
40988
40989 struct iwl_priv *priv = file->private_data;
40990 char buf[8];
40991- int buf_size;
40992+ size_t buf_size;
40993 int flush;
40994
40995 memset(buf, 0, sizeof(buf));
40996@@ -2178,7 +2178,7 @@ static ssize_t iwl_dbgfs_protection_mode_write(struct file *file,
40997
40998 struct iwl_priv *priv = file->private_data;
40999 char buf[8];
41000- int buf_size;
41001+ size_t buf_size;
41002 int rts;
41003
41004 if (!priv->cfg->ht_params)
41005@@ -2220,7 +2220,7 @@ static ssize_t iwl_dbgfs_echo_test_write(struct file *file,
41006 {
41007 struct iwl_priv *priv = file->private_data;
41008 char buf[8];
41009- int buf_size;
41010+ size_t buf_size;
41011
41012 memset(buf, 0, sizeof(buf));
41013 buf_size = min(count, sizeof(buf) - 1);
41014@@ -2254,7 +2254,7 @@ static ssize_t iwl_dbgfs_log_event_write(struct file *file,
41015 struct iwl_priv *priv = file->private_data;
41016 u32 event_log_flag;
41017 char buf[8];
41018- int buf_size;
41019+ size_t buf_size;
41020
41021 /* check that the interface is up */
41022 if (!iwl_is_ready(priv))
41023@@ -2308,7 +2308,7 @@ static ssize_t iwl_dbgfs_calib_disabled_write(struct file *file,
41024 struct iwl_priv *priv = file->private_data;
41025 char buf[8];
41026 u32 calib_disabled;
41027- int buf_size;
41028+ size_t buf_size;
41029
41030 memset(buf, 0, sizeof(buf));
41031 buf_size = min(count, sizeof(buf) - 1);
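
Every one of these debugfs write handlers computes buf_size = min(count, sizeof(buf) - 1), where count is a size_t; declaring buf_size as size_t keeps the whole computation unsigned and avoids the classic signed/unsigned mix, where a negative int silently converts to a huge unsigned value when it reaches a size_t parameter. A short demonstration (values are illustrative):

#include <stdio.h>

int main(void)
{
    char buf[8];
    size_t count = 100;                          /* length from userspace */

    size_t buf_size = count < sizeof(buf) - 1    /* the patched form      */
                    ? count : sizeof(buf) - 1;
    printf("clamped to %zu\n", buf_size);        /* 7                     */

    int risky = -1;                              /* what int would permit */
    printf("same value as size_t: %zu\n", (size_t)risky); /* SIZE_MAX     */
    return 0;
}
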
41032diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
41033index 50ba0a4..29424e7 100644
41034--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
41035+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
41036@@ -1329,7 +1329,7 @@ static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
41037 struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
41038
41039 char buf[8];
41040- int buf_size;
41041+ size_t buf_size;
41042 u32 reset_flag;
41043
41044 memset(buf, 0, sizeof(buf));
41045@@ -1350,7 +1350,7 @@ static ssize_t iwl_dbgfs_csr_write(struct file *file,
41046 {
41047 struct iwl_trans *trans = file->private_data;
41048 char buf[8];
41049- int buf_size;
41050+ size_t buf_size;
41051 int csr;
41052
41053 memset(buf, 0, sizeof(buf));
41054diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
41055index cb34c78..9fec0dc 100644
41056--- a/drivers/net/wireless/mac80211_hwsim.c
41057+++ b/drivers/net/wireless/mac80211_hwsim.c
41058@@ -2195,25 +2195,19 @@ static int __init init_mac80211_hwsim(void)
41059
41060 if (channels > 1) {
41061 hwsim_if_comb.num_different_channels = channels;
41062- mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
41063- mac80211_hwsim_ops.cancel_hw_scan =
41064- mac80211_hwsim_cancel_hw_scan;
41065- mac80211_hwsim_ops.sw_scan_start = NULL;
41066- mac80211_hwsim_ops.sw_scan_complete = NULL;
41067- mac80211_hwsim_ops.remain_on_channel =
41068- mac80211_hwsim_roc;
41069- mac80211_hwsim_ops.cancel_remain_on_channel =
41070- mac80211_hwsim_croc;
41071- mac80211_hwsim_ops.add_chanctx =
41072- mac80211_hwsim_add_chanctx;
41073- mac80211_hwsim_ops.remove_chanctx =
41074- mac80211_hwsim_remove_chanctx;
41075- mac80211_hwsim_ops.change_chanctx =
41076- mac80211_hwsim_change_chanctx;
41077- mac80211_hwsim_ops.assign_vif_chanctx =
41078- mac80211_hwsim_assign_vif_chanctx;
41079- mac80211_hwsim_ops.unassign_vif_chanctx =
41080- mac80211_hwsim_unassign_vif_chanctx;
41081+ pax_open_kernel();
41082+ *(void **)&mac80211_hwsim_ops.hw_scan = mac80211_hwsim_hw_scan;
41083+ *(void **)&mac80211_hwsim_ops.cancel_hw_scan = mac80211_hwsim_cancel_hw_scan;
41084+ *(void **)&mac80211_hwsim_ops.sw_scan_start = NULL;
41085+ *(void **)&mac80211_hwsim_ops.sw_scan_complete = NULL;
41086+ *(void **)&mac80211_hwsim_ops.remain_on_channel = mac80211_hwsim_roc;
41087+ *(void **)&mac80211_hwsim_ops.cancel_remain_on_channel = mac80211_hwsim_croc;
41088+ *(void **)&mac80211_hwsim_ops.add_chanctx = mac80211_hwsim_add_chanctx;
41089+ *(void **)&mac80211_hwsim_ops.remove_chanctx = mac80211_hwsim_remove_chanctx;
41090+ *(void **)&mac80211_hwsim_ops.change_chanctx = mac80211_hwsim_change_chanctx;
41091+ *(void **)&mac80211_hwsim_ops.assign_vif_chanctx = mac80211_hwsim_assign_vif_chanctx;
41092+ *(void **)&mac80211_hwsim_ops.unassign_vif_chanctx = mac80211_hwsim_unassign_vif_chanctx;
41093+ pax_close_kernel();
41094 }
41095
41096 spin_lock_init(&hwsim_radio_lock);
41097diff --git a/drivers/net/wireless/rndis_wlan.c b/drivers/net/wireless/rndis_wlan.c
41098index 8169a85..7fa3b47 100644
41099--- a/drivers/net/wireless/rndis_wlan.c
41100+++ b/drivers/net/wireless/rndis_wlan.c
41101@@ -1238,7 +1238,7 @@ static int set_rts_threshold(struct usbnet *usbdev, u32 rts_threshold)
41102
41103 netdev_dbg(usbdev->net, "%s(): %i\n", __func__, rts_threshold);
41104
41105- if (rts_threshold < 0 || rts_threshold > 2347)
41106+ if (rts_threshold > 2347)
41107 rts_threshold = 2347;
41108
41109 tmp = cpu_to_le32(rts_threshold);
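
rts_threshold is a u32, so the removed rts_threshold < 0 arm could never be true; comparing an unsigned value against zero this way is a tautology that compilers flag with -Wtype-limits. The same cleanup appears later in the pnp IRQ/DMA checks. For example:

#include <stdio.h>

int main(void)
{
    unsigned int rts_threshold = 3000;

    /* Always false for any unsigned value; gcc -Wtype-limits warns:
     * "comparison of unsigned expression < 0 is always false".      */
    if (rts_threshold < 0)
        puts("unreachable");

    if (rts_threshold > 2347)      /* the one check that matters */
        rts_threshold = 2347;
    printf("%u\n", rts_threshold);
    return 0;
}
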
41110diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
41111index 7510723..5ba37f5 100644
41112--- a/drivers/net/wireless/rt2x00/rt2x00.h
41113+++ b/drivers/net/wireless/rt2x00/rt2x00.h
41114@@ -386,7 +386,7 @@ struct rt2x00_intf {
41115 * for hardware which doesn't support hardware
41116 * sequence counting.
41117 */
41118- atomic_t seqno;
41119+ atomic_unchecked_t seqno;
41120 };
41121
41122 static inline struct rt2x00_intf* vif_to_intf(struct ieee80211_vif *vif)
41123diff --git a/drivers/net/wireless/rt2x00/rt2x00queue.c b/drivers/net/wireless/rt2x00/rt2x00queue.c
41124index 2c12311..7b77f24 100644
41125--- a/drivers/net/wireless/rt2x00/rt2x00queue.c
41126+++ b/drivers/net/wireless/rt2x00/rt2x00queue.c
41127@@ -252,9 +252,9 @@ static void rt2x00queue_create_tx_descriptor_seq(struct rt2x00_dev *rt2x00dev,
41128 * sequence counter given by mac80211.
41129 */
41130 if (test_bit(ENTRY_TXD_FIRST_FRAGMENT, &txdesc->flags))
41131- seqno = atomic_add_return(0x10, &intf->seqno);
41132+ seqno = atomic_add_return_unchecked(0x10, &intf->seqno);
41133 else
41134- seqno = atomic_read(&intf->seqno);
41135+ seqno = atomic_read_unchecked(&intf->seqno);
41136
41137 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
41138 hdr->seq_ctrl |= cpu_to_le16(seqno);
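
atomic_unchecked_t is the escape hatch from PaX's reference-counter overflow protection: hardened atomic_t operations detect signed wraparound, which would misfire on this 802.11 sequence counter since it is meant to wrap. A sketch of the two behaviours, assuming the checked case refuses to wrap by aborting (atomicity is omitted for brevity):

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

typedef struct { int counter; } atomic_t;
typedef struct { int counter; } atomic_unchecked_t;

static int atomic_add_return(int i, atomic_t *v)
{
    int sum;
    if (__builtin_add_overflow(v->counter, i, &sum))
        abort();                        /* hardened kernel: refuse to wrap */
    return v->counter = sum;
}

static int atomic_add_return_unchecked(int i, atomic_unchecked_t *v)
{
    /* wraps by design; unsigned arithmetic avoids signed-overflow UB */
    return v->counter = (int)((unsigned)v->counter + (unsigned)i);
}

int main(void)
{
    atomic_unchecked_t seqno = { INT_MAX };
    printf("%d\n", atomic_add_return_unchecked(0x10, &seqno)); /* wraps   */

    atomic_t refcount = { INT_MAX };
    atomic_add_return(1, &refcount);    /* aborts instead of overflowing  */
    return 0;
}
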
41139diff --git a/drivers/net/wireless/ti/wl1251/sdio.c b/drivers/net/wireless/ti/wl1251/sdio.c
41140index e2b3d9c..67a5184 100644
41141--- a/drivers/net/wireless/ti/wl1251/sdio.c
41142+++ b/drivers/net/wireless/ti/wl1251/sdio.c
41143@@ -271,13 +271,17 @@ static int wl1251_sdio_probe(struct sdio_func *func,
41144
41145 irq_set_irq_type(wl->irq, IRQ_TYPE_EDGE_RISING);
41146
41147- wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
41148- wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
41149+ pax_open_kernel();
41150+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_enable_line_irq;
41151+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_disable_line_irq;
41152+ pax_close_kernel();
41153
41154 wl1251_info("using dedicated interrupt line");
41155 } else {
41156- wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
41157- wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
41158+ pax_open_kernel();
41159+ *(void **)&wl1251_sdio_ops.enable_irq = wl1251_sdio_enable_irq;
41160+ *(void **)&wl1251_sdio_ops.disable_irq = wl1251_sdio_disable_irq;
41161+ pax_close_kernel();
41162
41163 wl1251_info("using SDIO interrupt");
41164 }
41165diff --git a/drivers/net/wireless/ti/wl12xx/main.c b/drivers/net/wireless/ti/wl12xx/main.c
41166index 1c627da..69f7d17 100644
41167--- a/drivers/net/wireless/ti/wl12xx/main.c
41168+++ b/drivers/net/wireless/ti/wl12xx/main.c
41169@@ -656,7 +656,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
41170 sizeof(wl->conf.mem));
41171
41172 /* read data preparation is only needed by wl127x */
41173- wl->ops->prepare_read = wl127x_prepare_read;
41174+ pax_open_kernel();
41175+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
41176+ pax_close_kernel();
41177
41178 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
41179 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
41180@@ -681,7 +683,9 @@ static int wl12xx_identify_chip(struct wl1271 *wl)
41181 sizeof(wl->conf.mem));
41182
41183 /* read data preparation is only needed by wl127x */
41184- wl->ops->prepare_read = wl127x_prepare_read;
41185+ pax_open_kernel();
41186+ *(void **)&wl->ops->prepare_read = wl127x_prepare_read;
41187+ pax_close_kernel();
41188
41189 wlcore_set_min_fw_ver(wl, WL127X_CHIP_VER,
41190 WL127X_IFTYPE_SR_VER, WL127X_MAJOR_SR_VER,
41191diff --git a/drivers/net/wireless/ti/wl18xx/main.c b/drivers/net/wireless/ti/wl18xx/main.c
41192index 9fa692d..b31fee0 100644
41193--- a/drivers/net/wireless/ti/wl18xx/main.c
41194+++ b/drivers/net/wireless/ti/wl18xx/main.c
41195@@ -1687,8 +1687,10 @@ static int wl18xx_setup(struct wl1271 *wl)
41196 }
41197
41198 if (!checksum_param) {
41199- wl18xx_ops.set_rx_csum = NULL;
41200- wl18xx_ops.init_vif = NULL;
41201+ pax_open_kernel();
41202+ *(void **)&wl18xx_ops.set_rx_csum = NULL;
41203+ *(void **)&wl18xx_ops.init_vif = NULL;
41204+ pax_close_kernel();
41205 }
41206
41207 /* Enable 11a Band only if we have 5G antennas */
41208diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
41209index 7ef0b4a..ff65c28 100644
41210--- a/drivers/net/wireless/zd1211rw/zd_usb.c
41211+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
41212@@ -386,7 +386,7 @@ static inline void handle_regs_int(struct urb *urb)
41213 {
41214 struct zd_usb *usb = urb->context;
41215 struct zd_usb_interrupt *intr = &usb->intr;
41216- int len;
41217+ unsigned int len;
41218 u16 int_num;
41219
41220 ZD_ASSERT(in_interrupt());
41221diff --git a/drivers/oprofile/buffer_sync.c b/drivers/oprofile/buffer_sync.c
41222index d93b2b6..ae50401 100644
41223--- a/drivers/oprofile/buffer_sync.c
41224+++ b/drivers/oprofile/buffer_sync.c
41225@@ -332,7 +332,7 @@ static void add_data(struct op_entry *entry, struct mm_struct *mm)
41226 if (cookie == NO_COOKIE)
41227 offset = pc;
41228 if (cookie == INVALID_COOKIE) {
41229- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
41230+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
41231 offset = pc;
41232 }
41233 if (cookie != last_cookie) {
41234@@ -376,14 +376,14 @@ add_sample(struct mm_struct *mm, struct op_sample *s, int in_kernel)
41235 /* add userspace sample */
41236
41237 if (!mm) {
41238- atomic_inc(&oprofile_stats.sample_lost_no_mm);
41239+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mm);
41240 return 0;
41241 }
41242
41243 cookie = lookup_dcookie(mm, s->eip, &offset);
41244
41245 if (cookie == INVALID_COOKIE) {
41246- atomic_inc(&oprofile_stats.sample_lost_no_mapping);
41247+ atomic_inc_unchecked(&oprofile_stats.sample_lost_no_mapping);
41248 return 0;
41249 }
41250
41251@@ -552,7 +552,7 @@ void sync_buffer(int cpu)
41252 /* ignore backtraces if failed to add a sample */
41253 if (state == sb_bt_start) {
41254 state = sb_bt_ignore;
41255- atomic_inc(&oprofile_stats.bt_lost_no_mapping);
41256+ atomic_inc_unchecked(&oprofile_stats.bt_lost_no_mapping);
41257 }
41258 }
41259 release_mm(mm);
41260diff --git a/drivers/oprofile/event_buffer.c b/drivers/oprofile/event_buffer.c
41261index c0cc4e7..44d4e54 100644
41262--- a/drivers/oprofile/event_buffer.c
41263+++ b/drivers/oprofile/event_buffer.c
41264@@ -53,7 +53,7 @@ void add_event_entry(unsigned long value)
41265 }
41266
41267 if (buffer_pos == buffer_size) {
41268- atomic_inc(&oprofile_stats.event_lost_overflow);
41269+ atomic_inc_unchecked(&oprofile_stats.event_lost_overflow);
41270 return;
41271 }
41272
41273diff --git a/drivers/oprofile/oprof.c b/drivers/oprofile/oprof.c
41274index ed2c3ec..deda85a 100644
41275--- a/drivers/oprofile/oprof.c
41276+++ b/drivers/oprofile/oprof.c
41277@@ -110,7 +110,7 @@ static void switch_worker(struct work_struct *work)
41278 if (oprofile_ops.switch_events())
41279 return;
41280
41281- atomic_inc(&oprofile_stats.multiplex_counter);
41282+ atomic_inc_unchecked(&oprofile_stats.multiplex_counter);
41283 start_switch_worker();
41284 }
41285
41286diff --git a/drivers/oprofile/oprofile_files.c b/drivers/oprofile/oprofile_files.c
41287index 84a208d..d61b0a1 100644
41288--- a/drivers/oprofile/oprofile_files.c
41289+++ b/drivers/oprofile/oprofile_files.c
41290@@ -27,7 +27,7 @@ unsigned long oprofile_time_slice;
41291
41292 #ifdef CONFIG_OPROFILE_EVENT_MULTIPLEX
41293
41294-static ssize_t timeout_read(struct file *file, char __user *buf,
41295+static ssize_t __intentional_overflow(-1) timeout_read(struct file *file, char __user *buf,
41296 size_t count, loff_t *offset)
41297 {
41298 return oprofilefs_ulong_to_user(jiffies_to_msecs(oprofile_time_slice),
41299diff --git a/drivers/oprofile/oprofile_stats.c b/drivers/oprofile/oprofile_stats.c
41300index 917d28e..d62d981 100644
41301--- a/drivers/oprofile/oprofile_stats.c
41302+++ b/drivers/oprofile/oprofile_stats.c
41303@@ -30,11 +30,11 @@ void oprofile_reset_stats(void)
41304 cpu_buf->sample_invalid_eip = 0;
41305 }
41306
41307- atomic_set(&oprofile_stats.sample_lost_no_mm, 0);
41308- atomic_set(&oprofile_stats.sample_lost_no_mapping, 0);
41309- atomic_set(&oprofile_stats.event_lost_overflow, 0);
41310- atomic_set(&oprofile_stats.bt_lost_no_mapping, 0);
41311- atomic_set(&oprofile_stats.multiplex_counter, 0);
41312+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mm, 0);
41313+ atomic_set_unchecked(&oprofile_stats.sample_lost_no_mapping, 0);
41314+ atomic_set_unchecked(&oprofile_stats.event_lost_overflow, 0);
41315+ atomic_set_unchecked(&oprofile_stats.bt_lost_no_mapping, 0);
41316+ atomic_set_unchecked(&oprofile_stats.multiplex_counter, 0);
41317 }
41318
41319
41320diff --git a/drivers/oprofile/oprofile_stats.h b/drivers/oprofile/oprofile_stats.h
41321index 38b6fc0..b5cbfce 100644
41322--- a/drivers/oprofile/oprofile_stats.h
41323+++ b/drivers/oprofile/oprofile_stats.h
41324@@ -13,11 +13,11 @@
41325 #include <linux/atomic.h>
41326
41327 struct oprofile_stat_struct {
41328- atomic_t sample_lost_no_mm;
41329- atomic_t sample_lost_no_mapping;
41330- atomic_t bt_lost_no_mapping;
41331- atomic_t event_lost_overflow;
41332- atomic_t multiplex_counter;
41333+ atomic_unchecked_t sample_lost_no_mm;
41334+ atomic_unchecked_t sample_lost_no_mapping;
41335+ atomic_unchecked_t bt_lost_no_mapping;
41336+ atomic_unchecked_t event_lost_overflow;
41337+ atomic_unchecked_t multiplex_counter;
41338 };
41339
41340 extern struct oprofile_stat_struct oprofile_stats;
41341diff --git a/drivers/oprofile/oprofilefs.c b/drivers/oprofile/oprofilefs.c
41342index 7c12d9c..558bf3bb 100644
41343--- a/drivers/oprofile/oprofilefs.c
41344+++ b/drivers/oprofile/oprofilefs.c
41345@@ -190,7 +190,7 @@ static const struct file_operations atomic_ro_fops = {
41346
41347
41348 int oprofilefs_create_ro_atomic(struct super_block *sb, struct dentry *root,
41349- char const *name, atomic_t *val)
41350+ char const *name, atomic_unchecked_t *val)
41351 {
41352 return __oprofilefs_create_file(sb, root, name,
41353 &atomic_ro_fops, 0444, val);
41354diff --git a/drivers/oprofile/timer_int.c b/drivers/oprofile/timer_int.c
41355index 93404f7..4a313d8 100644
41356--- a/drivers/oprofile/timer_int.c
41357+++ b/drivers/oprofile/timer_int.c
41358@@ -93,7 +93,7 @@ static int __cpuinit oprofile_cpu_notify(struct notifier_block *self,
41359 return NOTIFY_OK;
41360 }
41361
41362-static struct notifier_block __refdata oprofile_cpu_notifier = {
41363+static struct notifier_block oprofile_cpu_notifier = {
41364 .notifier_call = oprofile_cpu_notify,
41365 };
41366
41367diff --git a/drivers/parport/procfs.c b/drivers/parport/procfs.c
41368index 92ed045..62d39bd7 100644
41369--- a/drivers/parport/procfs.c
41370+++ b/drivers/parport/procfs.c
41371@@ -64,7 +64,7 @@ static int do_active_device(ctl_table *table, int write,
41372
41373 *ppos += len;
41374
41375- return copy_to_user(result, buffer, len) ? -EFAULT : 0;
41376+ return (len > sizeof buffer || copy_to_user(result, buffer, len)) ? -EFAULT : 0;
41377 }
41378
41379 #ifdef CONFIG_PARPORT_1284
41380@@ -106,7 +106,7 @@ static int do_autoprobe(ctl_table *table, int write,
41381
41382 *ppos += len;
41383
41384- return copy_to_user (result, buffer, len) ? -EFAULT : 0;
41385+ return (len > sizeof buffer || copy_to_user (result, buffer, len)) ? -EFAULT : 0;
41386 }
41387 #endif /* IEEE1284.3 support. */
41388
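
Both parport procfs handlers format into a fixed stack buffer and then copy len bytes out; the added len > sizeof buffer guard turns what would otherwise be a kernel stack disclosure, should len ever exceed the buffer, into -EFAULT. The shape of the check in a hedged userspace sketch (the buffer size and helper below are illustrative, not the driver's):

#include <stdio.h>
#include <string.h>

#define EFAULT 14

/* Stand-in for the handler's tail: copy `len` bytes of a fixed
 * stack buffer to the caller, refusing out-of-range lengths.     */
static int emit(char *result, size_t result_sz, size_t len)
{
    char buffer[256];
    snprintf(buffer, sizeof buffer, "none\n");

    if (len > sizeof buffer || len > result_sz)   /* the added guard */
        return -EFAULT;
    memcpy(result, buffer, len);
    return 0;
}

int main(void)
{
    char out[16];
    printf("%d\n", emit(out, sizeof out, 6));     /* 0   */
    printf("%d\n", emit(out, sizeof out, 4096));  /* -14 */
    return 0;
}
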
41389diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
41390index c35e8ad..fc33beb 100644
41391--- a/drivers/pci/hotplug/acpiphp_ibm.c
41392+++ b/drivers/pci/hotplug/acpiphp_ibm.c
41393@@ -464,7 +464,9 @@ static int __init ibm_acpiphp_init(void)
41394 goto init_cleanup;
41395 }
41396
41397- ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
41398+ pax_open_kernel();
41399+ *(size_t *)&ibm_apci_table_attr.size = ibm_get_table_from_acpi(NULL);
41400+ pax_close_kernel();
41401 retval = sysfs_create_bin_file(sysdir, &ibm_apci_table_attr);
41402
41403 return retval;
41404diff --git a/drivers/pci/hotplug/cpcihp_generic.c b/drivers/pci/hotplug/cpcihp_generic.c
41405index a6a71c4..c91097b 100644
41406--- a/drivers/pci/hotplug/cpcihp_generic.c
41407+++ b/drivers/pci/hotplug/cpcihp_generic.c
41408@@ -73,7 +73,6 @@ static u16 port;
41409 static unsigned int enum_bit;
41410 static u8 enum_mask;
41411
41412-static struct cpci_hp_controller_ops generic_hpc_ops;
41413 static struct cpci_hp_controller generic_hpc;
41414
41415 static int __init validate_parameters(void)
41416@@ -139,6 +138,10 @@ static int query_enum(void)
41417 return ((value & enum_mask) == enum_mask);
41418 }
41419
41420+static struct cpci_hp_controller_ops generic_hpc_ops = {
41421+ .query_enum = query_enum,
41422+};
41423+
41424 static int __init cpcihp_generic_init(void)
41425 {
41426 int status;
41427@@ -165,7 +168,6 @@ static int __init cpcihp_generic_init(void)
41428 pci_dev_put(dev);
41429
41430 memset(&generic_hpc, 0, sizeof (struct cpci_hp_controller));
41431- generic_hpc_ops.query_enum = query_enum;
41432 generic_hpc.ops = &generic_hpc_ops;
41433
41434 status = cpci_hp_register_controller(&generic_hpc);
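
cpcihp_generic (and zt5550 just below) move the query_enum assignment from probe time into a static initializer, so the ops structure needs no runtime writes at all and becomes eligible for constification. The pattern, reduced to its essentials:

#include <stdio.h>

struct hpc_ops { int (*query_enum)(void); };

static int query_enum(void) { return 1; }

/* Before: a blank struct patched up at init time (must stay writable).
 * After: fully initialized at compile time, eligible for const/.rodata. */
static const struct hpc_ops generic_hpc_ops = {
    .query_enum = query_enum,
};

int main(void)
{
    printf("%d\n", generic_hpc_ops.query_enum());
    return 0;
}
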
41435diff --git a/drivers/pci/hotplug/cpcihp_zt5550.c b/drivers/pci/hotplug/cpcihp_zt5550.c
41436index 449b4bb..257e2e8 100644
41437--- a/drivers/pci/hotplug/cpcihp_zt5550.c
41438+++ b/drivers/pci/hotplug/cpcihp_zt5550.c
41439@@ -59,7 +59,6 @@
41440 /* local variables */
41441 static bool debug;
41442 static bool poll;
41443-static struct cpci_hp_controller_ops zt5550_hpc_ops;
41444 static struct cpci_hp_controller zt5550_hpc;
41445
41446 /* Primary cPCI bus bridge device */
41447@@ -205,6 +204,10 @@ static int zt5550_hc_disable_irq(void)
41448 return 0;
41449 }
41450
41451+static struct cpci_hp_controller_ops zt5550_hpc_ops = {
41452+ .query_enum = zt5550_hc_query_enum,
41453+};
41454+
41455 static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
41456 {
41457 int status;
41458@@ -216,16 +219,17 @@ static int zt5550_hc_init_one (struct pci_dev *pdev, const struct pci_device_id
41459 dbg("returned from zt5550_hc_config");
41460
41461 memset(&zt5550_hpc, 0, sizeof (struct cpci_hp_controller));
41462- zt5550_hpc_ops.query_enum = zt5550_hc_query_enum;
41463 zt5550_hpc.ops = &zt5550_hpc_ops;
41464 if(!poll) {
41465 zt5550_hpc.irq = hc_dev->irq;
41466 zt5550_hpc.irq_flags = IRQF_SHARED;
41467 zt5550_hpc.dev_id = hc_dev;
41468
41469- zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
41470- zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
41471- zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
41472+ pax_open_kernel();
41473+ *(void **)&zt5550_hpc_ops.enable_irq = zt5550_hc_enable_irq;
41474+ *(void **)&zt5550_hpc_ops.disable_irq = zt5550_hc_disable_irq;
41475+ *(void **)&zt5550_hpc_ops.check_irq = zt5550_hc_check_irq;
41476+ pax_close_kernel();
41477 } else {
41478 info("using ENUM# polling mode");
41479 }
41480diff --git a/drivers/pci/hotplug/cpqphp_nvram.c b/drivers/pci/hotplug/cpqphp_nvram.c
41481index 76ba8a1..20ca857 100644
41482--- a/drivers/pci/hotplug/cpqphp_nvram.c
41483+++ b/drivers/pci/hotplug/cpqphp_nvram.c
41484@@ -428,9 +428,13 @@ static u32 store_HRT (void __iomem *rom_start)
41485
41486 void compaq_nvram_init (void __iomem *rom_start)
41487 {
41488+
41489+#ifndef CONFIG_PAX_KERNEXEC
41490 if (rom_start) {
41491 compaq_int15_entry_point = (rom_start + ROM_INT15_PHY_ADDR - ROM_PHY_ADDR);
41492 }
41493+#endif
41494+
41495 dbg("int15 entry = %p\n", compaq_int15_entry_point);
41496
41497 /* initialize our int15 lock */
41498diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
41499index ec20f74..c1d961e 100644
41500--- a/drivers/pci/hotplug/pci_hotplug_core.c
41501+++ b/drivers/pci/hotplug/pci_hotplug_core.c
41502@@ -441,8 +441,10 @@ int __pci_hp_register(struct hotplug_slot *slot, struct pci_bus *bus,
41503 return -EINVAL;
41504 }
41505
41506- slot->ops->owner = owner;
41507- slot->ops->mod_name = mod_name;
41508+ pax_open_kernel();
41509+ *(struct module **)&slot->ops->owner = owner;
41510+ *(const char **)&slot->ops->mod_name = mod_name;
41511+ pax_close_kernel();
41512
41513 mutex_lock(&pci_hp_mutex);
41514 /*
41515diff --git a/drivers/pci/hotplug/pciehp_core.c b/drivers/pci/hotplug/pciehp_core.c
41516index 7d72c5e..edce02c 100644
41517--- a/drivers/pci/hotplug/pciehp_core.c
41518+++ b/drivers/pci/hotplug/pciehp_core.c
41519@@ -91,7 +91,7 @@ static int init_slot(struct controller *ctrl)
41520 struct slot *slot = ctrl->slot;
41521 struct hotplug_slot *hotplug = NULL;
41522 struct hotplug_slot_info *info = NULL;
41523- struct hotplug_slot_ops *ops = NULL;
41524+ hotplug_slot_ops_no_const *ops = NULL;
41525 char name[SLOT_NAME_SIZE];
41526 int retval = -ENOMEM;
41527
41528diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
41529index 5b4a9d9..cd5ac1f 100644
41530--- a/drivers/pci/pci-sysfs.c
41531+++ b/drivers/pci/pci-sysfs.c
41532@@ -1071,7 +1071,7 @@ static int pci_create_attr(struct pci_dev *pdev, int num, int write_combine)
41533 {
41534 /* allocate attribute structure, piggyback attribute name */
41535 int name_len = write_combine ? 13 : 10;
41536- struct bin_attribute *res_attr;
41537+ bin_attribute_no_const *res_attr;
41538 int retval;
41539
41540 res_attr = kzalloc(sizeof(*res_attr) + name_len, GFP_ATOMIC);
41541@@ -1256,7 +1256,7 @@ static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_stor
41542 static int pci_create_capabilities_sysfs(struct pci_dev *dev)
41543 {
41544 int retval;
41545- struct bin_attribute *attr;
41546+ bin_attribute_no_const *attr;
41547
41548 /* If the device has VPD, try to expose it in sysfs. */
41549 if (dev->vpd) {
41550@@ -1303,7 +1303,7 @@ int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
41551 {
41552 int retval;
41553 int rom_size = 0;
41554- struct bin_attribute *attr;
41555+ bin_attribute_no_const *attr;
41556
41557 if (!sysfs_initialized)
41558 return -EACCES;
41559diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
41560index d1182c4..2a138ec 100644
41561--- a/drivers/pci/pci.h
41562+++ b/drivers/pci/pci.h
41563@@ -92,7 +92,7 @@ struct pci_vpd_ops {
41564 struct pci_vpd {
41565 unsigned int len;
41566 const struct pci_vpd_ops *ops;
41567- struct bin_attribute *attr; /* descriptor for sysfs VPD entry */
41568+ bin_attribute_no_const *attr; /* descriptor for sysfs VPD entry */
41569 };
41570
41571 int pci_vpd_pci22_init(struct pci_dev *dev);
41572diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
41573index d320df6..ca9a8f6 100644
41574--- a/drivers/pci/pcie/aspm.c
41575+++ b/drivers/pci/pcie/aspm.c
41576@@ -27,9 +27,9 @@
41577 #define MODULE_PARAM_PREFIX "pcie_aspm."
41578
41579 /* Note: those are not register definitions */
41580-#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
41581-#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
41582-#define ASPM_STATE_L1 (4) /* L1 state */
41583+#define ASPM_STATE_L0S_UP (1U) /* Upstream direction L0s state */
41584+#define ASPM_STATE_L0S_DW (2U) /* Downstream direction L0s state */
41585+#define ASPM_STATE_L1 (4U) /* L1 state */
41586 #define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
41587 #define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
41588
41589diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
41590index ea37072..10e58e56 100644
41591--- a/drivers/pci/probe.c
41592+++ b/drivers/pci/probe.c
41593@@ -173,7 +173,7 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
41594 struct pci_bus_region region;
41595 bool bar_too_big = false, bar_disabled = false;
41596
41597- mask = type ? PCI_ROM_ADDRESS_MASK : ~0;
41598+ mask = type ? (u32)PCI_ROM_ADDRESS_MASK : ~0;
41599
41600 /* No printks while decoding is disabled! */
41601 if (!dev->mmio_always_on) {
41602diff --git a/drivers/pci/proc.c b/drivers/pci/proc.c
41603index 0812608..b04018c4 100644
41604--- a/drivers/pci/proc.c
41605+++ b/drivers/pci/proc.c
41606@@ -453,7 +453,16 @@ static const struct file_operations proc_bus_pci_dev_operations = {
41607 static int __init pci_proc_init(void)
41608 {
41609 struct pci_dev *dev = NULL;
41610+
41611+#ifdef CONFIG_GRKERNSEC_PROC_ADD
41612+#ifdef CONFIG_GRKERNSEC_PROC_USER
41613+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR, NULL);
41614+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
41615+ proc_bus_pci_dir = proc_mkdir_mode("bus/pci", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
41616+#endif
41617+#else
41618 proc_bus_pci_dir = proc_mkdir("bus/pci", NULL);
41619+#endif
41620 proc_create("devices", 0, proc_bus_pci_dir,
41621 &proc_bus_pci_dev_operations);
41622 proc_initialized = 1;
41623diff --git a/drivers/platform/x86/chromeos_laptop.c b/drivers/platform/x86/chromeos_laptop.c
41624index 3e5b4497..dcdfb70 100644
41625--- a/drivers/platform/x86/chromeos_laptop.c
41626+++ b/drivers/platform/x86/chromeos_laptop.c
41627@@ -301,7 +301,7 @@ static int __init setup_tsl2563_als(const struct dmi_system_id *id)
41628 return 0;
41629 }
41630
41631-static struct dmi_system_id __initdata chromeos_laptop_dmi_table[] = {
41632+static struct dmi_system_id __initconst chromeos_laptop_dmi_table[] = {
41633 {
41634 .ident = "Samsung Series 5 550 - Touchpad",
41635 .matches = {
41636diff --git a/drivers/platform/x86/msi-laptop.c b/drivers/platform/x86/msi-laptop.c
41637index 6b22938..bc9700e 100644
41638--- a/drivers/platform/x86/msi-laptop.c
41639+++ b/drivers/platform/x86/msi-laptop.c
41640@@ -1000,12 +1000,14 @@ static int __init load_scm_model_init(struct platform_device *sdev)
41641
41642 if (!quirks->ec_read_only) {
41643 /* allow userland write sysfs file */
41644- dev_attr_bluetooth.store = store_bluetooth;
41645- dev_attr_wlan.store = store_wlan;
41646- dev_attr_threeg.store = store_threeg;
41647- dev_attr_bluetooth.attr.mode |= S_IWUSR;
41648- dev_attr_wlan.attr.mode |= S_IWUSR;
41649- dev_attr_threeg.attr.mode |= S_IWUSR;
41650+ pax_open_kernel();
41651+ *(void **)&dev_attr_bluetooth.store = store_bluetooth;
41652+ *(void **)&dev_attr_wlan.store = store_wlan;
41653+ *(void **)&dev_attr_threeg.store = store_threeg;
41654+ *(umode_t *)&dev_attr_bluetooth.attr.mode |= S_IWUSR;
41655+ *(umode_t *)&dev_attr_wlan.attr.mode |= S_IWUSR;
41656+ *(umode_t *)&dev_attr_threeg.attr.mode |= S_IWUSR;
41657+ pax_close_kernel();
41658 }
41659
41660 /* disable hardware control by fn key */
41661diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
41662index 2ac045f..39c443d 100644
41663--- a/drivers/platform/x86/sony-laptop.c
41664+++ b/drivers/platform/x86/sony-laptop.c
41665@@ -2483,7 +2483,7 @@ static void sony_nc_gfx_switch_cleanup(struct platform_device *pd)
41666 }
41667
41668 /* High speed charging function */
41669-static struct device_attribute *hsc_handle;
41670+static device_attribute_no_const *hsc_handle;
41671
41672 static ssize_t sony_nc_highspeed_charging_store(struct device *dev,
41673 struct device_attribute *attr,
41674diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
41675index 54d31c0..3f896d3 100644
41676--- a/drivers/platform/x86/thinkpad_acpi.c
41677+++ b/drivers/platform/x86/thinkpad_acpi.c
41678@@ -2093,7 +2093,7 @@ static int hotkey_mask_get(void)
41679 return 0;
41680 }
41681
41682-void static hotkey_mask_warn_incomplete_mask(void)
41683+static void hotkey_mask_warn_incomplete_mask(void)
41684 {
41685 /* log only what the user can fix... */
41686 const u32 wantedmask = hotkey_driver_mask &
41687@@ -2324,11 +2324,6 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
41688 }
41689 }
41690
41691-static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
41692- struct tp_nvram_state *newn,
41693- const u32 event_mask)
41694-{
41695-
41696 #define TPACPI_COMPARE_KEY(__scancode, __member) \
41697 do { \
41698 if ((event_mask & (1 << __scancode)) && \
41699@@ -2342,36 +2337,42 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
41700 tpacpi_hotkey_send_key(__scancode); \
41701 } while (0)
41702
41703- void issue_volchange(const unsigned int oldvol,
41704- const unsigned int newvol)
41705- {
41706- unsigned int i = oldvol;
41707+static void issue_volchange(const unsigned int oldvol,
41708+ const unsigned int newvol,
41709+ const u32 event_mask)
41710+{
41711+ unsigned int i = oldvol;
41712
41713- while (i > newvol) {
41714- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
41715- i--;
41716- }
41717- while (i < newvol) {
41718- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
41719- i++;
41720- }
41721+ while (i > newvol) {
41722+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEDOWN);
41723+ i--;
41724 }
41725+ while (i < newvol) {
41726+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
41727+ i++;
41728+ }
41729+}
41730
41731- void issue_brightnesschange(const unsigned int oldbrt,
41732- const unsigned int newbrt)
41733- {
41734- unsigned int i = oldbrt;
41735+static void issue_brightnesschange(const unsigned int oldbrt,
41736+ const unsigned int newbrt,
41737+ const u32 event_mask)
41738+{
41739+ unsigned int i = oldbrt;
41740
41741- while (i > newbrt) {
41742- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
41743- i--;
41744- }
41745- while (i < newbrt) {
41746- TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
41747- i++;
41748- }
41749+ while (i > newbrt) {
41750+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNEND);
41751+ i--;
41752+ }
41753+ while (i < newbrt) {
41754+ TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
41755+ i++;
41756 }
41757+}
41758
41759+static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
41760+ struct tp_nvram_state *newn,
41761+ const u32 event_mask)
41762+{
41763 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
41764 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
41765 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
41766@@ -2405,7 +2406,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
41767 oldn->volume_level != newn->volume_level) {
41768 /* recently muted, or repeated mute keypress, or
41769 * multiple presses ending in mute */
41770- issue_volchange(oldn->volume_level, newn->volume_level);
41771+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
41772 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_MUTE);
41773 }
41774 } else {
41775@@ -2415,7 +2416,7 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
41776 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_VOLUMEUP);
41777 }
41778 if (oldn->volume_level != newn->volume_level) {
41779- issue_volchange(oldn->volume_level, newn->volume_level);
41780+ issue_volchange(oldn->volume_level, newn->volume_level, event_mask);
41781 } else if (oldn->volume_toggle != newn->volume_toggle) {
41782 /* repeated vol up/down keypress at end of scale ? */
41783 if (newn->volume_level == 0)
41784@@ -2428,7 +2429,8 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
41785 /* handle brightness */
41786 if (oldn->brightness_level != newn->brightness_level) {
41787 issue_brightnesschange(oldn->brightness_level,
41788- newn->brightness_level);
41789+ newn->brightness_level,
41790+ event_mask);
41791 } else if (oldn->brightness_toggle != newn->brightness_toggle) {
41792 /* repeated key presses that didn't change state */
41793 if (newn->brightness_level == 0)
41794@@ -2437,10 +2439,10 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
41795 && !tp_features.bright_unkfw)
41796 TPACPI_MAY_SEND_KEY(TP_ACPI_HOTKEYSCAN_FNHOME);
41797 }
41798+}
41799
41800 #undef TPACPI_COMPARE_KEY
41801 #undef TPACPI_MAY_SEND_KEY
41802-}
41803
41804 /*
41805 * Polling driver
41806diff --git a/drivers/pnp/pnpbios/bioscalls.c b/drivers/pnp/pnpbios/bioscalls.c
41807index 769d265..a3a05ca 100644
41808--- a/drivers/pnp/pnpbios/bioscalls.c
41809+++ b/drivers/pnp/pnpbios/bioscalls.c
41810@@ -58,7 +58,7 @@ do { \
41811 set_desc_limit(&gdt[(selname) >> 3], (size) - 1); \
41812 } while(0)
41813
41814-static struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4092,
41815+static const struct desc_struct bad_bios_desc = GDT_ENTRY_INIT(0x4093,
41816 (unsigned long)__va(0x400UL), PAGE_SIZE - 0x400 - 1);
41817
41818 /*
41819@@ -95,7 +95,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
41820
41821 cpu = get_cpu();
41822 save_desc_40 = get_cpu_gdt_table(cpu)[0x40 / 8];
41823+
41824+ pax_open_kernel();
41825 get_cpu_gdt_table(cpu)[0x40 / 8] = bad_bios_desc;
41826+ pax_close_kernel();
41827
41828 /* On some boxes IRQ's during PnP BIOS calls are deadly. */
41829 spin_lock_irqsave(&pnp_bios_lock, flags);
41830@@ -133,7 +136,10 @@ static inline u16 call_pnp_bios(u16 func, u16 arg1, u16 arg2, u16 arg3,
41831 :"memory");
41832 spin_unlock_irqrestore(&pnp_bios_lock, flags);
41833
41834+ pax_open_kernel();
41835 get_cpu_gdt_table(cpu)[0x40 / 8] = save_desc_40;
41836+ pax_close_kernel();
41837+
41838 put_cpu();
41839
41840 /* If we get here and this is set then the PnP BIOS faulted on us. */
41841@@ -467,7 +473,7 @@ int pnp_bios_read_escd(char *data, u32 nvram_base)
41842 return status;
41843 }
41844
41845-void pnpbios_calls_init(union pnp_bios_install_struct *header)
41846+void __init pnpbios_calls_init(union pnp_bios_install_struct *header)
41847 {
41848 int i;
41849
41850@@ -475,6 +481,8 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
41851 pnp_bios_callpoint.offset = header->fields.pm16offset;
41852 pnp_bios_callpoint.segment = PNP_CS16;
41853
41854+ pax_open_kernel();
41855+
41856 for_each_possible_cpu(i) {
41857 struct desc_struct *gdt = get_cpu_gdt_table(i);
41858 if (!gdt)
41859@@ -486,4 +494,6 @@ void pnpbios_calls_init(union pnp_bios_install_struct *header)
41860 set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
41861 (unsigned long)__va(header->fields.pm16dseg));
41862 }
41863+
41864+ pax_close_kernel();
41865 }
41866diff --git a/drivers/pnp/resource.c b/drivers/pnp/resource.c
41867index 3e6db1c..1fbbdae 100644
41868--- a/drivers/pnp/resource.c
41869+++ b/drivers/pnp/resource.c
41870@@ -360,7 +360,7 @@ int pnp_check_irq(struct pnp_dev *dev, struct resource *res)
41871 return 1;
41872
41873 /* check if the resource is valid */
41874- if (*irq < 0 || *irq > 15)
41875+ if (*irq > 15)
41876 return 0;
41877
41878 /* check if the resource is reserved */
41879@@ -424,7 +424,7 @@ int pnp_check_dma(struct pnp_dev *dev, struct resource *res)
41880 return 1;
41881
41882 /* check if the resource is valid */
41883- if (*dma < 0 || *dma == 4 || *dma > 7)
41884+ if (*dma == 4 || *dma > 7)
41885 return 0;
41886
41887 /* check if the resource is reserved */
41888diff --git a/drivers/power/pda_power.c b/drivers/power/pda_power.c
41889index 0c52e2a..3421ab7 100644
41890--- a/drivers/power/pda_power.c
41891+++ b/drivers/power/pda_power.c
41892@@ -37,7 +37,11 @@ static int polling;
41893
41894 #if IS_ENABLED(CONFIG_USB_PHY)
41895 static struct usb_phy *transceiver;
41896-static struct notifier_block otg_nb;
41897+static int otg_handle_notification(struct notifier_block *nb,
41898+ unsigned long event, void *unused);
41899+static struct notifier_block otg_nb = {
41900+ .notifier_call = otg_handle_notification
41901+};
41902 #endif
41903
41904 static struct regulator *ac_draw;
41905@@ -369,7 +373,6 @@ static int pda_power_probe(struct platform_device *pdev)
41906
41907 #if IS_ENABLED(CONFIG_USB_PHY)
41908 if (!IS_ERR_OR_NULL(transceiver) && pdata->use_otg_notifier) {
41909- otg_nb.notifier_call = otg_handle_notification;
41910 ret = usb_register_notifier(transceiver, &otg_nb);
41911 if (ret) {
41912 dev_err(dev, "failure to register otg notifier\n");
41913diff --git a/drivers/power/power_supply.h b/drivers/power/power_supply.h
41914index cc439fd..8fa30df 100644
41915--- a/drivers/power/power_supply.h
41916+++ b/drivers/power/power_supply.h
41917@@ -16,12 +16,12 @@ struct power_supply;
41918
41919 #ifdef CONFIG_SYSFS
41920
41921-extern void power_supply_init_attrs(struct device_type *dev_type);
41922+extern void power_supply_init_attrs(void);
41923 extern int power_supply_uevent(struct device *dev, struct kobj_uevent_env *env);
41924
41925 #else
41926
41927-static inline void power_supply_init_attrs(struct device_type *dev_type) {}
41928+static inline void power_supply_init_attrs(void) {}
41929 #define power_supply_uevent NULL
41930
41931 #endif /* CONFIG_SYSFS */
41932diff --git a/drivers/power/power_supply_core.c b/drivers/power/power_supply_core.c
41933index 1c517c3..ffa2f17 100644
41934--- a/drivers/power/power_supply_core.c
41935+++ b/drivers/power/power_supply_core.c
41936@@ -24,7 +24,10 @@
41937 struct class *power_supply_class;
41938 EXPORT_SYMBOL_GPL(power_supply_class);
41939
41940-static struct device_type power_supply_dev_type;
41941+extern const struct attribute_group *power_supply_attr_groups[];
41942+static struct device_type power_supply_dev_type = {
41943+ .groups = power_supply_attr_groups,
41944+};
41945
41946 static bool __power_supply_is_supplied_by(struct power_supply *supplier,
41947 struct power_supply *supply)
41948@@ -554,7 +557,7 @@ static int __init power_supply_class_init(void)
41949 return PTR_ERR(power_supply_class);
41950
41951 power_supply_class->dev_uevent = power_supply_uevent;
41952- power_supply_init_attrs(&power_supply_dev_type);
41953+ power_supply_init_attrs();
41954
41955 return 0;
41956 }
41957diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
41958index 29178f7..c65f324 100644
41959--- a/drivers/power/power_supply_sysfs.c
41960+++ b/drivers/power/power_supply_sysfs.c
41961@@ -230,17 +230,15 @@ static struct attribute_group power_supply_attr_group = {
41962 .is_visible = power_supply_attr_is_visible,
41963 };
41964
41965-static const struct attribute_group *power_supply_attr_groups[] = {
41966+const struct attribute_group *power_supply_attr_groups[] = {
41967 &power_supply_attr_group,
41968 NULL,
41969 };
41970
41971-void power_supply_init_attrs(struct device_type *dev_type)
41972+void power_supply_init_attrs(void)
41973 {
41974 int i;
41975
41976- dev_type->groups = power_supply_attr_groups;
41977-
41978 for (i = 0; i < ARRAY_SIZE(power_supply_attrs); i++)
41979 __power_supply_attrs[i] = &power_supply_attrs[i].attr;
41980 }
41981diff --git a/drivers/regulator/max8660.c b/drivers/regulator/max8660.c
41982index d428ef9..fdc0357 100644
41983--- a/drivers/regulator/max8660.c
41984+++ b/drivers/regulator/max8660.c
41985@@ -333,8 +333,10 @@ static int max8660_probe(struct i2c_client *client,
41986 max8660->shadow_regs[MAX8660_OVER1] = 5;
41987 } else {
41988 /* Otherwise devices can be toggled via software */
41989- max8660_dcdc_ops.enable = max8660_dcdc_enable;
41990- max8660_dcdc_ops.disable = max8660_dcdc_disable;
41991+ pax_open_kernel();
41992+ *(void **)&max8660_dcdc_ops.enable = max8660_dcdc_enable;
41993+ *(void **)&max8660_dcdc_ops.disable = max8660_dcdc_disable;
41994+ pax_close_kernel();
41995 }
41996
41997 /*
41998diff --git a/drivers/regulator/max8973-regulator.c b/drivers/regulator/max8973-regulator.c
41999index adb1414..c13e0ce 100644
42000--- a/drivers/regulator/max8973-regulator.c
42001+++ b/drivers/regulator/max8973-regulator.c
42002@@ -401,9 +401,11 @@ static int max8973_probe(struct i2c_client *client,
42003 if (!pdata->enable_ext_control) {
42004 max->desc.enable_reg = MAX8973_VOUT;
42005 max->desc.enable_mask = MAX8973_VOUT_ENABLE;
42006- max8973_dcdc_ops.enable = regulator_enable_regmap;
42007- max8973_dcdc_ops.disable = regulator_disable_regmap;
42008- max8973_dcdc_ops.is_enabled = regulator_is_enabled_regmap;
42009+ pax_open_kernel();
42010+ *(void **)&max8973_dcdc_ops.enable = regulator_enable_regmap;
42011+ *(void **)&max8973_dcdc_ops.disable = regulator_disable_regmap;
42012+ *(void **)&max8973_dcdc_ops.is_enabled = regulator_is_enabled_regmap;
42013+ pax_close_kernel();
42014 }
42015
42016 max->enable_external_control = pdata->enable_ext_control;
42017diff --git a/drivers/regulator/mc13892-regulator.c b/drivers/regulator/mc13892-regulator.c
42018index b716283..3cc4349 100644
42019--- a/drivers/regulator/mc13892-regulator.c
42020+++ b/drivers/regulator/mc13892-regulator.c
42021@@ -582,10 +582,12 @@ static int mc13892_regulator_probe(struct platform_device *pdev)
42022 }
42023 mc13xxx_unlock(mc13892);
42024
42025- mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
42026+ pax_open_kernel();
42027+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->set_mode
42028 = mc13892_vcam_set_mode;
42029- mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
42030+ *(void **)&mc13892_regulators[MC13892_VCAM].desc.ops->get_mode
42031 = mc13892_vcam_get_mode;
42032+ pax_close_kernel();
42033
42034 mc13xxx_data = mc13xxx_parse_regulators_dt(pdev, mc13892_regulators,
42035 ARRAY_SIZE(mc13892_regulators));
42036diff --git a/drivers/rtc/rtc-cmos.c b/drivers/rtc/rtc-cmos.c
42037index f1cb706..4c7832a 100644
42038--- a/drivers/rtc/rtc-cmos.c
42039+++ b/drivers/rtc/rtc-cmos.c
42040@@ -724,7 +724,9 @@ cmos_do_probe(struct device *dev, struct resource *ports, int rtc_irq)
42041 hpet_rtc_timer_init();
42042
42043 /* export at least the first block of NVRAM */
42044- nvram.size = address_space - NVRAM_OFFSET;
42045+ pax_open_kernel();
42046+ *(size_t *)&nvram.size = address_space - NVRAM_OFFSET;
42047+ pax_close_kernel();
42048 retval = sysfs_create_bin_file(&dev->kobj, &nvram);
42049 if (retval < 0) {
42050 dev_dbg(dev, "can't create nvram file? %d\n", retval);
42051diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
42052index d049393..bb20be0 100644
42053--- a/drivers/rtc/rtc-dev.c
42054+++ b/drivers/rtc/rtc-dev.c
42055@@ -16,6 +16,7 @@
42056 #include <linux/module.h>
42057 #include <linux/rtc.h>
42058 #include <linux/sched.h>
42059+#include <linux/grsecurity.h>
42060 #include "rtc-core.h"
42061
42062 static dev_t rtc_devt;
42063@@ -347,6 +348,8 @@ static long rtc_dev_ioctl(struct file *file,
42064 if (copy_from_user(&tm, uarg, sizeof(tm)))
42065 return -EFAULT;
42066
42067+ gr_log_timechange();
42068+
42069 return rtc_set_time(rtc, &tm);
42070
42071 case RTC_PIE_ON:
42072diff --git a/drivers/rtc/rtc-ds1307.c b/drivers/rtc/rtc-ds1307.c
42073index b53992a..776df84 100644
42074--- a/drivers/rtc/rtc-ds1307.c
42075+++ b/drivers/rtc/rtc-ds1307.c
42076@@ -107,7 +107,7 @@ struct ds1307 {
42077 u8 offset; /* register's offset */
42078 u8 regs[11];
42079 u16 nvram_offset;
42080- struct bin_attribute *nvram;
42081+ bin_attribute_no_const *nvram;
42082 enum ds_type type;
42083 unsigned long flags;
42084 #define HAS_NVRAM 0 /* bit 0 == sysfs file active */
42085diff --git a/drivers/rtc/rtc-m48t59.c b/drivers/rtc/rtc-m48t59.c
42086index 130f29a..6179d03 100644
42087--- a/drivers/rtc/rtc-m48t59.c
42088+++ b/drivers/rtc/rtc-m48t59.c
42089@@ -482,7 +482,9 @@ static int m48t59_rtc_probe(struct platform_device *pdev)
42090 goto out;
42091 }
42092
42093- m48t59_nvram_attr.size = pdata->offset;
42094+ pax_open_kernel();
42095+ *(size_t *)&m48t59_nvram_attr.size = pdata->offset;
42096+ pax_close_kernel();
42097
42098 ret = sysfs_create_bin_file(&pdev->dev.kobj, &m48t59_nvram_attr);
42099 if (ret) {
42100diff --git a/drivers/scsi/bfa/bfa_fcpim.h b/drivers/scsi/bfa/bfa_fcpim.h
42101index e693af6..2e525b6 100644
42102--- a/drivers/scsi/bfa/bfa_fcpim.h
42103+++ b/drivers/scsi/bfa/bfa_fcpim.h
42104@@ -36,7 +36,7 @@ struct bfa_iotag_s {
42105
42106 struct bfa_itn_s {
42107 bfa_isr_func_t isr;
42108-};
42109+} __no_const;
42110
42111 void bfa_itn_create(struct bfa_s *bfa, struct bfa_rport_s *rport,
42112 void (*isr)(struct bfa_s *bfa, struct bfi_msg_s *m));
42113diff --git a/drivers/scsi/bfa/bfa_ioc.h b/drivers/scsi/bfa/bfa_ioc.h
42114index 23a90e7..9cf04ee 100644
42115--- a/drivers/scsi/bfa/bfa_ioc.h
42116+++ b/drivers/scsi/bfa/bfa_ioc.h
42117@@ -258,7 +258,7 @@ struct bfa_ioc_cbfn_s {
42118 bfa_ioc_disable_cbfn_t disable_cbfn;
42119 bfa_ioc_hbfail_cbfn_t hbfail_cbfn;
42120 bfa_ioc_reset_cbfn_t reset_cbfn;
42121-};
42122+} __no_const;
42123
42124 /*
42125 * IOC event notification mechanism.
42126@@ -346,7 +346,7 @@ struct bfa_ioc_hwif_s {
42127 void (*ioc_sync_ack) (struct bfa_ioc_s *ioc);
42128 bfa_boolean_t (*ioc_sync_complete) (struct bfa_ioc_s *ioc);
42129 bfa_boolean_t (*ioc_lpu_read_stat) (struct bfa_ioc_s *ioc);
42130-};
42131+} __no_const;
42132
42133 /*
42134 * Queue element to wait for room in request queue. FIFO order is
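
The bfa callback tables above are filled in per-instance at attach time, so they cannot live in read-only memory. The constify plugin treats any struct consisting only of function pointers as implicitly const; tagging the type __no_const opts it out. A rough illustration with made-up names (__no_const expands to nothing on non-PaX kernels):

struct frob_ops {
	int (*frob)(int arg);
} __no_const;                     /* instances are written at runtime */

static struct frob_ops frob_ops;  /* would end up read-only without the tag */

static void frob_register(int (*fn)(int))
{
	frob_ops.frob = fn;
}
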
42135diff --git a/drivers/scsi/hosts.c b/drivers/scsi/hosts.c
42136index df0c3c7..b00e1d0 100644
42137--- a/drivers/scsi/hosts.c
42138+++ b/drivers/scsi/hosts.c
42139@@ -42,7 +42,7 @@
42140 #include "scsi_logging.h"
42141
42142
42143-static atomic_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
42144+static atomic_unchecked_t scsi_host_next_hn = ATOMIC_INIT(0); /* host_no for next new host */
42145
42146
42147 static void scsi_host_cls_release(struct device *dev)
42148@@ -361,7 +361,7 @@ struct Scsi_Host *scsi_host_alloc(struct scsi_host_template *sht, int privsize)
42149 * subtract one because we increment first then return, but we need to
42150 * know what the next host number was before increment
42151 */
42152- shost->host_no = atomic_inc_return(&scsi_host_next_hn) - 1;
42153+ shost->host_no = atomic_inc_return_unchecked(&scsi_host_next_hn) - 1;
42154 shost->dma_channel = 0xff;
42155
42156 /* These three are default values which can be overridden */
42157diff --git a/drivers/scsi/hpsa.c b/drivers/scsi/hpsa.c
42158index 7f4f790..b75b92a 100644
42159--- a/drivers/scsi/hpsa.c
42160+++ b/drivers/scsi/hpsa.c
42161@@ -554,7 +554,7 @@ static inline u32 next_command(struct ctlr_info *h, u8 q)
42162 unsigned long flags;
42163
42164 if (unlikely(!(h->transMethod & CFGTBL_Trans_Performant)))
42165- return h->access.command_completed(h, q);
42166+ return h->access->command_completed(h, q);
42167
42168 if ((rq->head[rq->current_entry] & 1) == rq->wraparound) {
42169 a = rq->head[rq->current_entry];
42170@@ -3422,7 +3422,7 @@ static void start_io(struct ctlr_info *h)
42171 while (!list_empty(&h->reqQ)) {
42172 c = list_entry(h->reqQ.next, struct CommandList, list);
42173 /* can't do anything if fifo is full */
42174- if ((h->access.fifo_full(h))) {
42175+ if ((h->access->fifo_full(h))) {
42176 dev_warn(&h->pdev->dev, "fifo full\n");
42177 break;
42178 }
42179@@ -3444,7 +3444,7 @@ static void start_io(struct ctlr_info *h)
42180
42181 /* Tell the controller execute command */
42182 spin_unlock_irqrestore(&h->lock, flags);
42183- h->access.submit_command(h, c);
42184+ h->access->submit_command(h, c);
42185 spin_lock_irqsave(&h->lock, flags);
42186 }
42187 spin_unlock_irqrestore(&h->lock, flags);
42188@@ -3452,17 +3452,17 @@ static void start_io(struct ctlr_info *h)
42189
42190 static inline unsigned long get_next_completion(struct ctlr_info *h, u8 q)
42191 {
42192- return h->access.command_completed(h, q);
42193+ return h->access->command_completed(h, q);
42194 }
42195
42196 static inline bool interrupt_pending(struct ctlr_info *h)
42197 {
42198- return h->access.intr_pending(h);
42199+ return h->access->intr_pending(h);
42200 }
42201
42202 static inline long interrupt_not_for_us(struct ctlr_info *h)
42203 {
42204- return (h->access.intr_pending(h) == 0) ||
42205+ return (h->access->intr_pending(h) == 0) ||
42206 (h->interrupts_enabled == 0);
42207 }
42208
42209@@ -4364,7 +4364,7 @@ static int hpsa_pci_init(struct ctlr_info *h)
42210 if (prod_index < 0)
42211 return -ENODEV;
42212 h->product_name = products[prod_index].product_name;
42213- h->access = *(products[prod_index].access);
42214+ h->access = products[prod_index].access;
42215
42216 pci_disable_link_state(h->pdev, PCIE_LINK_STATE_L0S |
42217 PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_CLKPM);
42218@@ -4646,7 +4646,7 @@ static void controller_lockup_detected(struct ctlr_info *h)
42219
42220 assert_spin_locked(&lockup_detector_lock);
42221 remove_ctlr_from_lockup_detector_list(h);
42222- h->access.set_intr_mask(h, HPSA_INTR_OFF);
42223+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
42224 spin_lock_irqsave(&h->lock, flags);
42225 h->lockup_detected = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
42226 spin_unlock_irqrestore(&h->lock, flags);
42227@@ -4823,7 +4823,7 @@ reinit_after_soft_reset:
42228 }
42229
42230 /* make sure the board interrupts are off */
42231- h->access.set_intr_mask(h, HPSA_INTR_OFF);
42232+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
42233
42234 if (hpsa_request_irq(h, do_hpsa_intr_msi, do_hpsa_intr_intx))
42235 goto clean2;
42236@@ -4857,7 +4857,7 @@ reinit_after_soft_reset:
42237 * fake ones to scoop up any residual completions.
42238 */
42239 spin_lock_irqsave(&h->lock, flags);
42240- h->access.set_intr_mask(h, HPSA_INTR_OFF);
42241+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
42242 spin_unlock_irqrestore(&h->lock, flags);
42243 free_irqs(h);
42244 rc = hpsa_request_irq(h, hpsa_msix_discard_completions,
42245@@ -4876,9 +4876,9 @@ reinit_after_soft_reset:
42246 dev_info(&h->pdev->dev, "Board READY.\n");
42247 dev_info(&h->pdev->dev,
42248 "Waiting for stale completions to drain.\n");
42249- h->access.set_intr_mask(h, HPSA_INTR_ON);
42250+ h->access->set_intr_mask(h, HPSA_INTR_ON);
42251 msleep(10000);
42252- h->access.set_intr_mask(h, HPSA_INTR_OFF);
42253+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
42254
42255 rc = controller_reset_failed(h->cfgtable);
42256 if (rc)
42257@@ -4899,7 +4899,7 @@ reinit_after_soft_reset:
42258 }
42259
42260 /* Turn the interrupts on so we can service requests */
42261- h->access.set_intr_mask(h, HPSA_INTR_ON);
42262+ h->access->set_intr_mask(h, HPSA_INTR_ON);
42263
42264 hpsa_hba_inquiry(h);
42265 hpsa_register_scsi(h); /* hook ourselves into SCSI subsystem */
42266@@ -4954,7 +4954,7 @@ static void hpsa_shutdown(struct pci_dev *pdev)
42267 * To write all data in the battery backed cache to disks
42268 */
42269 hpsa_flush_cache(h);
42270- h->access.set_intr_mask(h, HPSA_INTR_OFF);
42271+ h->access->set_intr_mask(h, HPSA_INTR_OFF);
42272 hpsa_free_irqs_and_disable_msix(h);
42273 }
42274
42275@@ -5122,7 +5122,7 @@ static void hpsa_enter_performant_mode(struct ctlr_info *h, u32 use_short_tags)
42276 return;
42277 }
42278 /* Change the access methods to the performant access methods */
42279- h->access = SA5_performant_access;
42280+ h->access = &SA5_performant_access;
42281 h->transMethod = CFGTBL_Trans_Performant;
42282 }
42283
42284diff --git a/drivers/scsi/hpsa.h b/drivers/scsi/hpsa.h
42285index 9816479..c5d4e97 100644
42286--- a/drivers/scsi/hpsa.h
42287+++ b/drivers/scsi/hpsa.h
42288@@ -79,7 +79,7 @@ struct ctlr_info {
42289 unsigned int msix_vector;
42290 unsigned int msi_vector;
42291 int intr_mode; /* either PERF_MODE_INT or SIMPLE_MODE_INT */
42292- struct access_method access;
42293+ struct access_method *access;
42294
42295 /* queue and queue Info */
42296 struct list_head reqQ;
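
hpsa used to copy one of the static SA5 access-method templates into each controller by value; once those templates are constified, the per-controller copy would itself be a writable table of function pointers. Storing a pointer to the shared template instead is what turns every h->access.fn(...) call site above into h->access->fn(...). The shape of the change, sketched with hypothetical names:

struct ctlr_info;  /* forward declaration for the sketch */

struct access_method {
	void (*submit_command)(struct ctlr_info *h);
	int  (*intr_pending)(struct ctlr_info *h);
};

struct ctlr_info_before { struct access_method access;        };  /* writable copy */
struct ctlr_info_after  { const struct access_method *access; };  /* shared, read-only */
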
42297diff --git a/drivers/scsi/libfc/fc_exch.c b/drivers/scsi/libfc/fc_exch.c
42298index 8b928c6..9c76300 100644
42299--- a/drivers/scsi/libfc/fc_exch.c
42300+++ b/drivers/scsi/libfc/fc_exch.c
42301@@ -100,12 +100,12 @@ struct fc_exch_mgr {
42302 u16 pool_max_index;
42303
42304 struct {
42305- atomic_t no_free_exch;
42306- atomic_t no_free_exch_xid;
42307- atomic_t xid_not_found;
42308- atomic_t xid_busy;
42309- atomic_t seq_not_found;
42310- atomic_t non_bls_resp;
42311+ atomic_unchecked_t no_free_exch;
42312+ atomic_unchecked_t no_free_exch_xid;
42313+ atomic_unchecked_t xid_not_found;
42314+ atomic_unchecked_t xid_busy;
42315+ atomic_unchecked_t seq_not_found;
42316+ atomic_unchecked_t non_bls_resp;
42317 } stats;
42318 };
42319
42320@@ -736,7 +736,7 @@ static struct fc_exch *fc_exch_em_alloc(struct fc_lport *lport,
42321 /* allocate memory for exchange */
42322 ep = mempool_alloc(mp->ep_pool, GFP_ATOMIC);
42323 if (!ep) {
42324- atomic_inc(&mp->stats.no_free_exch);
42325+ atomic_inc_unchecked(&mp->stats.no_free_exch);
42326 goto out;
42327 }
42328 memset(ep, 0, sizeof(*ep));
42329@@ -797,7 +797,7 @@ out:
42330 return ep;
42331 err:
42332 spin_unlock_bh(&pool->lock);
42333- atomic_inc(&mp->stats.no_free_exch_xid);
42334+ atomic_inc_unchecked(&mp->stats.no_free_exch_xid);
42335 mempool_free(ep, mp->ep_pool);
42336 return NULL;
42337 }
42338@@ -940,7 +940,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
42339 xid = ntohs(fh->fh_ox_id); /* we originated exch */
42340 ep = fc_exch_find(mp, xid);
42341 if (!ep) {
42342- atomic_inc(&mp->stats.xid_not_found);
42343+ atomic_inc_unchecked(&mp->stats.xid_not_found);
42344 reject = FC_RJT_OX_ID;
42345 goto out;
42346 }
42347@@ -970,7 +970,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
42348 ep = fc_exch_find(mp, xid);
42349 if ((f_ctl & FC_FC_FIRST_SEQ) && fc_sof_is_init(fr_sof(fp))) {
42350 if (ep) {
42351- atomic_inc(&mp->stats.xid_busy);
42352+ atomic_inc_unchecked(&mp->stats.xid_busy);
42353 reject = FC_RJT_RX_ID;
42354 goto rel;
42355 }
42356@@ -981,7 +981,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
42357 }
42358 xid = ep->xid; /* get our XID */
42359 } else if (!ep) {
42360- atomic_inc(&mp->stats.xid_not_found);
42361+ atomic_inc_unchecked(&mp->stats.xid_not_found);
42362 reject = FC_RJT_RX_ID; /* XID not found */
42363 goto out;
42364 }
42365@@ -998,7 +998,7 @@ static enum fc_pf_rjt_reason fc_seq_lookup_recip(struct fc_lport *lport,
42366 } else {
42367 sp = &ep->seq;
42368 if (sp->id != fh->fh_seq_id) {
42369- atomic_inc(&mp->stats.seq_not_found);
42370+ atomic_inc_unchecked(&mp->stats.seq_not_found);
42371 if (f_ctl & FC_FC_END_SEQ) {
42372 /*
42373 * Update sequence_id based on incoming last
42374@@ -1448,22 +1448,22 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
42375
42376 ep = fc_exch_find(mp, ntohs(fh->fh_ox_id));
42377 if (!ep) {
42378- atomic_inc(&mp->stats.xid_not_found);
42379+ atomic_inc_unchecked(&mp->stats.xid_not_found);
42380 goto out;
42381 }
42382 if (ep->esb_stat & ESB_ST_COMPLETE) {
42383- atomic_inc(&mp->stats.xid_not_found);
42384+ atomic_inc_unchecked(&mp->stats.xid_not_found);
42385 goto rel;
42386 }
42387 if (ep->rxid == FC_XID_UNKNOWN)
42388 ep->rxid = ntohs(fh->fh_rx_id);
42389 if (ep->sid != 0 && ep->sid != ntoh24(fh->fh_d_id)) {
42390- atomic_inc(&mp->stats.xid_not_found);
42391+ atomic_inc_unchecked(&mp->stats.xid_not_found);
42392 goto rel;
42393 }
42394 if (ep->did != ntoh24(fh->fh_s_id) &&
42395 ep->did != FC_FID_FLOGI) {
42396- atomic_inc(&mp->stats.xid_not_found);
42397+ atomic_inc_unchecked(&mp->stats.xid_not_found);
42398 goto rel;
42399 }
42400 sof = fr_sof(fp);
42401@@ -1472,7 +1472,7 @@ static void fc_exch_recv_seq_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
42402 sp->ssb_stat |= SSB_ST_RESP;
42403 sp->id = fh->fh_seq_id;
42404 } else if (sp->id != fh->fh_seq_id) {
42405- atomic_inc(&mp->stats.seq_not_found);
42406+ atomic_inc_unchecked(&mp->stats.seq_not_found);
42407 goto rel;
42408 }
42409
42410@@ -1536,9 +1536,9 @@ static void fc_exch_recv_resp(struct fc_exch_mgr *mp, struct fc_frame *fp)
42411 sp = fc_seq_lookup_orig(mp, fp); /* doesn't hold sequence */
42412
42413 if (!sp)
42414- atomic_inc(&mp->stats.xid_not_found);
42415+ atomic_inc_unchecked(&mp->stats.xid_not_found);
42416 else
42417- atomic_inc(&mp->stats.non_bls_resp);
42418+ atomic_inc_unchecked(&mp->stats.non_bls_resp);
42419
42420 fc_frame_free(fp);
42421 }
42422@@ -2185,13 +2185,13 @@ void fc_exch_update_stats(struct fc_lport *lport)
42423
42424 list_for_each_entry(ema, &lport->ema_list, ema_list) {
42425 mp = ema->mp;
42426- st->fc_no_free_exch += atomic_read(&mp->stats.no_free_exch);
42427+ st->fc_no_free_exch += atomic_read_unchecked(&mp->stats.no_free_exch);
42428 st->fc_no_free_exch_xid +=
42429- atomic_read(&mp->stats.no_free_exch_xid);
42430- st->fc_xid_not_found += atomic_read(&mp->stats.xid_not_found);
42431- st->fc_xid_busy += atomic_read(&mp->stats.xid_busy);
42432- st->fc_seq_not_found += atomic_read(&mp->stats.seq_not_found);
42433- st->fc_non_bls_resp += atomic_read(&mp->stats.non_bls_resp);
42434+ atomic_read_unchecked(&mp->stats.no_free_exch_xid);
42435+ st->fc_xid_not_found += atomic_read_unchecked(&mp->stats.xid_not_found);
42436+ st->fc_xid_busy += atomic_read_unchecked(&mp->stats.xid_busy);
42437+ st->fc_seq_not_found += atomic_read_unchecked(&mp->stats.seq_not_found);
42438+ st->fc_non_bls_resp += atomic_read_unchecked(&mp->stats.non_bls_resp);
42439 }
42440 }
42441 EXPORT_SYMBOL(fc_exch_update_stats);
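
All of these exchange-manager fields are pure statistics, which is why they move to atomic_unchecked_t: PaX's REFCOUNT feature makes ordinary atomic_t trap on overflow (to stop reference-count-overflow exploits), so a counter that is allowed to wrap must be exempted. The _unchecked API mirrors the normal one, as in this sketch (PaX-only types; on a mainline kernel both fields would be plain atomic_t):

struct obj {
	atomic_t           refcount;  /* overflow here must trap */
	atomic_unchecked_t hits;      /* statistic: wrapping is harmless */
};

static void obj_get(struct obj *o)
{
	atomic_inc(&o->refcount);        /* REFCOUNT-checked */
	atomic_inc_unchecked(&o->hits);  /* exempted from the overflow check */
}
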
42442diff --git a/drivers/scsi/libsas/sas_ata.c b/drivers/scsi/libsas/sas_ata.c
42443index 161c98e..6d563b3 100644
42444--- a/drivers/scsi/libsas/sas_ata.c
42445+++ b/drivers/scsi/libsas/sas_ata.c
42446@@ -554,7 +554,7 @@ static struct ata_port_operations sas_sata_ops = {
42447 .postreset = ata_std_postreset,
42448 .error_handler = ata_std_error_handler,
42449 .post_internal_cmd = sas_ata_post_internal,
42450- .qc_defer = ata_std_qc_defer,
42451+ .qc_defer = ata_std_qc_defer,
42452 .qc_prep = ata_noop_qc_prep,
42453 .qc_issue = sas_ata_qc_issue,
42454 .qc_fill_rtf = sas_ata_qc_fill_rtf,
42455diff --git a/drivers/scsi/lpfc/lpfc.h b/drivers/scsi/lpfc/lpfc.h
42456index bcc56ca..6f4174a 100644
42457--- a/drivers/scsi/lpfc/lpfc.h
42458+++ b/drivers/scsi/lpfc/lpfc.h
42459@@ -431,7 +431,7 @@ struct lpfc_vport {
42460 struct dentry *debug_nodelist;
42461 struct dentry *vport_debugfs_root;
42462 struct lpfc_debugfs_trc *disc_trc;
42463- atomic_t disc_trc_cnt;
42464+ atomic_unchecked_t disc_trc_cnt;
42465 #endif
42466 uint8_t stat_data_enabled;
42467 uint8_t stat_data_blocked;
42468@@ -865,8 +865,8 @@ struct lpfc_hba {
42469 struct timer_list fabric_block_timer;
42470 unsigned long bit_flags;
42471 #define FABRIC_COMANDS_BLOCKED 0
42472- atomic_t num_rsrc_err;
42473- atomic_t num_cmd_success;
42474+ atomic_unchecked_t num_rsrc_err;
42475+ atomic_unchecked_t num_cmd_success;
42476 unsigned long last_rsrc_error_time;
42477 unsigned long last_ramp_down_time;
42478 unsigned long last_ramp_up_time;
42479@@ -902,7 +902,7 @@ struct lpfc_hba {
42480
42481 struct dentry *debug_slow_ring_trc;
42482 struct lpfc_debugfs_trc *slow_ring_trc;
42483- atomic_t slow_ring_trc_cnt;
42484+ atomic_unchecked_t slow_ring_trc_cnt;
42485 /* iDiag debugfs sub-directory */
42486 struct dentry *idiag_root;
42487 struct dentry *idiag_pci_cfg;
42488diff --git a/drivers/scsi/lpfc/lpfc_debugfs.c b/drivers/scsi/lpfc/lpfc_debugfs.c
42489index f525ecb..32549a4 100644
42490--- a/drivers/scsi/lpfc/lpfc_debugfs.c
42491+++ b/drivers/scsi/lpfc/lpfc_debugfs.c
42492@@ -106,7 +106,7 @@ MODULE_PARM_DESC(lpfc_debugfs_mask_disc_trc,
42493
42494 #include <linux/debugfs.h>
42495
42496-static atomic_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
42497+static atomic_unchecked_t lpfc_debugfs_seq_trc_cnt = ATOMIC_INIT(0);
42498 static unsigned long lpfc_debugfs_start_time = 0L;
42499
42500 /* iDiag */
42501@@ -147,7 +147,7 @@ lpfc_debugfs_disc_trc_data(struct lpfc_vport *vport, char *buf, int size)
42502 lpfc_debugfs_enable = 0;
42503
42504 len = 0;
42505- index = (atomic_read(&vport->disc_trc_cnt) + 1) &
42506+ index = (atomic_read_unchecked(&vport->disc_trc_cnt) + 1) &
42507 (lpfc_debugfs_max_disc_trc - 1);
42508 for (i = index; i < lpfc_debugfs_max_disc_trc; i++) {
42509 dtp = vport->disc_trc + i;
42510@@ -213,7 +213,7 @@ lpfc_debugfs_slow_ring_trc_data(struct lpfc_hba *phba, char *buf, int size)
42511 lpfc_debugfs_enable = 0;
42512
42513 len = 0;
42514- index = (atomic_read(&phba->slow_ring_trc_cnt) + 1) &
42515+ index = (atomic_read_unchecked(&phba->slow_ring_trc_cnt) + 1) &
42516 (lpfc_debugfs_max_slow_ring_trc - 1);
42517 for (i = index; i < lpfc_debugfs_max_slow_ring_trc; i++) {
42518 dtp = phba->slow_ring_trc + i;
42519@@ -646,14 +646,14 @@ lpfc_debugfs_disc_trc(struct lpfc_vport *vport, int mask, char *fmt,
42520 !vport || !vport->disc_trc)
42521 return;
42522
42523- index = atomic_inc_return(&vport->disc_trc_cnt) &
42524+ index = atomic_inc_return_unchecked(&vport->disc_trc_cnt) &
42525 (lpfc_debugfs_max_disc_trc - 1);
42526 dtp = vport->disc_trc + index;
42527 dtp->fmt = fmt;
42528 dtp->data1 = data1;
42529 dtp->data2 = data2;
42530 dtp->data3 = data3;
42531- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
42532+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
42533 dtp->jif = jiffies;
42534 #endif
42535 return;
42536@@ -684,14 +684,14 @@ lpfc_debugfs_slow_ring_trc(struct lpfc_hba *phba, char *fmt,
42537 !phba || !phba->slow_ring_trc)
42538 return;
42539
42540- index = atomic_inc_return(&phba->slow_ring_trc_cnt) &
42541+ index = atomic_inc_return_unchecked(&phba->slow_ring_trc_cnt) &
42542 (lpfc_debugfs_max_slow_ring_trc - 1);
42543 dtp = phba->slow_ring_trc + index;
42544 dtp->fmt = fmt;
42545 dtp->data1 = data1;
42546 dtp->data2 = data2;
42547 dtp->data3 = data3;
42548- dtp->seq_cnt = atomic_inc_return(&lpfc_debugfs_seq_trc_cnt);
42549+ dtp->seq_cnt = atomic_inc_return_unchecked(&lpfc_debugfs_seq_trc_cnt);
42550 dtp->jif = jiffies;
42551 #endif
42552 return;
42553@@ -4182,7 +4182,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
42554 "slow_ring buffer\n");
42555 goto debug_failed;
42556 }
42557- atomic_set(&phba->slow_ring_trc_cnt, 0);
42558+ atomic_set_unchecked(&phba->slow_ring_trc_cnt, 0);
42559 memset(phba->slow_ring_trc, 0,
42560 (sizeof(struct lpfc_debugfs_trc) *
42561 lpfc_debugfs_max_slow_ring_trc));
42562@@ -4228,7 +4228,7 @@ lpfc_debugfs_initialize(struct lpfc_vport *vport)
42563 "buffer\n");
42564 goto debug_failed;
42565 }
42566- atomic_set(&vport->disc_trc_cnt, 0);
42567+ atomic_set_unchecked(&vport->disc_trc_cnt, 0);
42568
42569 snprintf(name, sizeof(name), "discovery_trace");
42570 vport->debug_disc_trc =
42571diff --git a/drivers/scsi/lpfc/lpfc_init.c b/drivers/scsi/lpfc/lpfc_init.c
42572index cb465b2..2e7b25f 100644
42573--- a/drivers/scsi/lpfc/lpfc_init.c
42574+++ b/drivers/scsi/lpfc/lpfc_init.c
42575@@ -10950,8 +10950,10 @@ lpfc_init(void)
42576 "misc_register returned with status %d", error);
42577
42578 if (lpfc_enable_npiv) {
42579- lpfc_transport_functions.vport_create = lpfc_vport_create;
42580- lpfc_transport_functions.vport_delete = lpfc_vport_delete;
42581+ pax_open_kernel();
42582+ *(void **)&lpfc_transport_functions.vport_create = lpfc_vport_create;
42583+ *(void **)&lpfc_transport_functions.vport_delete = lpfc_vport_delete;
42584+ pax_close_kernel();
42585 }
42586 lpfc_transport_template =
42587 fc_attach_transport(&lpfc_transport_functions);
42588diff --git a/drivers/scsi/lpfc/lpfc_scsi.c b/drivers/scsi/lpfc/lpfc_scsi.c
42589index 8523b278e..ce1d812 100644
42590--- a/drivers/scsi/lpfc/lpfc_scsi.c
42591+++ b/drivers/scsi/lpfc/lpfc_scsi.c
42592@@ -331,7 +331,7 @@ lpfc_rampdown_queue_depth(struct lpfc_hba *phba)
42593 uint32_t evt_posted;
42594
42595 spin_lock_irqsave(&phba->hbalock, flags);
42596- atomic_inc(&phba->num_rsrc_err);
42597+ atomic_inc_unchecked(&phba->num_rsrc_err);
42598 phba->last_rsrc_error_time = jiffies;
42599
42600 if ((phba->last_ramp_down_time + QUEUE_RAMP_DOWN_INTERVAL) > jiffies) {
42601@@ -372,7 +372,7 @@ lpfc_rampup_queue_depth(struct lpfc_vport *vport,
42602 unsigned long flags;
42603 struct lpfc_hba *phba = vport->phba;
42604 uint32_t evt_posted;
42605- atomic_inc(&phba->num_cmd_success);
42606+ atomic_inc_unchecked(&phba->num_cmd_success);
42607
42608 if (vport->cfg_lun_queue_depth <= queue_depth)
42609 return;
42610@@ -416,8 +416,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
42611 unsigned long num_rsrc_err, num_cmd_success;
42612 int i;
42613
42614- num_rsrc_err = atomic_read(&phba->num_rsrc_err);
42615- num_cmd_success = atomic_read(&phba->num_cmd_success);
42616+ num_rsrc_err = atomic_read_unchecked(&phba->num_rsrc_err);
42617+ num_cmd_success = atomic_read_unchecked(&phba->num_cmd_success);
42618
42619 /*
42620 * The error and success command counters are global per
42621@@ -445,8 +445,8 @@ lpfc_ramp_down_queue_handler(struct lpfc_hba *phba)
42622 }
42623 }
42624 lpfc_destroy_vport_work_array(phba, vports);
42625- atomic_set(&phba->num_rsrc_err, 0);
42626- atomic_set(&phba->num_cmd_success, 0);
42627+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
42628+ atomic_set_unchecked(&phba->num_cmd_success, 0);
42629 }
42630
42631 /**
42632@@ -480,8 +480,8 @@ lpfc_ramp_up_queue_handler(struct lpfc_hba *phba)
42633 }
42634 }
42635 lpfc_destroy_vport_work_array(phba, vports);
42636- atomic_set(&phba->num_rsrc_err, 0);
42637- atomic_set(&phba->num_cmd_success, 0);
42638+ atomic_set_unchecked(&phba->num_rsrc_err, 0);
42639+ atomic_set_unchecked(&phba->num_cmd_success, 0);
42640 }
42641
42642 /**
42643diff --git a/drivers/scsi/pmcraid.c b/drivers/scsi/pmcraid.c
42644index 8e1b737..50ff510 100644
42645--- a/drivers/scsi/pmcraid.c
42646+++ b/drivers/scsi/pmcraid.c
42647@@ -200,8 +200,8 @@ static int pmcraid_slave_alloc(struct scsi_device *scsi_dev)
42648 res->scsi_dev = scsi_dev;
42649 scsi_dev->hostdata = res;
42650 res->change_detected = 0;
42651- atomic_set(&res->read_failures, 0);
42652- atomic_set(&res->write_failures, 0);
42653+ atomic_set_unchecked(&res->read_failures, 0);
42654+ atomic_set_unchecked(&res->write_failures, 0);
42655 rc = 0;
42656 }
42657 spin_unlock_irqrestore(&pinstance->resource_lock, lock_flags);
42658@@ -2676,9 +2676,9 @@ static int pmcraid_error_handler(struct pmcraid_cmd *cmd)
42659
42660 /* If this was a SCSI read/write command keep count of errors */
42661 if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_READ_CMD)
42662- atomic_inc(&res->read_failures);
42663+ atomic_inc_unchecked(&res->read_failures);
42664 else if (SCSI_CMD_TYPE(scsi_cmd->cmnd[0]) == SCSI_WRITE_CMD)
42665- atomic_inc(&res->write_failures);
42666+ atomic_inc_unchecked(&res->write_failures);
42667
42668 if (!RES_IS_GSCSI(res->cfg_entry) &&
42669 masked_ioasc != PMCRAID_IOASC_HW_DEVICE_BUS_STATUS_ERROR) {
42670@@ -3534,7 +3534,7 @@ static int pmcraid_queuecommand_lck(
42671 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
42672 * hrrq_id assigned here in queuecommand
42673 */
42674- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
42675+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
42676 pinstance->num_hrrq;
42677 cmd->cmd_done = pmcraid_io_done;
42678
42679@@ -3846,7 +3846,7 @@ static long pmcraid_ioctl_passthrough(
42680 * block of scsi_cmd which is re-used (e.g. cancel/abort), which uses
42681 * hrrq_id assigned here in queuecommand
42682 */
42683- ioarcb->hrrq_id = atomic_add_return(1, &(pinstance->last_message_id)) %
42684+ ioarcb->hrrq_id = atomic_add_return_unchecked(1, &(pinstance->last_message_id)) %
42685 pinstance->num_hrrq;
42686
42687 if (request_size) {
42688@@ -4483,7 +4483,7 @@ static void pmcraid_worker_function(struct work_struct *workp)
42689
42690 pinstance = container_of(workp, struct pmcraid_instance, worker_q);
42691 /* add resources only after host is added into system */
42692- if (!atomic_read(&pinstance->expose_resources))
42693+ if (!atomic_read_unchecked(&pinstance->expose_resources))
42694 return;
42695
42696 fw_version = be16_to_cpu(pinstance->inq_data->fw_version);
42697@@ -5310,8 +5310,8 @@ static int pmcraid_init_instance(struct pci_dev *pdev, struct Scsi_Host *host,
42698 init_waitqueue_head(&pinstance->reset_wait_q);
42699
42700 atomic_set(&pinstance->outstanding_cmds, 0);
42701- atomic_set(&pinstance->last_message_id, 0);
42702- atomic_set(&pinstance->expose_resources, 0);
42703+ atomic_set_unchecked(&pinstance->last_message_id, 0);
42704+ atomic_set_unchecked(&pinstance->expose_resources, 0);
42705
42706 INIT_LIST_HEAD(&pinstance->free_res_q);
42707 INIT_LIST_HEAD(&pinstance->used_res_q);
42708@@ -6024,7 +6024,7 @@ static int pmcraid_probe(struct pci_dev *pdev,
42709 /* Schedule worker thread to handle CCN and take care of adding and
42710 * removing devices to OS
42711 */
42712- atomic_set(&pinstance->expose_resources, 1);
42713+ atomic_set_unchecked(&pinstance->expose_resources, 1);
42714 schedule_work(&pinstance->worker_q);
42715 return rc;
42716
42717diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
42718index e1d150f..6c6df44 100644
42719--- a/drivers/scsi/pmcraid.h
42720+++ b/drivers/scsi/pmcraid.h
42721@@ -748,7 +748,7 @@ struct pmcraid_instance {
42722 struct pmcraid_isr_param hrrq_vector[PMCRAID_NUM_MSIX_VECTORS];
42723
42724 /* Message id as filled in last fired IOARCB, used to identify HRRQ */
42725- atomic_t last_message_id;
42726+ atomic_unchecked_t last_message_id;
42727
42728 /* configuration table */
42729 struct pmcraid_config_table *cfg_table;
42730@@ -777,7 +777,7 @@ struct pmcraid_instance {
42731 atomic_t outstanding_cmds;
42732
42733 /* should add/delete resources to mid-layer now ?*/
42734- atomic_t expose_resources;
42735+ atomic_unchecked_t expose_resources;
42736
42737
42738
42739@@ -813,8 +813,8 @@ struct pmcraid_resource_entry {
42740 struct pmcraid_config_table_entry_ext cfg_entry_ext;
42741 };
42742 struct scsi_device *scsi_dev; /* Link scsi_device structure */
42743- atomic_t read_failures; /* count of failed READ commands */
42744- atomic_t write_failures; /* count of failed WRITE commands */
42745+ atomic_unchecked_t read_failures; /* count of failed READ commands */
42746+ atomic_unchecked_t write_failures; /* count of failed WRITE commands */
42747
42748 /* To indicate add/delete/modify during CCN */
42749 u8 change_detected;
42750diff --git a/drivers/scsi/qla2xxx/qla_attr.c b/drivers/scsi/qla2xxx/qla_attr.c
42751index bf60c63..74d4dce 100644
42752--- a/drivers/scsi/qla2xxx/qla_attr.c
42753+++ b/drivers/scsi/qla2xxx/qla_attr.c
42754@@ -2001,7 +2001,7 @@ qla24xx_vport_disable(struct fc_vport *fc_vport, bool disable)
42755 return 0;
42756 }
42757
42758-struct fc_function_template qla2xxx_transport_functions = {
42759+fc_function_template_no_const qla2xxx_transport_functions = {
42760
42761 .show_host_node_name = 1,
42762 .show_host_port_name = 1,
42763@@ -2048,7 +2048,7 @@ struct fc_function_template qla2xxx_transport_functions = {
42764 .bsg_timeout = qla24xx_bsg_timeout,
42765 };
42766
42767-struct fc_function_template qla2xxx_transport_vport_functions = {
42768+fc_function_template_no_const qla2xxx_transport_vport_functions = {
42769
42770 .show_host_node_name = 1,
42771 .show_host_port_name = 1,
42772diff --git a/drivers/scsi/qla2xxx/qla_gbl.h b/drivers/scsi/qla2xxx/qla_gbl.h
42773index 026bfde..90c4018 100644
42774--- a/drivers/scsi/qla2xxx/qla_gbl.h
42775+++ b/drivers/scsi/qla2xxx/qla_gbl.h
42776@@ -528,8 +528,8 @@ extern void qla2x00_get_sym_node_name(scsi_qla_host_t *, uint8_t *);
42777 struct device_attribute;
42778 extern struct device_attribute *qla2x00_host_attrs[];
42779 struct fc_function_template;
42780-extern struct fc_function_template qla2xxx_transport_functions;
42781-extern struct fc_function_template qla2xxx_transport_vport_functions;
42782+extern fc_function_template_no_const qla2xxx_transport_functions;
42783+extern fc_function_template_no_const qla2xxx_transport_vport_functions;
42784 extern void qla2x00_alloc_sysfs_attr(scsi_qla_host_t *);
42785 extern void qla2x00_free_sysfs_attr(scsi_qla_host_t *);
42786 extern void qla2x00_init_host_attr(scsi_qla_host_t *);
42787diff --git a/drivers/scsi/qla2xxx/qla_os.c b/drivers/scsi/qla2xxx/qla_os.c
42788index ad72c1d..afc9a98 100644
42789--- a/drivers/scsi/qla2xxx/qla_os.c
42790+++ b/drivers/scsi/qla2xxx/qla_os.c
42791@@ -1571,8 +1571,10 @@ qla2x00_config_dma_addressing(struct qla_hw_data *ha)
42792 !pci_set_consistent_dma_mask(ha->pdev, DMA_BIT_MASK(64))) {
42793 /* Ok, a 64bit DMA mask is applicable. */
42794 ha->flags.enable_64bit_addressing = 1;
42795- ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
42796- ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
42797+ pax_open_kernel();
42798+ *(void **)&ha->isp_ops->calc_req_entries = qla2x00_calc_iocbs_64;
42799+ *(void **)&ha->isp_ops->build_iocbs = qla2x00_build_scsi_iocbs_64;
42800+ pax_close_kernel();
42801 return;
42802 }
42803 }
42804diff --git a/drivers/scsi/qla4xxx/ql4_def.h b/drivers/scsi/qla4xxx/ql4_def.h
42805index ddf16a8..80f4dd0 100644
42806--- a/drivers/scsi/qla4xxx/ql4_def.h
42807+++ b/drivers/scsi/qla4xxx/ql4_def.h
42808@@ -291,7 +291,7 @@ struct ddb_entry {
42809 * (4000 only) */
42810 atomic_t relogin_timer; /* Max Time to wait for
42811 * relogin to complete */
42812- atomic_t relogin_retry_count; /* Num of times relogin has been
42813+ atomic_unchecked_t relogin_retry_count; /* Num of times relogin has been
42814 * retried */
42815 uint32_t default_time2wait; /* Default Min time between
42816 * relogins (+aens) */
42817diff --git a/drivers/scsi/qla4xxx/ql4_os.c b/drivers/scsi/qla4xxx/ql4_os.c
42818index 4d231c1..2892c37 100644
42819--- a/drivers/scsi/qla4xxx/ql4_os.c
42820+++ b/drivers/scsi/qla4xxx/ql4_os.c
42821@@ -2971,12 +2971,12 @@ static void qla4xxx_check_relogin_flash_ddb(struct iscsi_cls_session *cls_sess)
42822 */
42823 if (!iscsi_is_session_online(cls_sess)) {
42824 /* Reset retry relogin timer */
42825- atomic_inc(&ddb_entry->relogin_retry_count);
42826+ atomic_inc_unchecked(&ddb_entry->relogin_retry_count);
42827 DEBUG2(ql4_printk(KERN_INFO, ha,
42828 "%s: index[%d] relogin timed out-retrying"
42829 " relogin (%d), retry (%d)\n", __func__,
42830 ddb_entry->fw_ddb_index,
42831- atomic_read(&ddb_entry->relogin_retry_count),
42832+ atomic_read_unchecked(&ddb_entry->relogin_retry_count),
42833 ddb_entry->default_time2wait + 4));
42834 set_bit(DPC_RELOGIN_DEVICE, &ha->dpc_flags);
42835 atomic_set(&ddb_entry->retry_relogin_timer,
42836@@ -5081,7 +5081,7 @@ static void qla4xxx_setup_flash_ddb_entry(struct scsi_qla_host *ha,
42837
42838 atomic_set(&ddb_entry->retry_relogin_timer, INVALID_ENTRY);
42839 atomic_set(&ddb_entry->relogin_timer, 0);
42840- atomic_set(&ddb_entry->relogin_retry_count, 0);
42841+ atomic_set_unchecked(&ddb_entry->relogin_retry_count, 0);
42842 def_timeout = le16_to_cpu(ddb_entry->fw_ddb_entry.def_timeout);
42843 ddb_entry->default_relogin_timeout =
42844 (def_timeout > LOGIN_TOV) && (def_timeout < LOGIN_TOV * 10) ?
42845diff --git a/drivers/scsi/scsi.c b/drivers/scsi/scsi.c
42846index 3b1ea34..1583a72 100644
42847--- a/drivers/scsi/scsi.c
42848+++ b/drivers/scsi/scsi.c
42849@@ -661,7 +661,7 @@ int scsi_dispatch_cmd(struct scsi_cmnd *cmd)
42850 unsigned long timeout;
42851 int rtn = 0;
42852
42853- atomic_inc(&cmd->device->iorequest_cnt);
42854+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
42855
42856 /* check if the device is still usable */
42857 if (unlikely(cmd->device->sdev_state == SDEV_DEL)) {
42858diff --git a/drivers/scsi/scsi_lib.c b/drivers/scsi/scsi_lib.c
42859index 86d5220..f22c51a 100644
42860--- a/drivers/scsi/scsi_lib.c
42861+++ b/drivers/scsi/scsi_lib.c
42862@@ -1458,7 +1458,7 @@ static void scsi_kill_request(struct request *req, struct request_queue *q)
42863 shost = sdev->host;
42864 scsi_init_cmd_errh(cmd);
42865 cmd->result = DID_NO_CONNECT << 16;
42866- atomic_inc(&cmd->device->iorequest_cnt);
42867+ atomic_inc_unchecked(&cmd->device->iorequest_cnt);
42868
42869 /*
42870 * SCSI request completion path will do scsi_device_unbusy(),
42871@@ -1484,9 +1484,9 @@ static void scsi_softirq_done(struct request *rq)
42872
42873 INIT_LIST_HEAD(&cmd->eh_entry);
42874
42875- atomic_inc(&cmd->device->iodone_cnt);
42876+ atomic_inc_unchecked(&cmd->device->iodone_cnt);
42877 if (cmd->result)
42878- atomic_inc(&cmd->device->ioerr_cnt);
42879+ atomic_inc_unchecked(&cmd->device->ioerr_cnt);
42880
42881 disposition = scsi_decide_disposition(cmd);
42882 if (disposition != SUCCESS &&
42883diff --git a/drivers/scsi/scsi_sysfs.c b/drivers/scsi/scsi_sysfs.c
42884index 931a7d9..0c2a754 100644
42885--- a/drivers/scsi/scsi_sysfs.c
42886+++ b/drivers/scsi/scsi_sysfs.c
42887@@ -658,7 +658,7 @@ show_iostat_##field(struct device *dev, struct device_attribute *attr, \
42888 char *buf) \
42889 { \
42890 struct scsi_device *sdev = to_scsi_device(dev); \
42891- unsigned long long count = atomic_read(&sdev->field); \
42892+ unsigned long long count = atomic_read_unchecked(&sdev->field); \
42893 return snprintf(buf, 20, "0x%llx\n", count); \
42894 } \
42895 static DEVICE_ATTR(field, S_IRUGO, show_iostat_##field, NULL)
42896diff --git a/drivers/scsi/scsi_tgt_lib.c b/drivers/scsi/scsi_tgt_lib.c
42897index 84a1fdf..693b0d6 100644
42898--- a/drivers/scsi/scsi_tgt_lib.c
42899+++ b/drivers/scsi/scsi_tgt_lib.c
42900@@ -362,7 +362,7 @@ static int scsi_map_user_pages(struct scsi_tgt_cmd *tcmd, struct scsi_cmnd *cmd,
42901 int err;
42902
42903 dprintk("%lx %u\n", uaddr, len);
42904- err = blk_rq_map_user(q, rq, NULL, (void *)uaddr, len, GFP_KERNEL);
42905+ err = blk_rq_map_user(q, rq, NULL, (void __user *)uaddr, len, GFP_KERNEL);
42906 if (err) {
42907 /*
42908 * TODO: need to fixup sg_tablesize, max_segment_size,
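
The cast in the blk_rq_map_user() hunk is purely an annotation fix: the parameter is declared void __user *, and casting through plain (void *) discards the address-space marker that sparse uses to keep kernel and user pointers apart. __user compiles away outside `make C=1`; a trivial example of carrying it through a cast:

/* __user is a no-op for gcc and an address-space attribute for sparse;
 * preserving it in casts lets static checking catch pointer mixups. */
static void __user *uaddr_to_ptr(unsigned long uaddr)
{
	return (void __user *)uaddr;
}
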
42909diff --git a/drivers/scsi/scsi_transport_fc.c b/drivers/scsi/scsi_transport_fc.c
42910index e106c27..11a380e 100644
42911--- a/drivers/scsi/scsi_transport_fc.c
42912+++ b/drivers/scsi/scsi_transport_fc.c
42913@@ -497,7 +497,7 @@ static DECLARE_TRANSPORT_CLASS(fc_vport_class,
42914 * Netlink Infrastructure
42915 */
42916
42917-static atomic_t fc_event_seq;
42918+static atomic_unchecked_t fc_event_seq;
42919
42920 /**
42921 * fc_get_event_number - Obtain the next sequential FC event number
42922@@ -510,7 +510,7 @@ static atomic_t fc_event_seq;
42923 u32
42924 fc_get_event_number(void)
42925 {
42926- return atomic_add_return(1, &fc_event_seq);
42927+ return atomic_add_return_unchecked(1, &fc_event_seq);
42928 }
42929 EXPORT_SYMBOL(fc_get_event_number);
42930
42931@@ -654,7 +654,7 @@ static __init int fc_transport_init(void)
42932 {
42933 int error;
42934
42935- atomic_set(&fc_event_seq, 0);
42936+ atomic_set_unchecked(&fc_event_seq, 0);
42937
42938 error = transport_class_register(&fc_host_class);
42939 if (error)
42940@@ -844,7 +844,7 @@ static int fc_str_to_dev_loss(const char *buf, unsigned long *val)
42941 char *cp;
42942
42943 *val = simple_strtoul(buf, &cp, 0);
42944- if ((*cp && (*cp != '\n')) || (*val < 0))
42945+ if (*cp && (*cp != '\n'))
42946 return -EINVAL;
42947 /*
42948 * Check for overflow; dev_loss_tmo is u32
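
*val is an unsigned long here, so the removed (*val < 0) clause could never be true; it was dead code rather than a real range check, and the u32 overflow test mentioned in the trailing comment still bounds the value. The same non-check in miniature, as plain userspace C:

#include <stdlib.h>

static int parse_tmo(const char *buf, unsigned long *val)
{
	char *cp;

	*val = strtoul(buf, &cp, 0);
	if (*cp && *cp != '\n')  /* "(*val < 0)" would always be false */
		return -1;
	return 0;
}
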
42949diff --git a/drivers/scsi/scsi_transport_iscsi.c b/drivers/scsi/scsi_transport_iscsi.c
42950index 133926b..903000d 100644
42951--- a/drivers/scsi/scsi_transport_iscsi.c
42952+++ b/drivers/scsi/scsi_transport_iscsi.c
42953@@ -80,7 +80,7 @@ struct iscsi_internal {
42954 struct transport_container session_cont;
42955 };
42956
42957-static atomic_t iscsi_session_nr; /* sysfs session id for next new session */
42958+static atomic_unchecked_t iscsi_session_nr; /* sysfs session id for next new session */
42959 static struct workqueue_struct *iscsi_eh_timer_workq;
42960
42961 static DEFINE_IDA(iscsi_sess_ida);
42962@@ -1738,7 +1738,7 @@ int iscsi_add_session(struct iscsi_cls_session *session, unsigned int target_id)
42963 int err;
42964
42965 ihost = shost->shost_data;
42966- session->sid = atomic_add_return(1, &iscsi_session_nr);
42967+ session->sid = atomic_add_return_unchecked(1, &iscsi_session_nr);
42968
42969 if (target_id == ISCSI_MAX_TARGET) {
42970 id = ida_simple_get(&iscsi_sess_ida, 0, 0, GFP_KERNEL);
42971@@ -3944,7 +3944,7 @@ static __init int iscsi_transport_init(void)
42972 printk(KERN_INFO "Loading iSCSI transport class v%s.\n",
42973 ISCSI_TRANSPORT_VERSION);
42974
42975- atomic_set(&iscsi_session_nr, 0);
42976+ atomic_set_unchecked(&iscsi_session_nr, 0);
42977
42978 err = class_register(&iscsi_transport_class);
42979 if (err)
42980diff --git a/drivers/scsi/scsi_transport_srp.c b/drivers/scsi/scsi_transport_srp.c
42981index f379c7f..e8fc69c 100644
42982--- a/drivers/scsi/scsi_transport_srp.c
42983+++ b/drivers/scsi/scsi_transport_srp.c
42984@@ -33,7 +33,7 @@
42985 #include "scsi_transport_srp_internal.h"
42986
42987 struct srp_host_attrs {
42988- atomic_t next_port_id;
42989+ atomic_unchecked_t next_port_id;
42990 };
42991 #define to_srp_host_attrs(host) ((struct srp_host_attrs *)(host)->shost_data)
42992
42993@@ -61,7 +61,7 @@ static int srp_host_setup(struct transport_container *tc, struct device *dev,
42994 struct Scsi_Host *shost = dev_to_shost(dev);
42995 struct srp_host_attrs *srp_host = to_srp_host_attrs(shost);
42996
42997- atomic_set(&srp_host->next_port_id, 0);
42998+ atomic_set_unchecked(&srp_host->next_port_id, 0);
42999 return 0;
43000 }
43001
43002@@ -210,7 +210,7 @@ struct srp_rport *srp_rport_add(struct Scsi_Host *shost,
43003 memcpy(rport->port_id, ids->port_id, sizeof(rport->port_id));
43004 rport->roles = ids->roles;
43005
43006- id = atomic_inc_return(&to_srp_host_attrs(shost)->next_port_id);
43007+ id = atomic_inc_return_unchecked(&to_srp_host_attrs(shost)->next_port_id);
43008 dev_set_name(&rport->dev, "port-%d:%d", shost->host_no, id);
43009
43010 transport_setup_device(&rport->dev);
43011diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c
43012index 1b1125e..31a2019 100644
43013--- a/drivers/scsi/sd.c
43014+++ b/drivers/scsi/sd.c
43015@@ -2936,7 +2936,7 @@ static int sd_probe(struct device *dev)
43016 sdkp->disk = gd;
43017 sdkp->index = index;
43018 atomic_set(&sdkp->openers, 0);
43019- atomic_set(&sdkp->device->ioerr_cnt, 0);
43020+ atomic_set_unchecked(&sdkp->device->ioerr_cnt, 0);
43021
43022 if (!sdp->request_queue->rq_timeout) {
43023 if (sdp->type != TYPE_MOD)
43024diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
43025index df5e961..df6b97f 100644
43026--- a/drivers/scsi/sg.c
43027+++ b/drivers/scsi/sg.c
43028@@ -1102,7 +1102,7 @@ sg_ioctl(struct file *filp, unsigned int cmd_in, unsigned long arg)
43029 sdp->disk->disk_name,
43030 MKDEV(SCSI_GENERIC_MAJOR, sdp->index),
43031 NULL,
43032- (char *)arg);
43033+ (char __user *)arg);
43034 case BLKTRACESTART:
43035 return blk_trace_startstop(sdp->device->request_queue, 1);
43036 case BLKTRACESTOP:
43037diff --git a/drivers/spi/spi.c b/drivers/spi/spi.c
43038index 32b7bb1..2f1c4bd 100644
43039--- a/drivers/spi/spi.c
43040+++ b/drivers/spi/spi.c
43041@@ -1631,7 +1631,7 @@ int spi_bus_unlock(struct spi_master *master)
43042 EXPORT_SYMBOL_GPL(spi_bus_unlock);
43043
43044 /* portable code must never pass more than 32 bytes */
43045-#define SPI_BUFSIZ max(32,SMP_CACHE_BYTES)
43046+#define SPI_BUFSIZ max(32UL,SMP_CACHE_BYTES)
43047
43048 static u8 *buf;
43049
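
SPI_BUFSIZ feeds the kernel's max() macro, which deliberately warns when its two operands have different types; spelling the constant 32UL keeps it the same type as SMP_CACHE_BYTES on configurations where that macro expands to an unsigned long. The 3.10-era max() makes the mechanism visible:

/* The pointer comparison is never used; it exists so gcc emits
 * "comparison of distinct pointer types" whenever x and y differ. */
#define max(x, y) ({			\
	typeof(x) _max1 = (x);		\
	typeof(y) _max2 = (y);		\
	(void) (&_max1 == &_max2);	\
	_max1 > _max2 ? _max1 : _max2; })
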
43050diff --git a/drivers/staging/media/solo6x10/solo6x10-core.c b/drivers/staging/media/solo6x10/solo6x10-core.c
43051index 3675020..e80d92c 100644
43052--- a/drivers/staging/media/solo6x10/solo6x10-core.c
43053+++ b/drivers/staging/media/solo6x10/solo6x10-core.c
43054@@ -434,7 +434,7 @@ static void solo_device_release(struct device *dev)
43055
43056 static int solo_sysfs_init(struct solo_dev *solo_dev)
43057 {
43058- struct bin_attribute *sdram_attr = &solo_dev->sdram_attr;
43059+ bin_attribute_no_const *sdram_attr = &solo_dev->sdram_attr;
43060 struct device *dev = &solo_dev->dev;
43061 const char *driver;
43062 int i;
43063diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
43064index 34afc16..ffe44dd 100644
43065--- a/drivers/staging/octeon/ethernet-rx.c
43066+++ b/drivers/staging/octeon/ethernet-rx.c
43067@@ -421,11 +421,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
43068 /* Increment RX stats for virtual ports */
43069 if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
43070 #ifdef CONFIG_64BIT
43071- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
43072- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
43073+ atomic64_add_unchecked(1, (atomic64_unchecked_t *)&priv->stats.rx_packets);
43074+ atomic64_add_unchecked(skb->len, (atomic64_unchecked_t *)&priv->stats.rx_bytes);
43075 #else
43076- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
43077- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
43078+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_packets);
43079+ atomic_add_unchecked(skb->len, (atomic_unchecked_t *)&priv->stats.rx_bytes);
43080 #endif
43081 }
43082 netif_receive_skb(skb);
43083@@ -437,9 +437,9 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
43084 dev->name);
43085 */
43086 #ifdef CONFIG_64BIT
43087- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
43088+ atomic64_unchecked_add(1, (atomic64_unchecked_t *)&priv->stats.rx_dropped);
43089 #else
43090- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
43091+ atomic_add_unchecked(1, (atomic_unchecked_t *)&priv->stats.rx_dropped);
43092 #endif
43093 dev_kfree_skb_irq(skb);
43094 }
43095diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
43096index c3a90e7..023619a 100644
43097--- a/drivers/staging/octeon/ethernet.c
43098+++ b/drivers/staging/octeon/ethernet.c
43099@@ -252,11 +252,11 @@ static struct net_device_stats *cvm_oct_common_get_stats(struct net_device *dev)
43100 * since the RX tasklet also increments it.
43101 */
43102 #ifdef CONFIG_64BIT
43103- atomic64_add(rx_status.dropped_packets,
43104- (atomic64_t *)&priv->stats.rx_dropped);
43105+ atomic64_add_unchecked(rx_status.dropped_packets,
43106+ (atomic64_unchecked_t *)&priv->stats.rx_dropped);
43107 #else
43108- atomic_add(rx_status.dropped_packets,
43109- (atomic_t *)&priv->stats.rx_dropped);
43110+ atomic_add_unchecked(rx_status.dropped_packets,
43111+ (atomic_unchecked_t *)&priv->stats.rx_dropped);
43112 #endif
43113 }
43114
43115diff --git a/drivers/staging/rtl8712/rtl871x_io.h b/drivers/staging/rtl8712/rtl871x_io.h
43116index dc23395..cf7e9b1 100644
43117--- a/drivers/staging/rtl8712/rtl871x_io.h
43118+++ b/drivers/staging/rtl8712/rtl871x_io.h
43119@@ -108,7 +108,7 @@ struct _io_ops {
43120 u8 *pmem);
43121 u32 (*_write_port)(struct intf_hdl *pintfhdl, u32 addr, u32 cnt,
43122 u8 *pmem);
43123-};
43124+} __no_const;
43125
43126 struct io_req {
43127 struct list_head list;
43128diff --git a/drivers/staging/sbe-2t3e3/netdev.c b/drivers/staging/sbe-2t3e3/netdev.c
43129index 1f5088b..0e59820 100644
43130--- a/drivers/staging/sbe-2t3e3/netdev.c
43131+++ b/drivers/staging/sbe-2t3e3/netdev.c
43132@@ -51,7 +51,7 @@ static int t3e3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
43133 t3e3_if_config(sc, cmd_2t3e3, (char *)&param, &resp, &rlen);
43134
43135 if (rlen)
43136- if (copy_to_user(data, &resp, rlen))
43137+ if (rlen > sizeof resp || copy_to_user(data, &resp, rlen))
43138 return -EFAULT;
43139
43140 return 0;
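
rlen comes back from t3e3_if_config() and was handed to copy_to_user() unchecked; a length larger than the on-stack resp would have leaked adjacent kernel stack to userspace. The added "rlen > sizeof resp" test fails closed. The general defensive shape, sketched with hypothetical names:

#include <linux/errno.h>
#include <linux/uaccess.h>

static long copy_reply(void __user *dst, const void *resp,
		       size_t resp_size, size_t rlen)
{
	if (rlen > resp_size)      /* never trust a device-supplied length */
		return -EFAULT;
	if (rlen && copy_to_user(dst, resp, rlen))
		return -EFAULT;
	return 0;
}
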
43141diff --git a/drivers/staging/usbip/vhci.h b/drivers/staging/usbip/vhci.h
43142index a863a98..d272795 100644
43143--- a/drivers/staging/usbip/vhci.h
43144+++ b/drivers/staging/usbip/vhci.h
43145@@ -83,7 +83,7 @@ struct vhci_hcd {
43146 unsigned resuming:1;
43147 unsigned long re_timeout;
43148
43149- atomic_t seqnum;
43150+ atomic_unchecked_t seqnum;
43151
43152 /*
43153 * NOTE:
43154diff --git a/drivers/staging/usbip/vhci_hcd.c b/drivers/staging/usbip/vhci_hcd.c
43155index d7974cb..d78076b 100644
43156--- a/drivers/staging/usbip/vhci_hcd.c
43157+++ b/drivers/staging/usbip/vhci_hcd.c
43158@@ -441,7 +441,7 @@ static void vhci_tx_urb(struct urb *urb)
43159
43160 spin_lock(&vdev->priv_lock);
43161
43162- priv->seqnum = atomic_inc_return(&the_controller->seqnum);
43163+ priv->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
43164 if (priv->seqnum == 0xffff)
43165 dev_info(&urb->dev->dev, "seqnum max\n");
43166
43167@@ -687,7 +687,7 @@ static int vhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
43168 return -ENOMEM;
43169 }
43170
43171- unlink->seqnum = atomic_inc_return(&the_controller->seqnum);
43172+ unlink->seqnum = atomic_inc_return_unchecked(&the_controller->seqnum);
43173 if (unlink->seqnum == 0xffff)
43174 pr_info("seqnum max\n");
43175
43176@@ -891,7 +891,7 @@ static int vhci_start(struct usb_hcd *hcd)
43177 vdev->rhport = rhport;
43178 }
43179
43180- atomic_set(&vhci->seqnum, 0);
43181+ atomic_set_unchecked(&vhci->seqnum, 0);
43182 spin_lock_init(&vhci->lock);
43183
43184 hcd->power_budget = 0; /* no limit */
43185diff --git a/drivers/staging/usbip/vhci_rx.c b/drivers/staging/usbip/vhci_rx.c
43186index d07fcb5..358e1e1 100644
43187--- a/drivers/staging/usbip/vhci_rx.c
43188+++ b/drivers/staging/usbip/vhci_rx.c
43189@@ -80,7 +80,7 @@ static void vhci_recv_ret_submit(struct vhci_device *vdev,
43190 if (!urb) {
43191 pr_err("cannot find a urb of seqnum %u\n", pdu->base.seqnum);
43192 pr_info("max seqnum %d\n",
43193- atomic_read(&the_controller->seqnum));
43194+ atomic_read_unchecked(&the_controller->seqnum));
43195 usbip_event_add(ud, VDEV_EVENT_ERROR_TCP);
43196 return;
43197 }
43198diff --git a/drivers/staging/vt6655/hostap.c b/drivers/staging/vt6655/hostap.c
43199index 8417c2f..ef5ebd6 100644
43200--- a/drivers/staging/vt6655/hostap.c
43201+++ b/drivers/staging/vt6655/hostap.c
43202@@ -69,14 +69,13 @@ static int msglevel = MSG_LEVEL_INFO;
43203 *
43204 */
43205
43206+static net_device_ops_no_const apdev_netdev_ops;
43207+
43208 static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
43209 {
43210 PSDevice apdev_priv;
43211 struct net_device *dev = pDevice->dev;
43212 int ret;
43213- const struct net_device_ops apdev_netdev_ops = {
43214- .ndo_start_xmit = pDevice->tx_80211,
43215- };
43216
43217 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
43218
43219@@ -88,6 +87,8 @@ static int hostap_enable_hostapd(PSDevice pDevice, int rtnl_locked)
43220 *apdev_priv = *pDevice;
43221 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
43222
43223+ /* only half broken now */
43224+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
43225 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
43226
43227 pDevice->apdev->type = ARPHRD_IEEE80211;
43228diff --git a/drivers/staging/vt6656/hostap.c b/drivers/staging/vt6656/hostap.c
43229index c699a30..b90a5fd 100644
43230--- a/drivers/staging/vt6656/hostap.c
43231+++ b/drivers/staging/vt6656/hostap.c
43232@@ -60,14 +60,13 @@ static int msglevel =MSG_LEVEL_INFO;
43233 *
43234 */
43235
43236+static net_device_ops_no_const apdev_netdev_ops;
43237+
43238 static int hostap_enable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
43239 {
43240 struct vnt_private *apdev_priv;
43241 struct net_device *dev = pDevice->dev;
43242 int ret;
43243- const struct net_device_ops apdev_netdev_ops = {
43244- .ndo_start_xmit = pDevice->tx_80211,
43245- };
43246
43247 DBG_PRT(MSG_LEVEL_DEBUG, KERN_INFO "%s: Enabling hostapd mode\n", dev->name);
43248
43249@@ -79,6 +78,8 @@ static int hostap_enable_hostapd(struct vnt_private *pDevice, int rtnl_locked)
43250 *apdev_priv = *pDevice;
43251 memcpy(pDevice->apdev->dev_addr, dev->dev_addr, ETH_ALEN);
43252
43253+ /* only half broken now */
43254+ apdev_netdev_ops.ndo_start_xmit = pDevice->tx_80211;
43255 pDevice->apdev->netdev_ops = &apdev_netdev_ops;
43256
43257 pDevice->apdev->type = ARPHRD_IEEE80211;
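
Both vt6655 and vt6656 built the ops table as a function-local, so pDevice->apdev->netdev_ops was left pointing at a stack variable that died when hostap_enable_hostapd() returned. With struct net_device_ops constified, the patch hoists it to one writable static (net_device_ops_no_const is the PaX typedef) and pokes in .ndo_start_xmit at enable time; the retained "only half broken now" comment concedes that the static is shared, and therefore racy, if two devices enable hostapd at once. Reduced to its core:

#include <linux/netdevice.h>

static net_device_ops_no_const apdev_netdev_ops;  /* one shared writable slot */

static void wire_up_apdev(struct net_device *apdev,
			  netdev_tx_t (*xmit)(struct sk_buff *, struct net_device *))
{
	apdev_netdev_ops.ndo_start_xmit = xmit;  /* last caller wins */
	apdev->netdev_ops = &apdev_netdev_ops;   /* no longer a dangling stack pointer */
}
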
43258diff --git a/drivers/staging/zcache/tmem.c b/drivers/staging/zcache/tmem.c
43259index d7e51e4..d07eaab 100644
43260--- a/drivers/staging/zcache/tmem.c
43261+++ b/drivers/staging/zcache/tmem.c
43262@@ -51,7 +51,7 @@
43263 * A tmem host implementation must use this function to register callbacks
43264 * for memory allocation.
43265 */
43266-static struct tmem_hostops tmem_hostops;
43267+static tmem_hostops_no_const tmem_hostops;
43268
43269 static void tmem_objnode_tree_init(void);
43270
43271@@ -65,7 +65,7 @@ void tmem_register_hostops(struct tmem_hostops *m)
43272 * A tmem host implementation must use this function to register
43273 * callbacks for a page-accessible memory (PAM) implementation.
43274 */
43275-static struct tmem_pamops tmem_pamops;
43276+static tmem_pamops_no_const tmem_pamops;
43277
43278 void tmem_register_pamops(struct tmem_pamops *m)
43279 {
43280diff --git a/drivers/staging/zcache/tmem.h b/drivers/staging/zcache/tmem.h
43281index d128ce2..a43980c 100644
43282--- a/drivers/staging/zcache/tmem.h
43283+++ b/drivers/staging/zcache/tmem.h
43284@@ -226,6 +226,7 @@ struct tmem_pamops {
43285 int (*replace_in_obj)(void *, struct tmem_obj *);
43286 #endif
43287 };
43288+typedef struct tmem_pamops __no_const tmem_pamops_no_const;
43289 extern void tmem_register_pamops(struct tmem_pamops *m);
43290
43291 /* memory allocation methods provided by the host implementation */
43292@@ -235,6 +236,7 @@ struct tmem_hostops {
43293 struct tmem_objnode *(*objnode_alloc)(struct tmem_pool *);
43294 void (*objnode_free)(struct tmem_objnode *, struct tmem_pool *);
43295 };
43296+typedef struct tmem_hostops __no_const tmem_hostops_no_const;
43297 extern void tmem_register_hostops(struct tmem_hostops *m);
43298
43299 /* core tmem accessor functions */
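
zcache needs struct tmem_hostops/tmem_pamops to stay const-qualifiable for ordinary users, so rather than tagging the struct definitions, the patch typedefs writable variants and uses them only for the two static registration slots that tmem_register_hostops()/tmem_register_pamops() copy into. The idiom, with made-up names:

struct widget_ops {
	void (*poke)(void);
};
typedef struct widget_ops __no_const widget_ops_no_const;

static widget_ops_no_const registered;  /* the one writable instance */

void widget_register_ops(struct widget_ops *ops)
{
	registered = *ops;  /* struct copy into the exempted slot */
}
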
43300diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
43301index 4630481..c26782a 100644
43302--- a/drivers/target/target_core_device.c
43303+++ b/drivers/target/target_core_device.c
43304@@ -1400,7 +1400,7 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
43305 spin_lock_init(&dev->se_port_lock);
43306 spin_lock_init(&dev->se_tmr_lock);
43307 spin_lock_init(&dev->qf_cmd_lock);
43308- atomic_set(&dev->dev_ordered_id, 0);
43309+ atomic_set_unchecked(&dev->dev_ordered_id, 0);
43310 INIT_LIST_HEAD(&dev->t10_wwn.t10_vpd_list);
43311 spin_lock_init(&dev->t10_wwn.t10_vpd_lock);
43312 INIT_LIST_HEAD(&dev->t10_pr.registration_list);
43313diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
43314index 21e3158..43c6004 100644
43315--- a/drivers/target/target_core_transport.c
43316+++ b/drivers/target/target_core_transport.c
43317@@ -1080,7 +1080,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
43318 * Used to determine when ORDERED commands should go from
43319 * Dormant to Active status.
43320 */
43321- cmd->se_ordered_id = atomic_inc_return(&dev->dev_ordered_id);
43322+ cmd->se_ordered_id = atomic_inc_return_unchecked(&dev->dev_ordered_id);
43323 smp_mb__after_atomic_inc();
43324 pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
43325 cmd->se_ordered_id, cmd->sam_task_attr,
43326diff --git a/drivers/tty/cyclades.c b/drivers/tty/cyclades.c
43327index 33f83fe..d80f8e1 100644
43328--- a/drivers/tty/cyclades.c
43329+++ b/drivers/tty/cyclades.c
43330@@ -1570,10 +1570,10 @@ static int cy_open(struct tty_struct *tty, struct file *filp)
43331 printk(KERN_DEBUG "cyc:cy_open ttyC%d, count = %d\n", info->line,
43332 info->port.count);
43333 #endif
43334- info->port.count++;
43335+ atomic_inc(&info->port.count);
43336 #ifdef CY_DEBUG_COUNT
43337 printk(KERN_DEBUG "cyc:cy_open (%d): incrementing count to %d\n",
43338- current->pid, info->port.count);
43339+ current->pid, atomic_read(&info->port.count));
43340 #endif
43341
43342 /*
43343@@ -3972,7 +3972,7 @@ static int cyclades_proc_show(struct seq_file *m, void *v)
43344 for (j = 0; j < cy_card[i].nports; j++) {
43345 info = &cy_card[i].ports[j];
43346
43347- if (info->port.count) {
43348+ if (atomic_read(&info->port.count)) {
43349 /* XXX is the ldisc num worth this? */
43350 struct tty_struct *tty;
43351 struct tty_ldisc *ld;
43352diff --git a/drivers/tty/hvc/hvc_console.c b/drivers/tty/hvc/hvc_console.c
43353index eb255e8..f637a57 100644
43354--- a/drivers/tty/hvc/hvc_console.c
43355+++ b/drivers/tty/hvc/hvc_console.c
43356@@ -338,7 +338,7 @@ static int hvc_open(struct tty_struct *tty, struct file * filp)
43357
43358 spin_lock_irqsave(&hp->port.lock, flags);
43359 /* Check and then increment for fast path open. */
43360- if (hp->port.count++ > 0) {
43361+ if (atomic_inc_return(&hp->port.count) > 1) {
43362 spin_unlock_irqrestore(&hp->port.lock, flags);
43363 hvc_kick();
43364 return 0;
43365@@ -388,7 +388,7 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
43366
43367 spin_lock_irqsave(&hp->port.lock, flags);
43368
43369- if (--hp->port.count == 0) {
43370+ if (atomic_dec_return(&hp->port.count) == 0) {
43371 spin_unlock_irqrestore(&hp->port.lock, flags);
43372 /* We are done with the tty pointer now. */
43373 tty_port_tty_set(&hp->port, NULL);
43374@@ -406,9 +406,9 @@ static void hvc_close(struct tty_struct *tty, struct file * filp)
43375 */
43376 tty_wait_until_sent_from_close(tty, HVC_CLOSE_WAIT);
43377 } else {
43378- if (hp->port.count < 0)
43379+ if (atomic_read(&hp->port.count) < 0)
43380 printk(KERN_ERR "hvc_close %X: oops, count is %d\n",
43381- hp->vtermno, hp->port.count);
43382+ hp->vtermno, atomic_read(&hp->port.count));
43383 spin_unlock_irqrestore(&hp->port.lock, flags);
43384 }
43385 }
43386@@ -438,12 +438,12 @@ static void hvc_hangup(struct tty_struct *tty)
43387 * open->hangup case this can be called after the final close so prevent
43388 * that from happening for now.
43389 */
43390- if (hp->port.count <= 0) {
43391+ if (atomic_read(&hp->port.count) <= 0) {
43392 spin_unlock_irqrestore(&hp->port.lock, flags);
43393 return;
43394 }
43395
43396- hp->port.count = 0;
43397+ atomic_set(&hp->port.count, 0);
43398 spin_unlock_irqrestore(&hp->port.lock, flags);
43399 tty_port_tty_set(&hp->port, NULL);
43400
43401@@ -491,7 +491,7 @@ static int hvc_write(struct tty_struct *tty, const unsigned char *buf, int count
43402 return -EPIPE;
43403
43404 /* FIXME what's this (unprotected) check for? */
43405- if (hp->port.count <= 0)
43406+ if (atomic_read(&hp->port.count) <= 0)
43407 return -EIO;
43408
43409 spin_lock_irqsave(&hp->lock, flags);
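
The long run of tty conversions that starts here turns struct tty_port's open count into an atomic_t, since drivers like hvc poke it under assorted per-driver locks. One translation is worth calling out: the fast-path open tested the value before incrementing (hp->port.count++ > 0), so its atomic form tests after the increment (atomic_inc_return(...) > 1); both mean "someone already had the port open". Sketch:

#include <linux/atomic.h>

static atomic_t open_count = ATOMIC_INIT(0);

static int fast_open(void)
{
	if (atomic_inc_return(&open_count) > 1)  /* was "count++ > 0" */
		return 0;                        /* already open: fast path */
	return 1;                                /* first opener: full setup */
}
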
43410diff --git a/drivers/tty/hvc/hvcs.c b/drivers/tty/hvc/hvcs.c
43411index 81e939e..95ead10 100644
43412--- a/drivers/tty/hvc/hvcs.c
43413+++ b/drivers/tty/hvc/hvcs.c
43414@@ -83,6 +83,7 @@
43415 #include <asm/hvcserver.h>
43416 #include <asm/uaccess.h>
43417 #include <asm/vio.h>
43418+#include <asm/local.h>
43419
43420 /*
43421 * 1.3.0 -> 1.3.1 In hvcs_open memset(..,0x00,..) instead of memset(..,0x3F,00).
43422@@ -416,7 +417,7 @@ static ssize_t hvcs_vterm_state_store(struct device *dev, struct device_attribut
43423
43424 spin_lock_irqsave(&hvcsd->lock, flags);
43425
43426- if (hvcsd->port.count > 0) {
43427+ if (atomic_read(&hvcsd->port.count) > 0) {
43428 spin_unlock_irqrestore(&hvcsd->lock, flags);
43429 printk(KERN_INFO "HVCS: vterm state unchanged. "
43430 "The hvcs device node is still in use.\n");
43431@@ -1127,7 +1128,7 @@ static int hvcs_install(struct tty_driver *driver, struct tty_struct *tty)
43432 }
43433 }
43434
43435- hvcsd->port.count = 0;
43436+ atomic_set(&hvcsd->port.count, 0);
43437 hvcsd->port.tty = tty;
43438 tty->driver_data = hvcsd;
43439
43440@@ -1180,7 +1181,7 @@ static int hvcs_open(struct tty_struct *tty, struct file *filp)
43441 unsigned long flags;
43442
43443 spin_lock_irqsave(&hvcsd->lock, flags);
43444- hvcsd->port.count++;
43445+ atomic_inc(&hvcsd->port.count);
43446 hvcsd->todo_mask |= HVCS_SCHED_READ;
43447 spin_unlock_irqrestore(&hvcsd->lock, flags);
43448
43449@@ -1216,7 +1217,7 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
43450 hvcsd = tty->driver_data;
43451
43452 spin_lock_irqsave(&hvcsd->lock, flags);
43453- if (--hvcsd->port.count == 0) {
43454+ if (atomic_dec_and_test(&hvcsd->port.count)) {
43455
43456 vio_disable_interrupts(hvcsd->vdev);
43457
43458@@ -1241,10 +1242,10 @@ static void hvcs_close(struct tty_struct *tty, struct file *filp)
43459
43460 free_irq(irq, hvcsd);
43461 return;
43462- } else if (hvcsd->port.count < 0) {
43463+ } else if (atomic_read(&hvcsd->port.count) < 0) {
43464 printk(KERN_ERR "HVCS: vty-server@%X open_count: %d"
43465 " is missmanaged.\n",
43466- hvcsd->vdev->unit_address, hvcsd->port.count);
43467+ hvcsd->vdev->unit_address, atomic_read(&hvcsd->port.count));
43468 }
43469
43470 spin_unlock_irqrestore(&hvcsd->lock, flags);
43471@@ -1266,7 +1267,7 @@ static void hvcs_hangup(struct tty_struct * tty)
43472
43473 spin_lock_irqsave(&hvcsd->lock, flags);
43474 /* Preserve this so that we know how many kref refs to put */
43475- temp_open_count = hvcsd->port.count;
43476+ temp_open_count = atomic_read(&hvcsd->port.count);
43477
43478 /*
43479 * Don't kref put inside the spinlock because the destruction
43480@@ -1281,7 +1282,7 @@ static void hvcs_hangup(struct tty_struct * tty)
43481 tty->driver_data = NULL;
43482 hvcsd->port.tty = NULL;
43483
43484- hvcsd->port.count = 0;
43485+ atomic_set(&hvcsd->port.count, 0);
43486
43487 /* This will drop any buffered data on the floor which is OK in a hangup
43488 * scenario. */
43489@@ -1352,7 +1353,7 @@ static int hvcs_write(struct tty_struct *tty,
43490 * the middle of a write operation? This is a crummy place to do this
43491 * but we want to keep it all in the spinlock.
43492 */
43493- if (hvcsd->port.count <= 0) {
43494+ if (atomic_read(&hvcsd->port.count) <= 0) {
43495 spin_unlock_irqrestore(&hvcsd->lock, flags);
43496 return -ENODEV;
43497 }
43498@@ -1426,7 +1427,7 @@ static int hvcs_write_room(struct tty_struct *tty)
43499 {
43500 struct hvcs_struct *hvcsd = tty->driver_data;
43501
43502- if (!hvcsd || hvcsd->port.count <= 0)
43503+ if (!hvcsd || atomic_read(&hvcsd->port.count) <= 0)
43504 return 0;
43505
43506 return HVCS_BUFF_LEN - hvcsd->chars_in_buffer;
43507diff --git a/drivers/tty/ipwireless/tty.c b/drivers/tty/ipwireless/tty.c
43508index 8fd72ff..34a0bed 100644
43509--- a/drivers/tty/ipwireless/tty.c
43510+++ b/drivers/tty/ipwireless/tty.c
43511@@ -29,6 +29,7 @@
43512 #include <linux/tty_driver.h>
43513 #include <linux/tty_flip.h>
43514 #include <linux/uaccess.h>
43515+#include <asm/local.h>
43516
43517 #include "tty.h"
43518 #include "network.h"
43519@@ -99,10 +100,10 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
43520 mutex_unlock(&tty->ipw_tty_mutex);
43521 return -ENODEV;
43522 }
43523- if (tty->port.count == 0)
43524+ if (atomic_read(&tty->port.count) == 0)
43525 tty->tx_bytes_queued = 0;
43526
43527- tty->port.count++;
43528+ atomic_inc(&tty->port.count);
43529
43530 tty->port.tty = linux_tty;
43531 linux_tty->driver_data = tty;
43532@@ -118,9 +119,7 @@ static int ipw_open(struct tty_struct *linux_tty, struct file *filp)
43533
43534 static void do_ipw_close(struct ipw_tty *tty)
43535 {
43536- tty->port.count--;
43537-
43538- if (tty->port.count == 0) {
43539+ if (atomic_dec_return(&tty->port.count) == 0) {
43540 struct tty_struct *linux_tty = tty->port.tty;
43541
43542 if (linux_tty != NULL) {
43543@@ -141,7 +140,7 @@ static void ipw_hangup(struct tty_struct *linux_tty)
43544 return;
43545
43546 mutex_lock(&tty->ipw_tty_mutex);
43547- if (tty->port.count == 0) {
43548+ if (atomic_read(&tty->port.count) == 0) {
43549 mutex_unlock(&tty->ipw_tty_mutex);
43550 return;
43551 }
43552@@ -164,7 +163,7 @@ void ipwireless_tty_received(struct ipw_tty *tty, unsigned char *data,
43553
43554 mutex_lock(&tty->ipw_tty_mutex);
43555
43556- if (!tty->port.count) {
43557+ if (!atomic_read(&tty->port.count)) {
43558 mutex_unlock(&tty->ipw_tty_mutex);
43559 return;
43560 }
43561@@ -206,7 +205,7 @@ static int ipw_write(struct tty_struct *linux_tty,
43562 return -ENODEV;
43563
43564 mutex_lock(&tty->ipw_tty_mutex);
43565- if (!tty->port.count) {
43566+ if (!atomic_read(&tty->port.count)) {
43567 mutex_unlock(&tty->ipw_tty_mutex);
43568 return -EINVAL;
43569 }
43570@@ -246,7 +245,7 @@ static int ipw_write_room(struct tty_struct *linux_tty)
43571 if (!tty)
43572 return -ENODEV;
43573
43574- if (!tty->port.count)
43575+ if (!atomic_read(&tty->port.count))
43576 return -EINVAL;
43577
43578 room = IPWIRELESS_TX_QUEUE_SIZE - tty->tx_bytes_queued;
43579@@ -288,7 +287,7 @@ static int ipw_chars_in_buffer(struct tty_struct *linux_tty)
43580 if (!tty)
43581 return 0;
43582
43583- if (!tty->port.count)
43584+ if (!atomic_read(&tty->port.count))
43585 return 0;
43586
43587 return tty->tx_bytes_queued;
43588@@ -369,7 +368,7 @@ static int ipw_tiocmget(struct tty_struct *linux_tty)
43589 if (!tty)
43590 return -ENODEV;
43591
43592- if (!tty->port.count)
43593+ if (!atomic_read(&tty->port.count))
43594 return -EINVAL;
43595
43596 return get_control_lines(tty);
43597@@ -385,7 +384,7 @@ ipw_tiocmset(struct tty_struct *linux_tty,
43598 if (!tty)
43599 return -ENODEV;
43600
43601- if (!tty->port.count)
43602+ if (!atomic_read(&tty->port.count))
43603 return -EINVAL;
43604
43605 return set_control_lines(tty, set, clear);
43606@@ -399,7 +398,7 @@ static int ipw_ioctl(struct tty_struct *linux_tty,
43607 if (!tty)
43608 return -ENODEV;
43609
43610- if (!tty->port.count)
43611+ if (!atomic_read(&tty->port.count))
43612 return -EINVAL;
43613
43614 /* FIXME: Exactly how is the tty object locked here .. */
43615@@ -555,7 +554,7 @@ void ipwireless_tty_free(struct ipw_tty *tty)
43616 * are gone */
43617 mutex_lock(&ttyj->ipw_tty_mutex);
43618 }
43619- while (ttyj->port.count)
43620+ while (atomic_read(&ttyj->port.count))
43621 do_ipw_close(ttyj);
43622 ipwireless_disassociate_network_ttys(network,
43623 ttyj->channel_idx);
43624diff --git a/drivers/tty/moxa.c b/drivers/tty/moxa.c
43625index 1deaca4..c8582d4 100644
43626--- a/drivers/tty/moxa.c
43627+++ b/drivers/tty/moxa.c
43628@@ -1189,7 +1189,7 @@ static int moxa_open(struct tty_struct *tty, struct file *filp)
43629 }
43630
43631 ch = &brd->ports[port % MAX_PORTS_PER_BOARD];
43632- ch->port.count++;
43633+ atomic_inc(&ch->port.count);
43634 tty->driver_data = ch;
43635 tty_port_tty_set(&ch->port, tty);
43636 mutex_lock(&ch->port.mutex);
43637diff --git a/drivers/tty/n_gsm.c b/drivers/tty/n_gsm.c
43638index 6422390..49003ac8 100644
43639--- a/drivers/tty/n_gsm.c
43640+++ b/drivers/tty/n_gsm.c
43641@@ -1632,7 +1632,7 @@ static struct gsm_dlci *gsm_dlci_alloc(struct gsm_mux *gsm, int addr)
43642 spin_lock_init(&dlci->lock);
43643 mutex_init(&dlci->mutex);
43644 dlci->fifo = &dlci->_fifo;
43645- if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL) < 0) {
43646+ if (kfifo_alloc(&dlci->_fifo, 4096, GFP_KERNEL)) {
43647 kfree(dlci);
43648 return NULL;
43649 }
43650@@ -2932,7 +2932,7 @@ static int gsmtty_open(struct tty_struct *tty, struct file *filp)
43651 struct gsm_dlci *dlci = tty->driver_data;
43652 struct tty_port *port = &dlci->port;
43653
43654- port->count++;
43655+ atomic_inc(&port->count);
43656 dlci_get(dlci);
43657 dlci_get(dlci->gsm->dlci[0]);
43658 mux_get(dlci->gsm);
43659diff --git a/drivers/tty/n_tty.c b/drivers/tty/n_tty.c
43660index 6c7fe90..9241dab 100644
43661--- a/drivers/tty/n_tty.c
43662+++ b/drivers/tty/n_tty.c
43663@@ -2203,6 +2203,7 @@ void n_tty_inherit_ops(struct tty_ldisc_ops *ops)
43664 {
43665 *ops = tty_ldisc_N_TTY;
43666 ops->owner = NULL;
43667- ops->refcount = ops->flags = 0;
43668+ atomic_set(&ops->refcount, 0);
43669+ ops->flags = 0;
43670 }
43671 EXPORT_SYMBOL_GPL(n_tty_inherit_ops);
43672diff --git a/drivers/tty/pty.c b/drivers/tty/pty.c
43673index abfd990..5ab5da9 100644
43674--- a/drivers/tty/pty.c
43675+++ b/drivers/tty/pty.c
43676@@ -796,8 +796,10 @@ static void __init unix98_pty_init(void)
43677 panic("Couldn't register Unix98 pts driver");
43678
43679 /* Now create the /dev/ptmx special device */
43680+ pax_open_kernel();
43681 tty_default_fops(&ptmx_fops);
43682- ptmx_fops.open = ptmx_open;
43683+ *(void **)&ptmx_fops.open = ptmx_open;
43684+ pax_close_kernel();
43685
43686 cdev_init(&ptmx_cdev, &ptmx_fops);
43687 if (cdev_add(&ptmx_cdev, MKDEV(TTYAUX_MAJOR, 2), 1) ||
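[editor's note] Under PaX's constification, ptmx_fops lives in read-only memory, so the one legitimate write at init time is bracketed by pax_open_kernel()/pax_close_kernel() and routed through a void-pointer cast. A hedged sketch of the idiom (these helpers exist only in PaX-patched trees; the names below are hypothetical):

#include <linux/fs.h>

static int demo_open(struct inode *inode, struct file *file)
{
        return 0;
}

/* placed in read-only memory by the PaX constify machinery */
static struct file_operations demo_fops;

static int __init demo_init(void)
{
        pax_open_kernel();                      /* lift write protection */
        *(void **)&demo_fops.open = demo_open;  /* cast defeats the const checks */
        pax_close_kernel();                     /* restore protection */
        return 0;
}

fb_defio.c, atyfb_base.c and mach64_cursor.c further down get the identical treatment for their fb_ops pointers.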
43688diff --git a/drivers/tty/rocket.c b/drivers/tty/rocket.c
43689index 354564e..fe50d9a 100644
43690--- a/drivers/tty/rocket.c
43691+++ b/drivers/tty/rocket.c
43692@@ -914,7 +914,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
43693 tty->driver_data = info;
43694 tty_port_tty_set(port, tty);
43695
43696- if (port->count++ == 0) {
43697+ if (atomic_inc_return(&port->count) == 1) {
43698 atomic_inc(&rp_num_ports_open);
43699
43700 #ifdef ROCKET_DEBUG_OPEN
43701@@ -923,7 +923,7 @@ static int rp_open(struct tty_struct *tty, struct file *filp)
43702 #endif
43703 }
43704 #ifdef ROCKET_DEBUG_OPEN
43705- printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, info->port.count);
43706+ printk(KERN_INFO "rp_open ttyR%d, count=%d\n", info->line, atomic_read(&info->port.count));

43707 #endif
43708
43709 /*
43710@@ -1515,7 +1515,7 @@ static void rp_hangup(struct tty_struct *tty)
43711 spin_unlock_irqrestore(&info->port.lock, flags);
43712 return;
43713 }
43714- if (info->port.count)
43715+ if (atomic_read(&info->port.count))
43716 atomic_dec(&rp_num_ports_open);
43717 clear_bit((info->aiop * 8) + info->chan, (void *) &xmit_flags[info->board]);
43718 spin_unlock_irqrestore(&info->port.lock, flags);
43719diff --git a/drivers/tty/serial/kgdboc.c b/drivers/tty/serial/kgdboc.c
43720index 1002054..dd644a8 100644
43721--- a/drivers/tty/serial/kgdboc.c
43722+++ b/drivers/tty/serial/kgdboc.c
43723@@ -24,8 +24,9 @@
43724 #define MAX_CONFIG_LEN 40
43725
43726 static struct kgdb_io kgdboc_io_ops;
43727+static struct kgdb_io kgdboc_io_ops_console;
43728
43729-/* -1 = init not run yet, 0 = unconfigured, 1 = configured. */
43730+/* -1 = init not run yet, 0 = unconfigured, 1/2 = configured. */
43731 static int configured = -1;
43732
43733 static char config[MAX_CONFIG_LEN];
43734@@ -151,6 +152,8 @@ static void cleanup_kgdboc(void)
43735 kgdboc_unregister_kbd();
43736 if (configured == 1)
43737 kgdb_unregister_io_module(&kgdboc_io_ops);
43738+ else if (configured == 2)
43739+ kgdb_unregister_io_module(&kgdboc_io_ops_console);
43740 }
43741
43742 static int configure_kgdboc(void)
43743@@ -160,13 +163,13 @@ static int configure_kgdboc(void)
43744 int err;
43745 char *cptr = config;
43746 struct console *cons;
43747+ int is_console = 0;
43748
43749 err = kgdboc_option_setup(config);
43750 if (err || !strlen(config) || isspace(config[0]))
43751 goto noconfig;
43752
43753 err = -ENODEV;
43754- kgdboc_io_ops.is_console = 0;
43755 kgdb_tty_driver = NULL;
43756
43757 kgdboc_use_kms = 0;
43758@@ -187,7 +190,7 @@ static int configure_kgdboc(void)
43759 int idx;
43760 if (cons->device && cons->device(cons, &idx) == p &&
43761 idx == tty_line) {
43762- kgdboc_io_ops.is_console = 1;
43763+ is_console = 1;
43764 break;
43765 }
43766 cons = cons->next;
43767@@ -197,7 +200,13 @@ static int configure_kgdboc(void)
43768 kgdb_tty_line = tty_line;
43769
43770 do_register:
43771- err = kgdb_register_io_module(&kgdboc_io_ops);
43772+ if (is_console) {
43773+ err = kgdb_register_io_module(&kgdboc_io_ops_console);
43774+ configured = 2;
43775+ } else {
43776+ err = kgdb_register_io_module(&kgdboc_io_ops);
43777+ configured = 1;
43778+ }
43779 if (err)
43780 goto noconfig;
43781
43782@@ -205,8 +214,6 @@ do_register:
43783 if (err)
43784 goto nmi_con_failed;
43785
43786- configured = 1;
43787-
43788 return 0;
43789
43790 nmi_con_failed:
43791@@ -223,7 +230,7 @@ noconfig:
43792 static int __init init_kgdboc(void)
43793 {
43794 /* Already configured? */
43795- if (configured == 1)
43796+ if (configured >= 1)
43797 return 0;
43798
43799 return configure_kgdboc();
43800@@ -272,7 +279,7 @@ static int param_set_kgdboc_var(const char *kmessage, struct kernel_param *kp)
43801 if (config[len - 1] == '\n')
43802 config[len - 1] = '\0';
43803
43804- if (configured == 1)
43805+ if (configured >= 1)
43806 cleanup_kgdboc();
43807
43808 /* Go and configure with the new params. */
43809@@ -312,6 +319,15 @@ static struct kgdb_io kgdboc_io_ops = {
43810 .post_exception = kgdboc_post_exp_handler,
43811 };
43812
43813+static struct kgdb_io kgdboc_io_ops_console = {
43814+ .name = "kgdboc",
43815+ .read_char = kgdboc_get_char,
43816+ .write_char = kgdboc_put_char,
43817+ .pre_exception = kgdboc_pre_exp_handler,
43818+ .post_exception = kgdboc_post_exp_handler,
43819+ .is_console = 1
43820+};
43821+
43822 #ifdef CONFIG_KGDB_SERIAL_CONSOLE
43823 /* This is only available if kgdboc is a built in for early debugging */
43824 static int __init kgdboc_early_init(char *opt)
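[editor's note] The kgdboc rework serves the same read-only-ops goal: rather than toggling kgdboc_io_ops.is_console at runtime (which would keep the struct writable), the driver now carries two statically initialized kgdb_io instances and remembers in `configured` (1 vs 2) which one it registered, so cleanup_kgdboc() can unregister the matching one. Condensed sketch with hypothetical names; the required read/write callbacks are elided:

static int configured = -1;     /* module-scope state, as in kgdboc */

static struct kgdb_io demo_io_ops = {
        .name = "demo",
        /* .read_char/.write_char etc. as in the real driver */
};

static struct kgdb_io demo_io_ops_console = {
        .name = "demo",
        .is_console = 1,
};

static int demo_register(int is_console)
{
        int err;

        if (is_console) {
                err = kgdb_register_io_module(&demo_io_ops_console);
                configured = 2;
        } else {
                err = kgdb_register_io_module(&demo_io_ops);
                configured = 1;
        }
        return err;
}

ehci-dbgp.c below applies the same two-instance trick to kgdbdbgp_io_ops.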
43825diff --git a/drivers/tty/serial/samsung.c b/drivers/tty/serial/samsung.c
43826index 0c8a9fa..234a95f 100644
43827--- a/drivers/tty/serial/samsung.c
43828+++ b/drivers/tty/serial/samsung.c
43829@@ -453,11 +453,16 @@ static void s3c24xx_serial_shutdown(struct uart_port *port)
43830 }
43831 }
43832
43833+static int s3c64xx_serial_startup(struct uart_port *port);
43834 static int s3c24xx_serial_startup(struct uart_port *port)
43835 {
43836 struct s3c24xx_uart_port *ourport = to_ourport(port);
43837 int ret;
43838
43839+ /* Startup sequence is different for s3c64xx and higher SoC's */
43840+ if (s3c24xx_serial_has_interrupt_mask(port))
43841+ return s3c64xx_serial_startup(port);
43842+
43843 dbg("s3c24xx_serial_startup: port=%p (%08lx,%p)\n",
43844 port->mapbase, port->membase);
43845
43846@@ -1124,10 +1129,6 @@ static int s3c24xx_serial_init_port(struct s3c24xx_uart_port *ourport,
43847 /* setup info for port */
43848 port->dev = &platdev->dev;
43849
43850- /* Startup sequence is different for s3c64xx and higher SoC's */
43851- if (s3c24xx_serial_has_interrupt_mask(port))
43852- s3c24xx_serial_ops.startup = s3c64xx_serial_startup;
43853-
43854 port->uartclk = 1;
43855
43856 if (cfg->uart_flags & UPF_CONS_FLOW) {
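[editor's note] The samsung.c hunks reach the same goal from the other direction: instead of rewriting s3c24xx_serial_ops.startup during port init, the shared startup entry point now detects s3c64xx-class hardware itself and forwards. Condensed from the hunks above:

static int s3c64xx_serial_startup(struct uart_port *port);

static int s3c24xx_serial_startup(struct uart_port *port)
{
        /* s3c64xx and later SoCs need a different init path; dispatch
         * here rather than patching the (now read-only) ops table */
        if (s3c24xx_serial_has_interrupt_mask(port))
                return s3c64xx_serial_startup(port);

        /* ... legacy s3c24xx startup continues as before ... */
        return 0;
}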
43857diff --git a/drivers/tty/serial/serial_core.c b/drivers/tty/serial/serial_core.c
43858index f87dbfd..42ad4b1 100644
43859--- a/drivers/tty/serial/serial_core.c
43860+++ b/drivers/tty/serial/serial_core.c
43861@@ -1454,7 +1454,7 @@ static void uart_hangup(struct tty_struct *tty)
43862 uart_flush_buffer(tty);
43863 uart_shutdown(tty, state);
43864 spin_lock_irqsave(&port->lock, flags);
43865- port->count = 0;
43866+ atomic_set(&port->count, 0);
43867 clear_bit(ASYNCB_NORMAL_ACTIVE, &port->flags);
43868 spin_unlock_irqrestore(&port->lock, flags);
43869 tty_port_tty_set(port, NULL);
43870@@ -1550,7 +1550,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
43871 goto end;
43872 }
43873
43874- port->count++;
43875+ atomic_inc(&port->count);
43876 if (!state->uart_port || state->uart_port->flags & UPF_DEAD) {
43877 retval = -ENXIO;
43878 goto err_dec_count;
43879@@ -1578,7 +1578,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
43880 /*
43881 * Make sure the device is in D0 state.
43882 */
43883- if (port->count == 1)
43884+ if (atomic_read(&port->count) == 1)
43885 uart_change_pm(state, UART_PM_STATE_ON);
43886
43887 /*
43888@@ -1596,7 +1596,7 @@ static int uart_open(struct tty_struct *tty, struct file *filp)
43889 end:
43890 return retval;
43891 err_dec_count:
43892- port->count--;
43893+ atomic_dec(&port->count);
43894 mutex_unlock(&port->mutex);
43895 goto end;
43896 }
43897diff --git a/drivers/tty/synclink.c b/drivers/tty/synclink.c
43898index 8eaf1ab..85c030d 100644
43899--- a/drivers/tty/synclink.c
43900+++ b/drivers/tty/synclink.c
43901@@ -3090,7 +3090,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
43902
43903 if (debug_level >= DEBUG_LEVEL_INFO)
43904 printk("%s(%d):mgsl_close(%s) entry, count=%d\n",
43905- __FILE__,__LINE__, info->device_name, info->port.count);
43906+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
43907
43908 if (tty_port_close_start(&info->port, tty, filp) == 0)
43909 goto cleanup;
43910@@ -3108,7 +3108,7 @@ static void mgsl_close(struct tty_struct *tty, struct file * filp)
43911 cleanup:
43912 if (debug_level >= DEBUG_LEVEL_INFO)
43913 printk("%s(%d):mgsl_close(%s) exit, count=%d\n", __FILE__,__LINE__,
43914- tty->driver->name, info->port.count);
43915+ tty->driver->name, atomic_read(&info->port.count));
43916
43917 } /* end of mgsl_close() */
43918
43919@@ -3207,8 +3207,8 @@ static void mgsl_hangup(struct tty_struct *tty)
43920
43921 mgsl_flush_buffer(tty);
43922 shutdown(info);
43923-
43924- info->port.count = 0;
43925+
43926+ atomic_set(&info->port.count, 0);
43927 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
43928 info->port.tty = NULL;
43929
43930@@ -3297,12 +3297,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
43931
43932 if (debug_level >= DEBUG_LEVEL_INFO)
43933 printk("%s(%d):block_til_ready before block on %s count=%d\n",
43934- __FILE__,__LINE__, tty->driver->name, port->count );
43935+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
43936
43937 spin_lock_irqsave(&info->irq_spinlock, flags);
43938 if (!tty_hung_up_p(filp)) {
43939 extra_count = true;
43940- port->count--;
43941+ atomic_dec(&port->count);
43942 }
43943 spin_unlock_irqrestore(&info->irq_spinlock, flags);
43944 port->blocked_open++;
43945@@ -3331,7 +3331,7 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
43946
43947 if (debug_level >= DEBUG_LEVEL_INFO)
43948 printk("%s(%d):block_til_ready blocking on %s count=%d\n",
43949- __FILE__,__LINE__, tty->driver->name, port->count );
43950+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
43951
43952 tty_unlock(tty);
43953 schedule();
43954@@ -3343,12 +3343,12 @@ static int block_til_ready(struct tty_struct *tty, struct file * filp,
43955
43956 /* FIXME: Racy on hangup during close wait */
43957 if (extra_count)
43958- port->count++;
43959+ atomic_inc(&port->count);
43960 port->blocked_open--;
43961
43962 if (debug_level >= DEBUG_LEVEL_INFO)
43963 printk("%s(%d):block_til_ready after blocking on %s count=%d\n",
43964- __FILE__,__LINE__, tty->driver->name, port->count );
43965+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
43966
43967 if (!retval)
43968 port->flags |= ASYNC_NORMAL_ACTIVE;
43969@@ -3400,7 +3400,7 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
43970
43971 if (debug_level >= DEBUG_LEVEL_INFO)
43972 printk("%s(%d):mgsl_open(%s), old ref count = %d\n",
43973- __FILE__,__LINE__,tty->driver->name, info->port.count);
43974+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
43975
43976 /* If port is closing, signal caller to try again */
43977 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
43978@@ -3419,10 +3419,10 @@ static int mgsl_open(struct tty_struct *tty, struct file * filp)
43979 spin_unlock_irqrestore(&info->netlock, flags);
43980 goto cleanup;
43981 }
43982- info->port.count++;
43983+ atomic_inc(&info->port.count);
43984 spin_unlock_irqrestore(&info->netlock, flags);
43985
43986- if (info->port.count == 1) {
43987+ if (atomic_read(&info->port.count) == 1) {
43988 /* 1st open on this device, init hardware */
43989 retval = startup(info);
43990 if (retval < 0)
43991@@ -3446,8 +3446,8 @@ cleanup:
43992 if (retval) {
43993 if (tty->count == 1)
43994 info->port.tty = NULL; /* tty layer will release tty struct */
43995- if(info->port.count)
43996- info->port.count--;
43997+ if (atomic_read(&info->port.count))
43998+ atomic_dec(&info->port.count);
43999 }
44000
44001 return retval;
44002@@ -7665,7 +7665,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
44003 unsigned short new_crctype;
44004
44005 /* return error if TTY interface open */
44006- if (info->port.count)
44007+ if (atomic_read(&info->port.count))
44008 return -EBUSY;
44009
44010 switch (encoding)
44011@@ -7760,7 +7760,7 @@ static int hdlcdev_open(struct net_device *dev)
44012
44013 /* arbitrate between network and tty opens */
44014 spin_lock_irqsave(&info->netlock, flags);
44015- if (info->port.count != 0 || info->netcount != 0) {
44016+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
44017 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
44018 spin_unlock_irqrestore(&info->netlock, flags);
44019 return -EBUSY;
44020@@ -7846,7 +7846,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
44021 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
44022
44023 /* return error if TTY interface open */
44024- if (info->port.count)
44025+ if (atomic_read(&info->port.count))
44026 return -EBUSY;
44027
44028 if (cmd != SIOCWANDEV)
44029diff --git a/drivers/tty/synclink_gt.c b/drivers/tty/synclink_gt.c
44030index 1abf946..1ee34fc 100644
44031--- a/drivers/tty/synclink_gt.c
44032+++ b/drivers/tty/synclink_gt.c
44033@@ -670,7 +670,7 @@ static int open(struct tty_struct *tty, struct file *filp)
44034 tty->driver_data = info;
44035 info->port.tty = tty;
44036
44037- DBGINFO(("%s open, old ref count = %d\n", info->device_name, info->port.count));
44038+ DBGINFO(("%s open, old ref count = %d\n", info->device_name, atomic_read(&info->port.count)));
44039
44040 /* If port is closing, signal caller to try again */
44041 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
44042@@ -691,10 +691,10 @@ static int open(struct tty_struct *tty, struct file *filp)
44043 mutex_unlock(&info->port.mutex);
44044 goto cleanup;
44045 }
44046- info->port.count++;
44047+ atomic_inc(&info->port.count);
44048 spin_unlock_irqrestore(&info->netlock, flags);
44049
44050- if (info->port.count == 1) {
44051+ if (atomic_read(&info->port.count) == 1) {
44052 /* 1st open on this device, init hardware */
44053 retval = startup(info);
44054 if (retval < 0) {
44055@@ -715,8 +715,8 @@ cleanup:
44056 if (retval) {
44057 if (tty->count == 1)
44058 info->port.tty = NULL; /* tty layer will release tty struct */
44059- if(info->port.count)
44060- info->port.count--;
44061+ if(atomic_read(&info->port.count))
44062+ atomic_dec(&info->port.count);
44063 }
44064
44065 DBGINFO(("%s open rc=%d\n", info->device_name, retval));
44066@@ -729,7 +729,7 @@ static void close(struct tty_struct *tty, struct file *filp)
44067
44068 if (sanity_check(info, tty->name, "close"))
44069 return;
44070- DBGINFO(("%s close entry, count=%d\n", info->device_name, info->port.count));
44071+ DBGINFO(("%s close entry, count=%d\n", info->device_name, atomic_read(&info->port.count)));
44072
44073 if (tty_port_close_start(&info->port, tty, filp) == 0)
44074 goto cleanup;
44075@@ -746,7 +746,7 @@ static void close(struct tty_struct *tty, struct file *filp)
44076 tty_port_close_end(&info->port, tty);
44077 info->port.tty = NULL;
44078 cleanup:
44079- DBGINFO(("%s close exit, count=%d\n", tty->driver->name, info->port.count));
44080+ DBGINFO(("%s close exit, count=%d\n", tty->driver->name, atomic_read(&info->port.count)));
44081 }
44082
44083 static void hangup(struct tty_struct *tty)
44084@@ -764,7 +764,7 @@ static void hangup(struct tty_struct *tty)
44085 shutdown(info);
44086
44087 spin_lock_irqsave(&info->port.lock, flags);
44088- info->port.count = 0;
44089+ atomic_set(&info->port.count, 0);
44090 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
44091 info->port.tty = NULL;
44092 spin_unlock_irqrestore(&info->port.lock, flags);
44093@@ -1449,7 +1449,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
44094 unsigned short new_crctype;
44095
44096 /* return error if TTY interface open */
44097- if (info->port.count)
44098+ if (atomic_read(&info->port.count))
44099 return -EBUSY;
44100
44101 DBGINFO(("%s hdlcdev_attach\n", info->device_name));
44102@@ -1544,7 +1544,7 @@ static int hdlcdev_open(struct net_device *dev)
44103
44104 /* arbitrate between network and tty opens */
44105 spin_lock_irqsave(&info->netlock, flags);
44106- if (info->port.count != 0 || info->netcount != 0) {
44107+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
44108 DBGINFO(("%s hdlc_open busy\n", dev->name));
44109 spin_unlock_irqrestore(&info->netlock, flags);
44110 return -EBUSY;
44111@@ -1629,7 +1629,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
44112 DBGINFO(("%s hdlcdev_ioctl\n", dev->name));
44113
44114 /* return error if TTY interface open */
44115- if (info->port.count)
44116+ if (atomic_read(&info->port.count))
44117 return -EBUSY;
44118
44119 if (cmd != SIOCWANDEV)
44120@@ -2413,7 +2413,7 @@ static irqreturn_t slgt_interrupt(int dummy, void *dev_id)
44121 if (port == NULL)
44122 continue;
44123 spin_lock(&port->lock);
44124- if ((port->port.count || port->netcount) &&
44125+ if ((atomic_read(&port->port.count) || port->netcount) &&
44126 port->pending_bh && !port->bh_running &&
44127 !port->bh_requested) {
44128 DBGISR(("%s bh queued\n", port->device_name));
44129@@ -3302,7 +3302,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
44130 spin_lock_irqsave(&info->lock, flags);
44131 if (!tty_hung_up_p(filp)) {
44132 extra_count = true;
44133- port->count--;
44134+ atomic_dec(&port->count);
44135 }
44136 spin_unlock_irqrestore(&info->lock, flags);
44137 port->blocked_open++;
44138@@ -3339,7 +3339,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
44139 remove_wait_queue(&port->open_wait, &wait);
44140
44141 if (extra_count)
44142- port->count++;
44143+ atomic_inc(&port->count);
44144 port->blocked_open--;
44145
44146 if (!retval)
44147diff --git a/drivers/tty/synclinkmp.c b/drivers/tty/synclinkmp.c
44148index ff17138..e38b41e 100644
44149--- a/drivers/tty/synclinkmp.c
44150+++ b/drivers/tty/synclinkmp.c
44151@@ -750,7 +750,7 @@ static int open(struct tty_struct *tty, struct file *filp)
44152
44153 if (debug_level >= DEBUG_LEVEL_INFO)
44154 printk("%s(%d):%s open(), old ref count = %d\n",
44155- __FILE__,__LINE__,tty->driver->name, info->port.count);
44156+ __FILE__,__LINE__,tty->driver->name, atomic_read(&info->port.count));
44157
44158 /* If port is closing, signal caller to try again */
44159 if (tty_hung_up_p(filp) || info->port.flags & ASYNC_CLOSING){
44160@@ -769,10 +769,10 @@ static int open(struct tty_struct *tty, struct file *filp)
44161 spin_unlock_irqrestore(&info->netlock, flags);
44162 goto cleanup;
44163 }
44164- info->port.count++;
44165+ atomic_inc(&info->port.count);
44166 spin_unlock_irqrestore(&info->netlock, flags);
44167
44168- if (info->port.count == 1) {
44169+ if (atomic_read(&info->port.count) == 1) {
44170 /* 1st open on this device, init hardware */
44171 retval = startup(info);
44172 if (retval < 0)
44173@@ -796,8 +796,8 @@ cleanup:
44174 if (retval) {
44175 if (tty->count == 1)
44176 info->port.tty = NULL; /* tty layer will release tty struct */
44177- if(info->port.count)
44178- info->port.count--;
44179+ if(atomic_read(&info->port.count))
44180+ atomic_dec(&info->port.count);
44181 }
44182
44183 return retval;
44184@@ -815,7 +815,7 @@ static void close(struct tty_struct *tty, struct file *filp)
44185
44186 if (debug_level >= DEBUG_LEVEL_INFO)
44187 printk("%s(%d):%s close() entry, count=%d\n",
44188- __FILE__,__LINE__, info->device_name, info->port.count);
44189+ __FILE__,__LINE__, info->device_name, atomic_read(&info->port.count));
44190
44191 if (tty_port_close_start(&info->port, tty, filp) == 0)
44192 goto cleanup;
44193@@ -834,7 +834,7 @@ static void close(struct tty_struct *tty, struct file *filp)
44194 cleanup:
44195 if (debug_level >= DEBUG_LEVEL_INFO)
44196 printk("%s(%d):%s close() exit, count=%d\n", __FILE__,__LINE__,
44197- tty->driver->name, info->port.count);
44198+ tty->driver->name, atomic_read(&info->port.count));
44199 }
44200
44201 /* Called by tty_hangup() when a hangup is signaled.
44202@@ -857,7 +857,7 @@ static void hangup(struct tty_struct *tty)
44203 shutdown(info);
44204
44205 spin_lock_irqsave(&info->port.lock, flags);
44206- info->port.count = 0;
44207+ atomic_set(&info->port.count, 0);
44208 info->port.flags &= ~ASYNC_NORMAL_ACTIVE;
44209 info->port.tty = NULL;
44210 spin_unlock_irqrestore(&info->port.lock, flags);
44211@@ -1565,7 +1565,7 @@ static int hdlcdev_attach(struct net_device *dev, unsigned short encoding,
44212 unsigned short new_crctype;
44213
44214 /* return error if TTY interface open */
44215- if (info->port.count)
44216+ if (atomic_read(&info->port.count))
44217 return -EBUSY;
44218
44219 switch (encoding)
44220@@ -1660,7 +1660,7 @@ static int hdlcdev_open(struct net_device *dev)
44221
44222 /* arbitrate between network and tty opens */
44223 spin_lock_irqsave(&info->netlock, flags);
44224- if (info->port.count != 0 || info->netcount != 0) {
44225+ if (atomic_read(&info->port.count) != 0 || info->netcount != 0) {
44226 printk(KERN_WARNING "%s: hdlc_open returning busy\n", dev->name);
44227 spin_unlock_irqrestore(&info->netlock, flags);
44228 return -EBUSY;
44229@@ -1746,7 +1746,7 @@ static int hdlcdev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
44230 printk("%s:hdlcdev_ioctl(%s)\n",__FILE__,dev->name);
44231
44232 /* return error if TTY interface open */
44233- if (info->port.count)
44234+ if (atomic_read(&info->port.count))
44235 return -EBUSY;
44236
44237 if (cmd != SIOCWANDEV)
44238@@ -2620,7 +2620,7 @@ static irqreturn_t synclinkmp_interrupt(int dummy, void *dev_id)
44239 * do not request bottom half processing if the
44240 * device is not open in a normal mode.
44241 */
44242- if ( port && (port->port.count || port->netcount) &&
44243+ if ( port && (atomic_read(&port->port.count) || port->netcount) &&
44244 port->pending_bh && !port->bh_running &&
44245 !port->bh_requested ) {
44246 if ( debug_level >= DEBUG_LEVEL_ISR )
44247@@ -3318,12 +3318,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
44248
44249 if (debug_level >= DEBUG_LEVEL_INFO)
44250 printk("%s(%d):%s block_til_ready() before block, count=%d\n",
44251- __FILE__,__LINE__, tty->driver->name, port->count );
44252+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
44253
44254 spin_lock_irqsave(&info->lock, flags);
44255 if (!tty_hung_up_p(filp)) {
44256 extra_count = true;
44257- port->count--;
44258+ atomic_dec(&port->count);
44259 }
44260 spin_unlock_irqrestore(&info->lock, flags);
44261 port->blocked_open++;
44262@@ -3352,7 +3352,7 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
44263
44264 if (debug_level >= DEBUG_LEVEL_INFO)
44265 printk("%s(%d):%s block_til_ready() count=%d\n",
44266- __FILE__,__LINE__, tty->driver->name, port->count );
44267+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
44268
44269 tty_unlock(tty);
44270 schedule();
44271@@ -3363,12 +3363,12 @@ static int block_til_ready(struct tty_struct *tty, struct file *filp,
44272 remove_wait_queue(&port->open_wait, &wait);
44273
44274 if (extra_count)
44275- port->count++;
44276+ atomic_inc(&port->count);
44277 port->blocked_open--;
44278
44279 if (debug_level >= DEBUG_LEVEL_INFO)
44280 printk("%s(%d):%s block_til_ready() after, count=%d\n",
44281- __FILE__,__LINE__, tty->driver->name, port->count );
44282+ __FILE__,__LINE__, tty->driver->name, atomic_read(&port->count));
44283
44284 if (!retval)
44285 port->flags |= ASYNC_NORMAL_ACTIVE;
44286diff --git a/drivers/tty/sysrq.c b/drivers/tty/sysrq.c
44287index b51c154..17d55d1 100644
44288--- a/drivers/tty/sysrq.c
44289+++ b/drivers/tty/sysrq.c
44290@@ -1022,7 +1022,7 @@ EXPORT_SYMBOL(unregister_sysrq_key);
44291 static ssize_t write_sysrq_trigger(struct file *file, const char __user *buf,
44292 size_t count, loff_t *ppos)
44293 {
44294- if (count) {
44295+ if (count && capable(CAP_SYS_ADMIN)) {
44296 char c;
44297
44298 if (get_user(c, buf))
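[editor's note] The sysrq change makes /proc/sysrq-trigger writes conditional on CAP_SYS_ADMIN instead of relying on file permissions alone; without the capability the write is silently accepted but ignored. The shape of the check:

static ssize_t demo_write_trigger(struct file *file, const char __user *buf,
                                  size_t count, loff_t *ppos)
{
        /* only privileged callers may actually fire a sysrq */
        if (count && capable(CAP_SYS_ADMIN)) {
                char c;

                if (get_user(c, buf))
                        return -EFAULT;
                /* ... dispatch the sysrq key c ... */
        }
        return count;
}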
44299diff --git a/drivers/tty/tty_io.c b/drivers/tty/tty_io.c
44300index 4476682..d77e748 100644
44301--- a/drivers/tty/tty_io.c
44302+++ b/drivers/tty/tty_io.c
44303@@ -3466,7 +3466,7 @@ EXPORT_SYMBOL_GPL(get_current_tty);
44304
44305 void tty_default_fops(struct file_operations *fops)
44306 {
44307- *fops = tty_fops;
44308+ memcpy((void *)fops, &tty_fops, sizeof(tty_fops));
44309 }
44310
44311 /*
44312diff --git a/drivers/tty/tty_ldisc.c b/drivers/tty/tty_ldisc.c
44313index 1afe192..73d2c20 100644
44314--- a/drivers/tty/tty_ldisc.c
44315+++ b/drivers/tty/tty_ldisc.c
44316@@ -66,7 +66,7 @@ int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc)
44317 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
44318 tty_ldiscs[disc] = new_ldisc;
44319 new_ldisc->num = disc;
44320- new_ldisc->refcount = 0;
44321+ atomic_set(&new_ldisc->refcount, 0);
44322 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
44323
44324 return ret;
44325@@ -94,7 +94,7 @@ int tty_unregister_ldisc(int disc)
44326 return -EINVAL;
44327
44328 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
44329- if (tty_ldiscs[disc]->refcount)
44330+ if (atomic_read(&tty_ldiscs[disc]->refcount))
44331 ret = -EBUSY;
44332 else
44333 tty_ldiscs[disc] = NULL;
44334@@ -115,7 +115,7 @@ static struct tty_ldisc_ops *get_ldops(int disc)
44335 if (ldops) {
44336 ret = ERR_PTR(-EAGAIN);
44337 if (try_module_get(ldops->owner)) {
44338- ldops->refcount++;
44339+ atomic_inc(&ldops->refcount);
44340 ret = ldops;
44341 }
44342 }
44343@@ -128,7 +128,7 @@ static void put_ldops(struct tty_ldisc_ops *ldops)
44344 unsigned long flags;
44345
44346 raw_spin_lock_irqsave(&tty_ldisc_lock, flags);
44347- ldops->refcount--;
44348+ atomic_dec(&ldops->refcount);
44349 module_put(ldops->owner);
44350 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
44351 }
44352@@ -196,7 +196,7 @@ static inline void tty_ldisc_put(struct tty_ldisc *ld)
44353 /* unreleased reader reference(s) will cause this WARN */
44354 WARN_ON(!atomic_dec_and_test(&ld->users));
44355
44356- ld->ops->refcount--;
44357+ atomic_dec(&ld->ops->refcount);
44358 module_put(ld->ops->owner);
44359 kfree(ld);
44360 raw_spin_unlock_irqrestore(&tty_ldisc_lock, flags);
44361diff --git a/drivers/tty/tty_port.c b/drivers/tty/tty_port.c
44362index 121aeb9..0d2c4b9 100644
44363--- a/drivers/tty/tty_port.c
44364+++ b/drivers/tty/tty_port.c
44365@@ -232,7 +232,7 @@ void tty_port_hangup(struct tty_port *port)
44366 unsigned long flags;
44367
44368 spin_lock_irqsave(&port->lock, flags);
44369- port->count = 0;
44370+ atomic_set(&port->count, 0);
44371 port->flags &= ~ASYNC_NORMAL_ACTIVE;
44372 tty = port->tty;
44373 if (tty)
44374@@ -391,7 +391,7 @@ int tty_port_block_til_ready(struct tty_port *port,
44375 /* The port lock protects the port counts */
44376 spin_lock_irqsave(&port->lock, flags);
44377 if (!tty_hung_up_p(filp))
44378- port->count--;
44379+ atomic_dec(&port->count);
44380 port->blocked_open++;
44381 spin_unlock_irqrestore(&port->lock, flags);
44382
44383@@ -433,7 +433,7 @@ int tty_port_block_til_ready(struct tty_port *port,
44384 we must not mess that up further */
44385 spin_lock_irqsave(&port->lock, flags);
44386 if (!tty_hung_up_p(filp))
44387- port->count++;
44388+ atomic_inc(&port->count);
44389 port->blocked_open--;
44390 if (retval == 0)
44391 port->flags |= ASYNC_NORMAL_ACTIVE;
44392@@ -467,19 +467,19 @@ int tty_port_close_start(struct tty_port *port,
44393 return 0;
44394 }
44395
44396- if (tty->count == 1 && port->count != 1) {
44397+ if (tty->count == 1 && atomic_read(&port->count) != 1) {
44398 printk(KERN_WARNING
44399 "tty_port_close_start: tty->count = 1 port count = %d.\n",
44400- port->count);
44401- port->count = 1;
44402+ atomic_read(&port->count));
44403+ atomic_set(&port->count, 1);
44404 }
44405- if (--port->count < 0) {
44406+ if (atomic_dec_return(&port->count) < 0) {
44407 printk(KERN_WARNING "tty_port_close_start: count = %d\n",
44408- port->count);
44409- port->count = 0;
44410+ atomic_read(&port->count));
44411+ atomic_set(&port->count, 0);
44412 }
44413
44414- if (port->count) {
44415+ if (atomic_read(&port->count)) {
44416 spin_unlock_irqrestore(&port->lock, flags);
44417 if (port->ops->drop)
44418 port->ops->drop(port);
44419@@ -565,7 +565,7 @@ int tty_port_open(struct tty_port *port, struct tty_struct *tty,
44420 {
44421 spin_lock_irq(&port->lock);
44422 if (!tty_hung_up_p(filp))
44423- ++port->count;
44424+ atomic_inc(&port->count);
44425 spin_unlock_irq(&port->lock);
44426 tty_port_tty_set(port, tty);
44427
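[editor's note] tty_port_close_start() keeps its defensive repair of inconsistent counts, but atomic_dec_return() now fuses the decrement with the sanity test. Roughly (caller holds port->lock, as in the real function; names hypothetical):

static int demo_close_start(atomic_t *count)
{
        if (atomic_dec_return(count) < 0) {
                /* bookkeeping went wrong somewhere: warn and repair */
                printk(KERN_WARNING "demo: port count went negative\n");
                atomic_set(count, 0);
        }
        return atomic_read(count) == 0; /* last close? */
}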
44428diff --git a/drivers/tty/vt/keyboard.c b/drivers/tty/vt/keyboard.c
44429index a9af1b9a..1e08e7f 100644
44430--- a/drivers/tty/vt/keyboard.c
44431+++ b/drivers/tty/vt/keyboard.c
44432@@ -647,6 +647,16 @@ static void k_spec(struct vc_data *vc, unsigned char value, char up_flag)
44433 kbd->kbdmode == VC_OFF) &&
44434 value != KVAL(K_SAK))
44435 return; /* SAK is allowed even in raw mode */
44436+
44437+#if defined(CONFIG_GRKERNSEC_PROC) || defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
44438+ {
44439+ void *func = fn_handler[value];
44440+ if (func == fn_show_state || func == fn_show_ptregs ||
44441+ func == fn_show_mem)
44442+ return;
44443+ }
44444+#endif
44445+
44446 fn_handler[value](vc);
44447 }
44448
44449@@ -1795,9 +1805,6 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
44450 if (copy_from_user(&tmp, user_kbe, sizeof(struct kbentry)))
44451 return -EFAULT;
44452
44453- if (!capable(CAP_SYS_TTY_CONFIG))
44454- perm = 0;
44455-
44456 switch (cmd) {
44457 case KDGKBENT:
44458 /* Ensure another thread doesn't free it under us */
44459@@ -1812,6 +1819,9 @@ int vt_do_kdsk_ioctl(int cmd, struct kbentry __user *user_kbe, int perm,
44460 spin_unlock_irqrestore(&kbd_event_lock, flags);
44461 return put_user(val, &user_kbe->kb_value);
44462 case KDSKBENT:
44463+ if (!capable(CAP_SYS_TTY_CONFIG))
44464+ perm = 0;
44465+
44466 if (!perm)
44467 return -EPERM;
44468 if (!i && v == K_NOSUCHMAP) {
44469@@ -1902,9 +1912,6 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
44470 int i, j, k;
44471 int ret;
44472
44473- if (!capable(CAP_SYS_TTY_CONFIG))
44474- perm = 0;
44475-
44476 kbs = kmalloc(sizeof(*kbs), GFP_KERNEL);
44477 if (!kbs) {
44478 ret = -ENOMEM;
44479@@ -1938,6 +1945,9 @@ int vt_do_kdgkb_ioctl(int cmd, struct kbsentry __user *user_kdgkb, int perm)
44480 kfree(kbs);
44481 return ((p && *p) ? -EOVERFLOW : 0);
44482 case KDSKBSENT:
44483+ if (!capable(CAP_SYS_TTY_CONFIG))
44484+ perm = 0;
44485+
44486 if (!perm) {
44487 ret = -EPERM;
44488 goto reterr;
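[editor's note] keyboard.c moves the CAP_SYS_TTY_CONFIG test out of the common entry of the two ioctl handlers and into the write cases only (KDSKBENT/KDSKBSENT), so read-only queries such as KDGKBENT no longer require the capability. The pattern, condensed with hypothetical names:

static int demo_kbd_ioctl(unsigned int cmd, int perm)
{
        switch (cmd) {
        case KDGKBENT:
                /* read-only query: deliberately left unprivileged */
                return 0;
        case KDSKBENT:
                /* the capability check now guards only the write path */
                if (!capable(CAP_SYS_TTY_CONFIG))
                        perm = 0;
                if (!perm)
                        return -EPERM;
                return 0;
        }
        return -ENOIOCTLCMD;
}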
44489diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
44490index b645c47..a55c182 100644
44491--- a/drivers/uio/uio.c
44492+++ b/drivers/uio/uio.c
44493@@ -25,6 +25,7 @@
44494 #include <linux/kobject.h>
44495 #include <linux/cdev.h>
44496 #include <linux/uio_driver.h>
44497+#include <asm/local.h>
44498
44499 #define UIO_MAX_DEVICES (1U << MINORBITS)
44500
44501@@ -32,10 +33,10 @@ struct uio_device {
44502 struct module *owner;
44503 struct device *dev;
44504 int minor;
44505- atomic_t event;
44506+ atomic_unchecked_t event;
44507 struct fasync_struct *async_queue;
44508 wait_queue_head_t wait;
44509- int vma_count;
44510+ local_t vma_count;
44511 struct uio_info *info;
44512 struct kobject *map_dir;
44513 struct kobject *portio_dir;
44514@@ -242,7 +243,7 @@ static ssize_t show_event(struct device *dev,
44515 struct device_attribute *attr, char *buf)
44516 {
44517 struct uio_device *idev = dev_get_drvdata(dev);
44518- return sprintf(buf, "%u\n", (unsigned int)atomic_read(&idev->event));
44519+ return sprintf(buf, "%u\n", (unsigned int)atomic_read_unchecked(&idev->event));
44520 }
44521
44522 static struct device_attribute uio_class_attributes[] = {
44523@@ -398,7 +399,7 @@ void uio_event_notify(struct uio_info *info)
44524 {
44525 struct uio_device *idev = info->uio_dev;
44526
44527- atomic_inc(&idev->event);
44528+ atomic_inc_unchecked(&idev->event);
44529 wake_up_interruptible(&idev->wait);
44530 kill_fasync(&idev->async_queue, SIGIO, POLL_IN);
44531 }
44532@@ -451,7 +452,7 @@ static int uio_open(struct inode *inode, struct file *filep)
44533 }
44534
44535 listener->dev = idev;
44536- listener->event_count = atomic_read(&idev->event);
44537+ listener->event_count = atomic_read_unchecked(&idev->event);
44538 filep->private_data = listener;
44539
44540 if (idev->info->open) {
44541@@ -502,7 +503,7 @@ static unsigned int uio_poll(struct file *filep, poll_table *wait)
44542 return -EIO;
44543
44544 poll_wait(filep, &idev->wait, wait);
44545- if (listener->event_count != atomic_read(&idev->event))
44546+ if (listener->event_count != atomic_read_unchecked(&idev->event))
44547 return POLLIN | POLLRDNORM;
44548 return 0;
44549 }
44550@@ -527,7 +528,7 @@ static ssize_t uio_read(struct file *filep, char __user *buf,
44551 do {
44552 set_current_state(TASK_INTERRUPTIBLE);
44553
44554- event_count = atomic_read(&idev->event);
44555+ event_count = atomic_read_unchecked(&idev->event);
44556 if (event_count != listener->event_count) {
44557 if (copy_to_user(buf, &event_count, count))
44558 retval = -EFAULT;
44559@@ -596,13 +597,13 @@ static int uio_find_mem_index(struct vm_area_struct *vma)
44560 static void uio_vma_open(struct vm_area_struct *vma)
44561 {
44562 struct uio_device *idev = vma->vm_private_data;
44563- idev->vma_count++;
44564+ local_inc(&idev->vma_count);
44565 }
44566
44567 static void uio_vma_close(struct vm_area_struct *vma)
44568 {
44569 struct uio_device *idev = vma->vm_private_data;
44570- idev->vma_count--;
44571+ local_dec(&idev->vma_count);
44572 }
44573
44574 static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
44575@@ -809,7 +810,7 @@ int __uio_register_device(struct module *owner,
44576 idev->owner = owner;
44577 idev->info = info;
44578 init_waitqueue_head(&idev->wait);
44579- atomic_set(&idev->event, 0);
44580+ atomic_set_unchecked(&idev->event, 0);
44581
44582 ret = uio_get_minor(idev);
44583 if (ret)
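[editor's note] uio.c swaps two counter types at once: the user-visible event counter becomes atomic_unchecked_t (exempt from PAX_REFCOUNT overflow trapping, since wrapping a pure event count is harmless), and vma_count becomes local_t from asm/local.h, a cheaper counter that the patch evidently deems sufficient for this informational count. Sketch of the local_t half (local_t is mainline; the _unchecked types are PaX-only):

#include <linux/mm.h>
#include <asm/local.h>

struct demo_dev {
        local_t vma_count;      /* was: int vma_count; */
};

static void demo_vma_open(struct vm_area_struct *vma)
{
        struct demo_dev *d = vma->vm_private_data;

        local_inc(&d->vma_count);
}

static void demo_vma_close(struct vm_area_struct *vma)
{
        struct demo_dev *d = vma->vm_private_data;

        local_dec(&d->vma_count);
}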
44584diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c
44585index 8a7eb77..c00402f 100644
44586--- a/drivers/usb/atm/cxacru.c
44587+++ b/drivers/usb/atm/cxacru.c
44588@@ -473,7 +473,7 @@ static ssize_t cxacru_sysfs_store_adsl_config(struct device *dev,
44589 ret = sscanf(buf + pos, "%x=%x%n", &index, &value, &tmp);
44590 if (ret < 2)
44591 return -EINVAL;
44592- if (index < 0 || index > 0x7f)
44593+ if (index > 0x7f)
44594 return -EINVAL;
44595 pos += tmp;
44596
44597diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c
44598index d3527dd..26effa2 100644
44599--- a/drivers/usb/atm/usbatm.c
44600+++ b/drivers/usb/atm/usbatm.c
44601@@ -333,7 +333,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
44602 if (printk_ratelimit())
44603 atm_warn(instance, "%s: OAM not supported (vpi %d, vci %d)!\n",
44604 __func__, vpi, vci);
44605- atomic_inc(&vcc->stats->rx_err);
44606+ atomic_inc_unchecked(&vcc->stats->rx_err);
44607 return;
44608 }
44609
44610@@ -361,7 +361,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
44611 if (length > ATM_MAX_AAL5_PDU) {
44612 atm_rldbg(instance, "%s: bogus length %u (vcc: 0x%p)!\n",
44613 __func__, length, vcc);
44614- atomic_inc(&vcc->stats->rx_err);
44615+ atomic_inc_unchecked(&vcc->stats->rx_err);
44616 goto out;
44617 }
44618
44619@@ -370,14 +370,14 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
44620 if (sarb->len < pdu_length) {
44621 atm_rldbg(instance, "%s: bogus pdu_length %u (sarb->len: %u, vcc: 0x%p)!\n",
44622 __func__, pdu_length, sarb->len, vcc);
44623- atomic_inc(&vcc->stats->rx_err);
44624+ atomic_inc_unchecked(&vcc->stats->rx_err);
44625 goto out;
44626 }
44627
44628 if (crc32_be(~0, skb_tail_pointer(sarb) - pdu_length, pdu_length) != 0xc704dd7b) {
44629 atm_rldbg(instance, "%s: packet failed crc check (vcc: 0x%p)!\n",
44630 __func__, vcc);
44631- atomic_inc(&vcc->stats->rx_err);
44632+ atomic_inc_unchecked(&vcc->stats->rx_err);
44633 goto out;
44634 }
44635
44636@@ -389,7 +389,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
44637 if (printk_ratelimit())
44638 atm_err(instance, "%s: no memory for skb (length: %u)!\n",
44639 __func__, length);
44640- atomic_inc(&vcc->stats->rx_drop);
44641+ atomic_inc_unchecked(&vcc->stats->rx_drop);
44642 goto out;
44643 }
44644
44645@@ -417,7 +417,7 @@ static void usbatm_extract_one_cell(struct usbatm_data *instance, unsigned char
44646
44647 vcc->push(vcc, skb);
44648
44649- atomic_inc(&vcc->stats->rx);
44650+ atomic_inc_unchecked(&vcc->stats->rx);
44651 out:
44652 skb_trim(sarb, 0);
44653 }
44654@@ -623,7 +623,7 @@ static void usbatm_tx_process(unsigned long data)
44655 struct atm_vcc *vcc = UDSL_SKB(skb)->atm.vcc;
44656
44657 usbatm_pop(vcc, skb);
44658- atomic_inc(&vcc->stats->tx);
44659+ atomic_inc_unchecked(&vcc->stats->tx);
44660
44661 skb = skb_dequeue(&instance->sndqueue);
44662 }
44663@@ -779,11 +779,11 @@ static int usbatm_atm_proc_read(struct atm_dev *atm_dev, loff_t * pos, char *pag
44664 if (!left--)
44665 return sprintf(page,
44666 "AAL5: tx %d ( %d err ), rx %d ( %d err, %d drop )\n",
44667- atomic_read(&atm_dev->stats.aal5.tx),
44668- atomic_read(&atm_dev->stats.aal5.tx_err),
44669- atomic_read(&atm_dev->stats.aal5.rx),
44670- atomic_read(&atm_dev->stats.aal5.rx_err),
44671- atomic_read(&atm_dev->stats.aal5.rx_drop));
44672+ atomic_read_unchecked(&atm_dev->stats.aal5.tx),
44673+ atomic_read_unchecked(&atm_dev->stats.aal5.tx_err),
44674+ atomic_read_unchecked(&atm_dev->stats.aal5.rx),
44675+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_err),
44676+ atomic_read_unchecked(&atm_dev->stats.aal5.rx_drop));
44677
44678 if (!left--) {
44679 if (instance->disconnected)
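[editor's note] usbatm.c converts the ATM statistics counters to the _unchecked atomic variants: under PAX_REFCOUNT every plain atomic_t increment is checked for overflow, so counters that may legitimately wrap, and that never guard an object lifetime, are opted out. Hedged sketch (atomic_unchecked_t and friends exist only in PaX-patched trees; on a vanilla kernel these would simply be atomic_t/atomic_inc):

/* assumes a PaX tree providing the _unchecked atomic API */
struct demo_stats {
        atomic_unchecked_t rx;          /* wrapping is harmless */
        atomic_unchecked_t rx_err;
};

static void demo_account_rx(struct demo_stats *s, int ok)
{
        if (ok)
                atomic_inc_unchecked(&s->rx);
        else
                atomic_inc_unchecked(&s->rx_err);
}

The same conversion covers devices.c, hcd.c, sysfs.c and usb.c below (device event and URB counters).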
44680diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c
44681index 2a3bbdf..91d72cf 100644
44682--- a/drivers/usb/core/devices.c
44683+++ b/drivers/usb/core/devices.c
44684@@ -126,7 +126,7 @@ static const char format_endpt[] =
44685 * time it gets called.
44686 */
44687 static struct device_connect_event {
44688- atomic_t count;
44689+ atomic_unchecked_t count;
44690 wait_queue_head_t wait;
44691 } device_event = {
44692 .count = ATOMIC_INIT(1),
44693@@ -164,7 +164,7 @@ static const struct class_info clas_info[] = {
44694
44695 void usbfs_conn_disc_event(void)
44696 {
44697- atomic_add(2, &device_event.count);
44698+ atomic_add_unchecked(2, &device_event.count);
44699 wake_up(&device_event.wait);
44700 }
44701
44702@@ -652,7 +652,7 @@ static unsigned int usb_device_poll(struct file *file,
44703
44704 poll_wait(file, &device_event.wait, wait);
44705
44706- event_count = atomic_read(&device_event.count);
44707+ event_count = atomic_read_unchecked(&device_event.count);
44708 if (file->f_version != event_count) {
44709 file->f_version = event_count;
44710 return POLLIN | POLLRDNORM;
44711diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
44712index d53547d..6a22d02 100644
44713--- a/drivers/usb/core/hcd.c
44714+++ b/drivers/usb/core/hcd.c
44715@@ -1526,7 +1526,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
44716 */
44717 usb_get_urb(urb);
44718 atomic_inc(&urb->use_count);
44719- atomic_inc(&urb->dev->urbnum);
44720+ atomic_inc_unchecked(&urb->dev->urbnum);
44721 usbmon_urb_submit(&hcd->self, urb);
44722
44723 /* NOTE requirements on root-hub callers (usbfs and the hub
44724@@ -1553,7 +1553,7 @@ int usb_hcd_submit_urb (struct urb *urb, gfp_t mem_flags)
44725 urb->hcpriv = NULL;
44726 INIT_LIST_HEAD(&urb->urb_list);
44727 atomic_dec(&urb->use_count);
44728- atomic_dec(&urb->dev->urbnum);
44729+ atomic_dec_unchecked(&urb->dev->urbnum);
44730 if (atomic_read(&urb->reject))
44731 wake_up(&usb_kill_urb_queue);
44732 usb_put_urb(urb);
44733diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c
44734index 444d30e..f15c850 100644
44735--- a/drivers/usb/core/message.c
44736+++ b/drivers/usb/core/message.c
44737@@ -129,7 +129,7 @@ static int usb_internal_control_msg(struct usb_device *usb_dev,
44738 * method can wait for it to complete. Since you don't have a handle on the
44739 * URB used, you can't cancel the request.
44740 */
44741-int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
44742+int __intentional_overflow(-1) usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
44743 __u8 requesttype, __u16 value, __u16 index, void *data,
44744 __u16 size, int timeout)
44745 {
44746diff --git a/drivers/usb/core/sysfs.c b/drivers/usb/core/sysfs.c
44747index aa38db4..0a08682 100644
44748--- a/drivers/usb/core/sysfs.c
44749+++ b/drivers/usb/core/sysfs.c
44750@@ -239,7 +239,7 @@ show_urbnum(struct device *dev, struct device_attribute *attr, char *buf)
44751 struct usb_device *udev;
44752
44753 udev = to_usb_device(dev);
44754- return sprintf(buf, "%d\n", atomic_read(&udev->urbnum));
44755+ return sprintf(buf, "%d\n", atomic_read_unchecked(&udev->urbnum));
44756 }
44757 static DEVICE_ATTR(urbnum, S_IRUGO, show_urbnum, NULL);
44758
44759diff --git a/drivers/usb/core/usb.c b/drivers/usb/core/usb.c
44760index b10da72..43aa0b2 100644
44761--- a/drivers/usb/core/usb.c
44762+++ b/drivers/usb/core/usb.c
44763@@ -389,7 +389,7 @@ struct usb_device *usb_alloc_dev(struct usb_device *parent,
44764 set_dev_node(&dev->dev, dev_to_node(bus->controller));
44765 dev->state = USB_STATE_ATTACHED;
44766 dev->lpm_disable_count = 1;
44767- atomic_set(&dev->urbnum, 0);
44768+ atomic_set_unchecked(&dev->urbnum, 0);
44769
44770 INIT_LIST_HEAD(&dev->ep0.urb_list);
44771 dev->ep0.desc.bLength = USB_DT_ENDPOINT_SIZE;
44772diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c
44773index 5e29dde..eca992f 100644
44774--- a/drivers/usb/early/ehci-dbgp.c
44775+++ b/drivers/usb/early/ehci-dbgp.c
44776@@ -98,7 +98,8 @@ static inline u32 dbgp_len_update(u32 x, u32 len)
44777
44778 #ifdef CONFIG_KGDB
44779 static struct kgdb_io kgdbdbgp_io_ops;
44780-#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops)
44781+static struct kgdb_io kgdbdbgp_io_ops_console;
44782+#define dbgp_kgdb_mode (dbg_io_ops == &kgdbdbgp_io_ops || dbg_io_ops == &kgdbdbgp_io_ops_console)
44783 #else
44784 #define dbgp_kgdb_mode (0)
44785 #endif
44786@@ -1047,6 +1048,13 @@ static struct kgdb_io kgdbdbgp_io_ops = {
44787 .write_char = kgdbdbgp_write_char,
44788 };
44789
44790+static struct kgdb_io kgdbdbgp_io_ops_console = {
44791+ .name = "kgdbdbgp",
44792+ .read_char = kgdbdbgp_read_char,
44793+ .write_char = kgdbdbgp_write_char,
44794+ .is_console = 1
44795+};
44796+
44797 static int kgdbdbgp_wait_time;
44798
44799 static int __init kgdbdbgp_parse_config(char *str)
44800@@ -1062,8 +1070,10 @@ static int __init kgdbdbgp_parse_config(char *str)
44801 ptr++;
44802 kgdbdbgp_wait_time = simple_strtoul(ptr, &ptr, 10);
44803 }
44804- kgdb_register_io_module(&kgdbdbgp_io_ops);
44805- kgdbdbgp_io_ops.is_console = early_dbgp_console.index != -1;
44806+ if (early_dbgp_console.index != -1)
44807+ kgdb_register_io_module(&kgdbdbgp_io_ops_console);
44808+ else
44809+ kgdb_register_io_module(&kgdbdbgp_io_ops);
44810
44811 return 0;
44812 }
44813diff --git a/drivers/usb/gadget/u_serial.c b/drivers/usb/gadget/u_serial.c
44814index b369292..9f3ba40 100644
44815--- a/drivers/usb/gadget/u_serial.c
44816+++ b/drivers/usb/gadget/u_serial.c
44817@@ -733,9 +733,9 @@ static int gs_open(struct tty_struct *tty, struct file *file)
44818 spin_lock_irq(&port->port_lock);
44819
44820 /* already open? Great. */
44821- if (port->port.count) {
44822+ if (atomic_read(&port->port.count)) {
44823 status = 0;
44824- port->port.count++;
44825+ atomic_inc(&port->port.count);
44826
44827 /* currently opening/closing? wait ... */
44828 } else if (port->openclose) {
44829@@ -794,7 +794,7 @@ static int gs_open(struct tty_struct *tty, struct file *file)
44830 tty->driver_data = port;
44831 port->port.tty = tty;
44832
44833- port->port.count = 1;
44834+ atomic_set(&port->port.count, 1);
44835 port->openclose = false;
44836
44837 /* if connected, start the I/O stream */
44838@@ -836,11 +836,11 @@ static void gs_close(struct tty_struct *tty, struct file *file)
44839
44840 spin_lock_irq(&port->port_lock);
44841
44842- if (port->port.count != 1) {
44843- if (port->port.count == 0)
44844+ if (atomic_read(&port->port.count) != 1) {
44845+ if (atomic_read(&port->port.count) == 0)
44846 WARN_ON(1);
44847 else
44848- --port->port.count;
44849+ atomic_dec(&port->port.count);
44850 goto exit;
44851 }
44852
44853@@ -850,7 +850,7 @@ static void gs_close(struct tty_struct *tty, struct file *file)
44854 * and sleep if necessary
44855 */
44856 port->openclose = true;
44857- port->port.count = 0;
44858+ atomic_set(&port->port.count, 0);
44859
44860 gser = port->port_usb;
44861 if (gser && gser->disconnect)
44862@@ -1066,7 +1066,7 @@ static int gs_closed(struct gs_port *port)
44863 int cond;
44864
44865 spin_lock_irq(&port->port_lock);
44866- cond = (port->port.count == 0) && !port->openclose;
44867+ cond = (atomic_read(&port->port.count) == 0) && !port->openclose;
44868 spin_unlock_irq(&port->port_lock);
44869 return cond;
44870 }
44871@@ -1209,7 +1209,7 @@ int gserial_connect(struct gserial *gser, u8 port_num)
44872 /* if it's already open, start I/O ... and notify the serial
44873 * protocol about open/close status (connect/disconnect).
44874 */
44875- if (port->port.count) {
44876+ if (atomic_read(&port->port.count)) {
44877 pr_debug("gserial_connect: start ttyGS%d\n", port->port_num);
44878 gs_start_io(port);
44879 if (gser->connect)
44880@@ -1256,7 +1256,7 @@ void gserial_disconnect(struct gserial *gser)
44881
44882 port->port_usb = NULL;
44883 gser->ioport = NULL;
44884- if (port->port.count > 0 || port->openclose) {
44885+ if (atomic_read(&port->port.count) > 0 || port->openclose) {
44886 wake_up_interruptible(&port->drain_wait);
44887 if (port->port.tty)
44888 tty_hangup(port->port.tty);
44889@@ -1272,7 +1272,7 @@ void gserial_disconnect(struct gserial *gser)
44890
44891 /* finally, free any unused/unusable I/O buffers */
44892 spin_lock_irqsave(&port->port_lock, flags);
44893- if (port->port.count == 0 && !port->openclose)
44894+ if (atomic_read(&port->port.count) == 0 && !port->openclose)
44895 gs_buf_free(&port->port_write_buf);
44896 gs_free_requests(gser->out, &port->read_pool, NULL);
44897 gs_free_requests(gser->out, &port->read_queue, NULL);
44898diff --git a/drivers/usb/serial/console.c b/drivers/usb/serial/console.c
44899index 5f3bcd3..bfca43f 100644
44900--- a/drivers/usb/serial/console.c
44901+++ b/drivers/usb/serial/console.c
44902@@ -124,7 +124,7 @@ static int usb_console_setup(struct console *co, char *options)
44903
44904 info->port = port;
44905
44906- ++port->port.count;
44907+ atomic_inc(&port->port.count);
44908 if (!test_bit(ASYNCB_INITIALIZED, &port->port.flags)) {
44909 if (serial->type->set_termios) {
44910 /*
44911@@ -174,7 +174,7 @@ static int usb_console_setup(struct console *co, char *options)
44912 }
44913 /* Now that any required fake tty operations are completed restore
44914 * the tty port count */
44915- --port->port.count;
44916+ atomic_dec(&port->port.count);
44917 /* The console is special in terms of closing the device so
44918 * indicate this port is now acting as a system console. */
44919 port->port.console = 1;
44920@@ -187,7 +187,7 @@ static int usb_console_setup(struct console *co, char *options)
44921 free_tty:
44922 kfree(tty);
44923 reset_open_count:
44924- port->port.count = 0;
44925+ atomic_set(&port->port.count, 0);
44926 usb_autopm_put_interface(serial->interface);
44927 error_get_interface:
44928 usb_serial_put(serial);
44929diff --git a/drivers/usb/storage/usb.h b/drivers/usb/storage/usb.h
44930index 75f70f0..d467e1a 100644
44931--- a/drivers/usb/storage/usb.h
44932+++ b/drivers/usb/storage/usb.h
44933@@ -63,7 +63,7 @@ struct us_unusual_dev {
44934 __u8 useProtocol;
44935 __u8 useTransport;
44936 int (*initFunction)(struct us_data *);
44937-};
44938+} __do_const;
44939
44940
44941 /* Dynamic bitflag definitions (us->dflags): used in set_bit() etc. */
44942diff --git a/drivers/usb/wusbcore/wa-hc.h b/drivers/usb/wusbcore/wa-hc.h
44943index d6bea3e..60b250e 100644
44944--- a/drivers/usb/wusbcore/wa-hc.h
44945+++ b/drivers/usb/wusbcore/wa-hc.h
44946@@ -192,7 +192,7 @@ struct wahc {
44947 struct list_head xfer_delayed_list;
44948 spinlock_t xfer_list_lock;
44949 struct work_struct xfer_work;
44950- atomic_t xfer_id_count;
44951+ atomic_unchecked_t xfer_id_count;
44952 };
44953
44954
44955@@ -246,7 +246,7 @@ static inline void wa_init(struct wahc *wa)
44956 INIT_LIST_HEAD(&wa->xfer_delayed_list);
44957 spin_lock_init(&wa->xfer_list_lock);
44958 INIT_WORK(&wa->xfer_work, wa_urb_enqueue_run);
44959- atomic_set(&wa->xfer_id_count, 1);
44960+ atomic_set_unchecked(&wa->xfer_id_count, 1);
44961 }
44962
44963 /**
44964diff --git a/drivers/usb/wusbcore/wa-xfer.c b/drivers/usb/wusbcore/wa-xfer.c
44965index 6ef94bc..1b41265 100644
44966--- a/drivers/usb/wusbcore/wa-xfer.c
44967+++ b/drivers/usb/wusbcore/wa-xfer.c
44968@@ -296,7 +296,7 @@ out:
44969 */
44970 static void wa_xfer_id_init(struct wa_xfer *xfer)
44971 {
44972- xfer->id = atomic_add_return(1, &xfer->wa->xfer_id_count);
44973+ xfer->id = atomic_add_return_unchecked(1, &xfer->wa->xfer_id_count);
44974 }
44975
44976 /*
44977diff --git a/drivers/vhost/net.c b/drivers/vhost/net.c
44978index f80d3dd..8ca5ac7 100644
44979--- a/drivers/vhost/net.c
44980+++ b/drivers/vhost/net.c
44981@@ -150,6 +150,11 @@ static void vhost_net_ubuf_put_and_wait(struct vhost_net_ubuf_ref *ubufs)
44982 {
44983 kref_put(&ubufs->kref, vhost_net_zerocopy_done_signal);
44984 wait_event(ubufs->wait, !atomic_read(&ubufs->kref.refcount));
44985+}
44986+
44987+static void vhost_net_ubuf_put_wait_and_free(struct vhost_net_ubuf_ref *ubufs)
44988+{
44989+ vhost_net_ubuf_put_and_wait(ubufs);
44990 kfree(ubufs);
44991 }
44992
44993@@ -948,7 +953,7 @@ static long vhost_net_set_backend(struct vhost_net *n, unsigned index, int fd)
44994 mutex_unlock(&vq->mutex);
44995
44996 if (oldubufs) {
44997- vhost_net_ubuf_put_and_wait(oldubufs);
44998+ vhost_net_ubuf_put_wait_and_free(oldubufs);
44999 mutex_lock(&vq->mutex);
45000 vhost_zerocopy_signal_used(n, vq);
45001 mutex_unlock(&vq->mutex);
45002@@ -966,7 +971,7 @@ err_used:
45003 rcu_assign_pointer(vq->private_data, oldsock);
45004 vhost_net_enable_vq(n, vq);
45005 if (ubufs)
45006- vhost_net_ubuf_put_and_wait(ubufs);
45007+ vhost_net_ubuf_put_wait_and_free(ubufs);
45008 err_ubufs:
45009 fput(sock->file);
45010 err_vq:
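[editor's note] The vhost/net.c hunks carry an ordering fix rather than a hardening conversion: callers previously waited for the ubuf refcount to drain and then freed the structure themselves; folding both steps into vhost_net_ubuf_put_wait_and_free() makes it impossible to free before the wait completes. The shape of the helper, with hypothetical names:

#include <linux/slab.h>

struct demo_ref;

/* drops our reference and sleeps until every other holder is done */
static void demo_put_and_wait(struct demo_ref *ref);

static void demo_put_wait_and_free(struct demo_ref *ref)
{
        demo_put_and_wait(ref); /* must finish first... */
        kfree(ref);             /* ...only then is the memory dead */
}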
45011diff --git a/drivers/vhost/vringh.c b/drivers/vhost/vringh.c
45012index 5174eba..86e764a 100644
45013--- a/drivers/vhost/vringh.c
45014+++ b/drivers/vhost/vringh.c
45015@@ -800,7 +800,7 @@ static inline int getu16_kern(u16 *val, const u16 *p)
45016
45017 static inline int putu16_kern(u16 *p, u16 val)
45018 {
45019- ACCESS_ONCE(*p) = val;
45020+ ACCESS_ONCE_RW(*p) = val;
45021 return 0;
45022 }
45023
45024diff --git a/drivers/video/aty/aty128fb.c b/drivers/video/aty/aty128fb.c
45025index 8c55011..eed4ae1a 100644
45026--- a/drivers/video/aty/aty128fb.c
45027+++ b/drivers/video/aty/aty128fb.c
45028@@ -149,7 +149,7 @@ enum {
45029 };
45030
45031 /* Must match above enum */
45032-static char * const r128_family[] = {
45033+static const char * const r128_family[] = {
45034 "AGP",
45035 "PCI",
45036 "PRO AGP",
45037diff --git a/drivers/video/aty/atyfb_base.c b/drivers/video/aty/atyfb_base.c
45038index 4f27fdc..d3537e6 100644
45039--- a/drivers/video/aty/atyfb_base.c
45040+++ b/drivers/video/aty/atyfb_base.c
45041@@ -1325,10 +1325,14 @@ static int atyfb_set_par(struct fb_info *info)
45042 par->accel_flags = var->accel_flags; /* hack */
45043
45044 if (var->accel_flags) {
45045- info->fbops->fb_sync = atyfb_sync;
45046+ pax_open_kernel();
45047+ *(void **)&info->fbops->fb_sync = atyfb_sync;
45048+ pax_close_kernel();
45049 info->flags &= ~FBINFO_HWACCEL_DISABLED;
45050 } else {
45051- info->fbops->fb_sync = NULL;
45052+ pax_open_kernel();
45053+ *(void **)&info->fbops->fb_sync = NULL;
45054+ pax_close_kernel();
45055 info->flags |= FBINFO_HWACCEL_DISABLED;
45056 }
45057
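
This atyfb hunk is the first of several framebuffer changes (mach64_cursor.c and fb_defio.c below follow the same shape) where the patch must write into an fb_ops table that its constification work leaves read-only at runtime. pax_open_kernel()/pax_close_kernel() briefly lift the kernel write protection around the store, and the *(void **)& cast performs the single-slot update despite the const qualifier. The generic idiom, as a sketch:

	pax_open_kernel();
	*(void **)&ops->callback = new_callback;	/* one deliberate write into .rodata */
	pax_close_kernel();

The mach64_cursor.c hunk additionally pulls in asm/pgtable.h, presumably because that is where the PaX open/close helpers are declared.
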
45058diff --git a/drivers/video/aty/mach64_cursor.c b/drivers/video/aty/mach64_cursor.c
45059index 95ec042..e6affdd 100644
45060--- a/drivers/video/aty/mach64_cursor.c
45061+++ b/drivers/video/aty/mach64_cursor.c
45062@@ -7,6 +7,7 @@
45063 #include <linux/string.h>
45064
45065 #include <asm/io.h>
45066+#include <asm/pgtable.h>
45067
45068 #ifdef __sparc__
45069 #include <asm/fbio.h>
45070@@ -208,7 +209,9 @@ int aty_init_cursor(struct fb_info *info)
45071 info->sprite.buf_align = 16; /* and 64 lines tall. */
45072 info->sprite.flags = FB_PIXMAP_IO;
45073
45074- info->fbops->fb_cursor = atyfb_cursor;
45075+ pax_open_kernel();
45076+ *(void **)&info->fbops->fb_cursor = atyfb_cursor;
45077+ pax_close_kernel();
45078
45079 return 0;
45080 }
45081diff --git a/drivers/video/backlight/kb3886_bl.c b/drivers/video/backlight/kb3886_bl.c
45082index bca6ccc..252107e 100644
45083--- a/drivers/video/backlight/kb3886_bl.c
45084+++ b/drivers/video/backlight/kb3886_bl.c
45085@@ -78,7 +78,7 @@ static struct kb3886bl_machinfo *bl_machinfo;
45086 static unsigned long kb3886bl_flags;
45087 #define KB3886BL_SUSPENDED 0x01
45088
45089-static struct dmi_system_id __initdata kb3886bl_device_table[] = {
45090+static const struct dmi_system_id __initconst kb3886bl_device_table[] = {
45091 {
45092 .ident = "Sahara Touch-iT",
45093 .matches = {
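
DMI match tables such as this one are consulted once at init time and never written, so the patch marks them const __initconst instead of __initdata. Beyond shrinking the writable footprint, this is the annotation gcc expects: const init-only objects belong in .init.rodata via __initconst, and tagging a const object __initdata can provoke section type conflicts. The resulting shape, sketched with hypothetical contents:

	static const struct dmi_system_id __initconst demo_dmi_table[] = {
		{
			.ident   = "Example Board",
			.matches = {
				DMI_MATCH(DMI_SYS_VENDOR, "Example Vendor"),
			},
		},
		{ }	/* zero-filled terminator */
	};
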
45094diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
45095index 900aa4e..6d49418 100644
45096--- a/drivers/video/fb_defio.c
45097+++ b/drivers/video/fb_defio.c
45098@@ -206,7 +206,9 @@ void fb_deferred_io_init(struct fb_info *info)
45099
45100 BUG_ON(!fbdefio);
45101 mutex_init(&fbdefio->lock);
45102- info->fbops->fb_mmap = fb_deferred_io_mmap;
45103+ pax_open_kernel();
45104+ *(void **)&info->fbops->fb_mmap = fb_deferred_io_mmap;
45105+ pax_close_kernel();
45106 INIT_DELAYED_WORK(&info->deferred_work, fb_deferred_io_work);
45107 INIT_LIST_HEAD(&fbdefio->pagelist);
45108 if (fbdefio->delay == 0) /* set a default of 1 s */
45109@@ -237,7 +239,7 @@ void fb_deferred_io_cleanup(struct fb_info *info)
45110 page->mapping = NULL;
45111 }
45112
45113- info->fbops->fb_mmap = NULL;
45114+ *(void **)&info->fbops->fb_mmap = NULL;
45115 mutex_destroy(&fbdefio->lock);
45116 }
45117 EXPORT_SYMBOL_GPL(fb_deferred_io_cleanup);
45118diff --git a/drivers/video/fbcmap.c b/drivers/video/fbcmap.c
45119index 5c3960d..15cf8fc 100644
45120--- a/drivers/video/fbcmap.c
45121+++ b/drivers/video/fbcmap.c
45122@@ -285,8 +285,7 @@ int fb_set_user_cmap(struct fb_cmap_user *cmap, struct fb_info *info)
45123 rc = -ENODEV;
45124 goto out;
45125 }
45126- if (cmap->start < 0 || (!info->fbops->fb_setcolreg &&
45127- !info->fbops->fb_setcmap)) {
45128+ if (!info->fbops->fb_setcolreg && !info->fbops->fb_setcmap) {
45129 rc = -EINVAL;
45130 goto out1;
45131 }
45132diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
45133index 098bfc6..796841d 100644
45134--- a/drivers/video/fbmem.c
45135+++ b/drivers/video/fbmem.c
45136@@ -428,7 +428,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
45137 image->dx += image->width + 8;
45138 }
45139 } else if (rotate == FB_ROTATE_UD) {
45140- for (x = 0; x < num && image->dx >= 0; x++) {
45141+ for (x = 0; x < num && (__s32)image->dx >= 0; x++) {
45142 info->fbops->fb_imageblit(info, image);
45143 image->dx -= image->width + 8;
45144 }
45145@@ -440,7 +440,7 @@ static void fb_do_show_logo(struct fb_info *info, struct fb_image *image,
45146 image->dy += image->height + 8;
45147 }
45148 } else if (rotate == FB_ROTATE_CCW) {
45149- for (x = 0; x < num && image->dy >= 0; x++) {
45150+ for (x = 0; x < num && (__s32)image->dy >= 0; x++) {
45151 info->fbops->fb_imageblit(info, image);
45152 image->dy -= image->height + 8;
45153 }
45154@@ -1166,7 +1166,7 @@ static long do_fb_ioctl(struct fb_info *info, unsigned int cmd,
45155 return -EFAULT;
45156 if (con2fb.console < 1 || con2fb.console > MAX_NR_CONSOLES)
45157 return -EINVAL;
45158- if (con2fb.framebuffer < 0 || con2fb.framebuffer >= FB_MAX)
45159+ if (con2fb.framebuffer >= FB_MAX)
45160 return -EINVAL;
45161 if (!registered_fb[con2fb.framebuffer])
45162 request_module("fb%d", con2fb.framebuffer);
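
The fbcmap.c and fbmem.c hunks above are all one class of fix: cmap->start, image->dx/dy and con2fb.framebuffer are unsigned, so the deleted < 0 tests could never fire, and the dx/dy loop guards written as >= 0 could never fail; fbmem.c instead compares through an __s32 cast so that unsigned wraparound actually terminates the loops. A compact illustration using the kernel's u32/__s32 types:

	static int demo_sees_wraparound(void)
	{
		u32 dx = 4;

		dx -= 100;		/* unsigned wraparound: dx is now huge    */
		/* (dx >= 0)        is always 1 and would loop forever       */
		return (__s32)dx >= 0;	/* 0: the signed view detects the wrap */
	}
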
45163diff --git a/drivers/video/i810/i810_accel.c b/drivers/video/i810/i810_accel.c
45164index 7672d2e..b56437f 100644
45165--- a/drivers/video/i810/i810_accel.c
45166+++ b/drivers/video/i810/i810_accel.c
45167@@ -73,6 +73,7 @@ static inline int wait_for_space(struct fb_info *info, u32 space)
45168 }
45169 }
45170 printk("ringbuffer lockup!!!\n");
45171+ printk("head:%u tail:%u iring.size:%u space:%u\n", head, tail, par->iring.size, space);
45172 i810_report_error(mmio);
45173 par->dev_flags |= LOCKUP;
45174 info->pixmap.scan_align = 1;
45175diff --git a/drivers/video/logo/logo_linux_clut224.ppm b/drivers/video/logo/logo_linux_clut224.ppm
45176index 3c14e43..eafa544 100644
45177--- a/drivers/video/logo/logo_linux_clut224.ppm
45178+++ b/drivers/video/logo/logo_linux_clut224.ppm
45179@@ -1,1604 +1,1123 @@
45180 P3
45181-# Standard 224-color Linux logo
45182 80 80
45183 255
 [~1,400 further rows of removed pixel triplets elided: raw image data for
 the old 80x80, 224-color Linux boot logo. Per the hunk header
 (-1,1604 +1,1123), the full patch deletes 1,604 such data rows and
 substitutes 1,123 replacement rows, which continue beyond this excerpt.]
46600-230 174 11 210 150 10 174 135 50 124 112 88
46601- 82 82 82 54 54 54 34 34 34 18 18 18
46602- 6 6 6 0 0 0 0 0 0 0 0 0
46603- 0 0 0 0 0 0 0 0 0 0 0 0
46604- 0 0 0 0 0 0 0 0 0 0 0 0
46605- 0 0 0 0 0 0 6 6 6 18 18 18
46606- 50 50 50 158 118 36 192 133 9 200 144 11
46607-216 158 10 219 162 10 224 166 10 226 170 11
46608-230 174 11 236 178 12 239 182 13 239 182 13
46609-242 186 14 246 186 14 246 190 14 246 190 14
46610-246 190 14 246 190 14 246 190 14 246 190 14
46611-246 186 14 230 174 11 210 150 10 163 110 8
46612-104 69 6 10 10 10 2 2 6 2 2 6
46613- 2 2 6 2 2 6 2 2 6 2 2 6
46614- 2 2 6 2 2 6 2 2 6 2 2 6
46615- 2 2 6 2 2 6 2 2 6 2 2 6
46616- 2 2 6 6 6 6 91 60 6 167 114 7
46617-206 145 10 230 174 11 242 186 14 246 190 14
46618-246 190 14 246 190 14 246 186 14 242 186 14
46619-239 182 13 230 174 11 224 166 10 213 154 11
46620-180 133 36 124 112 88 86 86 86 58 58 58
46621- 38 38 38 22 22 22 10 10 10 6 6 6
46622- 0 0 0 0 0 0 0 0 0 0 0 0
46623- 0 0 0 0 0 0 0 0 0 0 0 0
46624- 0 0 0 0 0 0 0 0 0 0 0 0
46625- 0 0 0 0 0 0 0 0 0 14 14 14
46626- 34 34 34 70 70 70 138 110 50 158 118 36
46627-167 114 7 180 123 7 192 133 9 197 138 11
46628-200 144 11 206 145 10 213 154 11 219 162 10
46629-224 166 10 230 174 11 239 182 13 242 186 14
46630-246 186 14 246 186 14 246 186 14 246 186 14
46631-239 182 13 216 158 10 185 133 11 152 99 6
46632-104 69 6 18 14 6 2 2 6 2 2 6
46633- 2 2 6 2 2 6 2 2 6 2 2 6
46634- 2 2 6 2 2 6 2 2 6 2 2 6
46635- 2 2 6 2 2 6 2 2 6 2 2 6
46636- 2 2 6 6 6 6 80 54 7 152 99 6
46637-192 133 9 219 162 10 236 178 12 239 182 13
46638-246 186 14 242 186 14 239 182 13 236 178 12
46639-224 166 10 206 145 10 192 133 9 154 121 60
46640- 94 94 94 62 62 62 42 42 42 22 22 22
46641- 14 14 14 6 6 6 0 0 0 0 0 0
46642- 0 0 0 0 0 0 0 0 0 0 0 0
46643- 0 0 0 0 0 0 0 0 0 0 0 0
46644- 0 0 0 0 0 0 0 0 0 0 0 0
46645- 0 0 0 0 0 0 0 0 0 6 6 6
46646- 18 18 18 34 34 34 58 58 58 78 78 78
46647-101 98 89 124 112 88 142 110 46 156 107 11
46648-163 110 8 167 114 7 175 118 6 180 123 7
46649-185 133 11 197 138 11 210 150 10 219 162 10
46650-226 170 11 236 178 12 236 178 12 234 174 13
46651-219 162 10 197 138 11 163 110 8 130 83 6
46652- 91 60 6 10 10 10 2 2 6 2 2 6
46653- 18 18 18 38 38 38 38 38 38 38 38 38
46654- 38 38 38 38 38 38 38 38 38 38 38 38
46655- 38 38 38 38 38 38 26 26 26 2 2 6
46656- 2 2 6 6 6 6 70 47 6 137 92 6
46657-175 118 6 200 144 11 219 162 10 230 174 11
46658-234 174 13 230 174 11 219 162 10 210 150 10
46659-192 133 9 163 110 8 124 112 88 82 82 82
46660- 50 50 50 30 30 30 14 14 14 6 6 6
46661- 0 0 0 0 0 0 0 0 0 0 0 0
46662- 0 0 0 0 0 0 0 0 0 0 0 0
46663- 0 0 0 0 0 0 0 0 0 0 0 0
46664- 0 0 0 0 0 0 0 0 0 0 0 0
46665- 0 0 0 0 0 0 0 0 0 0 0 0
46666- 6 6 6 14 14 14 22 22 22 34 34 34
46667- 42 42 42 58 58 58 74 74 74 86 86 86
46668-101 98 89 122 102 70 130 98 46 121 87 25
46669-137 92 6 152 99 6 163 110 8 180 123 7
46670-185 133 11 197 138 11 206 145 10 200 144 11
46671-180 123 7 156 107 11 130 83 6 104 69 6
46672- 50 34 6 54 54 54 110 110 110 101 98 89
46673- 86 86 86 82 82 82 78 78 78 78 78 78
46674- 78 78 78 78 78 78 78 78 78 78 78 78
46675- 78 78 78 82 82 82 86 86 86 94 94 94
46676-106 106 106 101 101 101 86 66 34 124 80 6
46677-156 107 11 180 123 7 192 133 9 200 144 11
46678-206 145 10 200 144 11 192 133 9 175 118 6
46679-139 102 15 109 106 95 70 70 70 42 42 42
46680- 22 22 22 10 10 10 0 0 0 0 0 0
46681- 0 0 0 0 0 0 0 0 0 0 0 0
46682- 0 0 0 0 0 0 0 0 0 0 0 0
46683- 0 0 0 0 0 0 0 0 0 0 0 0
46684- 0 0 0 0 0 0 0 0 0 0 0 0
46685- 0 0 0 0 0 0 0 0 0 0 0 0
46686- 0 0 0 0 0 0 6 6 6 10 10 10
46687- 14 14 14 22 22 22 30 30 30 38 38 38
46688- 50 50 50 62 62 62 74 74 74 90 90 90
46689-101 98 89 112 100 78 121 87 25 124 80 6
46690-137 92 6 152 99 6 152 99 6 152 99 6
46691-138 86 6 124 80 6 98 70 6 86 66 30
46692-101 98 89 82 82 82 58 58 58 46 46 46
46693- 38 38 38 34 34 34 34 34 34 34 34 34
46694- 34 34 34 34 34 34 34 34 34 34 34 34
46695- 34 34 34 34 34 34 38 38 38 42 42 42
46696- 54 54 54 82 82 82 94 86 76 91 60 6
46697-134 86 6 156 107 11 167 114 7 175 118 6
46698-175 118 6 167 114 7 152 99 6 121 87 25
46699-101 98 89 62 62 62 34 34 34 18 18 18
46700- 6 6 6 0 0 0 0 0 0 0 0 0
46701- 0 0 0 0 0 0 0 0 0 0 0 0
46702- 0 0 0 0 0 0 0 0 0 0 0 0
46703- 0 0 0 0 0 0 0 0 0 0 0 0
46704- 0 0 0 0 0 0 0 0 0 0 0 0
46705- 0 0 0 0 0 0 0 0 0 0 0 0
46706- 0 0 0 0 0 0 0 0 0 0 0 0
46707- 0 0 0 6 6 6 6 6 6 10 10 10
46708- 18 18 18 22 22 22 30 30 30 42 42 42
46709- 50 50 50 66 66 66 86 86 86 101 98 89
46710-106 86 58 98 70 6 104 69 6 104 69 6
46711-104 69 6 91 60 6 82 62 34 90 90 90
46712- 62 62 62 38 38 38 22 22 22 14 14 14
46713- 10 10 10 10 10 10 10 10 10 10 10 10
46714- 10 10 10 10 10 10 6 6 6 10 10 10
46715- 10 10 10 10 10 10 10 10 10 14 14 14
46716- 22 22 22 42 42 42 70 70 70 89 81 66
46717- 80 54 7 104 69 6 124 80 6 137 92 6
46718-134 86 6 116 81 8 100 82 52 86 86 86
46719- 58 58 58 30 30 30 14 14 14 6 6 6
46720- 0 0 0 0 0 0 0 0 0 0 0 0
46721- 0 0 0 0 0 0 0 0 0 0 0 0
46722- 0 0 0 0 0 0 0 0 0 0 0 0
46723- 0 0 0 0 0 0 0 0 0 0 0 0
46724- 0 0 0 0 0 0 0 0 0 0 0 0
46725- 0 0 0 0 0 0 0 0 0 0 0 0
46726- 0 0 0 0 0 0 0 0 0 0 0 0
46727- 0 0 0 0 0 0 0 0 0 0 0 0
46728- 0 0 0 6 6 6 10 10 10 14 14 14
46729- 18 18 18 26 26 26 38 38 38 54 54 54
46730- 70 70 70 86 86 86 94 86 76 89 81 66
46731- 89 81 66 86 86 86 74 74 74 50 50 50
46732- 30 30 30 14 14 14 6 6 6 0 0 0
46733- 0 0 0 0 0 0 0 0 0 0 0 0
46734- 0 0 0 0 0 0 0 0 0 0 0 0
46735- 0 0 0 0 0 0 0 0 0 0 0 0
46736- 6 6 6 18 18 18 34 34 34 58 58 58
46737- 82 82 82 89 81 66 89 81 66 89 81 66
46738- 94 86 66 94 86 76 74 74 74 50 50 50
46739- 26 26 26 14 14 14 6 6 6 0 0 0
46740- 0 0 0 0 0 0 0 0 0 0 0 0
46741- 0 0 0 0 0 0 0 0 0 0 0 0
46742- 0 0 0 0 0 0 0 0 0 0 0 0
46743- 0 0 0 0 0 0 0 0 0 0 0 0
46744- 0 0 0 0 0 0 0 0 0 0 0 0
46745- 0 0 0 0 0 0 0 0 0 0 0 0
46746- 0 0 0 0 0 0 0 0 0 0 0 0
46747- 0 0 0 0 0 0 0 0 0 0 0 0
46748- 0 0 0 0 0 0 0 0 0 0 0 0
46749- 6 6 6 6 6 6 14 14 14 18 18 18
46750- 30 30 30 38 38 38 46 46 46 54 54 54
46751- 50 50 50 42 42 42 30 30 30 18 18 18
46752- 10 10 10 0 0 0 0 0 0 0 0 0
46753- 0 0 0 0 0 0 0 0 0 0 0 0
46754- 0 0 0 0 0 0 0 0 0 0 0 0
46755- 0 0 0 0 0 0 0 0 0 0 0 0
46756- 0 0 0 6 6 6 14 14 14 26 26 26
46757- 38 38 38 50 50 50 58 58 58 58 58 58
46758- 54 54 54 42 42 42 30 30 30 18 18 18
46759- 10 10 10 0 0 0 0 0 0 0 0 0
46760- 0 0 0 0 0 0 0 0 0 0 0 0
46761- 0 0 0 0 0 0 0 0 0 0 0 0
46762- 0 0 0 0 0 0 0 0 0 0 0 0
46763- 0 0 0 0 0 0 0 0 0 0 0 0
46764- 0 0 0 0 0 0 0 0 0 0 0 0
46765- 0 0 0 0 0 0 0 0 0 0 0 0
46766- 0 0 0 0 0 0 0 0 0 0 0 0
46767- 0 0 0 0 0 0 0 0 0 0 0 0
46768- 0 0 0 0 0 0 0 0 0 0 0 0
46769- 0 0 0 0 0 0 0 0 0 6 6 6
46770- 6 6 6 10 10 10 14 14 14 18 18 18
46771- 18 18 18 14 14 14 10 10 10 6 6 6
46772- 0 0 0 0 0 0 0 0 0 0 0 0
46773- 0 0 0 0 0 0 0 0 0 0 0 0
46774- 0 0 0 0 0 0 0 0 0 0 0 0
46775- 0 0 0 0 0 0 0 0 0 0 0 0
46776- 0 0 0 0 0 0 0 0 0 6 6 6
46777- 14 14 14 18 18 18 22 22 22 22 22 22
46778- 18 18 18 14 14 14 10 10 10 6 6 6
46779- 0 0 0 0 0 0 0 0 0 0 0 0
46780- 0 0 0 0 0 0 0 0 0 0 0 0
46781- 0 0 0 0 0 0 0 0 0 0 0 0
46782- 0 0 0 0 0 0 0 0 0 0 0 0
46783- 0 0 0 0 0 0 0 0 0 0 0 0
[... several hundred '+' rows elided: this part of the hunk adds the replacement logo's pixel data, six RGB triplets per row; the added block continues beyond this excerpt ...]
47723+5 5 5 4 3 3 5 5 5 6 6 6 4 3 3 5 5 5
47724+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
47725+4 0 0 4 0 0 6 6 6 41 54 63 158 157 158 220 221 221
47726+220 221 221 220 221 221 193 200 203 193 200 203 193 200 203 190 197 201
47727+190 197 201 190 197 201 190 197 201 190 197 201 174 174 174 193 200 203
47728+193 200 203 220 221 221 174 174 174 125 124 125 37 38 37 4 0 0
47729+4 0 0 4 3 3 6 6 6 4 4 4 4 4 4 4 4 4
47730+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47731+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47732+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47733+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47734+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47735+4 4 4 4 4 4
47736+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47737+4 4 4 5 5 5 4 3 3 4 3 3 4 3 3 5 5 5
47738+4 3 3 6 6 6 5 5 5 4 3 3 6 6 6 6 6 6
47739+6 6 6 6 6 6 4 0 0 4 0 0 13 16 17 60 73 81
47740+174 174 174 220 221 221 220 221 221 205 212 215 190 197 201 174 174 174
47741+193 200 203 174 174 174 190 197 201 174 174 174 193 200 203 220 221 221
47742+193 200 203 131 129 131 37 38 37 6 6 6 4 0 0 4 0 0
47743+6 6 6 6 6 6 4 3 3 5 5 5 4 4 4 4 4 4
47744+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47745+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47746+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47747+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47748+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47749+4 4 4 4 4 4
47750+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47751+4 4 4 4 4 4 4 4 4 5 5 5 5 5 5 5 5 5
47752+5 5 5 4 3 3 4 3 3 5 5 5 4 3 3 4 3 3
47753+5 5 5 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
47754+6 6 6 125 124 125 174 174 174 220 221 221 220 221 221 193 200 203
47755+193 200 203 193 200 203 193 200 203 193 200 203 220 221 221 158 157 158
47756+60 73 81 6 6 6 4 0 0 4 0 0 5 5 5 6 6 6
47757+5 5 5 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
47758+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47759+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47760+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47761+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47762+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47763+4 4 4 4 4 4
47764+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47765+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47766+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
47767+5 5 5 5 5 5 6 6 6 6 6 6 4 0 0 4 0 0
47768+4 0 0 4 0 0 26 28 28 125 124 125 174 174 174 193 200 203
47769+193 200 203 174 174 174 193 200 203 167 166 167 125 124 125 6 6 6
47770+6 6 6 6 6 6 4 0 0 6 6 6 6 6 6 5 5 5
47771+4 3 3 5 5 5 4 4 4 4 4 4 4 4 4 4 4 4
47772+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47773+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47774+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47775+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47776+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47777+4 4 4 4 4 4
47778+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47779+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47780+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
47781+4 3 3 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
47782+6 6 6 4 0 0 4 0 0 6 6 6 37 38 37 125 124 125
47783+153 152 153 131 129 131 125 124 125 37 38 37 6 6 6 6 6 6
47784+6 6 6 4 0 0 6 6 6 6 6 6 4 3 3 5 5 5
47785+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47786+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47787+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47788+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47789+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47790+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47791+4 4 4 4 4 4
47792+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47793+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47794+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47795+4 4 4 5 5 5 5 5 5 4 3 3 5 5 5 4 3 3
47796+6 6 6 6 6 6 4 0 0 4 0 0 6 6 6 6 6 6
47797+24 26 27 24 26 27 6 6 6 6 6 6 6 6 6 4 0 0
47798+6 6 6 6 6 6 4 0 0 6 6 6 5 5 5 4 3 3
47799+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47800+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47801+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47802+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47803+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47804+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47805+4 4 4 4 4 4
47806+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47807+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47808+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47809+4 4 4 4 4 4 5 5 5 4 3 3 5 5 5 6 6 6
47810+4 0 0 6 6 6 6 6 6 6 6 6 6 6 6 6 6 6
47811+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 6 6 6
47812+4 0 0 6 6 6 6 6 6 4 3 3 5 5 5 4 4 4
47813+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47814+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47815+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47816+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47817+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47818+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47819+4 4 4 4 4 4
47820+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47821+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47822+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47823+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 5 5 5
47824+5 5 5 5 5 5 4 0 0 6 6 6 4 0 0 6 6 6
47825+6 6 6 6 6 6 6 6 6 4 0 0 6 6 6 4 0 0
47826+6 6 6 4 3 3 5 5 5 4 3 3 5 5 5 4 4 4
47827+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47828+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47829+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47830+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47831+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47832+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47833+4 4 4 4 4 4
47834+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47835+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47836+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47837+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 5 5 5
47838+4 3 3 6 6 6 4 3 3 6 6 6 6 6 6 6 6 6
47839+4 0 0 6 6 6 4 0 0 6 6 6 6 6 6 6 6 6
47840+6 6 6 4 3 3 5 5 5 4 4 4 4 4 4 4 4 4
47841+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47842+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47843+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47844+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47845+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47846+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47847+4 4 4 4 4 4
47848+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47849+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47850+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47851+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47852+4 4 4 5 5 5 4 3 3 5 5 5 4 0 0 6 6 6
47853+6 6 6 4 0 0 6 6 6 6 6 6 4 0 0 6 6 6
47854+4 3 3 5 5 5 5 5 5 4 4 4 4 4 4 4 4 4
47855+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47856+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47857+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47858+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47859+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47860+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47861+4 4 4 4 4 4
47862+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47863+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47864+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47865+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47866+4 4 4 5 5 5 4 3 3 5 5 5 6 6 6 4 3 3
47867+4 3 3 6 6 6 6 6 6 4 3 3 6 6 6 4 3 3
47868+5 5 5 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47869+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47870+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47871+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47872+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47873+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47874+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47875+4 4 4 4 4 4
47876+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47877+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47878+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47879+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47880+4 4 4 4 4 4 4 4 4 5 5 5 4 3 3 6 6 6
47881+5 5 5 4 3 3 4 3 3 4 3 3 5 5 5 5 5 5
47882+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47883+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47884+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47885+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47886+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47887+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47888+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47889+4 4 4 4 4 4
47890+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47891+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47892+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47893+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47894+4 4 4 4 4 4 4 4 4 4 4 4 5 5 5 4 3 3
47895+5 5 5 4 3 3 5 5 5 5 5 5 4 4 4 4 4 4
47896+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47897+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47898+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47899+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47900+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47901+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47902+4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4 4
47903+4 4 4 4 4 4
47904diff --git a/drivers/video/mb862xx/mb862xxfb_accel.c b/drivers/video/mb862xx/mb862xxfb_accel.c
47905index fe92eed..106e085 100644
47906--- a/drivers/video/mb862xx/mb862xxfb_accel.c
47907+++ b/drivers/video/mb862xx/mb862xxfb_accel.c
47908@@ -312,14 +312,18 @@ void mb862xxfb_init_accel(struct fb_info *info, int xres)
47909 struct mb862xxfb_par *par = info->par;
47910
47911 if (info->var.bits_per_pixel == 32) {
47912- info->fbops->fb_fillrect = cfb_fillrect;
47913- info->fbops->fb_copyarea = cfb_copyarea;
47914- info->fbops->fb_imageblit = cfb_imageblit;
47915+ pax_open_kernel();
47916+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
47917+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
47918+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
47919+ pax_close_kernel();
47920 } else {
47921 outreg(disp, GC_L0EM, 3);
47922- info->fbops->fb_fillrect = mb86290fb_fillrect;
47923- info->fbops->fb_copyarea = mb86290fb_copyarea;
47924- info->fbops->fb_imageblit = mb86290fb_imageblit;
47925+ pax_open_kernel();
47926+ *(void **)&info->fbops->fb_fillrect = mb86290fb_fillrect;
47927+ *(void **)&info->fbops->fb_copyarea = mb86290fb_copyarea;
47928+ *(void **)&info->fbops->fb_imageblit = mb86290fb_imageblit;
47929+ pax_close_kernel();
47930 }
47931 outreg(draw, GDC_REG_DRAW_BASE, 0);
47932 outreg(draw, GDC_REG_MODE_MISC, 0x8000);
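
The mb862xx hunk above, and the nvidia, s1d13xxx, smscufx, udlfb, uvesafb and vesafb hunks that follow, all apply one recurring PaX idiom: under CONFIG_PAX_KERNEXEC the fb_ops tables are constified into read-only memory, so a plain store through info->fbops would fault. Each assignment is therefore bracketed by pax_open_kernel()/pax_close_kernel() and written through a *(void **)& cast to bypass the const qualifier. A minimal sketch of the idiom — pax_open_kernel()/pax_close_kernel() are PaX-only primitives, and set_sw_ops() is a hypothetical helper:

    /* Sketch: patching a read-only function-pointer table under KERNEXEC. */
    #include <linux/fb.h>

    static void set_sw_ops(struct fb_info *info)
    {
            pax_open_kernel();      /* temporarily lift write protection */
            *(void **)&info->fbops->fb_fillrect  = cfb_fillrect;
            *(void **)&info->fbops->fb_copyarea  = cfb_copyarea;
            *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
            pax_close_kernel();     /* restore the read-only mapping */
    }
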
47933diff --git a/drivers/video/nvidia/nvidia.c b/drivers/video/nvidia/nvidia.c
47934index ff22871..b129bed 100644
47935--- a/drivers/video/nvidia/nvidia.c
47936+++ b/drivers/video/nvidia/nvidia.c
47937@@ -669,19 +669,23 @@ static int nvidiafb_set_par(struct fb_info *info)
47938 info->fix.line_length = (info->var.xres_virtual *
47939 info->var.bits_per_pixel) >> 3;
47940 if (info->var.accel_flags) {
47941- info->fbops->fb_imageblit = nvidiafb_imageblit;
47942- info->fbops->fb_fillrect = nvidiafb_fillrect;
47943- info->fbops->fb_copyarea = nvidiafb_copyarea;
47944- info->fbops->fb_sync = nvidiafb_sync;
47945+ pax_open_kernel();
47946+ *(void **)&info->fbops->fb_imageblit = nvidiafb_imageblit;
47947+ *(void **)&info->fbops->fb_fillrect = nvidiafb_fillrect;
47948+ *(void **)&info->fbops->fb_copyarea = nvidiafb_copyarea;
47949+ *(void **)&info->fbops->fb_sync = nvidiafb_sync;
47950+ pax_close_kernel();
47951 info->pixmap.scan_align = 4;
47952 info->flags &= ~FBINFO_HWACCEL_DISABLED;
47953 info->flags |= FBINFO_READS_FAST;
47954 NVResetGraphics(info);
47955 } else {
47956- info->fbops->fb_imageblit = cfb_imageblit;
47957- info->fbops->fb_fillrect = cfb_fillrect;
47958- info->fbops->fb_copyarea = cfb_copyarea;
47959- info->fbops->fb_sync = NULL;
47960+ pax_open_kernel();
47961+ *(void **)&info->fbops->fb_imageblit = cfb_imageblit;
47962+ *(void **)&info->fbops->fb_fillrect = cfb_fillrect;
47963+ *(void **)&info->fbops->fb_copyarea = cfb_copyarea;
47964+ *(void **)&info->fbops->fb_sync = NULL;
47965+ pax_close_kernel();
47966 info->pixmap.scan_align = 1;
47967 info->flags |= FBINFO_HWACCEL_DISABLED;
47968 info->flags &= ~FBINFO_READS_FAST;
47969@@ -1173,8 +1177,11 @@ static int nvidia_set_fbinfo(struct fb_info *info)
47970 info->pixmap.size = 8 * 1024;
47971 info->pixmap.flags = FB_PIXMAP_SYSTEM;
47972
47973- if (!hwcur)
47974- info->fbops->fb_cursor = NULL;
47975+ if (!hwcur) {
47976+ pax_open_kernel();
47977+ *(void **)&info->fbops->fb_cursor = NULL;
47978+ pax_close_kernel();
47979+ }
47980
47981 info->var.accel_flags = (!noaccel);
47982
47983diff --git a/drivers/video/output.c b/drivers/video/output.c
47984index 0d6f2cd..6285b97 100644
47985--- a/drivers/video/output.c
47986+++ b/drivers/video/output.c
47987@@ -97,7 +97,7 @@ struct output_device *video_output_register(const char *name,
47988 new_dev->props = op;
47989 new_dev->dev.class = &video_output_class;
47990 new_dev->dev.parent = dev;
47991- dev_set_name(&new_dev->dev, name);
47992+ dev_set_name(&new_dev->dev, "%s", name);
47993 dev_set_drvdata(&new_dev->dev, devdata);
47994 ret_code = device_register(&new_dev->dev);
47995 if (ret_code) {
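
The output.c change is a different class of fix: dev_set_name() takes a printf-style format string, so passing the caller-supplied name directly would interpret any % sequences it contains; the fix routes it through a literal "%s". The same rule holds for every varargs formatter. A small userspace illustration (set_label() is a hypothetical name):

    #include <stdio.h>

    static void set_label_bad(const char *label)
    {
            printf(label);          /* label becomes the format string:
                                     * any "%n", "%s", ... is interpreted */
    }

    static void set_label(const char *label)
    {
            printf("%s", label);    /* the only format string is a literal */
    }

    int main(void)
    {
            set_label("100%n done");        /* printed verbatim, no surprises */
            return 0;
    }
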
47996diff --git a/drivers/video/s1d13xxxfb.c b/drivers/video/s1d13xxxfb.c
47997index 05c2dc3..ea1f391 100644
47998--- a/drivers/video/s1d13xxxfb.c
47999+++ b/drivers/video/s1d13xxxfb.c
48000@@ -881,8 +881,10 @@ static int s1d13xxxfb_probe(struct platform_device *pdev)
48001
48002 switch(prod_id) {
48003 case S1D13506_PROD_ID: /* activate acceleration */
48004- s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
48005- s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
48006+ pax_open_kernel();
48007+ *(void **)&s1d13xxxfb_fbops.fb_fillrect = s1d13xxxfb_bitblt_solidfill;
48008+ *(void **)&s1d13xxxfb_fbops.fb_copyarea = s1d13xxxfb_bitblt_copyarea;
48009+ pax_close_kernel();
48010 info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_YPAN |
48011 FBINFO_HWACCEL_FILLRECT | FBINFO_HWACCEL_COPYAREA;
48012 break;
48013diff --git a/drivers/video/smscufx.c b/drivers/video/smscufx.c
48014index b2b33fc..f9f4658 100644
48015--- a/drivers/video/smscufx.c
48016+++ b/drivers/video/smscufx.c
48017@@ -1175,7 +1175,9 @@ static int ufx_ops_release(struct fb_info *info, int user)
48018 fb_deferred_io_cleanup(info);
48019 kfree(info->fbdefio);
48020 info->fbdefio = NULL;
48021- info->fbops->fb_mmap = ufx_ops_mmap;
48022+ pax_open_kernel();
48023+ *(void **)&info->fbops->fb_mmap = ufx_ops_mmap;
48024+ pax_close_kernel();
48025 }
48026
48027 pr_debug("released /dev/fb%d user=%d count=%d",
48028diff --git a/drivers/video/udlfb.c b/drivers/video/udlfb.c
48029index ec03e72..f578436 100644
48030--- a/drivers/video/udlfb.c
48031+++ b/drivers/video/udlfb.c
48032@@ -623,11 +623,11 @@ int dlfb_handle_damage(struct dlfb_data *dev, int x, int y,
48033 dlfb_urb_completion(urb);
48034
48035 error:
48036- atomic_add(bytes_sent, &dev->bytes_sent);
48037- atomic_add(bytes_identical, &dev->bytes_identical);
48038- atomic_add(width*height*2, &dev->bytes_rendered);
48039+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
48040+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
48041+ atomic_add_unchecked(width*height*2, &dev->bytes_rendered);
48042 end_cycles = get_cycles();
48043- atomic_add(((unsigned int) ((end_cycles - start_cycles)
48044+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
48045 >> 10)), /* Kcycles */
48046 &dev->cpu_kcycles_used);
48047
48048@@ -748,11 +748,11 @@ static void dlfb_dpy_deferred_io(struct fb_info *info,
48049 dlfb_urb_completion(urb);
48050
48051 error:
48052- atomic_add(bytes_sent, &dev->bytes_sent);
48053- atomic_add(bytes_identical, &dev->bytes_identical);
48054- atomic_add(bytes_rendered, &dev->bytes_rendered);
48055+ atomic_add_unchecked(bytes_sent, &dev->bytes_sent);
48056+ atomic_add_unchecked(bytes_identical, &dev->bytes_identical);
48057+ atomic_add_unchecked(bytes_rendered, &dev->bytes_rendered);
48058 end_cycles = get_cycles();
48059- atomic_add(((unsigned int) ((end_cycles - start_cycles)
48060+ atomic_add_unchecked(((unsigned int) ((end_cycles - start_cycles)
48061 >> 10)), /* Kcycles */
48062 &dev->cpu_kcycles_used);
48063 }
48064@@ -993,7 +993,9 @@ static int dlfb_ops_release(struct fb_info *info, int user)
48065 fb_deferred_io_cleanup(info);
48066 kfree(info->fbdefio);
48067 info->fbdefio = NULL;
48068- info->fbops->fb_mmap = dlfb_ops_mmap;
48069+ pax_open_kernel();
48070+ *(void **)&info->fbops->fb_mmap = dlfb_ops_mmap;
48071+ pax_close_kernel();
48072 }
48073
48074 pr_warn("released /dev/fb%d user=%d count=%d\n",
48075@@ -1376,7 +1378,7 @@ static ssize_t metrics_bytes_rendered_show(struct device *fbdev,
48076 struct fb_info *fb_info = dev_get_drvdata(fbdev);
48077 struct dlfb_data *dev = fb_info->par;
48078 return snprintf(buf, PAGE_SIZE, "%u\n",
48079- atomic_read(&dev->bytes_rendered));
48080+ atomic_read_unchecked(&dev->bytes_rendered));
48081 }
48082
48083 static ssize_t metrics_bytes_identical_show(struct device *fbdev,
48084@@ -1384,7 +1386,7 @@ static ssize_t metrics_bytes_identical_show(struct device *fbdev,
48085 struct fb_info *fb_info = dev_get_drvdata(fbdev);
48086 struct dlfb_data *dev = fb_info->par;
48087 return snprintf(buf, PAGE_SIZE, "%u\n",
48088- atomic_read(&dev->bytes_identical));
48089+ atomic_read_unchecked(&dev->bytes_identical));
48090 }
48091
48092 static ssize_t metrics_bytes_sent_show(struct device *fbdev,
48093@@ -1392,7 +1394,7 @@ static ssize_t metrics_bytes_sent_show(struct device *fbdev,
48094 struct fb_info *fb_info = dev_get_drvdata(fbdev);
48095 struct dlfb_data *dev = fb_info->par;
48096 return snprintf(buf, PAGE_SIZE, "%u\n",
48097- atomic_read(&dev->bytes_sent));
48098+ atomic_read_unchecked(&dev->bytes_sent));
48099 }
48100
48101 static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
48102@@ -1400,7 +1402,7 @@ static ssize_t metrics_cpu_kcycles_used_show(struct device *fbdev,
48103 struct fb_info *fb_info = dev_get_drvdata(fbdev);
48104 struct dlfb_data *dev = fb_info->par;
48105 return snprintf(buf, PAGE_SIZE, "%u\n",
48106- atomic_read(&dev->cpu_kcycles_used));
48107+ atomic_read_unchecked(&dev->cpu_kcycles_used));
48108 }
48109
48110 static ssize_t edid_show(
48111@@ -1460,10 +1462,10 @@ static ssize_t metrics_reset_store(struct device *fbdev,
48112 struct fb_info *fb_info = dev_get_drvdata(fbdev);
48113 struct dlfb_data *dev = fb_info->par;
48114
48115- atomic_set(&dev->bytes_rendered, 0);
48116- atomic_set(&dev->bytes_identical, 0);
48117- atomic_set(&dev->bytes_sent, 0);
48118- atomic_set(&dev->cpu_kcycles_used, 0);
48119+ atomic_set_unchecked(&dev->bytes_rendered, 0);
48120+ atomic_set_unchecked(&dev->bytes_identical, 0);
48121+ atomic_set_unchecked(&dev->bytes_sent, 0);
48122+ atomic_set_unchecked(&dev->cpu_kcycles_used, 0);
48123
48124 return count;
48125 }
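
The udlfb conversions are part of PaX's REFCOUNT hardening: plain atomic_t operations gain overflow detection (an overflowing reference count is treated as an attack), so counters that are allowed to wrap — pure statistics such as bytes_sent or cpu_kcycles_used — are moved to the atomic_*_unchecked() family, which keeps ordinary two's-complement wrapping. Roughly (atomic_unchecked_t exists only in the PaX patch; the stats struct here is hypothetical):

    /* atomic_t           - overflow detected; suitable for refcounts.
     * atomic_unchecked_t - wraps silently, like mainline atomic_t;
     *                      for counters where wraparound is harmless. */
    struct fb_stats {
            atomic_unchecked_t bytes_sent;  /* metric: may wrap */
            atomic_t           refs;        /* lifetime: must not wrap */
    };

    static void account(struct fb_stats *s, int n)
    {
            atomic_add_unchecked(n, &s->bytes_sent);
    }
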
48126diff --git a/drivers/video/uvesafb.c b/drivers/video/uvesafb.c
48127index e328a61..1b08ecb 100644
48128--- a/drivers/video/uvesafb.c
48129+++ b/drivers/video/uvesafb.c
48130@@ -19,6 +19,7 @@
48131 #include <linux/io.h>
48132 #include <linux/mutex.h>
48133 #include <linux/slab.h>
48134+#include <linux/moduleloader.h>
48135 #include <video/edid.h>
48136 #include <video/uvesafb.h>
48137 #ifdef CONFIG_X86
48138@@ -569,10 +570,32 @@ static int uvesafb_vbe_getpmi(struct uvesafb_ktask *task,
48139 if ((task->t.regs.eax & 0xffff) != 0x4f || task->t.regs.es < 0xc000) {
48140 par->pmi_setpal = par->ypan = 0;
48141 } else {
48142+
48143+#ifdef CONFIG_PAX_KERNEXEC
48144+#ifdef CONFIG_MODULES
48145+ par->pmi_code = module_alloc_exec((u16)task->t.regs.ecx);
48146+#endif
48147+ if (!par->pmi_code) {
48148+ par->pmi_setpal = par->ypan = 0;
48149+ return 0;
48150+ }
48151+#endif
48152+
48153 par->pmi_base = (u16 *)phys_to_virt(((u32)task->t.regs.es << 4)
48154 + task->t.regs.edi);
48155+
48156+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
48157+ pax_open_kernel();
48158+ memcpy(par->pmi_code, par->pmi_base, (u16)task->t.regs.ecx);
48159+ pax_close_kernel();
48160+
48161+ par->pmi_start = ktva_ktla(par->pmi_code + par->pmi_base[1]);
48162+ par->pmi_pal = ktva_ktla(par->pmi_code + par->pmi_base[2]);
48163+#else
48164 par->pmi_start = (u8 *)par->pmi_base + par->pmi_base[1];
48165 par->pmi_pal = (u8 *)par->pmi_base + par->pmi_base[2];
48166+#endif
48167+
48168 printk(KERN_INFO "uvesafb: protected mode interface info at "
48169 "%04x:%04x\n",
48170 (u16)task->t.regs.es, (u16)task->t.regs.edi);
48171@@ -817,13 +840,14 @@ static int uvesafb_vbe_init(struct fb_info *info)
48172 par->ypan = ypan;
48173
48174 if (par->pmi_setpal || par->ypan) {
48175+#if !defined(CONFIG_MODULES) || !defined(CONFIG_PAX_KERNEXEC)
48176 if (__supported_pte_mask & _PAGE_NX) {
48177 par->pmi_setpal = par->ypan = 0;
48178 printk(KERN_WARNING "uvesafb: NX protection is actively."
48179 "We have better not to use the PMI.\n");
48180- } else {
48181+ } else
48182+#endif
48183 uvesafb_vbe_getpmi(task, par);
48184- }
48185 }
48186 #else
48187 /* The protected mode interface is not available on non-x86. */
48188@@ -1457,8 +1481,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
48189 info->fix.ywrapstep = (par->ypan > 1) ? 1 : 0;
48190
48191 /* Disable blanking if the user requested so. */
48192- if (!blank)
48193- info->fbops->fb_blank = NULL;
48194+ if (!blank) {
48195+ pax_open_kernel();
48196+ *(void **)&info->fbops->fb_blank = NULL;
48197+ pax_close_kernel();
48198+ }
48199
48200 /*
48201 * Find out how much IO memory is required for the mode with
48202@@ -1534,8 +1561,11 @@ static void uvesafb_init_info(struct fb_info *info, struct vbe_mode_ib *mode)
48203 info->flags = FBINFO_FLAG_DEFAULT |
48204 (par->ypan ? FBINFO_HWACCEL_YPAN : 0);
48205
48206- if (!par->ypan)
48207- info->fbops->fb_pan_display = NULL;
48208+ if (!par->ypan) {
48209+ pax_open_kernel();
48210+ *(void **)&info->fbops->fb_pan_display = NULL;
48211+ pax_close_kernel();
48212+ }
48213 }
48214
48215 static void uvesafb_init_mtrr(struct fb_info *info)
48216@@ -1836,6 +1866,11 @@ out:
48217 if (par->vbe_modes)
48218 kfree(par->vbe_modes);
48219
48220+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
48221+ if (par->pmi_code)
48222+ module_free_exec(NULL, par->pmi_code);
48223+#endif
48224+
48225 framebuffer_release(info);
48226 return err;
48227 }
48228@@ -1862,6 +1897,12 @@ static int uvesafb_remove(struct platform_device *dev)
48229 kfree(par->vbe_state_orig);
48230 if (par->vbe_state_saved)
48231 kfree(par->vbe_state_saved);
48232+
48233+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
48234+ if (par->pmi_code)
48235+ module_free_exec(NULL, par->pmi_code);
48236+#endif
48237+
48238 }
48239
48240 framebuffer_release(info);
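
The uvesafb changes (and the vesafb ones just below) handle the x86 BIOS protected-mode interface under KERNEXEC: the PMI entry points normally live in writable low memory, which KERNEXEC refuses to execute, so the patch copies the PMI code into an executable module allocation, rewrites the entry pointers through ktva_ktla(), and releases the copy with module_free_exec() on every error and remove path. Condensed shape of the flow — module_alloc_exec(), module_free_exec(), ktva_ktla() and pax_open_kernel() are all PaX-only helpers:

    u8 *pmi_code = module_alloc_exec(len);  /* executable allocation */
    if (!pmi_code)
            return 0;                       /* no PMI; fall back gracefully */

    pax_open_kernel();
    memcpy(pmi_code, pmi_base, len);        /* relocate the BIOS code */
    pax_close_kernel();

    par->pmi_start = ktva_ktla(pmi_code + pmi_base[1]);
    par->pmi_pal   = ktva_ktla(pmi_code + pmi_base[2]);
    /* ... and on failure or driver removal: */
    module_free_exec(NULL, pmi_code);
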
48241diff --git a/drivers/video/vesafb.c b/drivers/video/vesafb.c
48242index 501b340..d80aa17 100644
48243--- a/drivers/video/vesafb.c
48244+++ b/drivers/video/vesafb.c
48245@@ -9,6 +9,7 @@
48246 */
48247
48248 #include <linux/module.h>
48249+#include <linux/moduleloader.h>
48250 #include <linux/kernel.h>
48251 #include <linux/errno.h>
48252 #include <linux/string.h>
48253@@ -52,8 +53,8 @@ static int vram_remap __initdata; /* Set amount of memory to be used */
48254 static int vram_total __initdata; /* Set total amount of memory */
48255 static int pmi_setpal __read_mostly = 1; /* pmi for palette changes ??? */
48256 static int ypan __read_mostly; /* 0..nothing, 1..ypan, 2..ywrap */
48257-static void (*pmi_start)(void) __read_mostly;
48258-static void (*pmi_pal) (void) __read_mostly;
48259+static void (*pmi_start)(void) __read_only;
48260+static void (*pmi_pal) (void) __read_only;
48261 static int depth __read_mostly;
48262 static int vga_compat __read_mostly;
48263 /* --------------------------------------------------------------------- */
48264@@ -233,6 +234,7 @@ static int __init vesafb_probe(struct platform_device *dev)
48265 unsigned int size_vmode;
48266 unsigned int size_remap;
48267 unsigned int size_total;
48268+ void *pmi_code = NULL;
48269
48270 if (screen_info.orig_video_isVGA != VIDEO_TYPE_VLFB)
48271 return -ENODEV;
48272@@ -275,10 +277,6 @@ static int __init vesafb_probe(struct platform_device *dev)
48273 size_remap = size_total;
48274 vesafb_fix.smem_len = size_remap;
48275
48276-#ifndef __i386__
48277- screen_info.vesapm_seg = 0;
48278-#endif
48279-
48280 if (!request_mem_region(vesafb_fix.smem_start, size_total, "vesafb")) {
48281 printk(KERN_WARNING
48282 "vesafb: cannot reserve video memory at 0x%lx\n",
48283@@ -307,9 +305,21 @@ static int __init vesafb_probe(struct platform_device *dev)
48284 printk(KERN_INFO "vesafb: mode is %dx%dx%d, linelength=%d, pages=%d\n",
48285 vesafb_defined.xres, vesafb_defined.yres, vesafb_defined.bits_per_pixel, vesafb_fix.line_length, screen_info.pages);
48286
48287+#ifdef __i386__
48288+
48289+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
48290+ pmi_code = module_alloc_exec(screen_info.vesapm_size);
48291+ if (!pmi_code)
48292+#elif !defined(CONFIG_PAX_KERNEXEC)
48293+ if (0)
48294+#endif
48295+
48296+#endif
48297+ screen_info.vesapm_seg = 0;
48298+
48299 if (screen_info.vesapm_seg) {
48300- printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x\n",
48301- screen_info.vesapm_seg,screen_info.vesapm_off);
48302+ printk(KERN_INFO "vesafb: protected mode interface info at %04x:%04x %04x bytes\n",
48303+ screen_info.vesapm_seg,screen_info.vesapm_off,screen_info.vesapm_size);
48304 }
48305
48306 if (screen_info.vesapm_seg < 0xc000)
48307@@ -317,9 +327,25 @@ static int __init vesafb_probe(struct platform_device *dev)
48308
48309 if (ypan || pmi_setpal) {
48310 unsigned short *pmi_base;
48311+
48312 pmi_base = (unsigned short*)phys_to_virt(((unsigned long)screen_info.vesapm_seg << 4) + screen_info.vesapm_off);
48313- pmi_start = (void*)((char*)pmi_base + pmi_base[1]);
48314- pmi_pal = (void*)((char*)pmi_base + pmi_base[2]);
48315+
48316+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
48317+ pax_open_kernel();
48318+ memcpy(pmi_code, pmi_base, screen_info.vesapm_size);
48319+#else
48320+ pmi_code = pmi_base;
48321+#endif
48322+
48323+ pmi_start = (void*)((char*)pmi_code + pmi_base[1]);
48324+ pmi_pal = (void*)((char*)pmi_code + pmi_base[2]);
48325+
48326+#if defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
48327+ pmi_start = ktva_ktla(pmi_start);
48328+ pmi_pal = ktva_ktla(pmi_pal);
48329+ pax_close_kernel();
48330+#endif
48331+
48332 printk(KERN_INFO "vesafb: pmi: set display start = %p, set palette = %p\n",pmi_start,pmi_pal);
48333 if (pmi_base[3]) {
48334 printk(KERN_INFO "vesafb: pmi: ports = ");
48335@@ -472,8 +498,11 @@ static int __init vesafb_probe(struct platform_device *dev)
48336 info->flags = FBINFO_FLAG_DEFAULT | FBINFO_MISC_FIRMWARE |
48337 (ypan ? FBINFO_HWACCEL_YPAN : 0);
48338
48339- if (!ypan)
48340- info->fbops->fb_pan_display = NULL;
48341+ if (!ypan) {
48342+ pax_open_kernel();
48343+ *(void **)&info->fbops->fb_pan_display = NULL;
48344+ pax_close_kernel();
48345+ }
48346
48347 if (fb_alloc_cmap(&info->cmap, 256, 0) < 0) {
48348 err = -ENOMEM;
48349@@ -488,6 +517,11 @@ static int __init vesafb_probe(struct platform_device *dev)
48350 info->node, info->fix.id);
48351 return 0;
48352 err:
48353+
48354+#if defined(__i386__) && defined(CONFIG_MODULES) && defined(CONFIG_PAX_KERNEXEC)
48355+ module_free_exec(NULL, pmi_code);
48356+#endif
48357+
48358 if (info->screen_base)
48359 iounmap(info->screen_base);
48360 framebuffer_release(info);
48361diff --git a/drivers/video/via/via_clock.h b/drivers/video/via/via_clock.h
48362index 88714ae..16c2e11 100644
48363--- a/drivers/video/via/via_clock.h
48364+++ b/drivers/video/via/via_clock.h
48365@@ -56,7 +56,7 @@ struct via_clock {
48366
48367 void (*set_engine_pll_state)(u8 state);
48368 void (*set_engine_pll)(struct via_pll_config config);
48369-};
48370+} __no_const;
48371
48372
48373 static inline u32 get_pll_internal_frequency(u32 ref_freq,
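
via_clock.h shows the companion annotation to the fbops edits above: the PaX constify GCC plugin automatically treats structures consisting only of function pointers as const and places them in read-only memory. Where a driver legitimately fills such a struct in at runtime, the patch marks the type __no_const to opt it out. Roughly:

    /* Under the constify plugin this struct would be forced const;
     * __no_const (a PaX-only attribute) opts it out because its
     * members are assigned at probe time. */
    struct via_clock {
            void (*set_engine_pll_state)(u8 state);
            /* ... */
    } __no_const;
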
48374diff --git a/drivers/xen/xenfs/xenstored.c b/drivers/xen/xenfs/xenstored.c
48375index fef20db..d28b1ab 100644
48376--- a/drivers/xen/xenfs/xenstored.c
48377+++ b/drivers/xen/xenfs/xenstored.c
48378@@ -24,7 +24,12 @@ static int xsd_release(struct inode *inode, struct file *file)
48379 static int xsd_kva_open(struct inode *inode, struct file *file)
48380 {
48381 file->private_data = (void *)kasprintf(GFP_KERNEL, "0x%p",
48382+#ifdef CONFIG_GRKERNSEC_HIDESYM
48383+ NULL);
48384+#else
48385 xen_store_interface);
48386+#endif
48387+
48388 if (!file->private_data)
48389 return -ENOMEM;
48390 return 0;
48391diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
48392index 055562c..fdfb10d 100644
48393--- a/fs/9p/vfs_addr.c
48394+++ b/fs/9p/vfs_addr.c
48395@@ -186,7 +186,7 @@ static int v9fs_vfs_writepage_locked(struct page *page)
48396
48397 retval = v9fs_file_write_internal(inode,
48398 v9inode->writeback_fid,
48399- (__force const char __user *)buffer,
48400+ (const char __force_user *)buffer,
48401 len, &offset, 0);
48402 if (retval > 0)
48403 retval = 0;
48404diff --git a/fs/9p/vfs_inode.c b/fs/9p/vfs_inode.c
48405index d86edc8..40ff2fb 100644
48406--- a/fs/9p/vfs_inode.c
48407+++ b/fs/9p/vfs_inode.c
48408@@ -1314,7 +1314,7 @@ static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
48409 void
48410 v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
48411 {
48412- char *s = nd_get_link(nd);
48413+ const char *s = nd_get_link(nd);
48414
48415 p9_debug(P9_DEBUG_VFS, " %s %s\n",
48416 dentry->d_name.name, IS_ERR(s) ? "<error>" : s);
48417diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
48418index 370b24c..ff0be7b 100644
48419--- a/fs/Kconfig.binfmt
48420+++ b/fs/Kconfig.binfmt
48421@@ -103,7 +103,7 @@ config HAVE_AOUT
48422
48423 config BINFMT_AOUT
48424 tristate "Kernel support for a.out and ECOFF binaries"
48425- depends on HAVE_AOUT
48426+ depends on HAVE_AOUT && BROKEN
48427 ---help---
48428 A.out (Assembler.OUTput) is a set of formats for libraries and
48429 executables used in the earliest versions of UNIX. Linux used
48430diff --git a/fs/aio.c b/fs/aio.c
48431index 2bbcacf..8614116 100644
48432--- a/fs/aio.c
48433+++ b/fs/aio.c
48434@@ -160,7 +160,7 @@ static int aio_setup_ring(struct kioctx *ctx)
48435 size += sizeof(struct io_event) * nr_events;
48436 nr_pages = (size + PAGE_SIZE-1) >> PAGE_SHIFT;
48437
48438- if (nr_pages < 0)
48439+ if (nr_pages <= 0)
48440 return -EINVAL;
48441
48442 nr_events = (PAGE_SIZE * nr_pages - sizeof(struct aio_ring)) / sizeof(struct io_event);
48443@@ -950,6 +950,7 @@ static ssize_t aio_rw_vect_retry(struct kiocb *iocb, int rw, aio_rw_op *rw_op)
48444 static ssize_t aio_setup_vectored_rw(int rw, struct kiocb *kiocb, bool compat)
48445 {
48446 ssize_t ret;
48447+ struct iovec iovstack;
48448
48449 kiocb->ki_nr_segs = kiocb->ki_nbytes;
48450
48451@@ -957,17 +958,22 @@ static ssize_t aio_setup_vectored_rw(int rw, struct kiocb *kiocb, bool compat)
48452 if (compat)
48453 ret = compat_rw_copy_check_uvector(rw,
48454 (struct compat_iovec __user *)kiocb->ki_buf,
48455- kiocb->ki_nr_segs, 1, &kiocb->ki_inline_vec,
48456+ kiocb->ki_nr_segs, 1, &iovstack,
48457 &kiocb->ki_iovec);
48458 else
48459 #endif
48460 ret = rw_copy_check_uvector(rw,
48461 (struct iovec __user *)kiocb->ki_buf,
48462- kiocb->ki_nr_segs, 1, &kiocb->ki_inline_vec,
48463+ kiocb->ki_nr_segs, 1, &iovstack,
48464 &kiocb->ki_iovec);
48465 if (ret < 0)
48466 return ret;
48467
48468+ if (kiocb->ki_iovec == &iovstack) {
48469+ kiocb->ki_inline_vec = iovstack;
48470+ kiocb->ki_iovec = &kiocb->ki_inline_vec;
48471+ }
48472+
48473 /* ki_nbytes now reflect bytes instead of segs */
48474 kiocb->ki_nbytes = ret;
48475 return 0;
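
The fs/aio.c hunk reworks where the fast-path iovec lives: rw_copy_check_uvector() may leave the output pointer aimed at the small vector the caller handed in, and the hardened version hands in a vector on its own stack frame (iovstack). Since that local would not survive the function return, the code must copy it into the long-lived kiocb->ki_inline_vec and repoint ki_iovec before returning, or ki_iovec would dangle. Generic shape of that fix (all names hypothetical):

    #include <string.h>

    struct req {
            int inline_vec[4];
            int *vec;                       /* points at inline_vec or heap */
    };

    static int check_uvector(int *fast, int **out)
    {
            *out = fast;                    /* stub: "use the fast buffer" */
            return 0;
    }

    static int setup(struct req *r)
    {
            int stackvec[4];
            int ret = check_uvector(stackvec, &r->vec);
            if (ret < 0)
                    return ret;
            if (r->vec == stackvec) {       /* result is on our stack frame */
                    memcpy(r->inline_vec, stackvec, sizeof(stackvec));
                    r->vec = r->inline_vec; /* repoint to storage that survives */
            }
            return 0;
    }
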
48476diff --git a/fs/attr.c b/fs/attr.c
48477index 1449adb..a2038c2 100644
48478--- a/fs/attr.c
48479+++ b/fs/attr.c
48480@@ -102,6 +102,7 @@ int inode_newsize_ok(const struct inode *inode, loff_t offset)
48481 unsigned long limit;
48482
48483 limit = rlimit(RLIMIT_FSIZE);
48484+ gr_learn_resource(current, RLIMIT_FSIZE, (unsigned long)offset, 1);
48485 if (limit != RLIM_INFINITY && offset > limit)
48486 goto out_sig;
48487 if (offset > inode->i_sb->s_maxbytes)
48488diff --git a/fs/autofs4/waitq.c b/fs/autofs4/waitq.c
48489index 3db70da..7aeec5b 100644
48490--- a/fs/autofs4/waitq.c
48491+++ b/fs/autofs4/waitq.c
48492@@ -59,7 +59,7 @@ static int autofs4_write(struct autofs_sb_info *sbi,
48493 {
48494 unsigned long sigpipe, flags;
48495 mm_segment_t fs;
48496- const char *data = (const char *)addr;
48497+ const char __user *data = (const char __force_user *)addr;
48498 ssize_t wr = 0;
48499
48500 sigpipe = sigismember(&current->pending.signal, SIGPIPE);
48501@@ -346,6 +346,10 @@ static int validate_request(struct autofs_wait_queue **wait,
48502 return 1;
48503 }
48504
48505+#ifdef CONFIG_GRKERNSEC_HIDESYM
48506+static atomic_unchecked_t autofs_dummy_name_id = ATOMIC_INIT(0);
48507+#endif
48508+
48509 int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
48510 enum autofs_notify notify)
48511 {
48512@@ -379,7 +383,12 @@ int autofs4_wait(struct autofs_sb_info *sbi, struct dentry *dentry,
48513
48514 /* If this is a direct mount request create a dummy name */
48515 if (IS_ROOT(dentry) && autofs_type_trigger(sbi->type))
48516+#ifdef CONFIG_GRKERNSEC_HIDESYM
48517+ /* this name does get written to userland via autofs4_write() */
48518+ qstr.len = sprintf(name, "%08x", atomic_inc_return_unchecked(&autofs_dummy_name_id));
48519+#else
48520 qstr.len = sprintf(name, "%p", dentry);
48521+#endif
48522 else {
48523 qstr.len = autofs4_getpath(sbi, dentry, &name);
48524 if (!qstr.len) {
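
Both the xenfs hunk earlier and this autofs4 one serve CONFIG_GRKERNSEC_HIDESYM, which keeps kernel virtual addresses out of userspace: xenstored's kva file prints a pointer derived from NULL instead of xen_store_interface, and autofs4's dummy direct-mount name — which does reach userland via autofs4_write() — is generated from a private counter instead of the dentry's "%p" address. The substitution pattern, schematically:

    #ifdef CONFIG_GRKERNSEC_HIDESYM
            /* an opaque, monotonically increasing id leaks nothing */
            qstr.len = sprintf(name, "%08x",
                               atomic_inc_return_unchecked(&autofs_dummy_name_id));
    #else
            qstr.len = sprintf(name, "%p", dentry); /* kernel address escapes */
    #endif
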
48525diff --git a/fs/befs/endian.h b/fs/befs/endian.h
48526index 2722387..c8dd2a7 100644
48527--- a/fs/befs/endian.h
48528+++ b/fs/befs/endian.h
48529@@ -11,7 +11,7 @@
48530
48531 #include <asm/byteorder.h>
48532
48533-static inline u64
48534+static inline u64 __intentional_overflow(-1)
48535 fs64_to_cpu(const struct super_block *sb, fs64 n)
48536 {
48537 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
48538@@ -29,7 +29,7 @@ cpu_to_fs64(const struct super_block *sb, u64 n)
48539 return (__force fs64)cpu_to_be64(n);
48540 }
48541
48542-static inline u32
48543+static inline u32 __intentional_overflow(-1)
48544 fs32_to_cpu(const struct super_block *sb, fs32 n)
48545 {
48546 if (BEFS_SB(sb)->byte_order == BEFS_BYTESEX_LE)
48547diff --git a/fs/befs/linuxvfs.c b/fs/befs/linuxvfs.c
48548index f95dddc..b1e2c1c 100644
48549--- a/fs/befs/linuxvfs.c
48550+++ b/fs/befs/linuxvfs.c
48551@@ -510,7 +510,7 @@ static void befs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
48552 {
48553 befs_inode_info *befs_ino = BEFS_I(dentry->d_inode);
48554 if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
48555- char *link = nd_get_link(nd);
48556+ const char *link = nd_get_link(nd);
48557 if (!IS_ERR(link))
48558 kfree(link);
48559 }
48560diff --git a/fs/binfmt_aout.c b/fs/binfmt_aout.c
48561index bce8769..7fc7544 100644
48562--- a/fs/binfmt_aout.c
48563+++ b/fs/binfmt_aout.c
48564@@ -16,6 +16,7 @@
48565 #include <linux/string.h>
48566 #include <linux/fs.h>
48567 #include <linux/file.h>
48568+#include <linux/security.h>
48569 #include <linux/stat.h>
48570 #include <linux/fcntl.h>
48571 #include <linux/ptrace.h>
48572@@ -59,6 +60,8 @@ static int aout_core_dump(struct coredump_params *cprm)
48573 #endif
48574 # define START_STACK(u) ((void __user *)u.start_stack)
48575
48576+ memset(&dump, 0, sizeof(dump));
48577+
48578 fs = get_fs();
48579 set_fs(KERNEL_DS);
48580 has_dumped = 1;
48581@@ -69,10 +72,12 @@ static int aout_core_dump(struct coredump_params *cprm)
48582
48583 /* If the size of the dump file exceeds the rlimit, then see what would happen
48584 if we wrote the stack, but not the data area. */
48585+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE, 1);
48586 if ((dump.u_dsize + dump.u_ssize+1) * PAGE_SIZE > cprm->limit)
48587 dump.u_dsize = 0;
48588
48589 /* Make sure we have enough room to write the stack and data areas. */
48590+ gr_learn_resource(current, RLIMIT_CORE, (dump.u_ssize + 1) * PAGE_SIZE, 1);
48591 if ((dump.u_ssize + 1) * PAGE_SIZE > cprm->limit)
48592 dump.u_ssize = 0;
48593
48594@@ -233,6 +238,8 @@ static int load_aout_binary(struct linux_binprm * bprm)
48595 rlim = rlimit(RLIMIT_DATA);
48596 if (rlim >= RLIM_INFINITY)
48597 rlim = ~0;
48598+
48599+ gr_learn_resource(current, RLIMIT_DATA, ex.a_data + ex.a_bss, 1);
48600 if (ex.a_data + ex.a_bss > rlim)
48601 return -ENOMEM;
48602
48603@@ -267,6 +274,27 @@ static int load_aout_binary(struct linux_binprm * bprm)
48604
48605 install_exec_creds(bprm);
48606
48607+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
48608+ current->mm->pax_flags = 0UL;
48609+#endif
48610+
48611+#ifdef CONFIG_PAX_PAGEEXEC
48612+ if (!(N_FLAGS(ex) & F_PAX_PAGEEXEC)) {
48613+ current->mm->pax_flags |= MF_PAX_PAGEEXEC;
48614+
48615+#ifdef CONFIG_PAX_EMUTRAMP
48616+ if (N_FLAGS(ex) & F_PAX_EMUTRAMP)
48617+ current->mm->pax_flags |= MF_PAX_EMUTRAMP;
48618+#endif
48619+
48620+#ifdef CONFIG_PAX_MPROTECT
48621+ if (!(N_FLAGS(ex) & F_PAX_MPROTECT))
48622+ current->mm->pax_flags |= MF_PAX_MPROTECT;
48623+#endif
48624+
48625+ }
48626+#endif
48627+
48628 if (N_MAGIC(ex) == OMAGIC) {
48629 unsigned long text_addr, map_size;
48630 loff_t pos;
48631@@ -324,7 +352,7 @@ static int load_aout_binary(struct linux_binprm * bprm)
48632 }
48633
48634 error = vm_mmap(bprm->file, N_DATADDR(ex), ex.a_data,
48635- PROT_READ | PROT_WRITE | PROT_EXEC,
48636+ PROT_READ | PROT_WRITE,
48637 MAP_FIXED | MAP_PRIVATE | MAP_DENYWRITE | MAP_EXECUTABLE,
48638 fd_offset + ex.a_text);
48639 if (error != N_DATADDR(ex)) {
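
The a.out loader picks up several independent hardenings here: the core-dump path now clears the dump struct up front (memset(&dump, 0, sizeof(dump))) so uninitialized stack bytes cannot leak into the dump file, the data segment is mapped without PROT_EXEC so PAGEEXEC holds for a.out binaries too, and the gr_learn_resource() calls feed grsecurity's RBAC learning mode with the observed RLIMIT_CORE/RLIMIT_DATA demands. The memset is a general infoleak fix whenever a partially filled struct is written out, e.g.:

    #include <string.h>
    #include <unistd.h>

    struct record {                 /* hypothetical on-disk record */
            int  id;
            char pad[60];           /* space the compiler won't initialize */
    };

    static void emit(int fd, int id)
    {
            struct record r;
            memset(&r, 0, sizeof(r));       /* otherwise stale stack bytes
                                             * would land in pad[] */
            r.id = id;
            write(fd, &r, sizeof(r));
    }
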
48640diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
48641index f8a0b0e..8186af0 100644
48642--- a/fs/binfmt_elf.c
48643+++ b/fs/binfmt_elf.c
48644@@ -34,6 +34,7 @@
48645 #include <linux/utsname.h>
48646 #include <linux/coredump.h>
48647 #include <linux/sched.h>
48648+#include <linux/xattr.h>
48649 #include <asm/uaccess.h>
48650 #include <asm/param.h>
48651 #include <asm/page.h>
48652@@ -60,6 +61,10 @@ static int elf_core_dump(struct coredump_params *cprm);
48653 #define elf_core_dump NULL
48654 #endif
48655
48656+#ifdef CONFIG_PAX_MPROTECT
48657+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags);
48658+#endif
48659+
48660 #if ELF_EXEC_PAGESIZE > PAGE_SIZE
48661 #define ELF_MIN_ALIGN ELF_EXEC_PAGESIZE
48662 #else
48663@@ -79,6 +84,11 @@ static struct linux_binfmt elf_format = {
48664 .load_binary = load_elf_binary,
48665 .load_shlib = load_elf_library,
48666 .core_dump = elf_core_dump,
48667+
48668+#ifdef CONFIG_PAX_MPROTECT
48669+ .handle_mprotect= elf_handle_mprotect,
48670+#endif
48671+
48672 .min_coredump = ELF_EXEC_PAGESIZE,
48673 };
48674
48675@@ -86,6 +96,8 @@ static struct linux_binfmt elf_format = {
48676
48677 static int set_brk(unsigned long start, unsigned long end)
48678 {
48679+ unsigned long e = end;
48680+
48681 start = ELF_PAGEALIGN(start);
48682 end = ELF_PAGEALIGN(end);
48683 if (end > start) {
48684@@ -94,7 +106,7 @@ static int set_brk(unsigned long start, unsigned long end)
48685 if (BAD_ADDR(addr))
48686 return addr;
48687 }
48688- current->mm->start_brk = current->mm->brk = end;
48689+ current->mm->start_brk = current->mm->brk = e;
48690 return 0;
48691 }
48692
48693@@ -155,12 +167,13 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
48694 elf_addr_t __user *u_rand_bytes;
48695 const char *k_platform = ELF_PLATFORM;
48696 const char *k_base_platform = ELF_BASE_PLATFORM;
48697- unsigned char k_rand_bytes[16];
48698+ u32 k_rand_bytes[4];
48699 int items;
48700 elf_addr_t *elf_info;
48701 int ei_index = 0;
48702 const struct cred *cred = current_cred();
48703 struct vm_area_struct *vma;
48704+ unsigned long saved_auxv[AT_VECTOR_SIZE];
48705
48706 /*
48707 * In some cases (e.g. Hyper-Threading), we want to avoid L1
48708@@ -202,8 +215,12 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
48709 * Generate 16 random bytes for userspace PRNG seeding.
48710 */
48711 get_random_bytes(k_rand_bytes, sizeof(k_rand_bytes));
48712- u_rand_bytes = (elf_addr_t __user *)
48713- STACK_ALLOC(p, sizeof(k_rand_bytes));
48714+ prandom_seed(k_rand_bytes[0] ^ prandom_u32());
48715+ prandom_seed(k_rand_bytes[1] ^ prandom_u32());
48716+ prandom_seed(k_rand_bytes[2] ^ prandom_u32());
48717+ prandom_seed(k_rand_bytes[3] ^ prandom_u32());
48718+ p = STACK_ROUND(p, sizeof(k_rand_bytes));
48719+ u_rand_bytes = (elf_addr_t __user *) p;
48720 if (__copy_to_user(u_rand_bytes, k_rand_bytes, sizeof(k_rand_bytes)))
48721 return -EFAULT;
48722
48723@@ -318,9 +335,11 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
48724 return -EFAULT;
48725 current->mm->env_end = p;
48726
48727+ memcpy(saved_auxv, elf_info, ei_index * sizeof(elf_addr_t));
48728+
48729 /* Put the elf_info on the stack in the right place. */
48730 sp = (elf_addr_t __user *)envp + 1;
48731- if (copy_to_user(sp, elf_info, ei_index * sizeof(elf_addr_t)))
48732+ if (copy_to_user(sp, saved_auxv, ei_index * sizeof(elf_addr_t)))
48733 return -EFAULT;
48734 return 0;
48735 }
48736@@ -388,15 +407,14 @@ static unsigned long total_mapping_size(struct elf_phdr *cmds, int nr)
48737 an ELF header */
48738
48739 static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
48740- struct file *interpreter, unsigned long *interp_map_addr,
48741- unsigned long no_base)
48742+ struct file *interpreter, unsigned long no_base)
48743 {
48744 struct elf_phdr *elf_phdata;
48745 struct elf_phdr *eppnt;
48746- unsigned long load_addr = 0;
48747+ unsigned long load_addr = 0, pax_task_size = TASK_SIZE;
48748 int load_addr_set = 0;
48749 unsigned long last_bss = 0, elf_bss = 0;
48750- unsigned long error = ~0UL;
48751+ unsigned long error = -EINVAL;
48752 unsigned long total_size;
48753 int retval, i, size;
48754
48755@@ -442,6 +460,11 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
48756 goto out_close;
48757 }
48758
48759+#ifdef CONFIG_PAX_SEGMEXEC
48760+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC)
48761+ pax_task_size = SEGMEXEC_TASK_SIZE;
48762+#endif
48763+
48764 eppnt = elf_phdata;
48765 for (i = 0; i < interp_elf_ex->e_phnum; i++, eppnt++) {
48766 if (eppnt->p_type == PT_LOAD) {
48767@@ -465,8 +488,6 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
48768 map_addr = elf_map(interpreter, load_addr + vaddr,
48769 eppnt, elf_prot, elf_type, total_size);
48770 total_size = 0;
48771- if (!*interp_map_addr)
48772- *interp_map_addr = map_addr;
48773 error = map_addr;
48774 if (BAD_ADDR(map_addr))
48775 goto out_close;
48776@@ -485,8 +506,8 @@ static unsigned long load_elf_interp(struct elfhdr *interp_elf_ex,
48777 k = load_addr + eppnt->p_vaddr;
48778 if (BAD_ADDR(k) ||
48779 eppnt->p_filesz > eppnt->p_memsz ||
48780- eppnt->p_memsz > TASK_SIZE ||
48781- TASK_SIZE - eppnt->p_memsz < k) {
48782+ eppnt->p_memsz > pax_task_size ||
48783+ pax_task_size - eppnt->p_memsz < k) {
48784 error = -ENOMEM;
48785 goto out_close;
48786 }
48787@@ -538,6 +559,315 @@ out:
48788 return error;
48789 }
48790
48791+#ifdef CONFIG_PAX_PT_PAX_FLAGS
48792+#ifdef CONFIG_PAX_SOFTMODE
48793+static unsigned long pax_parse_pt_pax_softmode(const struct elf_phdr * const elf_phdata)
48794+{
48795+ unsigned long pax_flags = 0UL;
48796+
48797+#ifdef CONFIG_PAX_PAGEEXEC
48798+ if (elf_phdata->p_flags & PF_PAGEEXEC)
48799+ pax_flags |= MF_PAX_PAGEEXEC;
48800+#endif
48801+
48802+#ifdef CONFIG_PAX_SEGMEXEC
48803+ if (elf_phdata->p_flags & PF_SEGMEXEC)
48804+ pax_flags |= MF_PAX_SEGMEXEC;
48805+#endif
48806+
48807+#ifdef CONFIG_PAX_EMUTRAMP
48808+ if ((elf_phdata->p_flags & PF_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
48809+ pax_flags |= MF_PAX_EMUTRAMP;
48810+#endif
48811+
48812+#ifdef CONFIG_PAX_MPROTECT
48813+ if (elf_phdata->p_flags & PF_MPROTECT)
48814+ pax_flags |= MF_PAX_MPROTECT;
48815+#endif
48816+
48817+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
48818+ if (randomize_va_space && (elf_phdata->p_flags & PF_RANDMMAP))
48819+ pax_flags |= MF_PAX_RANDMMAP;
48820+#endif
48821+
48822+ return pax_flags;
48823+}
48824+#endif
48825+
48826+static unsigned long pax_parse_pt_pax_hardmode(const struct elf_phdr * const elf_phdata)
48827+{
48828+ unsigned long pax_flags = 0UL;
48829+
48830+#ifdef CONFIG_PAX_PAGEEXEC
48831+ if (!(elf_phdata->p_flags & PF_NOPAGEEXEC))
48832+ pax_flags |= MF_PAX_PAGEEXEC;
48833+#endif
48834+
48835+#ifdef CONFIG_PAX_SEGMEXEC
48836+ if (!(elf_phdata->p_flags & PF_NOSEGMEXEC))
48837+ pax_flags |= MF_PAX_SEGMEXEC;
48838+#endif
48839+
48840+#ifdef CONFIG_PAX_EMUTRAMP
48841+ if (!(elf_phdata->p_flags & PF_NOEMUTRAMP))
48842+ pax_flags |= MF_PAX_EMUTRAMP;
48843+#endif
48844+
48845+#ifdef CONFIG_PAX_MPROTECT
48846+ if (!(elf_phdata->p_flags & PF_NOMPROTECT))
48847+ pax_flags |= MF_PAX_MPROTECT;
48848+#endif
48849+
48850+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
48851+ if (randomize_va_space && !(elf_phdata->p_flags & PF_NORANDMMAP))
48852+ pax_flags |= MF_PAX_RANDMMAP;
48853+#endif
48854+
48855+ return pax_flags;
48856+}
48857+#endif
48858+
48859+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
48860+#ifdef CONFIG_PAX_SOFTMODE
48861+static unsigned long pax_parse_xattr_pax_softmode(unsigned long pax_flags_softmode)
48862+{
48863+ unsigned long pax_flags = 0UL;
48864+
48865+#ifdef CONFIG_PAX_PAGEEXEC
48866+ if (pax_flags_softmode & MF_PAX_PAGEEXEC)
48867+ pax_flags |= MF_PAX_PAGEEXEC;
48868+#endif
48869+
48870+#ifdef CONFIG_PAX_SEGMEXEC
48871+ if (pax_flags_softmode & MF_PAX_SEGMEXEC)
48872+ pax_flags |= MF_PAX_SEGMEXEC;
48873+#endif
48874+
48875+#ifdef CONFIG_PAX_EMUTRAMP
48876+ if ((pax_flags_softmode & MF_PAX_EMUTRAMP) && (pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)))
48877+ pax_flags |= MF_PAX_EMUTRAMP;
48878+#endif
48879+
48880+#ifdef CONFIG_PAX_MPROTECT
48881+ if (pax_flags_softmode & MF_PAX_MPROTECT)
48882+ pax_flags |= MF_PAX_MPROTECT;
48883+#endif
48884+
48885+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
48886+ if (randomize_va_space && (pax_flags_softmode & MF_PAX_RANDMMAP))
48887+ pax_flags |= MF_PAX_RANDMMAP;
48888+#endif
48889+
48890+ return pax_flags;
48891+}
48892+#endif
48893+
48894+static unsigned long pax_parse_xattr_pax_hardmode(unsigned long pax_flags_hardmode)
48895+{
48896+ unsigned long pax_flags = 0UL;
48897+
48898+#ifdef CONFIG_PAX_PAGEEXEC
48899+ if (!(pax_flags_hardmode & MF_PAX_PAGEEXEC))
48900+ pax_flags |= MF_PAX_PAGEEXEC;
48901+#endif
48902+
48903+#ifdef CONFIG_PAX_SEGMEXEC
48904+ if (!(pax_flags_hardmode & MF_PAX_SEGMEXEC))
48905+ pax_flags |= MF_PAX_SEGMEXEC;
48906+#endif
48907+
48908+#ifdef CONFIG_PAX_EMUTRAMP
48909+ if (!(pax_flags_hardmode & MF_PAX_EMUTRAMP))
48910+ pax_flags |= MF_PAX_EMUTRAMP;
48911+#endif
48912+
48913+#ifdef CONFIG_PAX_MPROTECT
48914+ if (!(pax_flags_hardmode & MF_PAX_MPROTECT))
48915+ pax_flags |= MF_PAX_MPROTECT;
48916+#endif
48917+
48918+#if defined(CONFIG_PAX_RANDMMAP) || defined(CONFIG_PAX_RANDUSTACK)
48919+ if (randomize_va_space && !(pax_flags_hardmode & MF_PAX_RANDMMAP))
48920+ pax_flags |= MF_PAX_RANDMMAP;
48921+#endif
48922+
48923+ return pax_flags;
48924+}
48925+#endif
48926+
48927+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
48928+static unsigned long pax_parse_ei_pax(const struct elfhdr * const elf_ex)
48929+{
48930+ unsigned long pax_flags = 0UL;
48931+
48932+#ifdef CONFIG_PAX_EI_PAX
48933+
48934+#ifdef CONFIG_PAX_PAGEEXEC
48935+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_PAGEEXEC))
48936+ pax_flags |= MF_PAX_PAGEEXEC;
48937+#endif
48938+
48939+#ifdef CONFIG_PAX_SEGMEXEC
48940+ if (!(elf_ex->e_ident[EI_PAX] & EF_PAX_SEGMEXEC))
48941+ pax_flags |= MF_PAX_SEGMEXEC;
48942+#endif
48943+
48944+#ifdef CONFIG_PAX_EMUTRAMP
48945+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && (elf_ex->e_ident[EI_PAX] & EF_PAX_EMUTRAMP))
48946+ pax_flags |= MF_PAX_EMUTRAMP;
48947+#endif
48948+
48949+#ifdef CONFIG_PAX_MPROTECT
48950+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) && !(elf_ex->e_ident[EI_PAX] & EF_PAX_MPROTECT))
48951+ pax_flags |= MF_PAX_MPROTECT;
48952+#endif
48953+
48954+#ifdef CONFIG_PAX_ASLR
48955+ if (randomize_va_space && !(elf_ex->e_ident[EI_PAX] & EF_PAX_RANDMMAP))
48956+ pax_flags |= MF_PAX_RANDMMAP;
48957+#endif
48958+
48959+#else
48960+
48961+#ifdef CONFIG_PAX_PAGEEXEC
48962+ pax_flags |= MF_PAX_PAGEEXEC;
48963+#endif
48964+
48965+#ifdef CONFIG_PAX_SEGMEXEC
48966+ pax_flags |= MF_PAX_SEGMEXEC;
48967+#endif
48968+
48969+#ifdef CONFIG_PAX_MPROTECT
48970+ pax_flags |= MF_PAX_MPROTECT;
48971+#endif
48972+
48973+#ifdef CONFIG_PAX_RANDMMAP
48974+ if (randomize_va_space)
48975+ pax_flags |= MF_PAX_RANDMMAP;
48976+#endif
48977+
48978+#endif
48979+
48980+ return pax_flags;
48981+}
48982+
48983+static unsigned long pax_parse_pt_pax(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata)
48984+{
48985+
48986+#ifdef CONFIG_PAX_PT_PAX_FLAGS
48987+ unsigned long i;
48988+
48989+ for (i = 0UL; i < elf_ex->e_phnum; i++)
48990+ if (elf_phdata[i].p_type == PT_PAX_FLAGS) {
48991+ if (((elf_phdata[i].p_flags & PF_PAGEEXEC) && (elf_phdata[i].p_flags & PF_NOPAGEEXEC)) ||
48992+ ((elf_phdata[i].p_flags & PF_SEGMEXEC) && (elf_phdata[i].p_flags & PF_NOSEGMEXEC)) ||
48993+ ((elf_phdata[i].p_flags & PF_EMUTRAMP) && (elf_phdata[i].p_flags & PF_NOEMUTRAMP)) ||
48994+ ((elf_phdata[i].p_flags & PF_MPROTECT) && (elf_phdata[i].p_flags & PF_NOMPROTECT)) ||
48995+ ((elf_phdata[i].p_flags & PF_RANDMMAP) && (elf_phdata[i].p_flags & PF_NORANDMMAP)))
48996+ return ~0UL;
48997+
48998+#ifdef CONFIG_PAX_SOFTMODE
48999+ if (pax_softmode)
49000+ return pax_parse_pt_pax_softmode(&elf_phdata[i]);
49001+ else
49002+#endif
49003+
49004+ return pax_parse_pt_pax_hardmode(&elf_phdata[i]);
49005+ break;
49006+ }
49007+#endif
49008+
49009+ return ~0UL;
49010+}
49011+
49012+static unsigned long pax_parse_xattr_pax(struct file * const file)
49013+{
49014+
49015+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
49016+ ssize_t xattr_size, i;
49017+ unsigned char xattr_value[sizeof("pemrs") - 1];
49018+ unsigned long pax_flags_hardmode = 0UL, pax_flags_softmode = 0UL;
49019+
49020+ xattr_size = pax_getxattr(file->f_path.dentry, xattr_value, sizeof xattr_value);
49021+ if (xattr_size <= 0 || xattr_size > sizeof xattr_value)
49022+ return ~0UL;
49023+
49024+ for (i = 0; i < xattr_size; i++)
49025+ switch (xattr_value[i]) {
49026+ default:
49027+ return ~0UL;
49028+
49029+#define parse_flag(option1, option2, flag) \
49030+ case option1: \
49031+ if (pax_flags_hardmode & MF_PAX_##flag) \
49032+ return ~0UL; \
49033+ pax_flags_hardmode |= MF_PAX_##flag; \
49034+ break; \
49035+ case option2: \
49036+ if (pax_flags_softmode & MF_PAX_##flag) \
49037+ return ~0UL; \
49038+ pax_flags_softmode |= MF_PAX_##flag; \
49039+ break;
49040+
49041+ parse_flag('p', 'P', PAGEEXEC);
49042+ parse_flag('e', 'E', EMUTRAMP);
49043+ parse_flag('m', 'M', MPROTECT);
49044+ parse_flag('r', 'R', RANDMMAP);
49045+ parse_flag('s', 'S', SEGMEXEC);
49046+
49047+#undef parse_flag
49048+ }
49049+
49050+ if (pax_flags_hardmode & pax_flags_softmode)
49051+ return ~0UL;
49052+
49053+#ifdef CONFIG_PAX_SOFTMODE
49054+ if (pax_softmode)
49055+ return pax_parse_xattr_pax_softmode(pax_flags_softmode);
49056+ else
49057+#endif
49058+
49059+ return pax_parse_xattr_pax_hardmode(pax_flags_hardmode);
49060+#else
49061+ return ~0UL;
49062+#endif
49063+
49064+}
49065+
49066+static long pax_parse_pax_flags(const struct elfhdr * const elf_ex, const struct elf_phdr * const elf_phdata, struct file * const file)
49067+{
49068+ unsigned long pax_flags, pt_pax_flags, xattr_pax_flags;
49069+
49070+ pax_flags = pax_parse_ei_pax(elf_ex);
49071+ pt_pax_flags = pax_parse_pt_pax(elf_ex, elf_phdata);
49072+ xattr_pax_flags = pax_parse_xattr_pax(file);
49073+
49074+ if (pt_pax_flags == ~0UL)
49075+ pt_pax_flags = xattr_pax_flags;
49076+ else if (xattr_pax_flags == ~0UL)
49077+ xattr_pax_flags = pt_pax_flags;
49078+ if (pt_pax_flags != xattr_pax_flags)
49079+ return -EINVAL;
49080+ if (pt_pax_flags != ~0UL)
49081+ pax_flags = pt_pax_flags;
49082+
49083+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_PAX_SEGMEXEC)
49084+ if ((pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) == (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
49085+ if ((__supported_pte_mask & _PAGE_NX))
49086+ pax_flags &= ~MF_PAX_SEGMEXEC;
49087+ else
49088+ pax_flags &= ~MF_PAX_PAGEEXEC;
49089+ }
49090+#endif
49091+
49092+ if (0 > pax_check_flags(&pax_flags))
49093+ return -EINVAL;
49094+
49095+ current->mm->pax_flags = pax_flags;
49096+ return 0;
49097+}
49098+#endif
49099+
49100 /*
49101 * These are the functions used to load ELF style executables and shared
49102 * libraries. There is no binary dependent code anywhere else.
49103@@ -554,6 +884,11 @@ static unsigned long randomize_stack_top(unsigned long stack_top)
49104 {
49105 unsigned int random_variable = 0;
49106
49107+#ifdef CONFIG_PAX_RANDUSTACK
49108+ if (current->mm->pax_flags & MF_PAX_RANDMMAP)
49109+ return stack_top - current->mm->delta_stack;
49110+#endif
49111+
49112 if ((current->flags & PF_RANDOMIZE) &&
49113 !(current->personality & ADDR_NO_RANDOMIZE)) {
49114 random_variable = get_random_int() & STACK_RND_MASK;
49115@@ -572,7 +907,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
49116 unsigned long load_addr = 0, load_bias = 0;
49117 int load_addr_set = 0;
49118 char * elf_interpreter = NULL;
49119- unsigned long error;
49120+ unsigned long error = 0;
49121 struct elf_phdr *elf_ppnt, *elf_phdata;
49122 unsigned long elf_bss, elf_brk;
49123 int retval, i;
49124@@ -582,12 +917,12 @@ static int load_elf_binary(struct linux_binprm *bprm)
49125 unsigned long start_code, end_code, start_data, end_data;
49126 unsigned long reloc_func_desc __maybe_unused = 0;
49127 int executable_stack = EXSTACK_DEFAULT;
49128- unsigned long def_flags = 0;
49129 struct pt_regs *regs = current_pt_regs();
49130 struct {
49131 struct elfhdr elf_ex;
49132 struct elfhdr interp_elf_ex;
49133 } *loc;
49134+ unsigned long pax_task_size = TASK_SIZE;
49135
49136 loc = kmalloc(sizeof(*loc), GFP_KERNEL);
49137 if (!loc) {
49138@@ -723,11 +1058,81 @@ static int load_elf_binary(struct linux_binprm *bprm)
49139 goto out_free_dentry;
49140
49141 /* OK, This is the point of no return */
49142- current->mm->def_flags = def_flags;
49143+
49144+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
49145+ current->mm->pax_flags = 0UL;
49146+#endif
49147+
49148+#ifdef CONFIG_PAX_DLRESOLVE
49149+ current->mm->call_dl_resolve = 0UL;
49150+#endif
49151+
49152+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
49153+ current->mm->call_syscall = 0UL;
49154+#endif
49155+
49156+#ifdef CONFIG_PAX_ASLR
49157+ current->mm->delta_mmap = 0UL;
49158+ current->mm->delta_stack = 0UL;
49159+#endif
49160+
49161+ current->mm->def_flags = 0;
49162+
49163+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
49164+ if (0 > pax_parse_pax_flags(&loc->elf_ex, elf_phdata, bprm->file)) {
49165+ send_sig(SIGKILL, current, 0);
49166+ goto out_free_dentry;
49167+ }
49168+#endif
49169+
49170+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
49171+ pax_set_initial_flags(bprm);
49172+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
49173+ if (pax_set_initial_flags_func)
49174+ (pax_set_initial_flags_func)(bprm);
49175+#endif
49176+
49177+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
49178+ if ((current->mm->pax_flags & MF_PAX_PAGEEXEC) && !(__supported_pte_mask & _PAGE_NX)) {
49179+ current->mm->context.user_cs_limit = PAGE_SIZE;
49180+ current->mm->def_flags |= VM_PAGEEXEC | VM_NOHUGEPAGE;
49181+ }
49182+#endif
49183+
49184+#ifdef CONFIG_PAX_SEGMEXEC
49185+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
49186+ current->mm->context.user_cs_base = SEGMEXEC_TASK_SIZE;
49187+ current->mm->context.user_cs_limit = TASK_SIZE-SEGMEXEC_TASK_SIZE;
49188+ pax_task_size = SEGMEXEC_TASK_SIZE;
49189+ current->mm->def_flags |= VM_NOHUGEPAGE;
49190+ }
49191+#endif
49192+
49193+#if defined(CONFIG_ARCH_TRACK_EXEC_LIMIT) || defined(CONFIG_PAX_SEGMEXEC)
49194+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
49195+ set_user_cs(current->mm->context.user_cs_base, current->mm->context.user_cs_limit, get_cpu());
49196+ put_cpu();
49197+ }
49198+#endif
49199
49200 /* Do this immediately, since STACK_TOP as used in setup_arg_pages
49201 may depend on the personality. */
49202 SET_PERSONALITY(loc->elf_ex);
49203+
49204+#ifdef CONFIG_PAX_ASLR
49205+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
49206+ current->mm->delta_mmap = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN)-1)) << PAGE_SHIFT;
49207+ current->mm->delta_stack = (pax_get_random_long() & ((1UL << PAX_DELTA_STACK_LEN)-1)) << PAGE_SHIFT;
49208+ }
49209+#endif
49210+
49211+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
49212+ if (current->mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
49213+ executable_stack = EXSTACK_DISABLE_X;
49214+ current->personality &= ~READ_IMPLIES_EXEC;
49215+ } else
49216+#endif
49217+
49218 if (elf_read_implies_exec(loc->elf_ex, executable_stack))
49219 current->personality |= READ_IMPLIES_EXEC;
49220
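
The ASLR block above draws PAX_DELTA_MMAP_LEN and PAX_DELTA_STACK_LEN random bits and shifts them into page units, so both deltas are page-aligned and carry exactly that many bits of entropy. A sketch of the computation with a hypothetical 16-bit length (the real constants are per-architecture):

    #include <stdio.h>
    #include <stdlib.h>

    #define PAGE_SHIFT 12
    #define DELTA_LEN  16   /* hypothetical; PAX_DELTA_MMAP_LEN is per-arch */

    /* DELTA_LEN bits of entropy, page-aligned by construction; rand() is
     * a stand-in for the kernel's pax_get_random_long(). */
    static unsigned long pick_delta(void)
    {
        return ((unsigned long)rand() & ((1UL << DELTA_LEN) - 1)) << PAGE_SHIFT;
    }

    int main(void)
    {
        /* with 16 bits and 4 KiB pages the delta spans up to ~256 MiB */
        printf("delta=%#lx max=%#lx\n", pick_delta(),
               ((1UL << DELTA_LEN) - 1) << PAGE_SHIFT);
        return 0;
    }
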
49221@@ -819,6 +1224,20 @@ static int load_elf_binary(struct linux_binprm *bprm)
49222 #else
49223 load_bias = ELF_PAGESTART(ELF_ET_DYN_BASE - vaddr);
49224 #endif
49225+
49226+#ifdef CONFIG_PAX_RANDMMAP
49227+ /* PaX: randomize base address at the default exe base if requested */
49228+ if ((current->mm->pax_flags & MF_PAX_RANDMMAP) && elf_interpreter) {
49229+#ifdef CONFIG_SPARC64
49230+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << (PAGE_SHIFT+1);
49231+#else
49232+ load_bias = (pax_get_random_long() & ((1UL << PAX_DELTA_MMAP_LEN) - 1)) << PAGE_SHIFT;
49233+#endif
49234+ load_bias = ELF_PAGESTART(PAX_ELF_ET_DYN_BASE - vaddr + load_bias);
49235+ elf_flags |= MAP_FIXED;
49236+ }
49237+#endif
49238+
49239 }
49240
49241 error = elf_map(bprm->file, load_bias + vaddr, elf_ppnt,
49242@@ -851,9 +1270,9 @@ static int load_elf_binary(struct linux_binprm *bprm)
49243 * allowed task size. Note that p_filesz must always be
49244 * <= p_memsz so it is only necessary to check p_memsz.
49245 */
49246- if (BAD_ADDR(k) || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
49247- elf_ppnt->p_memsz > TASK_SIZE ||
49248- TASK_SIZE - elf_ppnt->p_memsz < k) {
49249+ if (k >= pax_task_size || elf_ppnt->p_filesz > elf_ppnt->p_memsz ||
49250+ elf_ppnt->p_memsz > pax_task_size ||
49251+ pax_task_size - elf_ppnt->p_memsz < k) {
49252 /* set_brk can never work. Avoid overflows. */
49253 send_sig(SIGKILL, current, 0);
49254 retval = -EINVAL;
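
The rewritten range check compares against pax_task_size (which shrinks under SEGMEXEC) and is ordered so that no expression can wrap: the subtraction pax_task_size - p_memsz is only reached once p_memsz > pax_task_size has been ruled out. A standalone sketch under assumed values:

    #include <stdbool.h>
    #include <stdio.h>

    /* true if [k, k + memsz) does not fit below task_size, with no
     * intermediate overflow: the subtraction is safe once memsz <= task_size */
    static bool segment_out_of_range(unsigned long k, unsigned long filesz,
                                     unsigned long memsz, unsigned long task_size)
    {
        return k >= task_size || filesz > memsz ||
               memsz > task_size || task_size - memsz < k;
    }

    int main(void)
    {
        unsigned long task_size = 0xC0000000UL; /* illustrative 3 GiB split */
        printf("%d\n", segment_out_of_range(0xBFFFFF00UL, 0x200, 0x200, task_size));   /* 1 */
        printf("%d\n", segment_out_of_range(0x08048000UL, 0x1000, 0x2000, task_size)); /* 0 */
        return 0;
    }
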
49255@@ -892,17 +1311,45 @@ static int load_elf_binary(struct linux_binprm *bprm)
49256 goto out_free_dentry;
49257 }
49258 if (likely(elf_bss != elf_brk) && unlikely(padzero(elf_bss))) {
49259- send_sig(SIGSEGV, current, 0);
49260- retval = -EFAULT; /* Nobody gets to see this, but.. */
49261- goto out_free_dentry;
49262+ /*
49263+ * This bss-zeroing can fail if the ELF
49264+ * file specifies odd protections. So
49265+ * we don't check the return value
49266+ */
49267 }
49268
49269+#ifdef CONFIG_PAX_RANDMMAP
49270+ if (current->mm->pax_flags & MF_PAX_RANDMMAP) {
49271+ unsigned long start, size, flags;
49272+ vm_flags_t vm_flags;
49273+
49274+ start = ELF_PAGEALIGN(elf_brk);
49275+ size = PAGE_SIZE + ((pax_get_random_long() & ((1UL << 22) - 1UL)) << 4);
49276+ flags = MAP_FIXED | MAP_PRIVATE;
49277+ vm_flags = VM_DONTEXPAND | VM_DONTDUMP;
49278+
49279+ down_write(&current->mm->mmap_sem);
49280+ start = get_unmapped_area(NULL, start, PAGE_ALIGN(size), 0, flags);
49281+ retval = -ENOMEM;
49282+ if (!IS_ERR_VALUE(start) && !find_vma_intersection(current->mm, start, start + size + PAGE_SIZE)) {
49283+// if (current->personality & ADDR_NO_RANDOMIZE)
49284+// vm_flags |= VM_READ | VM_MAYREAD;
49285+ start = mmap_region(NULL, start, PAGE_ALIGN(size), vm_flags, 0);
49286+ retval = IS_ERR_VALUE(start) ? start : 0;
49287+ }
49288+ up_write(&current->mm->mmap_sem);
49289+ if (retval == 0)
49290+ retval = set_brk(start + size, start + size + PAGE_SIZE);
49291+ if (retval < 0) {
49292+ send_sig(SIGKILL, current, 0);
49293+ goto out_free_dentry;
49294+ }
49295+ }
49296+#endif
49297+
49298 if (elf_interpreter) {
49299- unsigned long interp_map_addr = 0;
49300-
49301 elf_entry = load_elf_interp(&loc->interp_elf_ex,
49302 interpreter,
49303- &interp_map_addr,
49304 load_bias);
49305 if (!IS_ERR((void *)elf_entry)) {
49306 /*
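
The RANDMMAP block just above reserves a randomly sized, VM_DONTEXPAND gap immediately after the brk segment so the heap does not begin at a predictable offset from the executable. Its size is PAGE_SIZE plus 22 random bits scaled by 16, giving an upper bound of roughly 64 MiB; a quick check of that arithmetic (4 KiB pages assumed):

    #include <stdio.h>

    int main(void)
    {
        unsigned long page = 4096;  /* assumed page size */
        /* gap = PAGE_SIZE + (22 random bits) << 4, as in the hunk above */
        unsigned long max_gap = page + (((1UL << 22) - 1) << 4);
        printf("max brk gap: %lu bytes (~%lu MiB)\n", max_gap, max_gap >> 20);
        return 0;
    }
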
49307@@ -1124,7 +1571,7 @@ static bool always_dump_vma(struct vm_area_struct *vma)
49308 * Decide what to dump of a segment, part, all or none.
49309 */
49310 static unsigned long vma_dump_size(struct vm_area_struct *vma,
49311- unsigned long mm_flags)
49312+ unsigned long mm_flags, long signr)
49313 {
49314 #define FILTER(type) (mm_flags & (1UL << MMF_DUMP_##type))
49315
49316@@ -1162,7 +1609,7 @@ static unsigned long vma_dump_size(struct vm_area_struct *vma,
49317 if (vma->vm_file == NULL)
49318 return 0;
49319
49320- if (FILTER(MAPPED_PRIVATE))
49321+ if (signr == SIGKILL || FILTER(MAPPED_PRIVATE))
49322 goto whole;
49323
49324 /*
49325@@ -1387,9 +1834,9 @@ static void fill_auxv_note(struct memelfnote *note, struct mm_struct *mm)
49326 {
49327 elf_addr_t *auxv = (elf_addr_t *) mm->saved_auxv;
49328 int i = 0;
49329- do
49330+ do {
49331 i += 2;
49332- while (auxv[i - 2] != AT_NULL);
49333+ } while (auxv[i - 2] != AT_NULL);
49334 fill_note(note, "CORE", NT_AUXV, i * sizeof(elf_addr_t), auxv);
49335 }
49336
49337@@ -1398,7 +1845,7 @@ static void fill_siginfo_note(struct memelfnote *note, user_siginfo_t *csigdata,
49338 {
49339 mm_segment_t old_fs = get_fs();
49340 set_fs(KERNEL_DS);
49341- copy_siginfo_to_user((user_siginfo_t __user *) csigdata, siginfo);
49342+ copy_siginfo_to_user((user_siginfo_t __force_user *) csigdata, siginfo);
49343 set_fs(old_fs);
49344 fill_note(note, "CORE", NT_SIGINFO, sizeof(*csigdata), csigdata);
49345 }
49346@@ -2019,14 +2466,14 @@ static void fill_extnum_info(struct elfhdr *elf, struct elf_shdr *shdr4extnum,
49347 }
49348
49349 static size_t elf_core_vma_data_size(struct vm_area_struct *gate_vma,
49350- unsigned long mm_flags)
49351+ struct coredump_params *cprm)
49352 {
49353 struct vm_area_struct *vma;
49354 size_t size = 0;
49355
49356 for (vma = first_vma(current, gate_vma); vma != NULL;
49357 vma = next_vma(vma, gate_vma))
49358- size += vma_dump_size(vma, mm_flags);
49359+ size += vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
49360 return size;
49361 }
49362
49363@@ -2119,7 +2566,7 @@ static int elf_core_dump(struct coredump_params *cprm)
49364
49365 dataoff = offset = roundup(offset, ELF_EXEC_PAGESIZE);
49366
49367- offset += elf_core_vma_data_size(gate_vma, cprm->mm_flags);
49368+ offset += elf_core_vma_data_size(gate_vma, cprm);
49369 offset += elf_core_extra_data_size();
49370 e_shoff = offset;
49371
49372@@ -2133,10 +2580,12 @@ static int elf_core_dump(struct coredump_params *cprm)
49373 offset = dataoff;
49374
49375 size += sizeof(*elf);
49376+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
49377 if (size > cprm->limit || !dump_write(cprm->file, elf, sizeof(*elf)))
49378 goto end_coredump;
49379
49380 size += sizeof(*phdr4note);
49381+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
49382 if (size > cprm->limit
49383 || !dump_write(cprm->file, phdr4note, sizeof(*phdr4note)))
49384 goto end_coredump;
49385@@ -2150,7 +2599,7 @@ static int elf_core_dump(struct coredump_params *cprm)
49386 phdr.p_offset = offset;
49387 phdr.p_vaddr = vma->vm_start;
49388 phdr.p_paddr = 0;
49389- phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags);
49390+ phdr.p_filesz = vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
49391 phdr.p_memsz = vma->vm_end - vma->vm_start;
49392 offset += phdr.p_filesz;
49393 phdr.p_flags = vma->vm_flags & VM_READ ? PF_R : 0;
49394@@ -2161,6 +2610,7 @@ static int elf_core_dump(struct coredump_params *cprm)
49395 phdr.p_align = ELF_EXEC_PAGESIZE;
49396
49397 size += sizeof(phdr);
49398+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
49399 if (size > cprm->limit
49400 || !dump_write(cprm->file, &phdr, sizeof(phdr)))
49401 goto end_coredump;
49402@@ -2185,7 +2635,7 @@ static int elf_core_dump(struct coredump_params *cprm)
49403 unsigned long addr;
49404 unsigned long end;
49405
49406- end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags);
49407+ end = vma->vm_start + vma_dump_size(vma, cprm->mm_flags, cprm->siginfo->si_signo);
49408
49409 for (addr = vma->vm_start; addr < end; addr += PAGE_SIZE) {
49410 struct page *page;
49411@@ -2194,6 +2644,7 @@ static int elf_core_dump(struct coredump_params *cprm)
49412 page = get_dump_page(addr);
49413 if (page) {
49414 void *kaddr = kmap(page);
49415+ gr_learn_resource(current, RLIMIT_CORE, size + PAGE_SIZE, 1);
49416 stop = ((size += PAGE_SIZE) > cprm->limit) ||
49417 !dump_write(cprm->file, kaddr,
49418 PAGE_SIZE);
49419@@ -2211,6 +2662,7 @@ static int elf_core_dump(struct coredump_params *cprm)
49420
49421 if (e_phnum == PN_XNUM) {
49422 size += sizeof(*shdr4extnum);
49423+ gr_learn_resource(current, RLIMIT_CORE, size, 1);
49424 if (size > cprm->limit
49425 || !dump_write(cprm->file, shdr4extnum,
49426 sizeof(*shdr4extnum)))
49427@@ -2231,6 +2683,97 @@ out:
49428
49429 #endif /* CONFIG_ELF_CORE */
49430
49431+#ifdef CONFIG_PAX_MPROTECT
49432+/* PaX: non-PIC ELF libraries need relocations on their executable segments,
49433+ * therefore we'll grant them VM_MAYWRITE once during their life. Similarly
49434+ * we'll remove VM_MAYWRITE for good on RELRO segments.
49435+ *
49436+ * The checks favour ld-linux.so behaviour which operates on a per ELF segment
49437+ * basis because we want to allow the common case and not the special ones.
49438+ */
49439+static void elf_handle_mprotect(struct vm_area_struct *vma, unsigned long newflags)
49440+{
49441+ struct elfhdr elf_h;
49442+ struct elf_phdr elf_p;
49443+ unsigned long i;
49444+ unsigned long oldflags;
49445+ bool is_textrel_rw, is_textrel_rx, is_relro;
49446+
49447+ if (!(vma->vm_mm->pax_flags & MF_PAX_MPROTECT))
49448+ return;
49449+
49450+ oldflags = vma->vm_flags & (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ);
49451+ newflags &= VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_EXEC | VM_WRITE | VM_READ;
49452+
49453+#ifdef CONFIG_PAX_ELFRELOCS
49454+ /* possible TEXTREL */
49455+ is_textrel_rw = vma->vm_file && !vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYREAD | VM_EXEC | VM_READ) && newflags == (VM_WRITE | VM_READ);
49456+ is_textrel_rx = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYEXEC | VM_MAYWRITE | VM_MAYREAD | VM_WRITE | VM_READ) && newflags == (VM_EXEC | VM_READ);
49457+#else
49458+ is_textrel_rw = false;
49459+ is_textrel_rx = false;
49460+#endif
49461+
49462+ /* possible RELRO */
49463+ is_relro = vma->vm_file && vma->anon_vma && oldflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ) && newflags == (VM_MAYWRITE | VM_MAYREAD | VM_READ);
49464+
49465+ if (!is_textrel_rw && !is_textrel_rx && !is_relro)
49466+ return;
49467+
49468+ if (sizeof(elf_h) != kernel_read(vma->vm_file, 0UL, (char *)&elf_h, sizeof(elf_h)) ||
49469+ memcmp(elf_h.e_ident, ELFMAG, SELFMAG) ||
49470+
49471+#ifdef CONFIG_PAX_ETEXECRELOCS
49472+ ((is_textrel_rw || is_textrel_rx) && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
49473+#else
49474+ ((is_textrel_rw || is_textrel_rx) && elf_h.e_type != ET_DYN) ||
49475+#endif
49476+
49477+ (is_relro && (elf_h.e_type != ET_DYN && elf_h.e_type != ET_EXEC)) ||
49478+ !elf_check_arch(&elf_h) ||
49479+ elf_h.e_phentsize != sizeof(struct elf_phdr) ||
49480+ elf_h.e_phnum > 65536UL / sizeof(struct elf_phdr))
49481+ return;
49482+
49483+ for (i = 0UL; i < elf_h.e_phnum; i++) {
49484+ if (sizeof(elf_p) != kernel_read(vma->vm_file, elf_h.e_phoff + i*sizeof(elf_p), (char *)&elf_p, sizeof(elf_p)))
49485+ return;
49486+ switch (elf_p.p_type) {
49487+ case PT_DYNAMIC:
49488+ if (!is_textrel_rw && !is_textrel_rx)
49489+ continue;
49490+ i = 0UL;
49491+ while ((i+1) * sizeof(elf_dyn) <= elf_p.p_filesz) {
49492+ elf_dyn dyn;
49493+
49494+ if (sizeof(dyn) != kernel_read(vma->vm_file, elf_p.p_offset + i*sizeof(dyn), (char *)&dyn, sizeof(dyn)))
49495+ return;
49496+ if (dyn.d_tag == DT_NULL)
49497+ return;
49498+ if (dyn.d_tag == DT_TEXTREL || (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL))) {
49499+ gr_log_textrel(vma);
49500+ if (is_textrel_rw)
49501+ vma->vm_flags |= VM_MAYWRITE;
49502+ else
49503+ /* PaX: disallow write access after relocs are done, hopefully no one else needs it... */
49504+ vma->vm_flags &= ~VM_MAYWRITE;
49505+ return;
49506+ }
49507+ i++;
49508+ }
49509+ return;
49510+
49511+ case PT_GNU_RELRO:
49512+ if (!is_relro)
49513+ continue;
49514+ if ((elf_p.p_offset >> PAGE_SHIFT) == vma->vm_pgoff && ELF_PAGEALIGN(elf_p.p_memsz) == vma->vm_end - vma->vm_start)
49515+ vma->vm_flags &= ~VM_MAYWRITE;
49516+ return;
49517+ }
49518+ }
49519+}
49520+#endif
49521+
49522 static int __init init_elf_binfmt(void)
49523 {
49524 register_binfmt(&elf_format);
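
elf_handle_mprotect above trusts nothing from the vma alone: it re-reads the backing ELF header, validates it, and walks the program headers, confirming a TEXTREL only when PT_DYNAMIC carries DT_TEXTREL (or DT_FLAGS with DF_TEXTREL) and a RELRO transition only when a PT_GNU_RELRO entry covers exactly the vma. A userspace sketch of the DT_TEXTREL scan over a file, assuming a 64-bit ELF and glibc's <elf.h>:

    #include <elf.h>
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    /* Report whether an ELF file carries DT_TEXTREL; minimal error handling. */
    static bool has_textrel(FILE *f)
    {
        Elf64_Ehdr eh;
        if (fread(&eh, sizeof(eh), 1, f) != 1 ||
            memcmp(eh.e_ident, ELFMAG, SELFMAG))
            return false;
        for (unsigned i = 0; i < eh.e_phnum; i++) {
            Elf64_Phdr ph;
            fseek(f, eh.e_phoff + i * sizeof(ph), SEEK_SET);
            if (fread(&ph, sizeof(ph), 1, f) != 1)
                return false;
            if (ph.p_type != PT_DYNAMIC)
                continue;
            /* scan the dynamic section for DT_TEXTREL / DF_TEXTREL */
            for (unsigned j = 0; (j + 1) * sizeof(Elf64_Dyn) <= ph.p_filesz; j++) {
                Elf64_Dyn dyn;
                fseek(f, ph.p_offset + j * sizeof(dyn), SEEK_SET);
                if (fread(&dyn, sizeof(dyn), 1, f) != 1 || dyn.d_tag == DT_NULL)
                    return false;
                if (dyn.d_tag == DT_TEXTREL ||
                    (dyn.d_tag == DT_FLAGS && (dyn.d_un.d_val & DF_TEXTREL)))
                    return true;
            }
            return false;
        }
        return false;
    }

    int main(int argc, char **argv)
    {
        if (argc != 2)
            return 1;
        FILE *f = fopen(argv[1], "rb");
        if (!f)
            return 1;
        printf("%s\n", has_textrel(f) ? "TEXTREL" : "clean");
        fclose(f);
        return 0;
    }
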
49525diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
49526index d50bbe5..af3b649 100644
49527--- a/fs/binfmt_flat.c
49528+++ b/fs/binfmt_flat.c
49529@@ -566,7 +566,9 @@ static int load_flat_file(struct linux_binprm * bprm,
49530 realdatastart = (unsigned long) -ENOMEM;
49531 printk("Unable to allocate RAM for process data, errno %d\n",
49532 (int)-realdatastart);
49533+ down_write(&current->mm->mmap_sem);
49534 vm_munmap(textpos, text_len);
49535+ up_write(&current->mm->mmap_sem);
49536 ret = realdatastart;
49537 goto err;
49538 }
49539@@ -590,8 +592,10 @@ static int load_flat_file(struct linux_binprm * bprm,
49540 }
49541 if (IS_ERR_VALUE(result)) {
49542 printk("Unable to read data+bss, errno %d\n", (int)-result);
49543+ down_write(&current->mm->mmap_sem);
49544 vm_munmap(textpos, text_len);
49545 vm_munmap(realdatastart, len);
49546+ up_write(&current->mm->mmap_sem);
49547 ret = result;
49548 goto err;
49549 }
49550@@ -653,8 +657,10 @@ static int load_flat_file(struct linux_binprm * bprm,
49551 }
49552 if (IS_ERR_VALUE(result)) {
49553 printk("Unable to read code+data+bss, errno %d\n",(int)-result);
49554+ down_write(&current->mm->mmap_sem);
49555 vm_munmap(textpos, text_len + data_len + extra +
49556 MAX_SHARED_LIBS * sizeof(unsigned long));
49557+ up_write(&current->mm->mmap_sem);
49558 ret = result;
49559 goto err;
49560 }
49561diff --git a/fs/bio.c b/fs/bio.c
49562index 94bbc04..6fe78a4 100644
49563--- a/fs/bio.c
49564+++ b/fs/bio.c
49565@@ -1096,7 +1096,7 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
49566 /*
49567 * Overflow, abort
49568 */
49569- if (end < start)
49570+ if (end < start || end - start > INT_MAX - nr_pages)
49571 return ERR_PTR(-EINVAL);
49572
49573 nr_pages += end - start;
49574@@ -1230,7 +1230,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
49575 /*
49576 * Overflow, abort
49577 */
49578- if (end < start)
49579+ if (end < start || end - start > INT_MAX - nr_pages)
49580 return ERR_PTR(-EINVAL);
49581
49582 nr_pages += end - start;
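
Both bio.c hunks harden the iovec page counting: end < start only catches a wrapped range, while the added end - start > INT_MAX - nr_pages clause also guarantees that the running nr_pages total, an int, cannot overflow as ranges are summed. A sketch of the guarded accumulation:

    #include <limits.h>
    #include <stdio.h>

    /* Add a page-range length to a running int total, rejecting both a
     * wrapped range and accumulator overflow (mirroring the patched checks). */
    static int add_pages(int nr_pages, unsigned long start, unsigned long end)
    {
        if (end < start || end - start > (unsigned long)(INT_MAX - nr_pages))
            return -1;                              /* -EINVAL in the kernel */
        return nr_pages + (int)(end - start);
    }

    int main(void)
    {
        int n = add_pages(0, 0, 5);                  /* fine: n == 5 */
        n = add_pages(n, 0, (unsigned long)INT_MAX); /* would overflow: -1 */
        printf("%d\n", n);
        return 0;
    }
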
49583@@ -1492,7 +1492,7 @@ static void bio_copy_kern_endio(struct bio *bio, int err)
49584 const int read = bio_data_dir(bio) == READ;
49585 struct bio_map_data *bmd = bio->bi_private;
49586 int i;
49587- char *p = bmd->sgvecs[0].iov_base;
49588+ char *p = (char __force_kernel *)bmd->sgvecs[0].iov_base;
49589
49590 bio_for_each_segment_all(bvec, bio, i) {
49591 char *addr = page_address(bvec->bv_page);
49592diff --git a/fs/block_dev.c b/fs/block_dev.c
49593index 2091db8..81dafe9 100644
49594--- a/fs/block_dev.c
49595+++ b/fs/block_dev.c
49596@@ -651,7 +651,7 @@ static bool bd_may_claim(struct block_device *bdev, struct block_device *whole,
49597 else if (bdev->bd_contains == bdev)
49598 return true; /* is a whole device which isn't held */
49599
49600- else if (whole->bd_holder == bd_may_claim)
49601+ else if (whole->bd_holder == (void *)bd_may_claim)
49602 return true; /* is a partition of a device that is being partitioned */
49603 else if (whole->bd_holder != NULL)
49604 return false; /* is a partition of a held device */
49605diff --git a/fs/btrfs/ctree.c b/fs/btrfs/ctree.c
49606index 7fb054b..ad36c67 100644
49607--- a/fs/btrfs/ctree.c
49608+++ b/fs/btrfs/ctree.c
49609@@ -1076,9 +1076,12 @@ static noinline int __btrfs_cow_block(struct btrfs_trans_handle *trans,
49610 free_extent_buffer(buf);
49611 add_root_to_dirty_list(root);
49612 } else {
49613- if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID)
49614- parent_start = parent->start;
49615- else
49616+ if (root->root_key.objectid == BTRFS_TREE_RELOC_OBJECTID) {
49617+ if (parent)
49618+ parent_start = parent->start;
49619+ else
49620+ parent_start = 0;
49621+ } else
49622 parent_start = 0;
49623
49624 WARN_ON(trans->transid != btrfs_header_generation(parent));
49625diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
49626index 0f81d67..0ad55fe 100644
49627--- a/fs/btrfs/ioctl.c
49628+++ b/fs/btrfs/ioctl.c
49629@@ -3084,9 +3084,12 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
49630 for (i = 0; i < num_types; i++) {
49631 struct btrfs_space_info *tmp;
49632
49633+ /* Don't copy in more than we allocated */
49634 if (!slot_count)
49635 break;
49636
49637+ slot_count--;
49638+
49639 info = NULL;
49640 rcu_read_lock();
49641 list_for_each_entry_rcu(tmp, &root->fs_info->space_info,
49642@@ -3108,10 +3111,7 @@ static long btrfs_ioctl_space_info(struct btrfs_root *root, void __user *arg)
49643 memcpy(dest, &space, sizeof(space));
49644 dest++;
49645 space_args.total_spaces++;
49646- slot_count--;
49647 }
49648- if (!slot_count)
49649- break;
49650 }
49651 up_read(&info->groups_sem);
49652 }
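
The btrfs_ioctl_space_info fix consumes the slot budget before an entry is written rather than after, so even with the extra exit paths in the inner loop the user buffer can never receive more entries than space_args allowed for. A sketch of the reserve-then-write pattern:

    #include <stdio.h>

    /* Fill at most 'slots' entries from a producer; decrementing the budget
     * up front (as the patch does) makes over-copy impossible even if the
     * inner loop later gains extra exit paths. */
    static int fill_bounded(int *dst, int slots, int navail)
    {
        int written = 0;
        for (int i = 0; i < navail; i++) {
            if (!slots)
                break;
            slots--;             /* reserve the slot before writing it */
            dst[written++] = i;  /* stand-in for memcpy of a space_info */
        }
        return written;
    }

    int main(void)
    {
        int buf[3];
        printf("%d\n", fill_bounded(buf, 3, 10)); /* 3: capped at allocation */
        return 0;
    }
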
49653diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
49654index f0857e0..e7023c5 100644
49655--- a/fs/btrfs/super.c
49656+++ b/fs/btrfs/super.c
49657@@ -265,7 +265,7 @@ void __btrfs_abort_transaction(struct btrfs_trans_handle *trans,
49658 function, line, errstr);
49659 return;
49660 }
49661- ACCESS_ONCE(trans->transaction->aborted) = errno;
49662+ ACCESS_ONCE_RW(trans->transaction->aborted) = errno;
49663 __btrfs_std_error(root->fs_info, function, line, errno, NULL);
49664 }
49665 /*
49666diff --git a/fs/buffer.c b/fs/buffer.c
49667index d2a4d1b..df798ca 100644
49668--- a/fs/buffer.c
49669+++ b/fs/buffer.c
49670@@ -3367,7 +3367,7 @@ void __init buffer_init(void)
49671 bh_cachep = kmem_cache_create("buffer_head",
49672 sizeof(struct buffer_head), 0,
49673 (SLAB_RECLAIM_ACCOUNT|SLAB_PANIC|
49674- SLAB_MEM_SPREAD),
49675+ SLAB_MEM_SPREAD|SLAB_NO_SANITIZE),
49676 NULL);
49677
49678 /*
49679diff --git a/fs/cachefiles/bind.c b/fs/cachefiles/bind.c
49680index 622f469..e8d2d55 100644
49681--- a/fs/cachefiles/bind.c
49682+++ b/fs/cachefiles/bind.c
49683@@ -39,13 +39,11 @@ int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
49684 args);
49685
49686 /* start by checking things over */
49687- ASSERT(cache->fstop_percent >= 0 &&
49688- cache->fstop_percent < cache->fcull_percent &&
49689+ ASSERT(cache->fstop_percent < cache->fcull_percent &&
49690 cache->fcull_percent < cache->frun_percent &&
49691 cache->frun_percent < 100);
49692
49693- ASSERT(cache->bstop_percent >= 0 &&
49694- cache->bstop_percent < cache->bcull_percent &&
49695+ ASSERT(cache->bstop_percent < cache->bcull_percent &&
49696 cache->bcull_percent < cache->brun_percent &&
49697 cache->brun_percent < 100);
49698
49699diff --git a/fs/cachefiles/daemon.c b/fs/cachefiles/daemon.c
49700index 0a1467b..6a53245 100644
49701--- a/fs/cachefiles/daemon.c
49702+++ b/fs/cachefiles/daemon.c
49703@@ -196,7 +196,7 @@ static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
49704 if (n > buflen)
49705 return -EMSGSIZE;
49706
49707- if (copy_to_user(_buffer, buffer, n) != 0)
49708+ if (n > sizeof(buffer) || copy_to_user(_buffer, buffer, n) != 0)
49709 return -EFAULT;
49710
49711 return n;
49712@@ -222,7 +222,7 @@ static ssize_t cachefiles_daemon_write(struct file *file,
49713 if (test_bit(CACHEFILES_DEAD, &cache->flags))
49714 return -EIO;
49715
49716- if (datalen < 0 || datalen > PAGE_SIZE - 1)
49717+ if (datalen > PAGE_SIZE - 1)
49718 return -EOPNOTSUPP;
49719
49720 /* drag the command string into the kernel so we can parse it */
49721@@ -386,7 +386,7 @@ static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
49722 if (args[0] != '%' || args[1] != '\0')
49723 return -EINVAL;
49724
49725- if (fstop < 0 || fstop >= cache->fcull_percent)
49726+ if (fstop >= cache->fcull_percent)
49727 return cachefiles_daemon_range_error(cache, args);
49728
49729 cache->fstop_percent = fstop;
49730@@ -458,7 +458,7 @@ static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
49731 if (args[0] != '%' || args[1] != '\0')
49732 return -EINVAL;
49733
49734- if (bstop < 0 || bstop >= cache->bcull_percent)
49735+ if (bstop >= cache->bcull_percent)
49736 return cachefiles_daemon_range_error(cache, args);
49737
49738 cache->bstop_percent = bstop;
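
The removed comparisons (datalen < 0, fstop < 0, bstop < 0) all test unsigned quantities and are therefore always false; the surviving upper-bound checks do the real work, and the read path additionally refuses to copy more than sizeof(buffer) out. A sketch of the always-false comparison, which most compilers flag under -Wtype-limits:

    #include <stddef.h>
    #include <stdio.h>

    int main(void)
    {
        size_t datalen = (size_t)-1;  /* a "negative" value wraps to huge */
        if (datalen < 0)              /* always false for an unsigned type */
            puts("never printed");
        if (datalen > 4096 - 1)       /* the surviving upper-bound check */
            puts("rejected: too large");
        return 0;
    }
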
49739diff --git a/fs/cachefiles/internal.h b/fs/cachefiles/internal.h
49740index 4938251..7e01445 100644
49741--- a/fs/cachefiles/internal.h
49742+++ b/fs/cachefiles/internal.h
49743@@ -59,7 +59,7 @@ struct cachefiles_cache {
49744 wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
49745 struct rb_root active_nodes; /* active nodes (can't be culled) */
49746 rwlock_t active_lock; /* lock for active_nodes */
49747- atomic_t gravecounter; /* graveyard uniquifier */
49748+ atomic_unchecked_t gravecounter; /* graveyard uniquifier */
49749 unsigned frun_percent; /* when to stop culling (% files) */
49750 unsigned fcull_percent; /* when to start culling (% files) */
49751 unsigned fstop_percent; /* when to stop allocating (% files) */
49752@@ -171,19 +171,19 @@ extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
49753 * proc.c
49754 */
49755 #ifdef CONFIG_CACHEFILES_HISTOGRAM
49756-extern atomic_t cachefiles_lookup_histogram[HZ];
49757-extern atomic_t cachefiles_mkdir_histogram[HZ];
49758-extern atomic_t cachefiles_create_histogram[HZ];
49759+extern atomic_unchecked_t cachefiles_lookup_histogram[HZ];
49760+extern atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
49761+extern atomic_unchecked_t cachefiles_create_histogram[HZ];
49762
49763 extern int __init cachefiles_proc_init(void);
49764 extern void cachefiles_proc_cleanup(void);
49765 static inline
49766-void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
49767+void cachefiles_hist(atomic_unchecked_t histogram[], unsigned long start_jif)
49768 {
49769 unsigned long jif = jiffies - start_jif;
49770 if (jif >= HZ)
49771 jif = HZ - 1;
49772- atomic_inc(&histogram[jif]);
49773+ atomic_inc_unchecked(&histogram[jif]);
49774 }
49775
49776 #else
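
This section's counter conversions, and the cifs, coda, and coredump ones below, all follow one pattern: under the PaX REFCOUNT hardening, plain atomic_t arithmetic traps on overflow, so purely statistical counters that may legitimately wrap (histograms, grave numbers, allocation totals) move to atomic_unchecked_t, which keeps the old wrapping behaviour. A conceptual sketch only; the real types live in per-arch kernel headers with asm implementations:

    #include <limits.h>
    #include <stdio.h>
    #include <stdlib.h>

    typedef struct { int counter; } atomic_t;           /* overflow trapped */
    typedef struct { int counter; } atomic_unchecked_t; /* free to wrap */

    static void atomic_inc(atomic_t *v)
    {
        if (v->counter == INT_MAX)
            abort();        /* stand-in for the REFCOUNT overflow trap */
        v->counter++;
    }

    static void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
        /* wrap via unsigned math so the sketch avoids signed-overflow UB */
        v->counter = (int)((unsigned int)v->counter + 1u);
    }

    int main(void)
    {
        atomic_t ref = { 0 };
        atomic_unchecked_t hist = { INT_MAX };

        atomic_inc(&ref);             /* refcount: overflow would trap */
        atomic_inc_unchecked(&hist);  /* statistics: wrap is harmless */
        printf("ref=%d hist=%d\n", ref.counter, hist.counter);
        return 0;
    }
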
49777diff --git a/fs/cachefiles/namei.c b/fs/cachefiles/namei.c
49778index 8c01c5fc..15f982e 100644
49779--- a/fs/cachefiles/namei.c
49780+++ b/fs/cachefiles/namei.c
49781@@ -317,7 +317,7 @@ try_again:
49782 /* first step is to make up a grave dentry in the graveyard */
49783 sprintf(nbuffer, "%08x%08x",
49784 (uint32_t) get_seconds(),
49785- (uint32_t) atomic_inc_return(&cache->gravecounter));
49786+ (uint32_t) atomic_inc_return_unchecked(&cache->gravecounter));
49787
49788 /* do the multiway lock magic */
49789 trap = lock_rename(cache->graveyard, dir);
49790diff --git a/fs/cachefiles/proc.c b/fs/cachefiles/proc.c
49791index eccd339..4c1d995 100644
49792--- a/fs/cachefiles/proc.c
49793+++ b/fs/cachefiles/proc.c
49794@@ -14,9 +14,9 @@
49795 #include <linux/seq_file.h>
49796 #include "internal.h"
49797
49798-atomic_t cachefiles_lookup_histogram[HZ];
49799-atomic_t cachefiles_mkdir_histogram[HZ];
49800-atomic_t cachefiles_create_histogram[HZ];
49801+atomic_unchecked_t cachefiles_lookup_histogram[HZ];
49802+atomic_unchecked_t cachefiles_mkdir_histogram[HZ];
49803+atomic_unchecked_t cachefiles_create_histogram[HZ];
49804
49805 /*
49806 * display the latency histogram
49807@@ -35,9 +35,9 @@ static int cachefiles_histogram_show(struct seq_file *m, void *v)
49808 return 0;
49809 default:
49810 index = (unsigned long) v - 3;
49811- x = atomic_read(&cachefiles_lookup_histogram[index]);
49812- y = atomic_read(&cachefiles_mkdir_histogram[index]);
49813- z = atomic_read(&cachefiles_create_histogram[index]);
49814+ x = atomic_read_unchecked(&cachefiles_lookup_histogram[index]);
49815+ y = atomic_read_unchecked(&cachefiles_mkdir_histogram[index]);
49816+ z = atomic_read_unchecked(&cachefiles_create_histogram[index]);
49817 if (x == 0 && y == 0 && z == 0)
49818 return 0;
49819
49820diff --git a/fs/cachefiles/rdwr.c b/fs/cachefiles/rdwr.c
49821index 317f9ee..3d24511 100644
49822--- a/fs/cachefiles/rdwr.c
49823+++ b/fs/cachefiles/rdwr.c
49824@@ -966,7 +966,7 @@ int cachefiles_write_page(struct fscache_storage *op, struct page *page)
49825 old_fs = get_fs();
49826 set_fs(KERNEL_DS);
49827 ret = file->f_op->write(
49828- file, (const void __user *) data, len, &pos);
49829+ file, (const void __force_user *) data, len, &pos);
49830 set_fs(old_fs);
49831 kunmap(page);
49832 file_end_write(file);
49833diff --git a/fs/ceph/dir.c b/fs/ceph/dir.c
49834index f02d82b..2632cf86 100644
49835--- a/fs/ceph/dir.c
49836+++ b/fs/ceph/dir.c
49837@@ -243,7 +243,7 @@ static int ceph_readdir(struct file *filp, void *dirent, filldir_t filldir)
49838 struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
49839 struct ceph_mds_client *mdsc = fsc->mdsc;
49840 unsigned frag = fpos_frag(filp->f_pos);
49841- int off = fpos_off(filp->f_pos);
49842+ unsigned int off = fpos_off(filp->f_pos);
49843 int err;
49844 u32 ftype;
49845 struct ceph_mds_reply_info_parsed *rinfo;
49846diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
49847index d597483..747901b 100644
49848--- a/fs/cifs/cifs_debug.c
49849+++ b/fs/cifs/cifs_debug.c
49850@@ -284,8 +284,8 @@ static ssize_t cifs_stats_proc_write(struct file *file,
49851
49852 if (c == '1' || c == 'y' || c == 'Y' || c == '0') {
49853 #ifdef CONFIG_CIFS_STATS2
49854- atomic_set(&totBufAllocCount, 0);
49855- atomic_set(&totSmBufAllocCount, 0);
49856+ atomic_set_unchecked(&totBufAllocCount, 0);
49857+ atomic_set_unchecked(&totSmBufAllocCount, 0);
49858 #endif /* CONFIG_CIFS_STATS2 */
49859 spin_lock(&cifs_tcp_ses_lock);
49860 list_for_each(tmp1, &cifs_tcp_ses_list) {
49861@@ -298,7 +298,7 @@ static ssize_t cifs_stats_proc_write(struct file *file,
49862 tcon = list_entry(tmp3,
49863 struct cifs_tcon,
49864 tcon_list);
49865- atomic_set(&tcon->num_smbs_sent, 0);
49866+ atomic_set_unchecked(&tcon->num_smbs_sent, 0);
49867 if (server->ops->clear_stats)
49868 server->ops->clear_stats(tcon);
49869 }
49870@@ -330,8 +330,8 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
49871 smBufAllocCount.counter, cifs_min_small);
49872 #ifdef CONFIG_CIFS_STATS2
49873 seq_printf(m, "Total Large %d Small %d Allocations\n",
49874- atomic_read(&totBufAllocCount),
49875- atomic_read(&totSmBufAllocCount));
49876+ atomic_read_unchecked(&totBufAllocCount),
49877+ atomic_read_unchecked(&totSmBufAllocCount));
49878 #endif /* CONFIG_CIFS_STATS2 */
49879
49880 seq_printf(m, "Operations (MIDs): %d\n", atomic_read(&midCount));
49881@@ -360,7 +360,7 @@ static int cifs_stats_proc_show(struct seq_file *m, void *v)
49882 if (tcon->need_reconnect)
49883 seq_puts(m, "\tDISCONNECTED ");
49884 seq_printf(m, "\nSMBs: %d",
49885- atomic_read(&tcon->num_smbs_sent));
49886+ atomic_read_unchecked(&tcon->num_smbs_sent));
49887 if (server->ops->print_stats)
49888 server->ops->print_stats(m, tcon);
49889 }
49890diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
49891index 3752b9f..8db5569 100644
49892--- a/fs/cifs/cifsfs.c
49893+++ b/fs/cifs/cifsfs.c
49894@@ -1035,7 +1035,7 @@ cifs_init_request_bufs(void)
49895 */
49896 cifs_req_cachep = kmem_cache_create("cifs_request",
49897 CIFSMaxBufSize + max_hdr_size, 0,
49898- SLAB_HWCACHE_ALIGN, NULL);
49899+ SLAB_HWCACHE_ALIGN | SLAB_USERCOPY, NULL);
49900 if (cifs_req_cachep == NULL)
49901 return -ENOMEM;
49902
49903@@ -1062,7 +1062,7 @@ cifs_init_request_bufs(void)
49904 efficient to alloc 1 per page off the slab compared to 17K (5page)
49905 alloc of large cifs buffers even when page debugging is on */
49906 cifs_sm_req_cachep = kmem_cache_create("cifs_small_rq",
49907- MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN,
49908+ MAX_CIFS_SMALL_BUFFER_SIZE, 0, SLAB_HWCACHE_ALIGN | SLAB_USERCOPY,
49909 NULL);
49910 if (cifs_sm_req_cachep == NULL) {
49911 mempool_destroy(cifs_req_poolp);
49912@@ -1147,8 +1147,8 @@ init_cifs(void)
49913 atomic_set(&bufAllocCount, 0);
49914 atomic_set(&smBufAllocCount, 0);
49915 #ifdef CONFIG_CIFS_STATS2
49916- atomic_set(&totBufAllocCount, 0);
49917- atomic_set(&totSmBufAllocCount, 0);
49918+ atomic_set_unchecked(&totBufAllocCount, 0);
49919+ atomic_set_unchecked(&totSmBufAllocCount, 0);
49920 #endif /* CONFIG_CIFS_STATS2 */
49921
49922 atomic_set(&midCount, 0);
49923diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
49924index 4f07f6f..55de8ce 100644
49925--- a/fs/cifs/cifsglob.h
49926+++ b/fs/cifs/cifsglob.h
49927@@ -751,35 +751,35 @@ struct cifs_tcon {
49928 __u16 Flags; /* optional support bits */
49929 enum statusEnum tidStatus;
49930 #ifdef CONFIG_CIFS_STATS
49931- atomic_t num_smbs_sent;
49932+ atomic_unchecked_t num_smbs_sent;
49933 union {
49934 struct {
49935- atomic_t num_writes;
49936- atomic_t num_reads;
49937- atomic_t num_flushes;
49938- atomic_t num_oplock_brks;
49939- atomic_t num_opens;
49940- atomic_t num_closes;
49941- atomic_t num_deletes;
49942- atomic_t num_mkdirs;
49943- atomic_t num_posixopens;
49944- atomic_t num_posixmkdirs;
49945- atomic_t num_rmdirs;
49946- atomic_t num_renames;
49947- atomic_t num_t2renames;
49948- atomic_t num_ffirst;
49949- atomic_t num_fnext;
49950- atomic_t num_fclose;
49951- atomic_t num_hardlinks;
49952- atomic_t num_symlinks;
49953- atomic_t num_locks;
49954- atomic_t num_acl_get;
49955- atomic_t num_acl_set;
49956+ atomic_unchecked_t num_writes;
49957+ atomic_unchecked_t num_reads;
49958+ atomic_unchecked_t num_flushes;
49959+ atomic_unchecked_t num_oplock_brks;
49960+ atomic_unchecked_t num_opens;
49961+ atomic_unchecked_t num_closes;
49962+ atomic_unchecked_t num_deletes;
49963+ atomic_unchecked_t num_mkdirs;
49964+ atomic_unchecked_t num_posixopens;
49965+ atomic_unchecked_t num_posixmkdirs;
49966+ atomic_unchecked_t num_rmdirs;
49967+ atomic_unchecked_t num_renames;
49968+ atomic_unchecked_t num_t2renames;
49969+ atomic_unchecked_t num_ffirst;
49970+ atomic_unchecked_t num_fnext;
49971+ atomic_unchecked_t num_fclose;
49972+ atomic_unchecked_t num_hardlinks;
49973+ atomic_unchecked_t num_symlinks;
49974+ atomic_unchecked_t num_locks;
49975+ atomic_unchecked_t num_acl_get;
49976+ atomic_unchecked_t num_acl_set;
49977 } cifs_stats;
49978 #ifdef CONFIG_CIFS_SMB2
49979 struct {
49980- atomic_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
49981- atomic_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
49982+ atomic_unchecked_t smb2_com_sent[NUMBER_OF_SMB2_COMMANDS];
49983+ atomic_unchecked_t smb2_com_failed[NUMBER_OF_SMB2_COMMANDS];
49984 } smb2_stats;
49985 #endif /* CONFIG_CIFS_SMB2 */
49986 } stats;
49987@@ -1080,7 +1080,7 @@ convert_delimiter(char *path, char delim)
49988 }
49989
49990 #ifdef CONFIG_CIFS_STATS
49991-#define cifs_stats_inc atomic_inc
49992+#define cifs_stats_inc atomic_inc_unchecked
49993
49994 static inline void cifs_stats_bytes_written(struct cifs_tcon *tcon,
49995 unsigned int bytes)
49996@@ -1445,8 +1445,8 @@ GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
49997 /* Various Debug counters */
49998 GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
49999 #ifdef CONFIG_CIFS_STATS2
50000-GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
50001-GLOBAL_EXTERN atomic_t totSmBufAllocCount;
50002+GLOBAL_EXTERN atomic_unchecked_t totBufAllocCount; /* total allocated over all time */
50003+GLOBAL_EXTERN atomic_unchecked_t totSmBufAllocCount;
50004 #endif
50005 GLOBAL_EXTERN atomic_t smBufAllocCount;
50006 GLOBAL_EXTERN atomic_t midCount;
50007diff --git a/fs/cifs/link.c b/fs/cifs/link.c
50008index b83c3f5..6437caa 100644
50009--- a/fs/cifs/link.c
50010+++ b/fs/cifs/link.c
50011@@ -616,7 +616,7 @@ symlink_exit:
50012
50013 void cifs_put_link(struct dentry *direntry, struct nameidata *nd, void *cookie)
50014 {
50015- char *p = nd_get_link(nd);
50016+ const char *p = nd_get_link(nd);
50017 if (!IS_ERR(p))
50018 kfree(p);
50019 }
50020diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
50021index 1bec014..f329411 100644
50022--- a/fs/cifs/misc.c
50023+++ b/fs/cifs/misc.c
50024@@ -169,7 +169,7 @@ cifs_buf_get(void)
50025 memset(ret_buf, 0, buf_size + 3);
50026 atomic_inc(&bufAllocCount);
50027 #ifdef CONFIG_CIFS_STATS2
50028- atomic_inc(&totBufAllocCount);
50029+ atomic_inc_unchecked(&totBufAllocCount);
50030 #endif /* CONFIG_CIFS_STATS2 */
50031 }
50032
50033@@ -204,7 +204,7 @@ cifs_small_buf_get(void)
50034 /* memset(ret_buf, 0, sizeof(struct smb_hdr) + 27);*/
50035 atomic_inc(&smBufAllocCount);
50036 #ifdef CONFIG_CIFS_STATS2
50037- atomic_inc(&totSmBufAllocCount);
50038+ atomic_inc_unchecked(&totSmBufAllocCount);
50039 #endif /* CONFIG_CIFS_STATS2 */
50040
50041 }
50042diff --git a/fs/cifs/smb1ops.c b/fs/cifs/smb1ops.c
50043index 3efdb9d..e845a5e 100644
50044--- a/fs/cifs/smb1ops.c
50045+++ b/fs/cifs/smb1ops.c
50046@@ -591,27 +591,27 @@ static void
50047 cifs_clear_stats(struct cifs_tcon *tcon)
50048 {
50049 #ifdef CONFIG_CIFS_STATS
50050- atomic_set(&tcon->stats.cifs_stats.num_writes, 0);
50051- atomic_set(&tcon->stats.cifs_stats.num_reads, 0);
50052- atomic_set(&tcon->stats.cifs_stats.num_flushes, 0);
50053- atomic_set(&tcon->stats.cifs_stats.num_oplock_brks, 0);
50054- atomic_set(&tcon->stats.cifs_stats.num_opens, 0);
50055- atomic_set(&tcon->stats.cifs_stats.num_posixopens, 0);
50056- atomic_set(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
50057- atomic_set(&tcon->stats.cifs_stats.num_closes, 0);
50058- atomic_set(&tcon->stats.cifs_stats.num_deletes, 0);
50059- atomic_set(&tcon->stats.cifs_stats.num_mkdirs, 0);
50060- atomic_set(&tcon->stats.cifs_stats.num_rmdirs, 0);
50061- atomic_set(&tcon->stats.cifs_stats.num_renames, 0);
50062- atomic_set(&tcon->stats.cifs_stats.num_t2renames, 0);
50063- atomic_set(&tcon->stats.cifs_stats.num_ffirst, 0);
50064- atomic_set(&tcon->stats.cifs_stats.num_fnext, 0);
50065- atomic_set(&tcon->stats.cifs_stats.num_fclose, 0);
50066- atomic_set(&tcon->stats.cifs_stats.num_hardlinks, 0);
50067- atomic_set(&tcon->stats.cifs_stats.num_symlinks, 0);
50068- atomic_set(&tcon->stats.cifs_stats.num_locks, 0);
50069- atomic_set(&tcon->stats.cifs_stats.num_acl_get, 0);
50070- atomic_set(&tcon->stats.cifs_stats.num_acl_set, 0);
50071+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_writes, 0);
50072+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_reads, 0);
50073+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_flushes, 0);
50074+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_oplock_brks, 0);
50075+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_opens, 0);
50076+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixopens, 0);
50077+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs, 0);
50078+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_closes, 0);
50079+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_deletes, 0);
50080+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_mkdirs, 0);
50081+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_rmdirs, 0);
50082+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_renames, 0);
50083+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_t2renames, 0);
50084+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_ffirst, 0);
50085+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fnext, 0);
50086+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_fclose, 0);
50087+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_hardlinks, 0);
50088+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_symlinks, 0);
50089+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_locks, 0);
50090+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_get, 0);
50091+ atomic_set_unchecked(&tcon->stats.cifs_stats.num_acl_set, 0);
50092 #endif
50093 }
50094
50095@@ -620,36 +620,36 @@ cifs_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
50096 {
50097 #ifdef CONFIG_CIFS_STATS
50098 seq_printf(m, " Oplocks breaks: %d",
50099- atomic_read(&tcon->stats.cifs_stats.num_oplock_brks));
50100+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_oplock_brks));
50101 seq_printf(m, "\nReads: %d Bytes: %llu",
50102- atomic_read(&tcon->stats.cifs_stats.num_reads),
50103+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_reads),
50104 (long long)(tcon->bytes_read));
50105 seq_printf(m, "\nWrites: %d Bytes: %llu",
50106- atomic_read(&tcon->stats.cifs_stats.num_writes),
50107+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_writes),
50108 (long long)(tcon->bytes_written));
50109 seq_printf(m, "\nFlushes: %d",
50110- atomic_read(&tcon->stats.cifs_stats.num_flushes));
50111+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_flushes));
50112 seq_printf(m, "\nLocks: %d HardLinks: %d Symlinks: %d",
50113- atomic_read(&tcon->stats.cifs_stats.num_locks),
50114- atomic_read(&tcon->stats.cifs_stats.num_hardlinks),
50115- atomic_read(&tcon->stats.cifs_stats.num_symlinks));
50116+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_locks),
50117+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_hardlinks),
50118+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_symlinks));
50119 seq_printf(m, "\nOpens: %d Closes: %d Deletes: %d",
50120- atomic_read(&tcon->stats.cifs_stats.num_opens),
50121- atomic_read(&tcon->stats.cifs_stats.num_closes),
50122- atomic_read(&tcon->stats.cifs_stats.num_deletes));
50123+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_opens),
50124+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_closes),
50125+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_deletes));
50126 seq_printf(m, "\nPosix Opens: %d Posix Mkdirs: %d",
50127- atomic_read(&tcon->stats.cifs_stats.num_posixopens),
50128- atomic_read(&tcon->stats.cifs_stats.num_posixmkdirs));
50129+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixopens),
50130+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_posixmkdirs));
50131 seq_printf(m, "\nMkdirs: %d Rmdirs: %d",
50132- atomic_read(&tcon->stats.cifs_stats.num_mkdirs),
50133- atomic_read(&tcon->stats.cifs_stats.num_rmdirs));
50134+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_mkdirs),
50135+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_rmdirs));
50136 seq_printf(m, "\nRenames: %d T2 Renames %d",
50137- atomic_read(&tcon->stats.cifs_stats.num_renames),
50138- atomic_read(&tcon->stats.cifs_stats.num_t2renames));
50139+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_renames),
50140+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_t2renames));
50141 seq_printf(m, "\nFindFirst: %d FNext %d FClose %d",
50142- atomic_read(&tcon->stats.cifs_stats.num_ffirst),
50143- atomic_read(&tcon->stats.cifs_stats.num_fnext),
50144- atomic_read(&tcon->stats.cifs_stats.num_fclose));
50145+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_ffirst),
50146+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fnext),
50147+ atomic_read_unchecked(&tcon->stats.cifs_stats.num_fclose));
50148 #endif
50149 }
50150
50151diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
50152index f2e76f3..c44fac7 100644
50153--- a/fs/cifs/smb2ops.c
50154+++ b/fs/cifs/smb2ops.c
50155@@ -274,8 +274,8 @@ smb2_clear_stats(struct cifs_tcon *tcon)
50156 #ifdef CONFIG_CIFS_STATS
50157 int i;
50158 for (i = 0; i < NUMBER_OF_SMB2_COMMANDS; i++) {
50159- atomic_set(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
50160- atomic_set(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
50161+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_sent[i], 0);
50162+ atomic_set_unchecked(&tcon->stats.smb2_stats.smb2_com_failed[i], 0);
50163 }
50164 #endif
50165 }
50166@@ -284,66 +284,66 @@ static void
50167 smb2_print_stats(struct seq_file *m, struct cifs_tcon *tcon)
50168 {
50169 #ifdef CONFIG_CIFS_STATS
50170- atomic_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
50171- atomic_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
50172+ atomic_unchecked_t *sent = tcon->stats.smb2_stats.smb2_com_sent;
50173+ atomic_unchecked_t *failed = tcon->stats.smb2_stats.smb2_com_failed;
50174 seq_printf(m, "\nNegotiates: %d sent %d failed",
50175- atomic_read(&sent[SMB2_NEGOTIATE_HE]),
50176- atomic_read(&failed[SMB2_NEGOTIATE_HE]));
50177+ atomic_read_unchecked(&sent[SMB2_NEGOTIATE_HE]),
50178+ atomic_read_unchecked(&failed[SMB2_NEGOTIATE_HE]));
50179 seq_printf(m, "\nSessionSetups: %d sent %d failed",
50180- atomic_read(&sent[SMB2_SESSION_SETUP_HE]),
50181- atomic_read(&failed[SMB2_SESSION_SETUP_HE]));
50182+ atomic_read_unchecked(&sent[SMB2_SESSION_SETUP_HE]),
50183+ atomic_read_unchecked(&failed[SMB2_SESSION_SETUP_HE]));
50184 #define SMB2LOGOFF 0x0002 /* trivial request/resp */
50185 seq_printf(m, "\nLogoffs: %d sent %d failed",
50186- atomic_read(&sent[SMB2_LOGOFF_HE]),
50187- atomic_read(&failed[SMB2_LOGOFF_HE]));
50188+ atomic_read_unchecked(&sent[SMB2_LOGOFF_HE]),
50189+ atomic_read_unchecked(&failed[SMB2_LOGOFF_HE]));
50190 seq_printf(m, "\nTreeConnects: %d sent %d failed",
50191- atomic_read(&sent[SMB2_TREE_CONNECT_HE]),
50192- atomic_read(&failed[SMB2_TREE_CONNECT_HE]));
50193+ atomic_read_unchecked(&sent[SMB2_TREE_CONNECT_HE]),
50194+ atomic_read_unchecked(&failed[SMB2_TREE_CONNECT_HE]));
50195 seq_printf(m, "\nTreeDisconnects: %d sent %d failed",
50196- atomic_read(&sent[SMB2_TREE_DISCONNECT_HE]),
50197- atomic_read(&failed[SMB2_TREE_DISCONNECT_HE]));
50198+ atomic_read_unchecked(&sent[SMB2_TREE_DISCONNECT_HE]),
50199+ atomic_read_unchecked(&failed[SMB2_TREE_DISCONNECT_HE]));
50200 seq_printf(m, "\nCreates: %d sent %d failed",
50201- atomic_read(&sent[SMB2_CREATE_HE]),
50202- atomic_read(&failed[SMB2_CREATE_HE]));
50203+ atomic_read_unchecked(&sent[SMB2_CREATE_HE]),
50204+ atomic_read_unchecked(&failed[SMB2_CREATE_HE]));
50205 seq_printf(m, "\nCloses: %d sent %d failed",
50206- atomic_read(&sent[SMB2_CLOSE_HE]),
50207- atomic_read(&failed[SMB2_CLOSE_HE]));
50208+ atomic_read_unchecked(&sent[SMB2_CLOSE_HE]),
50209+ atomic_read_unchecked(&failed[SMB2_CLOSE_HE]));
50210 seq_printf(m, "\nFlushes: %d sent %d failed",
50211- atomic_read(&sent[SMB2_FLUSH_HE]),
50212- atomic_read(&failed[SMB2_FLUSH_HE]));
50213+ atomic_read_unchecked(&sent[SMB2_FLUSH_HE]),
50214+ atomic_read_unchecked(&failed[SMB2_FLUSH_HE]));
50215 seq_printf(m, "\nReads: %d sent %d failed",
50216- atomic_read(&sent[SMB2_READ_HE]),
50217- atomic_read(&failed[SMB2_READ_HE]));
50218+ atomic_read_unchecked(&sent[SMB2_READ_HE]),
50219+ atomic_read_unchecked(&failed[SMB2_READ_HE]));
50220 seq_printf(m, "\nWrites: %d sent %d failed",
50221- atomic_read(&sent[SMB2_WRITE_HE]),
50222- atomic_read(&failed[SMB2_WRITE_HE]));
50223+ atomic_read_unchecked(&sent[SMB2_WRITE_HE]),
50224+ atomic_read_unchecked(&failed[SMB2_WRITE_HE]));
50225 seq_printf(m, "\nLocks: %d sent %d failed",
50226- atomic_read(&sent[SMB2_LOCK_HE]),
50227- atomic_read(&failed[SMB2_LOCK_HE]));
50228+ atomic_read_unchecked(&sent[SMB2_LOCK_HE]),
50229+ atomic_read_unchecked(&failed[SMB2_LOCK_HE]));
50230 seq_printf(m, "\nIOCTLs: %d sent %d failed",
50231- atomic_read(&sent[SMB2_IOCTL_HE]),
50232- atomic_read(&failed[SMB2_IOCTL_HE]));
50233+ atomic_read_unchecked(&sent[SMB2_IOCTL_HE]),
50234+ atomic_read_unchecked(&failed[SMB2_IOCTL_HE]));
50235 seq_printf(m, "\nCancels: %d sent %d failed",
50236- atomic_read(&sent[SMB2_CANCEL_HE]),
50237- atomic_read(&failed[SMB2_CANCEL_HE]));
50238+ atomic_read_unchecked(&sent[SMB2_CANCEL_HE]),
50239+ atomic_read_unchecked(&failed[SMB2_CANCEL_HE]));
50240 seq_printf(m, "\nEchos: %d sent %d failed",
50241- atomic_read(&sent[SMB2_ECHO_HE]),
50242- atomic_read(&failed[SMB2_ECHO_HE]));
50243+ atomic_read_unchecked(&sent[SMB2_ECHO_HE]),
50244+ atomic_read_unchecked(&failed[SMB2_ECHO_HE]));
50245 seq_printf(m, "\nQueryDirectories: %d sent %d failed",
50246- atomic_read(&sent[SMB2_QUERY_DIRECTORY_HE]),
50247- atomic_read(&failed[SMB2_QUERY_DIRECTORY_HE]));
50248+ atomic_read_unchecked(&sent[SMB2_QUERY_DIRECTORY_HE]),
50249+ atomic_read_unchecked(&failed[SMB2_QUERY_DIRECTORY_HE]));
50250 seq_printf(m, "\nChangeNotifies: %d sent %d failed",
50251- atomic_read(&sent[SMB2_CHANGE_NOTIFY_HE]),
50252- atomic_read(&failed[SMB2_CHANGE_NOTIFY_HE]));
50253+ atomic_read_unchecked(&sent[SMB2_CHANGE_NOTIFY_HE]),
50254+ atomic_read_unchecked(&failed[SMB2_CHANGE_NOTIFY_HE]));
50255 seq_printf(m, "\nQueryInfos: %d sent %d failed",
50256- atomic_read(&sent[SMB2_QUERY_INFO_HE]),
50257- atomic_read(&failed[SMB2_QUERY_INFO_HE]));
50258+ atomic_read_unchecked(&sent[SMB2_QUERY_INFO_HE]),
50259+ atomic_read_unchecked(&failed[SMB2_QUERY_INFO_HE]));
50260 seq_printf(m, "\nSetInfos: %d sent %d failed",
50261- atomic_read(&sent[SMB2_SET_INFO_HE]),
50262- atomic_read(&failed[SMB2_SET_INFO_HE]));
50263+ atomic_read_unchecked(&sent[SMB2_SET_INFO_HE]),
50264+ atomic_read_unchecked(&failed[SMB2_SET_INFO_HE]));
50265 seq_printf(m, "\nOplockBreaks: %d sent %d failed",
50266- atomic_read(&sent[SMB2_OPLOCK_BREAK_HE]),
50267- atomic_read(&failed[SMB2_OPLOCK_BREAK_HE]));
50268+ atomic_read_unchecked(&sent[SMB2_OPLOCK_BREAK_HE]),
50269+ atomic_read_unchecked(&failed[SMB2_OPLOCK_BREAK_HE]));
50270 #endif
50271 }
50272
50273diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
50274index 2b95ce2..d079d75 100644
50275--- a/fs/cifs/smb2pdu.c
50276+++ b/fs/cifs/smb2pdu.c
50277@@ -1760,8 +1760,7 @@ SMB2_query_directory(const unsigned int xid, struct cifs_tcon *tcon,
50278 default:
50279 cifs_dbg(VFS, "info level %u isn't supported\n",
50280 srch_inf->info_level);
50281- rc = -EINVAL;
50282- goto qdir_exit;
50283+ return -EINVAL;
50284 }
50285
50286 req->FileIndex = cpu_to_le32(index);
50287diff --git a/fs/coda/cache.c b/fs/coda/cache.c
50288index 1da168c..8bc7ff6 100644
50289--- a/fs/coda/cache.c
50290+++ b/fs/coda/cache.c
50291@@ -24,7 +24,7 @@
50292 #include "coda_linux.h"
50293 #include "coda_cache.h"
50294
50295-static atomic_t permission_epoch = ATOMIC_INIT(0);
50296+static atomic_unchecked_t permission_epoch = ATOMIC_INIT(0);
50297
50298 /* replace or extend an acl cache hit */
50299 void coda_cache_enter(struct inode *inode, int mask)
50300@@ -32,7 +32,7 @@ void coda_cache_enter(struct inode *inode, int mask)
50301 struct coda_inode_info *cii = ITOC(inode);
50302
50303 spin_lock(&cii->c_lock);
50304- cii->c_cached_epoch = atomic_read(&permission_epoch);
50305+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch);
50306 if (!uid_eq(cii->c_uid, current_fsuid())) {
50307 cii->c_uid = current_fsuid();
50308 cii->c_cached_perm = mask;
50309@@ -46,14 +46,14 @@ void coda_cache_clear_inode(struct inode *inode)
50310 {
50311 struct coda_inode_info *cii = ITOC(inode);
50312 spin_lock(&cii->c_lock);
50313- cii->c_cached_epoch = atomic_read(&permission_epoch) - 1;
50314+ cii->c_cached_epoch = atomic_read_unchecked(&permission_epoch) - 1;
50315 spin_unlock(&cii->c_lock);
50316 }
50317
50318 /* remove all acl caches */
50319 void coda_cache_clear_all(struct super_block *sb)
50320 {
50321- atomic_inc(&permission_epoch);
50322+ atomic_inc_unchecked(&permission_epoch);
50323 }
50324
50325
50326@@ -66,7 +66,7 @@ int coda_cache_check(struct inode *inode, int mask)
50327 spin_lock(&cii->c_lock);
50328 hit = (mask & cii->c_cached_perm) == mask &&
50329 uid_eq(cii->c_uid, current_fsuid()) &&
50330- cii->c_cached_epoch == atomic_read(&permission_epoch);
50331+ cii->c_cached_epoch == atomic_read_unchecked(&permission_epoch);
50332 spin_unlock(&cii->c_lock);
50333
50334 return hit;
50335diff --git a/fs/compat.c b/fs/compat.c
50336index fc3b55d..7b568ae 100644
50337--- a/fs/compat.c
50338+++ b/fs/compat.c
50339@@ -54,7 +54,7 @@
50340 #include <asm/ioctls.h>
50341 #include "internal.h"
50342
50343-int compat_log = 1;
50344+int compat_log = 0;
50345
50346 int compat_printk(const char *fmt, ...)
50347 {
50348@@ -488,7 +488,7 @@ compat_sys_io_setup(unsigned nr_reqs, u32 __user *ctx32p)
50349
50350 set_fs(KERNEL_DS);
50351 /* The __user pointer cast is valid because of the set_fs() */
50352- ret = sys_io_setup(nr_reqs, (aio_context_t __user *) &ctx64);
50353+ ret = sys_io_setup(nr_reqs, (aio_context_t __force_user *) &ctx64);
50354 set_fs(oldfs);
50355 /* truncating is ok because it's a user address */
50356 if (!ret)
50357@@ -546,7 +546,7 @@ ssize_t compat_rw_copy_check_uvector(int type,
50358 goto out;
50359
50360 ret = -EINVAL;
50361- if (nr_segs > UIO_MAXIOV || nr_segs < 0)
50362+ if (nr_segs > UIO_MAXIOV)
50363 goto out;
50364 if (nr_segs > fast_segs) {
50365 ret = -ENOMEM;
50366@@ -833,6 +833,7 @@ struct compat_old_linux_dirent {
50367
50368 struct compat_readdir_callback {
50369 struct compat_old_linux_dirent __user *dirent;
50370+ struct file * file;
50371 int result;
50372 };
50373
50374@@ -850,6 +851,10 @@ static int compat_fillonedir(void *__buf, const char *name, int namlen,
50375 buf->result = -EOVERFLOW;
50376 return -EOVERFLOW;
50377 }
50378+
50379+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
50380+ return 0;
50381+
50382 buf->result++;
50383 dirent = buf->dirent;
50384 if (!access_ok(VERIFY_WRITE, dirent,
50385@@ -880,6 +885,7 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
50386
50387 buf.result = 0;
50388 buf.dirent = dirent;
50389+ buf.file = f.file;
50390
50391 error = vfs_readdir(f.file, compat_fillonedir, &buf);
50392 if (buf.result)
50393@@ -899,6 +905,7 @@ struct compat_linux_dirent {
50394 struct compat_getdents_callback {
50395 struct compat_linux_dirent __user *current_dir;
50396 struct compat_linux_dirent __user *previous;
50397+ struct file * file;
50398 int count;
50399 int error;
50400 };
50401@@ -920,6 +927,10 @@ static int compat_filldir(void *__buf, const char *name, int namlen,
50402 buf->error = -EOVERFLOW;
50403 return -EOVERFLOW;
50404 }
50405+
50406+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
50407+ return 0;
50408+
50409 dirent = buf->previous;
50410 if (dirent) {
50411 if (__put_user(offset, &dirent->d_off))
50412@@ -965,6 +976,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
50413 buf.previous = NULL;
50414 buf.count = count;
50415 buf.error = 0;
50416+ buf.file = f.file;
50417
50418 error = vfs_readdir(f.file, compat_filldir, &buf);
50419 if (error >= 0)
50420@@ -985,6 +997,7 @@ asmlinkage long compat_sys_getdents(unsigned int fd,
50421 struct compat_getdents_callback64 {
50422 struct linux_dirent64 __user *current_dir;
50423 struct linux_dirent64 __user *previous;
50424+ struct file * file;
50425 int count;
50426 int error;
50427 };
50428@@ -1001,6 +1014,10 @@ static int compat_filldir64(void * __buf, const char * name, int namlen, loff_t
50429 buf->error = -EINVAL; /* only used if we fail.. */
50430 if (reclen > buf->count)
50431 return -EINVAL;
50432+
50433+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
50434+ return 0;
50435+
50436 dirent = buf->previous;
50437
50438 if (dirent) {
50439@@ -1050,13 +1067,14 @@ asmlinkage long compat_sys_getdents64(unsigned int fd,
50440 buf.previous = NULL;
50441 buf.count = count;
50442 buf.error = 0;
50443+ buf.file = f.file;
50444
50445 error = vfs_readdir(f.file, compat_filldir64, &buf);
50446 if (error >= 0)
50447 error = buf.error;
50448 lastdirent = buf.previous;
50449 if (lastdirent) {
50450- typeof(lastdirent->d_off) d_off = f.file->f_pos;
50451+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = f.file->f_pos;
50452 if (__put_user_unaligned(d_off, &lastdirent->d_off))
50453 error = -EFAULT;
50454 else
50455diff --git a/fs/compat_binfmt_elf.c b/fs/compat_binfmt_elf.c
50456index a81147e..20bf2b5 100644
50457--- a/fs/compat_binfmt_elf.c
50458+++ b/fs/compat_binfmt_elf.c
50459@@ -30,11 +30,13 @@
50460 #undef elf_phdr
50461 #undef elf_shdr
50462 #undef elf_note
50463+#undef elf_dyn
50464 #undef elf_addr_t
50465 #define elfhdr elf32_hdr
50466 #define elf_phdr elf32_phdr
50467 #define elf_shdr elf32_shdr
50468 #define elf_note elf32_note
50469+#define elf_dyn Elf32_Dyn
50470 #define elf_addr_t Elf32_Addr
50471
50472 /*
50473diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
50474index 996cdc5..15e2f33 100644
50475--- a/fs/compat_ioctl.c
50476+++ b/fs/compat_ioctl.c
50477@@ -622,7 +622,7 @@ static int serial_struct_ioctl(unsigned fd, unsigned cmd,
50478 return -EFAULT;
50479 if (__get_user(udata, &ss32->iomem_base))
50480 return -EFAULT;
50481- ss.iomem_base = compat_ptr(udata);
50482+ ss.iomem_base = (unsigned char __force_kernel *)compat_ptr(udata);
50483 if (__get_user(ss.iomem_reg_shift, &ss32->iomem_reg_shift) ||
50484 __get_user(ss.port_high, &ss32->port_high))
50485 return -EFAULT;
50486@@ -703,8 +703,8 @@ static int do_i2c_rdwr_ioctl(unsigned int fd, unsigned int cmd,
50487 for (i = 0; i < nmsgs; i++) {
50488 if (copy_in_user(&tmsgs[i].addr, &umsgs[i].addr, 3*sizeof(u16)))
50489 return -EFAULT;
50490- if (get_user(datap, &umsgs[i].buf) ||
50491- put_user(compat_ptr(datap), &tmsgs[i].buf))
50492+ if (get_user(datap, (u8 __user * __user *)&umsgs[i].buf) ||
50493+ put_user(compat_ptr(datap), (u8 __user * __user *)&tmsgs[i].buf))
50494 return -EFAULT;
50495 }
50496 return sys_ioctl(fd, cmd, (unsigned long)tdata);
50497@@ -797,7 +797,7 @@ static int compat_ioctl_preallocate(struct file *file,
50498 copy_in_user(&p->l_len, &p32->l_len, sizeof(s64)) ||
50499 copy_in_user(&p->l_sysid, &p32->l_sysid, sizeof(s32)) ||
50500 copy_in_user(&p->l_pid, &p32->l_pid, sizeof(u32)) ||
50501- copy_in_user(&p->l_pad, &p32->l_pad, 4*sizeof(u32)))
50502+ copy_in_user(p->l_pad, &p32->l_pad, 4*sizeof(u32)))
50503 return -EFAULT;
50504
50505 return ioctl_preallocate(file, p);
50506@@ -1619,8 +1619,8 @@ asmlinkage long compat_sys_ioctl(unsigned int fd, unsigned int cmd,
50507 static int __init init_sys32_ioctl_cmp(const void *p, const void *q)
50508 {
50509 unsigned int a, b;
50510- a = *(unsigned int *)p;
50511- b = *(unsigned int *)q;
50512+ a = *(const unsigned int *)p;
50513+ b = *(const unsigned int *)q;
50514 if (a > b)
50515 return 1;
50516 if (a < b)
50517diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
50518index 7aabc6a..34c1197 100644
50519--- a/fs/configfs/dir.c
50520+++ b/fs/configfs/dir.c
50521@@ -1565,7 +1565,8 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
50522 }
50523 for (p=q->next; p!= &parent_sd->s_children; p=p->next) {
50524 struct configfs_dirent *next;
50525- const char * name;
50526+ const unsigned char * name;
50527+ char d_name[sizeof(next->s_dentry->d_iname)];
50528 int len;
50529 struct inode *inode = NULL;
50530
50531@@ -1575,7 +1576,12 @@ static int configfs_readdir(struct file * filp, void * dirent, filldir_t filldir
50532 continue;
50533
50534 name = configfs_get_name(next);
50535- len = strlen(name);
50536+ if (next->s_dentry && name == next->s_dentry->d_iname) {
50537+ len = next->s_dentry->d_name.len;
50538+ memcpy(d_name, name, len);
50539+ name = d_name;
50540+ } else
50541+ len = strlen(name);
50542
50543 /*
50544 * We'll have a dentry and an inode for
50545diff --git a/fs/coredump.c b/fs/coredump.c
50546index dafafba..10b3b27 100644
50547--- a/fs/coredump.c
50548+++ b/fs/coredump.c
50549@@ -52,7 +52,7 @@ struct core_name {
50550 char *corename;
50551 int used, size;
50552 };
50553-static atomic_t call_count = ATOMIC_INIT(1);
50554+static atomic_unchecked_t call_count = ATOMIC_INIT(1);
50555
50556 /* The maximal length of core_pattern is also specified in sysctl.c */
50557
50558@@ -60,7 +60,7 @@ static int expand_corename(struct core_name *cn)
50559 {
50560 char *old_corename = cn->corename;
50561
50562- cn->size = CORENAME_MAX_SIZE * atomic_inc_return(&call_count);
50563+ cn->size = CORENAME_MAX_SIZE * atomic_inc_return_unchecked(&call_count);
50564 cn->corename = krealloc(old_corename, cn->size, GFP_KERNEL);
50565
50566 if (!cn->corename) {
50567@@ -157,7 +157,7 @@ static int format_corename(struct core_name *cn, struct coredump_params *cprm)
50568 int pid_in_pattern = 0;
50569 int err = 0;
50570
50571- cn->size = CORENAME_MAX_SIZE * atomic_read(&call_count);
50572+ cn->size = CORENAME_MAX_SIZE * atomic_read_unchecked(&call_count);
50573 cn->corename = kmalloc(cn->size, GFP_KERNEL);
50574 cn->used = 0;
50575
50576@@ -435,8 +435,8 @@ static void wait_for_dump_helpers(struct file *file)
50577 struct pipe_inode_info *pipe = file->private_data;
50578
50579 pipe_lock(pipe);
50580- pipe->readers++;
50581- pipe->writers--;
50582+ atomic_inc(&pipe->readers);
50583+ atomic_dec(&pipe->writers);
50584 wake_up_interruptible_sync(&pipe->wait);
50585 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
50586 pipe_unlock(pipe);
50587@@ -445,11 +445,11 @@ static void wait_for_dump_helpers(struct file *file)
50588 * We actually want wait_event_freezable() but then we need
50589 * to clear TIF_SIGPENDING and improve dump_interrupted().
50590 */
50591- wait_event_interruptible(pipe->wait, pipe->readers == 1);
50592+ wait_event_interruptible(pipe->wait, atomic_read(&pipe->readers) == 1);
50593
50594 pipe_lock(pipe);
50595- pipe->readers--;
50596- pipe->writers++;
50597+ atomic_dec(&pipe->readers);
50598+ atomic_inc(&pipe->writers);
50599 pipe_unlock(pipe);
50600 }
50601
50602@@ -496,7 +496,8 @@ void do_coredump(siginfo_t *siginfo)
50603 struct files_struct *displaced;
50604 bool need_nonrelative = false;
50605 bool core_dumped = false;
50606- static atomic_t core_dump_count = ATOMIC_INIT(0);
50607+ static atomic_unchecked_t core_dump_count = ATOMIC_INIT(0);
50608+ long signr = siginfo->si_signo;
50609 struct coredump_params cprm = {
50610 .siginfo = siginfo,
50611 .regs = signal_pt_regs(),
50612@@ -509,7 +510,10 @@ void do_coredump(siginfo_t *siginfo)
50613 .mm_flags = mm->flags,
50614 };
50615
50616- audit_core_dumps(siginfo->si_signo);
50617+ audit_core_dumps(signr);
50618+
50619+ if (signr == SIGSEGV || signr == SIGBUS || signr == SIGKILL || signr == SIGILL)
50620+ gr_handle_brute_attach(cprm.mm_flags);
50621
50622 binfmt = mm->binfmt;
50623 if (!binfmt || !binfmt->core_dump)
50624@@ -533,7 +537,7 @@ void do_coredump(siginfo_t *siginfo)
50625 need_nonrelative = true;
50626 }
50627
50628- retval = coredump_wait(siginfo->si_signo, &core_state);
50629+ retval = coredump_wait(signr, &core_state);
50630 if (retval < 0)
50631 goto fail_creds;
50632
50633@@ -576,7 +580,7 @@ void do_coredump(siginfo_t *siginfo)
50634 }
50635 cprm.limit = RLIM_INFINITY;
50636
50637- dump_count = atomic_inc_return(&core_dump_count);
50638+ dump_count = atomic_inc_return_unchecked(&core_dump_count);
50639 if (core_pipe_limit && (core_pipe_limit < dump_count)) {
50640 printk(KERN_WARNING "Pid %d(%s) over core_pipe_limit\n",
50641 task_tgid_vnr(current), current->comm);
50642@@ -608,6 +612,8 @@ void do_coredump(siginfo_t *siginfo)
50643 } else {
50644 struct inode *inode;
50645
50646+ gr_learn_resource(current, RLIMIT_CORE, binfmt->min_coredump, 1);
50647+
50648 if (cprm.limit < binfmt->min_coredump)
50649 goto fail_unlock;
50650
50651@@ -666,7 +672,7 @@ close_fail:
50652 filp_close(cprm.file, NULL);
50653 fail_dropcount:
50654 if (ispipe)
50655- atomic_dec(&core_dump_count);
50656+ atomic_dec_unchecked(&core_dump_count);
50657 fail_unlock:
50658 kfree(cn.corename);
50659 fail_corename:
50660@@ -687,7 +693,7 @@ int dump_write(struct file *file, const void *addr, int nr)
50661 {
50662 return !dump_interrupted() &&
50663 access_ok(VERIFY_READ, addr, nr) &&
50664- file->f_op->write(file, addr, nr, &file->f_pos) == nr;
50665+ file->f_op->write(file, (const char __force_user *)addr, nr, &file->f_pos) == nr;
50666 }
50667 EXPORT_SYMBOL(dump_write);
50668
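
The atomic_t to atomic_unchecked_t conversions in fs/coredump.c above exempt two pure statistics counters (call_count and core_dump_count) from PaX REFCOUNT's overflow trap: wrapping a statistics counter is harmless, so it must not kill the task the way a wrapped object refcount would. A minimal single-threaded C sketch of that distinction, with C11 atomics standing in for kernel atomic_t; trap_inc()/plain_inc() and the explicit INT_MAX test are illustrative only (the real PaX check happens inside the atomic operation itself):

#include <limits.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

/* Models a REFCOUNT-protected atomic_inc(): trap instead of wrapping. */
static int trap_inc(atomic_int *v)
{
	if (atomic_load(v) == INT_MAX) {	/* sketch only: not race-free */
		fprintf(stderr, "refcount overflow detected, aborting\n");
		abort();			/* the kernel kills the task instead */
	}
	atomic_fetch_add(v, 1);
	return atomic_load(v);
}

/* Models atomic_inc_unchecked(): statistics may wrap silently. */
static int plain_inc(atomic_int *v)
{
	atomic_fetch_add(v, 1);			/* wrap is defined for C11 atomics */
	return atomic_load(v);
}

int main(void)
{
	atomic_int stats = INT_MAX;		/* saturated statistics counter */
	atomic_int ref = INT_MAX;		/* saturated reference counter */

	printf("stats counter wraps harmlessly to %d\n", plain_inc(&stats));
	trap_inc(&ref);				/* aborts: a wrapped refcount is exploitable */
	return 0;
}
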
50669diff --git a/fs/dcache.c b/fs/dcache.c
50670index f09b908..04b9690 100644
50671--- a/fs/dcache.c
50672+++ b/fs/dcache.c
50673@@ -3086,7 +3086,8 @@ void __init vfs_caches_init(unsigned long mempages)
50674 mempages -= reserve;
50675
50676 names_cachep = kmem_cache_create("names_cache", PATH_MAX, 0,
50677- SLAB_HWCACHE_ALIGN|SLAB_PANIC, NULL);
50678+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_USERCOPY|
50679+ SLAB_NO_SANITIZE, NULL);
50680
50681 dcache_init();
50682 inode_init();
50683diff --git a/fs/debugfs/inode.c b/fs/debugfs/inode.c
50684index 4888cb3..e0f7cf8 100644
50685--- a/fs/debugfs/inode.c
50686+++ b/fs/debugfs/inode.c
50687@@ -415,7 +415,11 @@ EXPORT_SYMBOL_GPL(debugfs_create_file);
50688 */
50689 struct dentry *debugfs_create_dir(const char *name, struct dentry *parent)
50690 {
50691+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
50692+ return __create_file(name, S_IFDIR | S_IRWXU,
50693+#else
50694 return __create_file(name, S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO,
50695+#endif
50696 parent, NULL, NULL);
50697 }
50698 EXPORT_SYMBOL_GPL(debugfs_create_dir);
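
The debugfs_create_dir() hunk narrows directory permissions from 0755 to 0700 when GRKERNSEC_SYSFS_RESTRICT is set, so unprivileged users cannot enumerate debugfs. A self-contained sketch of the same conditional-compilation pattern (S_IRUGO/S_IXUGO are kernel spellings, re-defined here so the sketch builds in userspace):

#include <stdio.h>
#include <sys/stat.h>

/* Kernel spellings, defined locally so the sketch is self-contained. */
#define S_IRUGO (S_IRUSR | S_IRGRP | S_IROTH)
#define S_IXUGO (S_IXUSR | S_IXGRP | S_IXOTH)

#define CONFIG_GRKERNSEC_SYSFS_RESTRICT		/* comment out to compare */

static mode_t debugfs_dir_mode(void)
{
#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
	return S_IFDIR | S_IRWXU;			/* 0700: owner (root) only */
#else
	return S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;	/* 0755: world-listable */
#endif
}

int main(void)
{
	printf("debugfs dir mode: %04o\n", (unsigned)debugfs_dir_mode() & 07777);
	return 0;
}
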
50699diff --git a/fs/ecryptfs/inode.c b/fs/ecryptfs/inode.c
50700index 5eab400..810a3f5 100644
50701--- a/fs/ecryptfs/inode.c
50702+++ b/fs/ecryptfs/inode.c
50703@@ -674,7 +674,7 @@ static int ecryptfs_readlink_lower(struct dentry *dentry, char **buf,
50704 old_fs = get_fs();
50705 set_fs(get_ds());
50706 rc = lower_dentry->d_inode->i_op->readlink(lower_dentry,
50707- (char __user *)lower_buf,
50708+ (char __force_user *)lower_buf,
50709 PATH_MAX);
50710 set_fs(old_fs);
50711 if (rc < 0)
50712@@ -706,7 +706,7 @@ out:
50713 static void
50714 ecryptfs_put_link(struct dentry *dentry, struct nameidata *nd, void *ptr)
50715 {
50716- char *buf = nd_get_link(nd);
50717+ const char *buf = nd_get_link(nd);
50718 if (!IS_ERR(buf)) {
50719 /* Free the char* */
50720 kfree(buf);
50721diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
50722index e4141f2..d8263e8 100644
50723--- a/fs/ecryptfs/miscdev.c
50724+++ b/fs/ecryptfs/miscdev.c
50725@@ -304,7 +304,7 @@ check_list:
50726 goto out_unlock_msg_ctx;
50727 i = PKT_TYPE_SIZE + PKT_CTR_SIZE;
50728 if (msg_ctx->msg) {
50729- if (copy_to_user(&buf[i], packet_length, packet_length_size))
50730+ if (packet_length_size > sizeof(packet_length) || copy_to_user(&buf[i], packet_length, packet_length_size))
50731 goto out_unlock_msg_ctx;
50732 i += packet_length_size;
50733 if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
50734diff --git a/fs/exec.c b/fs/exec.c
50735index ffd7a81..f0afae1 100644
50736--- a/fs/exec.c
50737+++ b/fs/exec.c
50738@@ -55,8 +55,20 @@
50739 #include <linux/pipe_fs_i.h>
50740 #include <linux/oom.h>
50741 #include <linux/compat.h>
50742+#include <linux/random.h>
50743+#include <linux/seq_file.h>
50744+#include <linux/coredump.h>
50745+#include <linux/mman.h>
50746+
50747+#ifdef CONFIG_PAX_REFCOUNT
50748+#include <linux/kallsyms.h>
50749+#include <linux/kdebug.h>
50750+#endif
50751+
50752+#include <trace/events/fs.h>
50753
50754 #include <asm/uaccess.h>
50755+#include <asm/sections.h>
50756 #include <asm/mmu_context.h>
50757 #include <asm/tlb.h>
50758
50759@@ -66,17 +78,32 @@
50760
50761 #include <trace/events/sched.h>
50762
50763+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
50764+void __weak pax_set_initial_flags(struct linux_binprm *bprm)
50765+{
50766+ pr_warn_once("PAX: PAX_HAVE_ACL_FLAGS was enabled without providing the pax_set_initial_flags callback, this is probably not what you wanted.\n");
50767+}
50768+#endif
50769+
50770+#ifdef CONFIG_PAX_HOOK_ACL_FLAGS
50771+void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
50772+EXPORT_SYMBOL(pax_set_initial_flags_func);
50773+#endif
50774+
50775 int suid_dumpable = 0;
50776
50777 static LIST_HEAD(formats);
50778 static DEFINE_RWLOCK(binfmt_lock);
50779
50780+extern int gr_process_kernel_exec_ban(void);
50781+extern int gr_process_suid_exec_ban(const struct linux_binprm *bprm);
50782+
50783 void __register_binfmt(struct linux_binfmt * fmt, int insert)
50784 {
50785 BUG_ON(!fmt);
50786 write_lock(&binfmt_lock);
50787- insert ? list_add(&fmt->lh, &formats) :
50788- list_add_tail(&fmt->lh, &formats);
50789+ insert ? pax_list_add((struct list_head *)&fmt->lh, &formats) :
50790+ pax_list_add_tail((struct list_head *)&fmt->lh, &formats);
50791 write_unlock(&binfmt_lock);
50792 }
50793
50794@@ -85,7 +112,7 @@ EXPORT_SYMBOL(__register_binfmt);
50795 void unregister_binfmt(struct linux_binfmt * fmt)
50796 {
50797 write_lock(&binfmt_lock);
50798- list_del(&fmt->lh);
50799+ pax_list_del((struct list_head *)&fmt->lh);
50800 write_unlock(&binfmt_lock);
50801 }
50802
50803@@ -180,18 +207,10 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
50804 int write)
50805 {
50806 struct page *page;
50807- int ret;
50808
50809-#ifdef CONFIG_STACK_GROWSUP
50810- if (write) {
50811- ret = expand_downwards(bprm->vma, pos);
50812- if (ret < 0)
50813- return NULL;
50814- }
50815-#endif
50816- ret = get_user_pages(current, bprm->mm, pos,
50817- 1, write, 1, &page, NULL);
50818- if (ret <= 0)
50819+ if (0 > expand_downwards(bprm->vma, pos))
50820+ return NULL;
50821+ if (0 >= get_user_pages(current, bprm->mm, pos, 1, write, 1, &page, NULL))
50822 return NULL;
50823
50824 if (write) {
50825@@ -207,6 +226,17 @@ static struct page *get_arg_page(struct linux_binprm *bprm, unsigned long pos,
50826 if (size <= ARG_MAX)
50827 return page;
50828
50829+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
50830+ // only allow 512KB for argv+env on suid/sgid binaries
50831+ // to prevent easy ASLR exhaustion
50832+ if (((!uid_eq(bprm->cred->euid, current_euid())) ||
50833+ (!gid_eq(bprm->cred->egid, current_egid()))) &&
50834+ (size > (512 * 1024))) {
50835+ put_page(page);
50836+ return NULL;
50837+ }
50838+#endif
50839+
50840 /*
50841 * Limit to 1/4-th the stack size for the argv+env strings.
50842 * This ensures that:
50843@@ -266,6 +296,11 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
50844 vma->vm_end = STACK_TOP_MAX;
50845 vma->vm_start = vma->vm_end - PAGE_SIZE;
50846 vma->vm_flags = VM_STACK_FLAGS | VM_STACK_INCOMPLETE_SETUP;
50847+
50848+#ifdef CONFIG_PAX_SEGMEXEC
50849+ vma->vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
50850+#endif
50851+
50852 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
50853 INIT_LIST_HEAD(&vma->anon_vma_chain);
50854
50855@@ -276,6 +311,12 @@ static int __bprm_mm_init(struct linux_binprm *bprm)
50856 mm->stack_vm = mm->total_vm = 1;
50857 up_write(&mm->mmap_sem);
50858 bprm->p = vma->vm_end - sizeof(void *);
50859+
50860+#ifdef CONFIG_PAX_RANDUSTACK
50861+ if (randomize_va_space)
50862+ bprm->p ^= prandom_u32() & ~PAGE_MASK;
50863+#endif
50864+
50865 return 0;
50866 err:
50867 up_write(&mm->mmap_sem);
50868@@ -396,7 +437,7 @@ struct user_arg_ptr {
50869 } ptr;
50870 };
50871
50872-static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
50873+const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
50874 {
50875 const char __user *native;
50876
50877@@ -405,14 +446,14 @@ static const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr)
50878 compat_uptr_t compat;
50879
50880 if (get_user(compat, argv.ptr.compat + nr))
50881- return ERR_PTR(-EFAULT);
50882+ return (const char __force_user *)ERR_PTR(-EFAULT);
50883
50884 return compat_ptr(compat);
50885 }
50886 #endif
50887
50888 if (get_user(native, argv.ptr.native + nr))
50889- return ERR_PTR(-EFAULT);
50890+ return (const char __force_user *)ERR_PTR(-EFAULT);
50891
50892 return native;
50893 }
50894@@ -431,7 +472,7 @@ static int count(struct user_arg_ptr argv, int max)
50895 if (!p)
50896 break;
50897
50898- if (IS_ERR(p))
50899+ if (IS_ERR((const char __force_kernel *)p))
50900 return -EFAULT;
50901
50902 if (i >= max)
50903@@ -466,7 +507,7 @@ static int copy_strings(int argc, struct user_arg_ptr argv,
50904
50905 ret = -EFAULT;
50906 str = get_user_arg_ptr(argv, argc);
50907- if (IS_ERR(str))
50908+ if (IS_ERR((const char __force_kernel *)str))
50909 goto out;
50910
50911 len = strnlen_user(str, MAX_ARG_STRLEN);
50912@@ -548,7 +589,7 @@ int copy_strings_kernel(int argc, const char *const *__argv,
50913 int r;
50914 mm_segment_t oldfs = get_fs();
50915 struct user_arg_ptr argv = {
50916- .ptr.native = (const char __user *const __user *)__argv,
50917+ .ptr.native = (const char __force_user * const __force_user *)__argv,
50918 };
50919
50920 set_fs(KERNEL_DS);
50921@@ -583,7 +624,8 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
50922 unsigned long new_end = old_end - shift;
50923 struct mmu_gather tlb;
50924
50925- BUG_ON(new_start > new_end);
50926+ if (new_start >= new_end || new_start < mmap_min_addr)
50927+ return -ENOMEM;
50928
50929 /*
50930 * ensure there are no vmas between where we want to go
50931@@ -592,6 +634,10 @@ static int shift_arg_pages(struct vm_area_struct *vma, unsigned long shift)
50932 if (vma != find_vma(mm, new_start))
50933 return -EFAULT;
50934
50935+#ifdef CONFIG_PAX_SEGMEXEC
50936+ BUG_ON(pax_find_mirror_vma(vma));
50937+#endif
50938+
50939 /*
50940 * cover the whole range: [new_start, old_end)
50941 */
50942@@ -672,10 +718,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
50943 stack_top = arch_align_stack(stack_top);
50944 stack_top = PAGE_ALIGN(stack_top);
50945
50946- if (unlikely(stack_top < mmap_min_addr) ||
50947- unlikely(vma->vm_end - vma->vm_start >= stack_top - mmap_min_addr))
50948- return -ENOMEM;
50949-
50950 stack_shift = vma->vm_end - stack_top;
50951
50952 bprm->p -= stack_shift;
50953@@ -687,8 +729,28 @@ int setup_arg_pages(struct linux_binprm *bprm,
50954 bprm->exec -= stack_shift;
50955
50956 down_write(&mm->mmap_sem);
50957+
50958+ /* Move stack pages down in memory. */
50959+ if (stack_shift) {
50960+ ret = shift_arg_pages(vma, stack_shift);
50961+ if (ret)
50962+ goto out_unlock;
50963+ }
50964+
50965 vm_flags = VM_STACK_FLAGS;
50966
50967+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
50968+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
50969+ vm_flags &= ~VM_EXEC;
50970+
50971+#ifdef CONFIG_PAX_MPROTECT
50972+ if (mm->pax_flags & MF_PAX_MPROTECT)
50973+ vm_flags &= ~VM_MAYEXEC;
50974+#endif
50975+
50976+ }
50977+#endif
50978+
50979 /*
50980 * Adjust stack execute permissions; explicitly enable for
50981 * EXSTACK_ENABLE_X, disable for EXSTACK_DISABLE_X and leave alone
50982@@ -707,13 +769,6 @@ int setup_arg_pages(struct linux_binprm *bprm,
50983 goto out_unlock;
50984 BUG_ON(prev != vma);
50985
50986- /* Move stack pages down in memory. */
50987- if (stack_shift) {
50988- ret = shift_arg_pages(vma, stack_shift);
50989- if (ret)
50990- goto out_unlock;
50991- }
50992-
50993 /* mprotect_fixup is overkill to remove the temporary stack flags */
50994 vma->vm_flags &= ~VM_STACK_INCOMPLETE_SETUP;
50995
50996@@ -737,6 +792,27 @@ int setup_arg_pages(struct linux_binprm *bprm,
50997 #endif
50998 current->mm->start_stack = bprm->p;
50999 ret = expand_stack(vma, stack_base);
51000+
51001+#if !defined(CONFIG_STACK_GROWSUP) && defined(CONFIG_PAX_RANDMMAP)
51002+ if (!ret && (mm->pax_flags & MF_PAX_RANDMMAP) && STACK_TOP <= 0xFFFFFFFFU && STACK_TOP > vma->vm_end) {
51003+ unsigned long size;
51004+ vm_flags_t vm_flags;
51005+
51006+ size = STACK_TOP - vma->vm_end;
51007+ vm_flags = VM_NONE | VM_DONTEXPAND | VM_DONTDUMP;
51008+
51009+ ret = vma->vm_end != mmap_region(NULL, vma->vm_end, size, vm_flags, 0);
51010+
51011+#ifdef CONFIG_X86
51012+ if (!ret) {
51013+ size = PAGE_SIZE + mmap_min_addr + ((mm->delta_mmap ^ mm->delta_stack) & (0xFFUL << PAGE_SHIFT));
51014+ ret = 0 != mmap_region(NULL, 0, PAGE_ALIGN(size), vm_flags, 0);
51015+ }
51016+#endif
51017+
51018+ }
51019+#endif
51020+
51021 if (ret)
51022 ret = -EFAULT;
51023
51024@@ -772,6 +848,8 @@ struct file *open_exec(const char *name)
51025
51026 fsnotify_open(file);
51027
51028+ trace_open_exec(name);
51029+
51030 err = deny_write_access(file);
51031 if (err)
51032 goto exit;
51033@@ -795,7 +873,7 @@ int kernel_read(struct file *file, loff_t offset,
51034 old_fs = get_fs();
51035 set_fs(get_ds());
51036 /* The cast to a user pointer is valid due to the set_fs() */
51037- result = vfs_read(file, (void __user *)addr, count, &pos);
51038+ result = vfs_read(file, (void __force_user *)addr, count, &pos);
51039 set_fs(old_fs);
51040 return result;
51041 }
51042@@ -1251,7 +1329,7 @@ static int check_unsafe_exec(struct linux_binprm *bprm)
51043 }
51044 rcu_read_unlock();
51045
51046- if (p->fs->users > n_fs) {
51047+ if (atomic_read(&p->fs->users) > n_fs) {
51048 bprm->unsafe |= LSM_UNSAFE_SHARE;
51049 } else {
51050 res = -EAGAIN;
51051@@ -1451,6 +1529,31 @@ int search_binary_handler(struct linux_binprm *bprm)
51052
51053 EXPORT_SYMBOL(search_binary_handler);
51054
51055+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51056+static DEFINE_PER_CPU(u64, exec_counter);
51057+static int __init init_exec_counters(void)
51058+{
51059+ unsigned int cpu;
51060+
51061+ for_each_possible_cpu(cpu) {
51062+ per_cpu(exec_counter, cpu) = (u64)cpu;
51063+ }
51064+
51065+ return 0;
51066+}
51067+early_initcall(init_exec_counters);
51068+static inline void increment_exec_counter(void)
51069+{
51070+ BUILD_BUG_ON(NR_CPUS > (1 << 16));
51071+ current->exec_id = this_cpu_add_return(exec_counter, 1 << 16);
51072+}
51073+#else
51074+static inline void increment_exec_counter(void) {}
51075+#endif
51076+
51077+extern void gr_handle_exec_args(struct linux_binprm *bprm,
51078+ struct user_arg_ptr argv);
51079+
51080 /*
51081 * sys_execve() executes a new program.
51082 */
51083@@ -1458,6 +1561,11 @@ static int do_execve_common(const char *filename,
51084 struct user_arg_ptr argv,
51085 struct user_arg_ptr envp)
51086 {
51087+#ifdef CONFIG_GRKERNSEC
51088+ struct file *old_exec_file;
51089+ struct acl_subject_label *old_acl;
51090+ struct rlimit old_rlim[RLIM_NLIMITS];
51091+#endif
51092 struct linux_binprm *bprm;
51093 struct file *file;
51094 struct files_struct *displaced;
51095@@ -1465,6 +1573,8 @@ static int do_execve_common(const char *filename,
51096 int retval;
51097 const struct cred *cred = current_cred();
51098
51099+ gr_learn_resource(current, RLIMIT_NPROC, atomic_read(&cred->user->processes), 1);
51100+
51101 /*
51102 * We move the actual failure in case of RLIMIT_NPROC excess from
51103 * set*uid() to execve() because too many poorly written programs
51104@@ -1505,12 +1615,22 @@ static int do_execve_common(const char *filename,
51105 if (IS_ERR(file))
51106 goto out_unmark;
51107
51108+ if (gr_ptrace_readexec(file, bprm->unsafe)) {
51109+ retval = -EPERM;
51110+ goto out_file;
51111+ }
51112+
51113 sched_exec();
51114
51115 bprm->file = file;
51116 bprm->filename = filename;
51117 bprm->interp = filename;
51118
51119+ if (!gr_acl_handle_execve(file->f_path.dentry, file->f_path.mnt)) {
51120+ retval = -EACCES;
51121+ goto out_file;
51122+ }
51123+
51124 retval = bprm_mm_init(bprm);
51125 if (retval)
51126 goto out_file;
51127@@ -1527,24 +1647,70 @@ static int do_execve_common(const char *filename,
51128 if (retval < 0)
51129 goto out;
51130
51131+#ifdef CONFIG_GRKERNSEC
51132+ old_acl = current->acl;
51133+ memcpy(old_rlim, current->signal->rlim, sizeof(old_rlim));
51134+ old_exec_file = current->exec_file;
51135+ get_file(file);
51136+ current->exec_file = file;
51137+#endif
51138+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
51139+ /* limit suid stack to 8MB
51140+ * we saved the old limits above and will restore them if this exec fails
51141+ */
51142+ if (((!uid_eq(bprm->cred->euid, current_euid())) || (!gid_eq(bprm->cred->egid, current_egid()))) &&
51143+ (old_rlim[RLIMIT_STACK].rlim_cur > (8 * 1024 * 1024)))
51144+ current->signal->rlim[RLIMIT_STACK].rlim_cur = 8 * 1024 * 1024;
51145+#endif
51146+
51147+ if (gr_process_kernel_exec_ban() || gr_process_suid_exec_ban(bprm)) {
51148+ retval = -EPERM;
51149+ goto out_fail;
51150+ }
51151+
51152+ if (!gr_tpe_allow(file)) {
51153+ retval = -EACCES;
51154+ goto out_fail;
51155+ }
51156+
51157+ if (gr_check_crash_exec(file)) {
51158+ retval = -EACCES;
51159+ goto out_fail;
51160+ }
51161+
51162+ retval = gr_set_proc_label(file->f_path.dentry, file->f_path.mnt,
51163+ bprm->unsafe);
51164+ if (retval < 0)
51165+ goto out_fail;
51166+
51167 retval = copy_strings_kernel(1, &bprm->filename, bprm);
51168 if (retval < 0)
51169- goto out;
51170+ goto out_fail;
51171
51172 bprm->exec = bprm->p;
51173 retval = copy_strings(bprm->envc, envp, bprm);
51174 if (retval < 0)
51175- goto out;
51176+ goto out_fail;
51177
51178 retval = copy_strings(bprm->argc, argv, bprm);
51179 if (retval < 0)
51180- goto out;
51181+ goto out_fail;
51182+
51183+ gr_log_chroot_exec(file->f_path.dentry, file->f_path.mnt);
51184+
51185+ gr_handle_exec_args(bprm, argv);
51186
51187 retval = search_binary_handler(bprm);
51188 if (retval < 0)
51189- goto out;
51190+ goto out_fail;
51191+#ifdef CONFIG_GRKERNSEC
51192+ if (old_exec_file)
51193+ fput(old_exec_file);
51194+#endif
51195
51196 /* execve succeeded */
51197+
51198+ increment_exec_counter();
51199 current->fs->in_exec = 0;
51200 current->in_execve = 0;
51201 acct_update_integrals(current);
51202@@ -1553,6 +1719,14 @@ static int do_execve_common(const char *filename,
51203 put_files_struct(displaced);
51204 return retval;
51205
51206+out_fail:
51207+#ifdef CONFIG_GRKERNSEC
51208+ current->acl = old_acl;
51209+ memcpy(current->signal->rlim, old_rlim, sizeof(old_rlim));
51210+ fput(current->exec_file);
51211+ current->exec_file = old_exec_file;
51212+#endif
51213+
51214 out:
51215 if (bprm->mm) {
51216 acct_arg_size(bprm, 0);
51217@@ -1701,3 +1875,283 @@ asmlinkage long compat_sys_execve(const char __user * filename,
51218 return error;
51219 }
51220 #endif
51221+
51222+int pax_check_flags(unsigned long *flags)
51223+{
51224+ int retval = 0;
51225+
51226+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_SEGMEXEC)
51227+ if (*flags & MF_PAX_SEGMEXEC)
51228+ {
51229+ *flags &= ~MF_PAX_SEGMEXEC;
51230+ retval = -EINVAL;
51231+ }
51232+#endif
51233+
51234+ if ((*flags & MF_PAX_PAGEEXEC)
51235+
51236+#ifdef CONFIG_PAX_PAGEEXEC
51237+ && (*flags & MF_PAX_SEGMEXEC)
51238+#endif
51239+
51240+ )
51241+ {
51242+ *flags &= ~MF_PAX_PAGEEXEC;
51243+ retval = -EINVAL;
51244+ }
51245+
51246+ if ((*flags & MF_PAX_MPROTECT)
51247+
51248+#ifdef CONFIG_PAX_MPROTECT
51249+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
51250+#endif
51251+
51252+ )
51253+ {
51254+ *flags &= ~MF_PAX_MPROTECT;
51255+ retval = -EINVAL;
51256+ }
51257+
51258+ if ((*flags & MF_PAX_EMUTRAMP)
51259+
51260+#ifdef CONFIG_PAX_EMUTRAMP
51261+ && !(*flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC))
51262+#endif
51263+
51264+ )
51265+ {
51266+ *flags &= ~MF_PAX_EMUTRAMP;
51267+ retval = -EINVAL;
51268+ }
51269+
51270+ return retval;
51271+}
51272+
51273+EXPORT_SYMBOL(pax_check_flags);
51274+
51275+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
51276+void pax_report_fault(struct pt_regs *regs, void *pc, void *sp)
51277+{
51278+ struct task_struct *tsk = current;
51279+ struct mm_struct *mm = current->mm;
51280+ char *buffer_exec = (char *)__get_free_page(GFP_KERNEL);
51281+ char *buffer_fault = (char *)__get_free_page(GFP_KERNEL);
51282+ char *path_exec = NULL;
51283+ char *path_fault = NULL;
51284+ unsigned long start = 0UL, end = 0UL, offset = 0UL;
51285+ siginfo_t info = { };
51286+
51287+ if (buffer_exec && buffer_fault) {
51288+ struct vm_area_struct *vma, *vma_exec = NULL, *vma_fault = NULL;
51289+
51290+ down_read(&mm->mmap_sem);
51291+ vma = mm->mmap;
51292+ while (vma && (!vma_exec || !vma_fault)) {
51293+ if (vma->vm_file && mm->exe_file == vma->vm_file && (vma->vm_flags & VM_EXEC))
51294+ vma_exec = vma;
51295+ if (vma->vm_start <= (unsigned long)pc && (unsigned long)pc < vma->vm_end)
51296+ vma_fault = vma;
51297+ vma = vma->vm_next;
51298+ }
51299+ if (vma_exec) {
51300+ path_exec = d_path(&vma_exec->vm_file->f_path, buffer_exec, PAGE_SIZE);
51301+ if (IS_ERR(path_exec))
51302+ path_exec = "<path too long>";
51303+ else {
51304+ path_exec = mangle_path(buffer_exec, path_exec, "\t\n\\");
51305+ if (path_exec) {
51306+ *path_exec = 0;
51307+ path_exec = buffer_exec;
51308+ } else
51309+ path_exec = "<path too long>";
51310+ }
51311+ }
51312+ if (vma_fault) {
51313+ start = vma_fault->vm_start;
51314+ end = vma_fault->vm_end;
51315+ offset = vma_fault->vm_pgoff << PAGE_SHIFT;
51316+ if (vma_fault->vm_file) {
51317+ path_fault = d_path(&vma_fault->vm_file->f_path, buffer_fault, PAGE_SIZE);
51318+ if (IS_ERR(path_fault))
51319+ path_fault = "<path too long>";
51320+ else {
51321+ path_fault = mangle_path(buffer_fault, path_fault, "\t\n\\");
51322+ if (path_fault) {
51323+ *path_fault = 0;
51324+ path_fault = buffer_fault;
51325+ } else
51326+ path_fault = "<path too long>";
51327+ }
51328+ } else
51329+ path_fault = "<anonymous mapping>";
51330+ }
51331+ up_read(&mm->mmap_sem);
51332+ }
51333+ if (tsk->signal->curr_ip)
51334+ printk(KERN_ERR "PAX: From %pI4: execution attempt in: %s, %08lx-%08lx %08lx\n", &tsk->signal->curr_ip, path_fault, start, end, offset);
51335+ else
51336+ printk(KERN_ERR "PAX: execution attempt in: %s, %08lx-%08lx %08lx\n", path_fault, start, end, offset);
51337+ printk(KERN_ERR "PAX: terminating task: %s(%s):%d, uid/euid: %u/%u, PC: %p, SP: %p\n", path_exec, tsk->comm, task_pid_nr(tsk),
51338+ from_kuid_munged(&init_user_ns, task_uid(tsk)), from_kuid_munged(&init_user_ns, task_euid(tsk)), pc, sp);
51339+ free_page((unsigned long)buffer_exec);
51340+ free_page((unsigned long)buffer_fault);
51341+ pax_report_insns(regs, pc, sp);
51342+ info.si_signo = SIGKILL;
51343+ info.si_errno = 0;
51344+ info.si_code = SI_KERNEL;
51345+ info.si_pid = 0;
51346+ info.si_uid = 0;
51347+ do_coredump(&info);
51348+}
51349+#endif
51350+
51351+#ifdef CONFIG_PAX_REFCOUNT
51352+void pax_report_refcount_overflow(struct pt_regs *regs)
51353+{
51354+ if (current->signal->curr_ip)
51355+ printk(KERN_ERR "PAX: From %pI4: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n",
51356+ &current->signal->curr_ip, current->comm, task_pid_nr(current),
51357+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
51358+ else
51359+ printk(KERN_ERR "PAX: refcount overflow detected in: %s:%d, uid/euid: %u/%u\n", current->comm, task_pid_nr(current),
51360+ from_kuid_munged(&init_user_ns, current_uid()), from_kuid_munged(&init_user_ns, current_euid()));
51361+ print_symbol(KERN_ERR "PAX: refcount overflow occurred at: %s\n", instruction_pointer(regs));
51362+ show_regs(regs);
51363+ force_sig_info(SIGKILL, SEND_SIG_FORCED, current);
51364+}
51365+#endif
51366+
51367+#ifdef CONFIG_PAX_USERCOPY
51368+/* 0: not at all, 1: fully, 2: fully inside frame, -1: partially (implies an error) */
51369+static noinline int check_stack_object(const void *obj, unsigned long len)
51370+{
51371+ const void * const stack = task_stack_page(current);
51372+ const void * const stackend = stack + THREAD_SIZE;
51373+
51374+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
51375+ const void *frame = NULL;
51376+ const void *oldframe;
51377+#endif
51378+
51379+ if (obj + len < obj)
51380+ return -1;
51381+
51382+ if (obj + len <= stack || stackend <= obj)
51383+ return 0;
51384+
51385+ if (obj < stack || stackend < obj + len)
51386+ return -1;
51387+
51388+#if defined(CONFIG_FRAME_POINTER) && defined(CONFIG_X86)
51389+ oldframe = __builtin_frame_address(1);
51390+ if (oldframe)
51391+ frame = __builtin_frame_address(2);
51392+ /*
51393+ low ----------------------------------------------> high
51394+ [saved bp][saved ip][args][local vars][saved bp][saved ip]
51395+ ^----------------^
51396+ allow copies only within here
51397+ */
51398+ while (stack <= frame && frame < stackend) {
51399+ /* if obj + len extends past the last frame, this
51400+ check won't pass and the next frame will be 0,
51401+ causing us to bail out and correctly report
51402+ the copy as invalid
51403+ */
51404+ if (obj + len <= frame)
51405+ return obj >= oldframe + 2 * sizeof(void *) ? 2 : -1;
51406+ oldframe = frame;
51407+ frame = *(const void * const *)frame;
51408+ }
51409+ return -1;
51410+#else
51411+ return 1;
51412+#endif
51413+}
51414+
51415+static __noreturn void pax_report_usercopy(const void *ptr, unsigned long len, bool to_user, const char *type)
51416+{
51417+ if (current->signal->curr_ip)
51418+ printk(KERN_ERR "PAX: From %pI4: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
51419+ &current->signal->curr_ip, to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
51420+ else
51421+ printk(KERN_ERR "PAX: kernel memory %s attempt detected %s %p (%s) (%lu bytes)\n",
51422+ to_user ? "leak" : "overwrite", to_user ? "from" : "to", ptr, type ? : "unknown", len);
51423+ dump_stack();
51424+ gr_handle_kernel_exploit();
51425+ do_group_exit(SIGKILL);
51426+}
51427+#endif
51428+
51429+#ifdef CONFIG_PAX_USERCOPY
51430+static inline bool check_kernel_text_object(unsigned long low, unsigned long high)
51431+{
51432+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
51433+ unsigned long textlow = ktla_ktva((unsigned long)_stext);
51434+#ifdef CONFIG_MODULES
51435+ unsigned long texthigh = (unsigned long)MODULES_EXEC_VADDR;
51436+#else
51437+ unsigned long texthigh = ktla_ktva((unsigned long)_etext);
51438+#endif
51439+
51440+#else
51441+ unsigned long textlow = (unsigned long)_stext;
51442+ unsigned long texthigh = (unsigned long)_etext;
51443+#endif
51444+
51445+ if (high <= textlow || low > texthigh)
51446+ return false;
51447+ else
51448+ return true;
51449+}
51450+#endif
51451+
51452+void __check_object_size(const void *ptr, unsigned long n, bool to_user)
51453+{
51454+
51455+#ifdef CONFIG_PAX_USERCOPY
51456+ const char *type;
51457+
51458+ if (!n)
51459+ return;
51460+
51461+ type = check_heap_object(ptr, n);
51462+ if (!type) {
51463+ int ret = check_stack_object(ptr, n);
51464+ if (ret == 1 || ret == 2)
51465+ return;
51466+ if (ret == 0) {
51467+ if (check_kernel_text_object((unsigned long)ptr, (unsigned long)ptr + n))
51468+ type = "<kernel text>";
51469+ else
51470+ return;
51471+ } else
51472+ type = "<process stack>";
51473+ }
51474+
51475+ pax_report_usercopy(ptr, n, to_user, type);
51476+#endif
51477+
51478+}
51479+EXPORT_SYMBOL(__check_object_size);
51480+
51481+#ifdef CONFIG_PAX_MEMORY_STACKLEAK
51482+void pax_track_stack(void)
51483+{
51484+ unsigned long sp = (unsigned long)&sp;
51485+ if (sp < current_thread_info()->lowest_stack &&
51486+ sp > (unsigned long)task_stack_page(current))
51487+ current_thread_info()->lowest_stack = sp;
51488+}
51489+EXPORT_SYMBOL(pax_track_stack);
51490+#endif
51491+
51492+#ifdef CONFIG_PAX_SIZE_OVERFLOW
51493+void report_size_overflow(const char *file, unsigned int line, const char *func, const char *ssa_name)
51494+{
51495+ printk(KERN_ERR "PAX: size overflow detected in function %s %s:%u %s\n", func, file, line, ssa_name);
51496+ dump_stack();
51497+ do_group_exit(SIGKILL);
51498+}
51499+EXPORT_SYMBOL(report_size_overflow);
51500+#endif
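
check_stack_object() above classifies a usercopy buffer against the current task's kernel stack: 0 means "not on this stack", 1 "fully inside", -1 "partial overlap" (rejected), and 2 "inside the current frame" when the x86 frame-pointer walk is available. A userspace model of the interval logic only (integer addresses stand in for task_stack_page()/THREAD_SIZE; the frame walk is omitted):

#include <stdint.h>
#include <stdio.h>

/* 0: not on this stack, 1: fully inside, -1: partial overlap (error),
 * mirroring the first three checks in check_stack_object(). */
static int classify(uintptr_t stack, uintptr_t stackend,
		    uintptr_t obj, uintptr_t len)
{
	if (obj + len < obj)			/* address wraparound */
		return -1;
	if (obj + len <= stack || stackend <= obj)
		return 0;			/* entirely elsewhere */
	if (obj < stack || stackend < obj + len)
		return -1;			/* straddles a stack boundary */
	return 1;				/* fully contained */
}

int main(void)
{
	uintptr_t stack = 0x10000, end = stack + 8192;	/* fake THREAD_SIZE region */

	printf("%d\n", classify(stack, end, stack + 100, 64));	/* 1  */
	printf("%d\n", classify(stack, end, stack - 512, 64));	/* 0  */
	printf("%d\n", classify(stack, end, end - 16, 64));	/* -1 */
	return 0;
}
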
51501diff --git a/fs/ext2/balloc.c b/fs/ext2/balloc.c
51502index 9f9992b..8b59411 100644
51503--- a/fs/ext2/balloc.c
51504+++ b/fs/ext2/balloc.c
51505@@ -1184,10 +1184,10 @@ static int ext2_has_free_blocks(struct ext2_sb_info *sbi)
51506
51507 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
51508 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
51509- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
51510+ if (free_blocks < root_blocks + 1 &&
51511 !uid_eq(sbi->s_resuid, current_fsuid()) &&
51512 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
51513- !in_group_p (sbi->s_resgid))) {
51514+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
51515 return 0;
51516 }
51517 return 1;
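
The reordering in ext2_has_free_blocks() (repeated for ext3/ext4 below) is about side effects, not logic: capable() records the capability check for auditing, so the old placement logged a CAP_SYS_RESOURCE use every time the filesystem was merely low on blocks. The patch moves the privilege test to the end of the && chain and switches to the quiet capable_nolog(). A toy demonstration of why operand order matters under short-circuit evaluation (noisy_priv_check() is a stand-in for capable(); is_reserved_owner() for the cheap uid/gid tests):

#include <stdbool.h>
#include <stdio.h>

static bool noisy_priv_check(void)	/* stands in for capable(): has a side effect */
{
	puts("  privilege check performed (would be audit-logged)");
	return false;			/* caller is unprivileged */
}

static bool is_reserved_owner(void) { return true; }	/* cheap, silent test */

int main(void)
{
	puts("old order (privilege checked before ownership):");
	if (!noisy_priv_check() && !is_reserved_owner())
		puts("  denied");

	puts("new order (cheap ownership test first):");
	if (!is_reserved_owner() && !noisy_priv_check())
		puts("  denied");	/* privilege check never runs when the owner matches */
	return 0;
}
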
51518diff --git a/fs/ext3/balloc.c b/fs/ext3/balloc.c
51519index 22548f5..41521d8 100644
51520--- a/fs/ext3/balloc.c
51521+++ b/fs/ext3/balloc.c
51522@@ -1438,10 +1438,10 @@ static int ext3_has_free_blocks(struct ext3_sb_info *sbi, int use_reservation)
51523
51524 free_blocks = percpu_counter_read_positive(&sbi->s_freeblocks_counter);
51525 root_blocks = le32_to_cpu(sbi->s_es->s_r_blocks_count);
51526- if (free_blocks < root_blocks + 1 && !capable(CAP_SYS_RESOURCE) &&
51527+ if (free_blocks < root_blocks + 1 &&
51528 !use_reservation && !uid_eq(sbi->s_resuid, current_fsuid()) &&
51529 (gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) ||
51530- !in_group_p (sbi->s_resgid))) {
51531+ !in_group_p (sbi->s_resgid)) && !capable_nolog(CAP_SYS_RESOURCE)) {
51532 return 0;
51533 }
51534 return 1;
51535diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
51536index 3742e4c..69a797f 100644
51537--- a/fs/ext4/balloc.c
51538+++ b/fs/ext4/balloc.c
51539@@ -528,8 +528,8 @@ static int ext4_has_free_clusters(struct ext4_sb_info *sbi,
51540 /* Hm, nope. Are (enough) root reserved clusters available? */
51541 if (uid_eq(sbi->s_resuid, current_fsuid()) ||
51542 (!gid_eq(sbi->s_resgid, GLOBAL_ROOT_GID) && in_group_p(sbi->s_resgid)) ||
51543- capable(CAP_SYS_RESOURCE) ||
51544- (flags & EXT4_MB_USE_ROOT_BLOCKS)) {
51545+ (flags & EXT4_MB_USE_ROOT_BLOCKS) ||
51546+ capable_nolog(CAP_SYS_RESOURCE)) {
51547
51548 if (free_clusters >= (nclusters + dirty_clusters +
51549 resv_clusters))
51550diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
51551index 5aae3d1..b5da7f8 100644
51552--- a/fs/ext4/ext4.h
51553+++ b/fs/ext4/ext4.h
51554@@ -1252,19 +1252,19 @@ struct ext4_sb_info {
51555 unsigned long s_mb_last_start;
51556
51557 /* stats for buddy allocator */
51558- atomic_t s_bal_reqs; /* number of reqs with len > 1 */
51559- atomic_t s_bal_success; /* we found long enough chunks */
51560- atomic_t s_bal_allocated; /* in blocks */
51561- atomic_t s_bal_ex_scanned; /* total extents scanned */
51562- atomic_t s_bal_goals; /* goal hits */
51563- atomic_t s_bal_breaks; /* too long searches */
51564- atomic_t s_bal_2orders; /* 2^order hits */
51565+ atomic_unchecked_t s_bal_reqs; /* number of reqs with len > 1 */
51566+ atomic_unchecked_t s_bal_success; /* we found long enough chunks */
51567+ atomic_unchecked_t s_bal_allocated; /* in blocks */
51568+ atomic_unchecked_t s_bal_ex_scanned; /* total extents scanned */
51569+ atomic_unchecked_t s_bal_goals; /* goal hits */
51570+ atomic_unchecked_t s_bal_breaks; /* too long searches */
51571+ atomic_unchecked_t s_bal_2orders; /* 2^order hits */
51572 spinlock_t s_bal_lock;
51573 unsigned long s_mb_buddies_generated;
51574 unsigned long long s_mb_generation_time;
51575- atomic_t s_mb_lost_chunks;
51576- atomic_t s_mb_preallocated;
51577- atomic_t s_mb_discarded;
51578+ atomic_unchecked_t s_mb_lost_chunks;
51579+ atomic_unchecked_t s_mb_preallocated;
51580+ atomic_unchecked_t s_mb_discarded;
51581 atomic_t s_lock_busy;
51582
51583 /* locality groups */
51584diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
51585index 59c6750..a549154 100644
51586--- a/fs/ext4/mballoc.c
51587+++ b/fs/ext4/mballoc.c
51588@@ -1865,7 +1865,7 @@ void ext4_mb_simple_scan_group(struct ext4_allocation_context *ac,
51589 BUG_ON(ac->ac_b_ex.fe_len != ac->ac_g_ex.fe_len);
51590
51591 if (EXT4_SB(sb)->s_mb_stats)
51592- atomic_inc(&EXT4_SB(sb)->s_bal_2orders);
51593+ atomic_inc_unchecked(&EXT4_SB(sb)->s_bal_2orders);
51594
51595 break;
51596 }
51597@@ -2170,7 +2170,7 @@ repeat:
51598 ac->ac_status = AC_STATUS_CONTINUE;
51599 ac->ac_flags |= EXT4_MB_HINT_FIRST;
51600 cr = 3;
51601- atomic_inc(&sbi->s_mb_lost_chunks);
51602+ atomic_inc_unchecked(&sbi->s_mb_lost_chunks);
51603 goto repeat;
51604 }
51605 }
51606@@ -2678,25 +2678,25 @@ int ext4_mb_release(struct super_block *sb)
51607 if (sbi->s_mb_stats) {
51608 ext4_msg(sb, KERN_INFO,
51609 "mballoc: %u blocks %u reqs (%u success)",
51610- atomic_read(&sbi->s_bal_allocated),
51611- atomic_read(&sbi->s_bal_reqs),
51612- atomic_read(&sbi->s_bal_success));
51613+ atomic_read_unchecked(&sbi->s_bal_allocated),
51614+ atomic_read_unchecked(&sbi->s_bal_reqs),
51615+ atomic_read_unchecked(&sbi->s_bal_success));
51616 ext4_msg(sb, KERN_INFO,
51617 "mballoc: %u extents scanned, %u goal hits, "
51618 "%u 2^N hits, %u breaks, %u lost",
51619- atomic_read(&sbi->s_bal_ex_scanned),
51620- atomic_read(&sbi->s_bal_goals),
51621- atomic_read(&sbi->s_bal_2orders),
51622- atomic_read(&sbi->s_bal_breaks),
51623- atomic_read(&sbi->s_mb_lost_chunks));
51624+ atomic_read_unchecked(&sbi->s_bal_ex_scanned),
51625+ atomic_read_unchecked(&sbi->s_bal_goals),
51626+ atomic_read_unchecked(&sbi->s_bal_2orders),
51627+ atomic_read_unchecked(&sbi->s_bal_breaks),
51628+ atomic_read_unchecked(&sbi->s_mb_lost_chunks));
51629 ext4_msg(sb, KERN_INFO,
51630 "mballoc: %lu generated and it took %Lu",
51631 sbi->s_mb_buddies_generated,
51632 sbi->s_mb_generation_time);
51633 ext4_msg(sb, KERN_INFO,
51634 "mballoc: %u preallocated, %u discarded",
51635- atomic_read(&sbi->s_mb_preallocated),
51636- atomic_read(&sbi->s_mb_discarded));
51637+ atomic_read_unchecked(&sbi->s_mb_preallocated),
51638+ atomic_read_unchecked(&sbi->s_mb_discarded));
51639 }
51640
51641 free_percpu(sbi->s_locality_groups);
51642@@ -3150,16 +3150,16 @@ static void ext4_mb_collect_stats(struct ext4_allocation_context *ac)
51643 struct ext4_sb_info *sbi = EXT4_SB(ac->ac_sb);
51644
51645 if (sbi->s_mb_stats && ac->ac_g_ex.fe_len > 1) {
51646- atomic_inc(&sbi->s_bal_reqs);
51647- atomic_add(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
51648+ atomic_inc_unchecked(&sbi->s_bal_reqs);
51649+ atomic_add_unchecked(ac->ac_b_ex.fe_len, &sbi->s_bal_allocated);
51650 if (ac->ac_b_ex.fe_len >= ac->ac_o_ex.fe_len)
51651- atomic_inc(&sbi->s_bal_success);
51652- atomic_add(ac->ac_found, &sbi->s_bal_ex_scanned);
51653+ atomic_inc_unchecked(&sbi->s_bal_success);
51654+ atomic_add_unchecked(ac->ac_found, &sbi->s_bal_ex_scanned);
51655 if (ac->ac_g_ex.fe_start == ac->ac_b_ex.fe_start &&
51656 ac->ac_g_ex.fe_group == ac->ac_b_ex.fe_group)
51657- atomic_inc(&sbi->s_bal_goals);
51658+ atomic_inc_unchecked(&sbi->s_bal_goals);
51659 if (ac->ac_found > sbi->s_mb_max_to_scan)
51660- atomic_inc(&sbi->s_bal_breaks);
51661+ atomic_inc_unchecked(&sbi->s_bal_breaks);
51662 }
51663
51664 if (ac->ac_op == EXT4_MB_HISTORY_ALLOC)
51665@@ -3559,7 +3559,7 @@ ext4_mb_new_inode_pa(struct ext4_allocation_context *ac)
51666 trace_ext4_mb_new_inode_pa(ac, pa);
51667
51668 ext4_mb_use_inode_pa(ac, pa);
51669- atomic_add(pa->pa_free, &sbi->s_mb_preallocated);
51670+ atomic_add_unchecked(pa->pa_free, &sbi->s_mb_preallocated);
51671
51672 ei = EXT4_I(ac->ac_inode);
51673 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
51674@@ -3619,7 +3619,7 @@ ext4_mb_new_group_pa(struct ext4_allocation_context *ac)
51675 trace_ext4_mb_new_group_pa(ac, pa);
51676
51677 ext4_mb_use_group_pa(ac, pa);
51678- atomic_add(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
51679+ atomic_add_unchecked(pa->pa_free, &EXT4_SB(sb)->s_mb_preallocated);
51680
51681 grp = ext4_get_group_info(sb, ac->ac_b_ex.fe_group);
51682 lg = ac->ac_lg;
51683@@ -3708,7 +3708,7 @@ ext4_mb_release_inode_pa(struct ext4_buddy *e4b, struct buffer_head *bitmap_bh,
51684 * from the bitmap and continue.
51685 */
51686 }
51687- atomic_add(free, &sbi->s_mb_discarded);
51688+ atomic_add_unchecked(free, &sbi->s_mb_discarded);
51689
51690 return err;
51691 }
51692@@ -3726,7 +3726,7 @@ ext4_mb_release_group_pa(struct ext4_buddy *e4b,
51693 ext4_get_group_no_and_offset(sb, pa->pa_pstart, &group, &bit);
51694 BUG_ON(group != e4b->bd_group && pa->pa_len != 0);
51695 mb_free_blocks(pa->pa_inode, e4b, bit, pa->pa_len);
51696- atomic_add(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
51697+ atomic_add_unchecked(pa->pa_len, &EXT4_SB(sb)->s_mb_discarded);
51698 trace_ext4_mballoc_discard(sb, NULL, group, bit, pa->pa_len);
51699
51700 return 0;
51701diff --git a/fs/ext4/mmp.c b/fs/ext4/mmp.c
51702index 214461e..3614c89 100644
51703--- a/fs/ext4/mmp.c
51704+++ b/fs/ext4/mmp.c
51705@@ -113,7 +113,7 @@ static int read_mmp_block(struct super_block *sb, struct buffer_head **bh,
51706 void __dump_mmp_msg(struct super_block *sb, struct mmp_struct *mmp,
51707 const char *function, unsigned int line, const char *msg)
51708 {
51709- __ext4_warning(sb, function, line, msg);
51710+ __ext4_warning(sb, function, line, "%s", msg);
51711 __ext4_warning(sb, function, line,
51712 "MMP failure info: last update time: %llu, last update "
51713 "node: %s, last update device: %s\n",
51714diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
51715index 49d3c01..9579efd 100644
51716--- a/fs/ext4/resize.c
51717+++ b/fs/ext4/resize.c
51718@@ -79,12 +79,20 @@ static int verify_group_input(struct super_block *sb,
51719 ext4_fsblk_t end = start + input->blocks_count;
51720 ext4_group_t group = input->group;
51721 ext4_fsblk_t itend = input->inode_table + sbi->s_itb_per_group;
51722- unsigned overhead = ext4_group_overhead_blocks(sb, group);
51723- ext4_fsblk_t metaend = start + overhead;
51724+ unsigned overhead;
51725+ ext4_fsblk_t metaend;
51726 struct buffer_head *bh = NULL;
51727 ext4_grpblk_t free_blocks_count, offset;
51728 int err = -EINVAL;
51729
51730+ if (group != sbi->s_groups_count) {
51731+ ext4_warning(sb, "Cannot add at group %u (only %u groups)",
51732+ input->group, sbi->s_groups_count);
51733+ return -EINVAL;
51734+ }
51735+
51736+ overhead = ext4_group_overhead_blocks(sb, group);
51737+ metaend = start + overhead;
51738 input->free_blocks_count = free_blocks_count =
51739 input->blocks_count - 2 - overhead - sbi->s_itb_per_group;
51740
51741@@ -96,10 +104,7 @@ static int verify_group_input(struct super_block *sb,
51742 free_blocks_count, input->reserved_blocks);
51743
51744 ext4_get_group_no_and_offset(sb, start, NULL, &offset);
51745- if (group != sbi->s_groups_count)
51746- ext4_warning(sb, "Cannot add at group %u (only %u groups)",
51747- input->group, sbi->s_groups_count);
51748- else if (offset != 0)
51749+ if (offset != 0)
51750 ext4_warning(sb, "Last group not full");
51751 else if (input->reserved_blocks > input->blocks_count / 5)
51752 ext4_warning(sb, "Reserved blocks too high (%u)",
51753diff --git a/fs/ext4/super.c b/fs/ext4/super.c
51754index 6681c03..d88cd33 100644
51755--- a/fs/ext4/super.c
51756+++ b/fs/ext4/super.c
51757@@ -1236,7 +1236,7 @@ static ext4_fsblk_t get_sb_block(void **data)
51758 }
51759
51760 #define DEFAULT_JOURNAL_IOPRIO (IOPRIO_PRIO_VALUE(IOPRIO_CLASS_BE, 3))
51761-static char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
51762+static const char deprecated_msg[] = "Mount option \"%s\" will be removed by %s\n"
51763 "Contact linux-ext4@vger.kernel.org if you think we should keep it.\n";
51764
51765 #ifdef CONFIG_QUOTA
51766@@ -2372,7 +2372,7 @@ struct ext4_attr {
51767 ssize_t (*store)(struct ext4_attr *, struct ext4_sb_info *,
51768 const char *, size_t);
51769 int offset;
51770-};
51771+} __do_const;
51772
51773 static int parse_strtoull(const char *buf,
51774 unsigned long long max, unsigned long long *value)
51775diff --git a/fs/fcntl.c b/fs/fcntl.c
51776index 6599222..e7bf0de 100644
51777--- a/fs/fcntl.c
51778+++ b/fs/fcntl.c
51779@@ -107,6 +107,11 @@ int __f_setown(struct file *filp, struct pid *pid, enum pid_type type,
51780 if (err)
51781 return err;
51782
51783+ if (gr_handle_chroot_fowner(pid, type))
51784+ return -ENOENT;
51785+ if (gr_check_protected_task_fowner(pid, type))
51786+ return -EACCES;
51787+
51788 f_modown(filp, pid, type, force);
51789 return 0;
51790 }
51791diff --git a/fs/fhandle.c b/fs/fhandle.c
51792index 999ff5c..41f4109 100644
51793--- a/fs/fhandle.c
51794+++ b/fs/fhandle.c
51795@@ -67,8 +67,7 @@ static long do_sys_name_to_handle(struct path *path,
51796 } else
51797 retval = 0;
51798 /* copy the mount id */
51799- if (copy_to_user(mnt_id, &real_mount(path->mnt)->mnt_id,
51800- sizeof(*mnt_id)) ||
51801+ if (put_user(real_mount(path->mnt)->mnt_id, mnt_id) ||
51802 copy_to_user(ufh, handle,
51803 sizeof(struct file_handle) + handle_bytes))
51804 retval = -EFAULT;
51805diff --git a/fs/file.c b/fs/file.c
51806index 4a78f98..9447397 100644
51807--- a/fs/file.c
51808+++ b/fs/file.c
51809@@ -16,6 +16,7 @@
51810 #include <linux/slab.h>
51811 #include <linux/vmalloc.h>
51812 #include <linux/file.h>
51813+#include <linux/security.h>
51814 #include <linux/fdtable.h>
51815 #include <linux/bitops.h>
51816 #include <linux/interrupt.h>
51817@@ -828,6 +829,7 @@ int replace_fd(unsigned fd, struct file *file, unsigned flags)
51818 if (!file)
51819 return __close_fd(files, fd);
51820
51821+ gr_learn_resource(current, RLIMIT_NOFILE, fd, 0);
51822 if (fd >= rlimit(RLIMIT_NOFILE))
51823 return -EBADF;
51824
51825@@ -854,6 +856,7 @@ SYSCALL_DEFINE3(dup3, unsigned int, oldfd, unsigned int, newfd, int, flags)
51826 if (unlikely(oldfd == newfd))
51827 return -EINVAL;
51828
51829+ gr_learn_resource(current, RLIMIT_NOFILE, newfd, 0);
51830 if (newfd >= rlimit(RLIMIT_NOFILE))
51831 return -EBADF;
51832
51833@@ -909,6 +912,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
51834 int f_dupfd(unsigned int from, struct file *file, unsigned flags)
51835 {
51836 int err;
51837+ gr_learn_resource(current, RLIMIT_NOFILE, from, 0);
51838 if (from >= rlimit(RLIMIT_NOFILE))
51839 return -EINVAL;
51840 err = alloc_fd(from, flags);
51841diff --git a/fs/filesystems.c b/fs/filesystems.c
51842index 92567d9..fcd8cbf 100644
51843--- a/fs/filesystems.c
51844+++ b/fs/filesystems.c
51845@@ -273,7 +273,11 @@ struct file_system_type *get_fs_type(const char *name)
51846 int len = dot ? dot - name : strlen(name);
51847
51848 fs = __get_fs_type(name, len);
51849+#ifdef CONFIG_GRKERNSEC_MODHARDEN
51850+ if (!fs && (___request_module(true, "grsec_modharden_fs", "fs-%.*s", len, name) == 0))
51851+#else
51852 if (!fs && (request_module("fs-%.*s", len, name) == 0))
51853+#endif
51854 fs = __get_fs_type(name, len);
51855
51856 if (dot && fs && !(fs->fs_flags & FS_HAS_SUBTYPE)) {
51857diff --git a/fs/fs_struct.c b/fs/fs_struct.c
51858index d8ac61d..79a36f0 100644
51859--- a/fs/fs_struct.c
51860+++ b/fs/fs_struct.c
51861@@ -4,6 +4,7 @@
51862 #include <linux/path.h>
51863 #include <linux/slab.h>
51864 #include <linux/fs_struct.h>
51865+#include <linux/grsecurity.h>
51866 #include "internal.h"
51867
51868 /*
51869@@ -19,6 +20,7 @@ void set_fs_root(struct fs_struct *fs, const struct path *path)
51870 write_seqcount_begin(&fs->seq);
51871 old_root = fs->root;
51872 fs->root = *path;
51873+ gr_set_chroot_entries(current, path);
51874 write_seqcount_end(&fs->seq);
51875 spin_unlock(&fs->lock);
51876 if (old_root.dentry)
51877@@ -67,6 +69,10 @@ void chroot_fs_refs(const struct path *old_root, const struct path *new_root)
51878 int hits = 0;
51879 spin_lock(&fs->lock);
51880 write_seqcount_begin(&fs->seq);
51881+ /* this root replacement is only done by pivot_root,
51882+ leave grsec's chroot tagging alone for this task
51883+ so that a pivoted root isn't treated as a chroot
51884+ */
51885 hits += replace_path(&fs->root, old_root, new_root);
51886 hits += replace_path(&fs->pwd, old_root, new_root);
51887 write_seqcount_end(&fs->seq);
51888@@ -99,7 +105,8 @@ void exit_fs(struct task_struct *tsk)
51889 task_lock(tsk);
51890 spin_lock(&fs->lock);
51891 tsk->fs = NULL;
51892- kill = !--fs->users;
51893+ gr_clear_chroot_entries(tsk);
51894+ kill = !atomic_dec_return(&fs->users);
51895 spin_unlock(&fs->lock);
51896 task_unlock(tsk);
51897 if (kill)
51898@@ -112,7 +119,7 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
51899 struct fs_struct *fs = kmem_cache_alloc(fs_cachep, GFP_KERNEL);
51900 /* We don't need to lock fs - think why ;-) */
51901 if (fs) {
51902- fs->users = 1;
51903+ atomic_set(&fs->users, 1);
51904 fs->in_exec = 0;
51905 spin_lock_init(&fs->lock);
51906 seqcount_init(&fs->seq);
51907@@ -121,6 +128,9 @@ struct fs_struct *copy_fs_struct(struct fs_struct *old)
51908 spin_lock(&old->lock);
51909 fs->root = old->root;
51910 path_get(&fs->root);
51911+ /* instead of calling gr_set_chroot_entries here,
51912+ we call it from every caller of this function
51913+ */
51914 fs->pwd = old->pwd;
51915 path_get(&fs->pwd);
51916 spin_unlock(&old->lock);
51917@@ -139,8 +149,9 @@ int unshare_fs_struct(void)
51918
51919 task_lock(current);
51920 spin_lock(&fs->lock);
51921- kill = !--fs->users;
51922+ kill = !atomic_dec_return(&fs->users);
51923 current->fs = new_fs;
51924+ gr_set_chroot_entries(current, &new_fs->root);
51925 spin_unlock(&fs->lock);
51926 task_unlock(current);
51927
51928@@ -153,13 +164,13 @@ EXPORT_SYMBOL_GPL(unshare_fs_struct);
51929
51930 int current_umask(void)
51931 {
51932- return current->fs->umask;
51933+ return current->fs->umask | gr_acl_umask();
51934 }
51935 EXPORT_SYMBOL(current_umask);
51936
51937 /* to be mentioned only in INIT_TASK */
51938 struct fs_struct init_fs = {
51939- .users = 1,
51940+ .users = ATOMIC_INIT(1),
51941 .lock = __SPIN_LOCK_UNLOCKED(init_fs.lock),
51942 .seq = SEQCNT_ZERO,
51943 .umask = 0022,
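
The fs_struct conversion turns the spinlock-protected int users into an atomic_t so callers such as the check_unsafe_exec() hunk above can read it with atomic_read() without taking fs->lock, and exit_fs()/unshare_fs_struct() can drop it with atomic_dec_return(). A minimal userspace model of the converted pattern, with C11 atomics standing in for kernel atomic_t (put_fs() is an illustrative name, not a kernel function):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct fs_struct {
	atomic_int users;	/* was: int users, guarded by fs->lock */
};

/* exit_fs()-style teardown: kill = !atomic_dec_return(&fs->users) */
static bool put_fs(struct fs_struct *fs)
{
	return atomic_fetch_sub(&fs->users, 1) - 1 == 0;
}

int main(void)
{
	struct fs_struct fs = { .users = 1 };	/* ATOMIC_INIT(1) */

	atomic_fetch_add(&fs.users, 1);		/* a second task shares it */
	printf("free after first put: %d\n", put_fs(&fs));	/* 0 */
	printf("free after last put:  %d\n", put_fs(&fs));	/* 1 */
	return 0;
}
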
51944diff --git a/fs/fscache/cookie.c b/fs/fscache/cookie.c
51945index e2cba1f..17a25bb 100644
51946--- a/fs/fscache/cookie.c
51947+++ b/fs/fscache/cookie.c
51948@@ -68,11 +68,11 @@ struct fscache_cookie *__fscache_acquire_cookie(
51949 parent ? (char *) parent->def->name : "<no-parent>",
51950 def->name, netfs_data);
51951
51952- fscache_stat(&fscache_n_acquires);
51953+ fscache_stat_unchecked(&fscache_n_acquires);
51954
51955 /* if there's no parent cookie, then we don't create one here either */
51956 if (!parent) {
51957- fscache_stat(&fscache_n_acquires_null);
51958+ fscache_stat_unchecked(&fscache_n_acquires_null);
51959 _leave(" [no parent]");
51960 return NULL;
51961 }
51962@@ -87,7 +87,7 @@ struct fscache_cookie *__fscache_acquire_cookie(
51963 /* allocate and initialise a cookie */
51964 cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
51965 if (!cookie) {
51966- fscache_stat(&fscache_n_acquires_oom);
51967+ fscache_stat_unchecked(&fscache_n_acquires_oom);
51968 _leave(" [ENOMEM]");
51969 return NULL;
51970 }
51971@@ -109,13 +109,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
51972
51973 switch (cookie->def->type) {
51974 case FSCACHE_COOKIE_TYPE_INDEX:
51975- fscache_stat(&fscache_n_cookie_index);
51976+ fscache_stat_unchecked(&fscache_n_cookie_index);
51977 break;
51978 case FSCACHE_COOKIE_TYPE_DATAFILE:
51979- fscache_stat(&fscache_n_cookie_data);
51980+ fscache_stat_unchecked(&fscache_n_cookie_data);
51981 break;
51982 default:
51983- fscache_stat(&fscache_n_cookie_special);
51984+ fscache_stat_unchecked(&fscache_n_cookie_special);
51985 break;
51986 }
51987
51988@@ -126,13 +126,13 @@ struct fscache_cookie *__fscache_acquire_cookie(
51989 if (fscache_acquire_non_index_cookie(cookie) < 0) {
51990 atomic_dec(&parent->n_children);
51991 __fscache_cookie_put(cookie);
51992- fscache_stat(&fscache_n_acquires_nobufs);
51993+ fscache_stat_unchecked(&fscache_n_acquires_nobufs);
51994 _leave(" = NULL");
51995 return NULL;
51996 }
51997 }
51998
51999- fscache_stat(&fscache_n_acquires_ok);
52000+ fscache_stat_unchecked(&fscache_n_acquires_ok);
52001 _leave(" = %p", cookie);
52002 return cookie;
52003 }
52004@@ -168,7 +168,7 @@ static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
52005 cache = fscache_select_cache_for_object(cookie->parent);
52006 if (!cache) {
52007 up_read(&fscache_addremove_sem);
52008- fscache_stat(&fscache_n_acquires_no_cache);
52009+ fscache_stat_unchecked(&fscache_n_acquires_no_cache);
52010 _leave(" = -ENOMEDIUM [no cache]");
52011 return -ENOMEDIUM;
52012 }
52013@@ -255,12 +255,12 @@ static int fscache_alloc_object(struct fscache_cache *cache,
52014 object = cache->ops->alloc_object(cache, cookie);
52015 fscache_stat_d(&fscache_n_cop_alloc_object);
52016 if (IS_ERR(object)) {
52017- fscache_stat(&fscache_n_object_no_alloc);
52018+ fscache_stat_unchecked(&fscache_n_object_no_alloc);
52019 ret = PTR_ERR(object);
52020 goto error;
52021 }
52022
52023- fscache_stat(&fscache_n_object_alloc);
52024+ fscache_stat_unchecked(&fscache_n_object_alloc);
52025
52026 object->debug_id = atomic_inc_return(&fscache_object_debug_id);
52027
52028@@ -376,7 +376,7 @@ void __fscache_invalidate(struct fscache_cookie *cookie)
52029
52030 _enter("{%s}", cookie->def->name);
52031
52032- fscache_stat(&fscache_n_invalidates);
52033+ fscache_stat_unchecked(&fscache_n_invalidates);
52034
52035 /* Only permit invalidation of data files. Invalidating an index will
52036 * require the caller to release all its attachments to the tree rooted
52037@@ -434,10 +434,10 @@ void __fscache_update_cookie(struct fscache_cookie *cookie)
52038 {
52039 struct fscache_object *object;
52040
52041- fscache_stat(&fscache_n_updates);
52042+ fscache_stat_unchecked(&fscache_n_updates);
52043
52044 if (!cookie) {
52045- fscache_stat(&fscache_n_updates_null);
52046+ fscache_stat_unchecked(&fscache_n_updates_null);
52047 _leave(" [no cookie]");
52048 return;
52049 }
52050@@ -471,12 +471,12 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
52051 struct fscache_object *object;
52052 unsigned long event;
52053
52054- fscache_stat(&fscache_n_relinquishes);
52055+ fscache_stat_unchecked(&fscache_n_relinquishes);
52056 if (retire)
52057- fscache_stat(&fscache_n_relinquishes_retire);
52058+ fscache_stat_unchecked(&fscache_n_relinquishes_retire);
52059
52060 if (!cookie) {
52061- fscache_stat(&fscache_n_relinquishes_null);
52062+ fscache_stat_unchecked(&fscache_n_relinquishes_null);
52063 _leave(" [no cookie]");
52064 return;
52065 }
52066@@ -492,7 +492,7 @@ void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
52067
52068 /* wait for the cookie to finish being instantiated (or to fail) */
52069 if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
52070- fscache_stat(&fscache_n_relinquishes_waitcrt);
52071+ fscache_stat_unchecked(&fscache_n_relinquishes_waitcrt);
52072 wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
52073 fscache_wait_bit, TASK_UNINTERRUPTIBLE);
52074 }
52075diff --git a/fs/fscache/internal.h b/fs/fscache/internal.h
52076index ee38fef..0a326d4 100644
52077--- a/fs/fscache/internal.h
52078+++ b/fs/fscache/internal.h
52079@@ -148,101 +148,101 @@ extern void fscache_proc_cleanup(void);
52080 * stats.c
52081 */
52082 #ifdef CONFIG_FSCACHE_STATS
52083-extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
52084-extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
52085+extern atomic_unchecked_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
52086+extern atomic_unchecked_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
52087
52088-extern atomic_t fscache_n_op_pend;
52089-extern atomic_t fscache_n_op_run;
52090-extern atomic_t fscache_n_op_enqueue;
52091-extern atomic_t fscache_n_op_deferred_release;
52092-extern atomic_t fscache_n_op_release;
52093-extern atomic_t fscache_n_op_gc;
52094-extern atomic_t fscache_n_op_cancelled;
52095-extern atomic_t fscache_n_op_rejected;
52096+extern atomic_unchecked_t fscache_n_op_pend;
52097+extern atomic_unchecked_t fscache_n_op_run;
52098+extern atomic_unchecked_t fscache_n_op_enqueue;
52099+extern atomic_unchecked_t fscache_n_op_deferred_release;
52100+extern atomic_unchecked_t fscache_n_op_release;
52101+extern atomic_unchecked_t fscache_n_op_gc;
52102+extern atomic_unchecked_t fscache_n_op_cancelled;
52103+extern atomic_unchecked_t fscache_n_op_rejected;
52104
52105-extern atomic_t fscache_n_attr_changed;
52106-extern atomic_t fscache_n_attr_changed_ok;
52107-extern atomic_t fscache_n_attr_changed_nobufs;
52108-extern atomic_t fscache_n_attr_changed_nomem;
52109-extern atomic_t fscache_n_attr_changed_calls;
52110+extern atomic_unchecked_t fscache_n_attr_changed;
52111+extern atomic_unchecked_t fscache_n_attr_changed_ok;
52112+extern atomic_unchecked_t fscache_n_attr_changed_nobufs;
52113+extern atomic_unchecked_t fscache_n_attr_changed_nomem;
52114+extern atomic_unchecked_t fscache_n_attr_changed_calls;
52115
52116-extern atomic_t fscache_n_allocs;
52117-extern atomic_t fscache_n_allocs_ok;
52118-extern atomic_t fscache_n_allocs_wait;
52119-extern atomic_t fscache_n_allocs_nobufs;
52120-extern atomic_t fscache_n_allocs_intr;
52121-extern atomic_t fscache_n_allocs_object_dead;
52122-extern atomic_t fscache_n_alloc_ops;
52123-extern atomic_t fscache_n_alloc_op_waits;
52124+extern atomic_unchecked_t fscache_n_allocs;
52125+extern atomic_unchecked_t fscache_n_allocs_ok;
52126+extern atomic_unchecked_t fscache_n_allocs_wait;
52127+extern atomic_unchecked_t fscache_n_allocs_nobufs;
52128+extern atomic_unchecked_t fscache_n_allocs_intr;
52129+extern atomic_unchecked_t fscache_n_allocs_object_dead;
52130+extern atomic_unchecked_t fscache_n_alloc_ops;
52131+extern atomic_unchecked_t fscache_n_alloc_op_waits;
52132
52133-extern atomic_t fscache_n_retrievals;
52134-extern atomic_t fscache_n_retrievals_ok;
52135-extern atomic_t fscache_n_retrievals_wait;
52136-extern atomic_t fscache_n_retrievals_nodata;
52137-extern atomic_t fscache_n_retrievals_nobufs;
52138-extern atomic_t fscache_n_retrievals_intr;
52139-extern atomic_t fscache_n_retrievals_nomem;
52140-extern atomic_t fscache_n_retrievals_object_dead;
52141-extern atomic_t fscache_n_retrieval_ops;
52142-extern atomic_t fscache_n_retrieval_op_waits;
52143+extern atomic_unchecked_t fscache_n_retrievals;
52144+extern atomic_unchecked_t fscache_n_retrievals_ok;
52145+extern atomic_unchecked_t fscache_n_retrievals_wait;
52146+extern atomic_unchecked_t fscache_n_retrievals_nodata;
52147+extern atomic_unchecked_t fscache_n_retrievals_nobufs;
52148+extern atomic_unchecked_t fscache_n_retrievals_intr;
52149+extern atomic_unchecked_t fscache_n_retrievals_nomem;
52150+extern atomic_unchecked_t fscache_n_retrievals_object_dead;
52151+extern atomic_unchecked_t fscache_n_retrieval_ops;
52152+extern atomic_unchecked_t fscache_n_retrieval_op_waits;
52153
52154-extern atomic_t fscache_n_stores;
52155-extern atomic_t fscache_n_stores_ok;
52156-extern atomic_t fscache_n_stores_again;
52157-extern atomic_t fscache_n_stores_nobufs;
52158-extern atomic_t fscache_n_stores_oom;
52159-extern atomic_t fscache_n_store_ops;
52160-extern atomic_t fscache_n_store_calls;
52161-extern atomic_t fscache_n_store_pages;
52162-extern atomic_t fscache_n_store_radix_deletes;
52163-extern atomic_t fscache_n_store_pages_over_limit;
52164+extern atomic_unchecked_t fscache_n_stores;
52165+extern atomic_unchecked_t fscache_n_stores_ok;
52166+extern atomic_unchecked_t fscache_n_stores_again;
52167+extern atomic_unchecked_t fscache_n_stores_nobufs;
52168+extern atomic_unchecked_t fscache_n_stores_oom;
52169+extern atomic_unchecked_t fscache_n_store_ops;
52170+extern atomic_unchecked_t fscache_n_store_calls;
52171+extern atomic_unchecked_t fscache_n_store_pages;
52172+extern atomic_unchecked_t fscache_n_store_radix_deletes;
52173+extern atomic_unchecked_t fscache_n_store_pages_over_limit;
52174
52175-extern atomic_t fscache_n_store_vmscan_not_storing;
52176-extern atomic_t fscache_n_store_vmscan_gone;
52177-extern atomic_t fscache_n_store_vmscan_busy;
52178-extern atomic_t fscache_n_store_vmscan_cancelled;
52179-extern atomic_t fscache_n_store_vmscan_wait;
52180+extern atomic_unchecked_t fscache_n_store_vmscan_not_storing;
52181+extern atomic_unchecked_t fscache_n_store_vmscan_gone;
52182+extern atomic_unchecked_t fscache_n_store_vmscan_busy;
52183+extern atomic_unchecked_t fscache_n_store_vmscan_cancelled;
52184+extern atomic_unchecked_t fscache_n_store_vmscan_wait;
52185
52186-extern atomic_t fscache_n_marks;
52187-extern atomic_t fscache_n_uncaches;
52188+extern atomic_unchecked_t fscache_n_marks;
52189+extern atomic_unchecked_t fscache_n_uncaches;
52190
52191-extern atomic_t fscache_n_acquires;
52192-extern atomic_t fscache_n_acquires_null;
52193-extern atomic_t fscache_n_acquires_no_cache;
52194-extern atomic_t fscache_n_acquires_ok;
52195-extern atomic_t fscache_n_acquires_nobufs;
52196-extern atomic_t fscache_n_acquires_oom;
52197+extern atomic_unchecked_t fscache_n_acquires;
52198+extern atomic_unchecked_t fscache_n_acquires_null;
52199+extern atomic_unchecked_t fscache_n_acquires_no_cache;
52200+extern atomic_unchecked_t fscache_n_acquires_ok;
52201+extern atomic_unchecked_t fscache_n_acquires_nobufs;
52202+extern atomic_unchecked_t fscache_n_acquires_oom;
52203
52204-extern atomic_t fscache_n_invalidates;
52205-extern atomic_t fscache_n_invalidates_run;
52206+extern atomic_unchecked_t fscache_n_invalidates;
52207+extern atomic_unchecked_t fscache_n_invalidates_run;
52208
52209-extern atomic_t fscache_n_updates;
52210-extern atomic_t fscache_n_updates_null;
52211-extern atomic_t fscache_n_updates_run;
52212+extern atomic_unchecked_t fscache_n_updates;
52213+extern atomic_unchecked_t fscache_n_updates_null;
52214+extern atomic_unchecked_t fscache_n_updates_run;
52215
52216-extern atomic_t fscache_n_relinquishes;
52217-extern atomic_t fscache_n_relinquishes_null;
52218-extern atomic_t fscache_n_relinquishes_waitcrt;
52219-extern atomic_t fscache_n_relinquishes_retire;
52220+extern atomic_unchecked_t fscache_n_relinquishes;
52221+extern atomic_unchecked_t fscache_n_relinquishes_null;
52222+extern atomic_unchecked_t fscache_n_relinquishes_waitcrt;
52223+extern atomic_unchecked_t fscache_n_relinquishes_retire;
52224
52225-extern atomic_t fscache_n_cookie_index;
52226-extern atomic_t fscache_n_cookie_data;
52227-extern atomic_t fscache_n_cookie_special;
52228+extern atomic_unchecked_t fscache_n_cookie_index;
52229+extern atomic_unchecked_t fscache_n_cookie_data;
52230+extern atomic_unchecked_t fscache_n_cookie_special;
52231
52232-extern atomic_t fscache_n_object_alloc;
52233-extern atomic_t fscache_n_object_no_alloc;
52234-extern atomic_t fscache_n_object_lookups;
52235-extern atomic_t fscache_n_object_lookups_negative;
52236-extern atomic_t fscache_n_object_lookups_positive;
52237-extern atomic_t fscache_n_object_lookups_timed_out;
52238-extern atomic_t fscache_n_object_created;
52239-extern atomic_t fscache_n_object_avail;
52240-extern atomic_t fscache_n_object_dead;
52241+extern atomic_unchecked_t fscache_n_object_alloc;
52242+extern atomic_unchecked_t fscache_n_object_no_alloc;
52243+extern atomic_unchecked_t fscache_n_object_lookups;
52244+extern atomic_unchecked_t fscache_n_object_lookups_negative;
52245+extern atomic_unchecked_t fscache_n_object_lookups_positive;
52246+extern atomic_unchecked_t fscache_n_object_lookups_timed_out;
52247+extern atomic_unchecked_t fscache_n_object_created;
52248+extern atomic_unchecked_t fscache_n_object_avail;
52249+extern atomic_unchecked_t fscache_n_object_dead;
52250
52251-extern atomic_t fscache_n_checkaux_none;
52252-extern atomic_t fscache_n_checkaux_okay;
52253-extern atomic_t fscache_n_checkaux_update;
52254-extern atomic_t fscache_n_checkaux_obsolete;
52255+extern atomic_unchecked_t fscache_n_checkaux_none;
52256+extern atomic_unchecked_t fscache_n_checkaux_okay;
52257+extern atomic_unchecked_t fscache_n_checkaux_update;
52258+extern atomic_unchecked_t fscache_n_checkaux_obsolete;
52259
52260 extern atomic_t fscache_n_cop_alloc_object;
52261 extern atomic_t fscache_n_cop_lookup_object;
52262@@ -267,6 +267,11 @@ static inline void fscache_stat(atomic_t *stat)
52263 atomic_inc(stat);
52264 }
52265
52266+static inline void fscache_stat_unchecked(atomic_unchecked_t *stat)
52267+{
52268+ atomic_inc_unchecked(stat);
52269+}
52270+
52271 static inline void fscache_stat_d(atomic_t *stat)
52272 {
52273 atomic_dec(stat);
52274@@ -279,6 +284,7 @@ extern const struct file_operations fscache_stats_fops;
52275
52276 #define __fscache_stat(stat) (NULL)
52277 #define fscache_stat(stat) do {} while (0)
52278+#define fscache_stat_unchecked(stat) do {} while (0)
52279 #define fscache_stat_d(stat) do {} while (0)
52280 #endif
52281
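The fscache_n_* counters converted above are pure event statistics: they only ever count up, and on a long-running machine they will eventually wrap. Under PaX's REFCOUNT hardening, plain atomic_t increments trap on overflow (that is the point of the feature — a refcount overflow kills the task instead of becoming a use-after-free), so monotonic counters that may legitimately wrap are moved to atomic_unchecked_t, which keeps the storage but opts out of the trap. The fscache_n_cop_* counters stay as checked atomic_t because they are paired inc/dec in-flight counts that should never wrap. A condensed sketch of the type's definitions, assuming the shape grsecurity gives them when CONFIG_PAX_REFCOUNT is disabled (not the verbatim patch code):

	#ifdef CONFIG_PAX_REFCOUNT
	typedef struct {
		int counter;
	} atomic_unchecked_t;
	/* arch code supplies atomic_*_unchecked() ops that omit the
	 * overflow trap the checked atomic_t ops gain under REFCOUNT */
	#else
	typedef atomic_t atomic_unchecked_t;
	#define atomic_read_unchecked(v)		atomic_read(v)
	#define atomic_inc_unchecked(v)			atomic_inc(v)
	#define atomic_dec_unchecked(v)			atomic_dec(v)
	#define atomic_inc_return_unchecked(v)		atomic_inc_return(v)
	#define atomic_add_return_unchecked(i, v)	atomic_add_return((i), (v))
	#endif

The fscache hunks that follow are the mechanical counterpart: every call site of fscache_stat() on a converted counter becomes fscache_stat_unchecked(), using the inline wrapper added above.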
52282diff --git a/fs/fscache/object.c b/fs/fscache/object.c
52283index 50d41c1..10ee117 100644
52284--- a/fs/fscache/object.c
52285+++ b/fs/fscache/object.c
52286@@ -143,7 +143,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
52287 /* Invalidate an object on disk */
52288 case FSCACHE_OBJECT_INVALIDATING:
52289 clear_bit(FSCACHE_OBJECT_EV_INVALIDATE, &object->events);
52290- fscache_stat(&fscache_n_invalidates_run);
52291+ fscache_stat_unchecked(&fscache_n_invalidates_run);
52292 fscache_stat(&fscache_n_cop_invalidate_object);
52293 fscache_invalidate_object(object);
52294 fscache_stat_d(&fscache_n_cop_invalidate_object);
52295@@ -153,7 +153,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
52296 /* update the object metadata on disk */
52297 case FSCACHE_OBJECT_UPDATING:
52298 clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
52299- fscache_stat(&fscache_n_updates_run);
52300+ fscache_stat_unchecked(&fscache_n_updates_run);
52301 fscache_stat(&fscache_n_cop_update_object);
52302 object->cache->ops->update_object(object);
52303 fscache_stat_d(&fscache_n_cop_update_object);
52304@@ -242,7 +242,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
52305 spin_lock(&object->lock);
52306 object->state = FSCACHE_OBJECT_DEAD;
52307 spin_unlock(&object->lock);
52308- fscache_stat(&fscache_n_object_dead);
52309+ fscache_stat_unchecked(&fscache_n_object_dead);
52310 goto terminal_transit;
52311
52312 /* handle the parent cache of this object being withdrawn from
52313@@ -257,7 +257,7 @@ static void fscache_object_state_machine(struct fscache_object *object)
52314 spin_lock(&object->lock);
52315 object->state = FSCACHE_OBJECT_DEAD;
52316 spin_unlock(&object->lock);
52317- fscache_stat(&fscache_n_object_dead);
52318+ fscache_stat_unchecked(&fscache_n_object_dead);
52319 goto terminal_transit;
52320
52321 /* complain about the object being woken up once it is
52322@@ -495,7 +495,7 @@ static void fscache_lookup_object(struct fscache_object *object)
52323 parent->cookie->def->name, cookie->def->name,
52324 object->cache->tag->name);
52325
52326- fscache_stat(&fscache_n_object_lookups);
52327+ fscache_stat_unchecked(&fscache_n_object_lookups);
52328 fscache_stat(&fscache_n_cop_lookup_object);
52329 ret = object->cache->ops->lookup_object(object);
52330 fscache_stat_d(&fscache_n_cop_lookup_object);
52331@@ -506,7 +506,7 @@ static void fscache_lookup_object(struct fscache_object *object)
52332 if (ret == -ETIMEDOUT) {
52333 /* probably stuck behind another object, so move this one to
52334 * the back of the queue */
52335- fscache_stat(&fscache_n_object_lookups_timed_out);
52336+ fscache_stat_unchecked(&fscache_n_object_lookups_timed_out);
52337 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
52338 }
52339
52340@@ -529,7 +529,7 @@ void fscache_object_lookup_negative(struct fscache_object *object)
52341
52342 spin_lock(&object->lock);
52343 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
52344- fscache_stat(&fscache_n_object_lookups_negative);
52345+ fscache_stat_unchecked(&fscache_n_object_lookups_negative);
52346
52347 /* transit here to allow write requests to begin stacking up
52348 * and read requests to begin returning ENODATA */
52349@@ -575,7 +575,7 @@ void fscache_obtained_object(struct fscache_object *object)
52350 * result, in which case there may be data available */
52351 spin_lock(&object->lock);
52352 if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
52353- fscache_stat(&fscache_n_object_lookups_positive);
52354+ fscache_stat_unchecked(&fscache_n_object_lookups_positive);
52355
52356 clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
52357
52358@@ -589,7 +589,7 @@ void fscache_obtained_object(struct fscache_object *object)
52359 set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
52360 } else {
52361 ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
52362- fscache_stat(&fscache_n_object_created);
52363+ fscache_stat_unchecked(&fscache_n_object_created);
52364
52365 object->state = FSCACHE_OBJECT_AVAILABLE;
52366 spin_unlock(&object->lock);
52367@@ -634,7 +634,7 @@ static void fscache_object_available(struct fscache_object *object)
52368 fscache_enqueue_dependents(object);
52369
52370 fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
52371- fscache_stat(&fscache_n_object_avail);
52372+ fscache_stat_unchecked(&fscache_n_object_avail);
52373
52374 _leave("");
52375 }
52376@@ -894,7 +894,7 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
52377 enum fscache_checkaux result;
52378
52379 if (!object->cookie->def->check_aux) {
52380- fscache_stat(&fscache_n_checkaux_none);
52381+ fscache_stat_unchecked(&fscache_n_checkaux_none);
52382 return FSCACHE_CHECKAUX_OKAY;
52383 }
52384
52385@@ -903,17 +903,17 @@ enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
52386 switch (result) {
52387 /* entry okay as is */
52388 case FSCACHE_CHECKAUX_OKAY:
52389- fscache_stat(&fscache_n_checkaux_okay);
52390+ fscache_stat_unchecked(&fscache_n_checkaux_okay);
52391 break;
52392
52393 /* entry requires update */
52394 case FSCACHE_CHECKAUX_NEEDS_UPDATE:
52395- fscache_stat(&fscache_n_checkaux_update);
52396+ fscache_stat_unchecked(&fscache_n_checkaux_update);
52397 break;
52398
52399 /* entry requires deletion */
52400 case FSCACHE_CHECKAUX_OBSOLETE:
52401- fscache_stat(&fscache_n_checkaux_obsolete);
52402+ fscache_stat_unchecked(&fscache_n_checkaux_obsolete);
52403 break;
52404
52405 default:
52406diff --git a/fs/fscache/operation.c b/fs/fscache/operation.c
52407index 762a9ec..2023284 100644
52408--- a/fs/fscache/operation.c
52409+++ b/fs/fscache/operation.c
52410@@ -17,7 +17,7 @@
52411 #include <linux/slab.h>
52412 #include "internal.h"
52413
52414-atomic_t fscache_op_debug_id;
52415+atomic_unchecked_t fscache_op_debug_id;
52416 EXPORT_SYMBOL(fscache_op_debug_id);
52417
52418 /**
52419@@ -39,7 +39,7 @@ void fscache_enqueue_operation(struct fscache_operation *op)
52420 ASSERTCMP(atomic_read(&op->usage), >, 0);
52421 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_IN_PROGRESS);
52422
52423- fscache_stat(&fscache_n_op_enqueue);
52424+ fscache_stat_unchecked(&fscache_n_op_enqueue);
52425 switch (op->flags & FSCACHE_OP_TYPE) {
52426 case FSCACHE_OP_ASYNC:
52427 _debug("queue async");
52428@@ -73,7 +73,7 @@ static void fscache_run_op(struct fscache_object *object,
52429 wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
52430 if (op->processor)
52431 fscache_enqueue_operation(op);
52432- fscache_stat(&fscache_n_op_run);
52433+ fscache_stat_unchecked(&fscache_n_op_run);
52434 }
52435
52436 /*
52437@@ -105,11 +105,11 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
52438 if (object->n_in_progress > 0) {
52439 atomic_inc(&op->usage);
52440 list_add_tail(&op->pend_link, &object->pending_ops);
52441- fscache_stat(&fscache_n_op_pend);
52442+ fscache_stat_unchecked(&fscache_n_op_pend);
52443 } else if (!list_empty(&object->pending_ops)) {
52444 atomic_inc(&op->usage);
52445 list_add_tail(&op->pend_link, &object->pending_ops);
52446- fscache_stat(&fscache_n_op_pend);
52447+ fscache_stat_unchecked(&fscache_n_op_pend);
52448 fscache_start_operations(object);
52449 } else {
52450 ASSERTCMP(object->n_in_progress, ==, 0);
52451@@ -125,7 +125,7 @@ int fscache_submit_exclusive_op(struct fscache_object *object,
52452 object->n_exclusive++; /* reads and writes must wait */
52453 atomic_inc(&op->usage);
52454 list_add_tail(&op->pend_link, &object->pending_ops);
52455- fscache_stat(&fscache_n_op_pend);
52456+ fscache_stat_unchecked(&fscache_n_op_pend);
52457 ret = 0;
52458 } else {
52459 /* If we're in any other state, there must have been an I/O
52460@@ -215,11 +215,11 @@ int fscache_submit_op(struct fscache_object *object,
52461 if (object->n_exclusive > 0) {
52462 atomic_inc(&op->usage);
52463 list_add_tail(&op->pend_link, &object->pending_ops);
52464- fscache_stat(&fscache_n_op_pend);
52465+ fscache_stat_unchecked(&fscache_n_op_pend);
52466 } else if (!list_empty(&object->pending_ops)) {
52467 atomic_inc(&op->usage);
52468 list_add_tail(&op->pend_link, &object->pending_ops);
52469- fscache_stat(&fscache_n_op_pend);
52470+ fscache_stat_unchecked(&fscache_n_op_pend);
52471 fscache_start_operations(object);
52472 } else {
52473 ASSERTCMP(object->n_exclusive, ==, 0);
52474@@ -231,12 +231,12 @@ int fscache_submit_op(struct fscache_object *object,
52475 object->n_ops++;
52476 atomic_inc(&op->usage);
52477 list_add_tail(&op->pend_link, &object->pending_ops);
52478- fscache_stat(&fscache_n_op_pend);
52479+ fscache_stat_unchecked(&fscache_n_op_pend);
52480 ret = 0;
52481 } else if (object->state == FSCACHE_OBJECT_DYING ||
52482 object->state == FSCACHE_OBJECT_LC_DYING ||
52483 object->state == FSCACHE_OBJECT_WITHDRAWING) {
52484- fscache_stat(&fscache_n_op_rejected);
52485+ fscache_stat_unchecked(&fscache_n_op_rejected);
52486 op->state = FSCACHE_OP_ST_CANCELLED;
52487 ret = -ENOBUFS;
52488 } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
52489@@ -315,7 +315,7 @@ int fscache_cancel_op(struct fscache_operation *op,
52490 ret = -EBUSY;
52491 if (op->state == FSCACHE_OP_ST_PENDING) {
52492 ASSERT(!list_empty(&op->pend_link));
52493- fscache_stat(&fscache_n_op_cancelled);
52494+ fscache_stat_unchecked(&fscache_n_op_cancelled);
52495 list_del_init(&op->pend_link);
52496 if (do_cancel)
52497 do_cancel(op);
52498@@ -347,7 +347,7 @@ void fscache_cancel_all_ops(struct fscache_object *object)
52499 while (!list_empty(&object->pending_ops)) {
52500 op = list_entry(object->pending_ops.next,
52501 struct fscache_operation, pend_link);
52502- fscache_stat(&fscache_n_op_cancelled);
52503+ fscache_stat_unchecked(&fscache_n_op_cancelled);
52504 list_del_init(&op->pend_link);
52505
52506 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_PENDING);
52507@@ -419,7 +419,7 @@ void fscache_put_operation(struct fscache_operation *op)
52508 op->state, ==, FSCACHE_OP_ST_CANCELLED);
52509 op->state = FSCACHE_OP_ST_DEAD;
52510
52511- fscache_stat(&fscache_n_op_release);
52512+ fscache_stat_unchecked(&fscache_n_op_release);
52513
52514 if (op->release) {
52515 op->release(op);
52516@@ -442,7 +442,7 @@ void fscache_put_operation(struct fscache_operation *op)
52517 * lock, and defer it otherwise */
52518 if (!spin_trylock(&object->lock)) {
52519 _debug("defer put");
52520- fscache_stat(&fscache_n_op_deferred_release);
52521+ fscache_stat_unchecked(&fscache_n_op_deferred_release);
52522
52523 cache = object->cache;
52524 spin_lock(&cache->op_gc_list_lock);
52525@@ -495,7 +495,7 @@ void fscache_operation_gc(struct work_struct *work)
52526
52527 _debug("GC DEFERRED REL OBJ%x OP%x",
52528 object->debug_id, op->debug_id);
52529- fscache_stat(&fscache_n_op_gc);
52530+ fscache_stat_unchecked(&fscache_n_op_gc);
52531
52532 ASSERTCMP(atomic_read(&op->usage), ==, 0);
52533 ASSERTCMP(op->state, ==, FSCACHE_OP_ST_DEAD);
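fscache_op_debug_id is a global allocator of operation debug IDs, not a reference count: an ID only has to be distinct enough to correlate log lines, so wrapping after 2^32 operations is harmless and the counter becomes unchecked. The allocation site (seen again in the page.c hunk below) then reads, illustratively:

	/* a wrapping fetch-and-increment is fine for log-correlation IDs */
	op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
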
52534diff --git a/fs/fscache/page.c b/fs/fscache/page.c
52535index ff000e5..c44ec6d 100644
52536--- a/fs/fscache/page.c
52537+++ b/fs/fscache/page.c
52538@@ -61,7 +61,7 @@ try_again:
52539 val = radix_tree_lookup(&cookie->stores, page->index);
52540 if (!val) {
52541 rcu_read_unlock();
52542- fscache_stat(&fscache_n_store_vmscan_not_storing);
52543+ fscache_stat_unchecked(&fscache_n_store_vmscan_not_storing);
52544 __fscache_uncache_page(cookie, page);
52545 return true;
52546 }
52547@@ -91,11 +91,11 @@ try_again:
52548 spin_unlock(&cookie->stores_lock);
52549
52550 if (xpage) {
52551- fscache_stat(&fscache_n_store_vmscan_cancelled);
52552- fscache_stat(&fscache_n_store_radix_deletes);
52553+ fscache_stat_unchecked(&fscache_n_store_vmscan_cancelled);
52554+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
52555 ASSERTCMP(xpage, ==, page);
52556 } else {
52557- fscache_stat(&fscache_n_store_vmscan_gone);
52558+ fscache_stat_unchecked(&fscache_n_store_vmscan_gone);
52559 }
52560
52561 wake_up_bit(&cookie->flags, 0);
52562@@ -110,11 +110,11 @@ page_busy:
52563 * sleeping on memory allocation, so we may need to impose a timeout
52564 * too. */
52565 if (!(gfp & __GFP_WAIT)) {
52566- fscache_stat(&fscache_n_store_vmscan_busy);
52567+ fscache_stat_unchecked(&fscache_n_store_vmscan_busy);
52568 return false;
52569 }
52570
52571- fscache_stat(&fscache_n_store_vmscan_wait);
52572+ fscache_stat_unchecked(&fscache_n_store_vmscan_wait);
52573 __fscache_wait_on_page_write(cookie, page);
52574 gfp &= ~__GFP_WAIT;
52575 goto try_again;
52576@@ -140,7 +140,7 @@ static void fscache_end_page_write(struct fscache_object *object,
52577 FSCACHE_COOKIE_STORING_TAG);
52578 if (!radix_tree_tag_get(&cookie->stores, page->index,
52579 FSCACHE_COOKIE_PENDING_TAG)) {
52580- fscache_stat(&fscache_n_store_radix_deletes);
52581+ fscache_stat_unchecked(&fscache_n_store_radix_deletes);
52582 xpage = radix_tree_delete(&cookie->stores, page->index);
52583 }
52584 spin_unlock(&cookie->stores_lock);
52585@@ -161,7 +161,7 @@ static void fscache_attr_changed_op(struct fscache_operation *op)
52586
52587 _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
52588
52589- fscache_stat(&fscache_n_attr_changed_calls);
52590+ fscache_stat_unchecked(&fscache_n_attr_changed_calls);
52591
52592 if (fscache_object_is_active(object)) {
52593 fscache_stat(&fscache_n_cop_attr_changed);
52594@@ -187,11 +187,11 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
52595
52596 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
52597
52598- fscache_stat(&fscache_n_attr_changed);
52599+ fscache_stat_unchecked(&fscache_n_attr_changed);
52600
52601 op = kzalloc(sizeof(*op), GFP_KERNEL);
52602 if (!op) {
52603- fscache_stat(&fscache_n_attr_changed_nomem);
52604+ fscache_stat_unchecked(&fscache_n_attr_changed_nomem);
52605 _leave(" = -ENOMEM");
52606 return -ENOMEM;
52607 }
52608@@ -209,7 +209,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
52609 if (fscache_submit_exclusive_op(object, op) < 0)
52610 goto nobufs;
52611 spin_unlock(&cookie->lock);
52612- fscache_stat(&fscache_n_attr_changed_ok);
52613+ fscache_stat_unchecked(&fscache_n_attr_changed_ok);
52614 fscache_put_operation(op);
52615 _leave(" = 0");
52616 return 0;
52617@@ -217,7 +217,7 @@ int __fscache_attr_changed(struct fscache_cookie *cookie)
52618 nobufs:
52619 spin_unlock(&cookie->lock);
52620 kfree(op);
52621- fscache_stat(&fscache_n_attr_changed_nobufs);
52622+ fscache_stat_unchecked(&fscache_n_attr_changed_nobufs);
52623 _leave(" = %d", -ENOBUFS);
52624 return -ENOBUFS;
52625 }
52626@@ -255,7 +255,7 @@ static struct fscache_retrieval *fscache_alloc_retrieval(
52627 /* allocate a retrieval operation and attempt to submit it */
52628 op = kzalloc(sizeof(*op), GFP_NOIO);
52629 if (!op) {
52630- fscache_stat(&fscache_n_retrievals_nomem);
52631+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
52632 return NULL;
52633 }
52634
52635@@ -283,13 +283,13 @@ static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
52636 return 0;
52637 }
52638
52639- fscache_stat(&fscache_n_retrievals_wait);
52640+ fscache_stat_unchecked(&fscache_n_retrievals_wait);
52641
52642 jif = jiffies;
52643 if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
52644 fscache_wait_bit_interruptible,
52645 TASK_INTERRUPTIBLE) != 0) {
52646- fscache_stat(&fscache_n_retrievals_intr);
52647+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
52648 _leave(" = -ERESTARTSYS");
52649 return -ERESTARTSYS;
52650 }
52651@@ -318,8 +318,8 @@ static void fscache_do_cancel_retrieval(struct fscache_operation *_op)
52652 */
52653 static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
52654 struct fscache_retrieval *op,
52655- atomic_t *stat_op_waits,
52656- atomic_t *stat_object_dead)
52657+ atomic_unchecked_t *stat_op_waits,
52658+ atomic_unchecked_t *stat_object_dead)
52659 {
52660 int ret;
52661
52662@@ -327,7 +327,7 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
52663 goto check_if_dead;
52664
52665 _debug(">>> WT");
52666- fscache_stat(stat_op_waits);
52667+ fscache_stat_unchecked(stat_op_waits);
52668 if (wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
52669 fscache_wait_bit_interruptible,
52670 TASK_INTERRUPTIBLE) != 0) {
52671@@ -344,14 +344,14 @@ static int fscache_wait_for_retrieval_activation(struct fscache_object *object,
52672
52673 check_if_dead:
52674 if (op->op.state == FSCACHE_OP_ST_CANCELLED) {
52675- fscache_stat(stat_object_dead);
52676+ fscache_stat_unchecked(stat_object_dead);
52677 _leave(" = -ENOBUFS [cancelled]");
52678 return -ENOBUFS;
52679 }
52680 if (unlikely(fscache_object_is_dead(object))) {
52681 pr_err("%s() = -ENOBUFS [obj dead %d]\n", __func__, op->op.state);
52682 fscache_cancel_op(&op->op, fscache_do_cancel_retrieval);
52683- fscache_stat(stat_object_dead);
52684+ fscache_stat_unchecked(stat_object_dead);
52685 return -ENOBUFS;
52686 }
52687 return 0;
52688@@ -378,7 +378,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
52689
52690 _enter("%p,%p,,,", cookie, page);
52691
52692- fscache_stat(&fscache_n_retrievals);
52693+ fscache_stat_unchecked(&fscache_n_retrievals);
52694
52695 if (hlist_empty(&cookie->backing_objects))
52696 goto nobufs;
52697@@ -417,7 +417,7 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
52698 goto nobufs_unlock_dec;
52699 spin_unlock(&cookie->lock);
52700
52701- fscache_stat(&fscache_n_retrieval_ops);
52702+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
52703
52704 /* pin the netfs read context in case we need to do the actual netfs
52705 * read because we've encountered a cache read failure */
52706@@ -447,15 +447,15 @@ int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
52707
52708 error:
52709 if (ret == -ENOMEM)
52710- fscache_stat(&fscache_n_retrievals_nomem);
52711+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
52712 else if (ret == -ERESTARTSYS)
52713- fscache_stat(&fscache_n_retrievals_intr);
52714+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
52715 else if (ret == -ENODATA)
52716- fscache_stat(&fscache_n_retrievals_nodata);
52717+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
52718 else if (ret < 0)
52719- fscache_stat(&fscache_n_retrievals_nobufs);
52720+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
52721 else
52722- fscache_stat(&fscache_n_retrievals_ok);
52723+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
52724
52725 fscache_put_retrieval(op);
52726 _leave(" = %d", ret);
52727@@ -467,7 +467,7 @@ nobufs_unlock:
52728 spin_unlock(&cookie->lock);
52729 kfree(op);
52730 nobufs:
52731- fscache_stat(&fscache_n_retrievals_nobufs);
52732+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
52733 _leave(" = -ENOBUFS");
52734 return -ENOBUFS;
52735 }
52736@@ -505,7 +505,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
52737
52738 _enter("%p,,%d,,,", cookie, *nr_pages);
52739
52740- fscache_stat(&fscache_n_retrievals);
52741+ fscache_stat_unchecked(&fscache_n_retrievals);
52742
52743 if (hlist_empty(&cookie->backing_objects))
52744 goto nobufs;
52745@@ -541,7 +541,7 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
52746 goto nobufs_unlock_dec;
52747 spin_unlock(&cookie->lock);
52748
52749- fscache_stat(&fscache_n_retrieval_ops);
52750+ fscache_stat_unchecked(&fscache_n_retrieval_ops);
52751
52752 /* pin the netfs read context in case we need to do the actual netfs
52753 * read because we've encountered a cache read failure */
52754@@ -571,15 +571,15 @@ int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
52755
52756 error:
52757 if (ret == -ENOMEM)
52758- fscache_stat(&fscache_n_retrievals_nomem);
52759+ fscache_stat_unchecked(&fscache_n_retrievals_nomem);
52760 else if (ret == -ERESTARTSYS)
52761- fscache_stat(&fscache_n_retrievals_intr);
52762+ fscache_stat_unchecked(&fscache_n_retrievals_intr);
52763 else if (ret == -ENODATA)
52764- fscache_stat(&fscache_n_retrievals_nodata);
52765+ fscache_stat_unchecked(&fscache_n_retrievals_nodata);
52766 else if (ret < 0)
52767- fscache_stat(&fscache_n_retrievals_nobufs);
52768+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
52769 else
52770- fscache_stat(&fscache_n_retrievals_ok);
52771+ fscache_stat_unchecked(&fscache_n_retrievals_ok);
52772
52773 fscache_put_retrieval(op);
52774 _leave(" = %d", ret);
52775@@ -591,7 +591,7 @@ nobufs_unlock:
52776 spin_unlock(&cookie->lock);
52777 kfree(op);
52778 nobufs:
52779- fscache_stat(&fscache_n_retrievals_nobufs);
52780+ fscache_stat_unchecked(&fscache_n_retrievals_nobufs);
52781 _leave(" = -ENOBUFS");
52782 return -ENOBUFS;
52783 }
52784@@ -615,7 +615,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
52785
52786 _enter("%p,%p,,,", cookie, page);
52787
52788- fscache_stat(&fscache_n_allocs);
52789+ fscache_stat_unchecked(&fscache_n_allocs);
52790
52791 if (hlist_empty(&cookie->backing_objects))
52792 goto nobufs;
52793@@ -647,7 +647,7 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
52794 goto nobufs_unlock;
52795 spin_unlock(&cookie->lock);
52796
52797- fscache_stat(&fscache_n_alloc_ops);
52798+ fscache_stat_unchecked(&fscache_n_alloc_ops);
52799
52800 ret = fscache_wait_for_retrieval_activation(
52801 object, op,
52802@@ -663,11 +663,11 @@ int __fscache_alloc_page(struct fscache_cookie *cookie,
52803
52804 error:
52805 if (ret == -ERESTARTSYS)
52806- fscache_stat(&fscache_n_allocs_intr);
52807+ fscache_stat_unchecked(&fscache_n_allocs_intr);
52808 else if (ret < 0)
52809- fscache_stat(&fscache_n_allocs_nobufs);
52810+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
52811 else
52812- fscache_stat(&fscache_n_allocs_ok);
52813+ fscache_stat_unchecked(&fscache_n_allocs_ok);
52814
52815 fscache_put_retrieval(op);
52816 _leave(" = %d", ret);
52817@@ -677,7 +677,7 @@ nobufs_unlock:
52818 spin_unlock(&cookie->lock);
52819 kfree(op);
52820 nobufs:
52821- fscache_stat(&fscache_n_allocs_nobufs);
52822+ fscache_stat_unchecked(&fscache_n_allocs_nobufs);
52823 _leave(" = -ENOBUFS");
52824 return -ENOBUFS;
52825 }
52826@@ -736,7 +736,7 @@ static void fscache_write_op(struct fscache_operation *_op)
52827
52828 spin_lock(&cookie->stores_lock);
52829
52830- fscache_stat(&fscache_n_store_calls);
52831+ fscache_stat_unchecked(&fscache_n_store_calls);
52832
52833 /* find a page to store */
52834 page = NULL;
52835@@ -747,7 +747,7 @@ static void fscache_write_op(struct fscache_operation *_op)
52836 page = results[0];
52837 _debug("gang %d [%lx]", n, page->index);
52838 if (page->index > op->store_limit) {
52839- fscache_stat(&fscache_n_store_pages_over_limit);
52840+ fscache_stat_unchecked(&fscache_n_store_pages_over_limit);
52841 goto superseded;
52842 }
52843
52844@@ -759,7 +759,7 @@ static void fscache_write_op(struct fscache_operation *_op)
52845 spin_unlock(&cookie->stores_lock);
52846 spin_unlock(&object->lock);
52847
52848- fscache_stat(&fscache_n_store_pages);
52849+ fscache_stat_unchecked(&fscache_n_store_pages);
52850 fscache_stat(&fscache_n_cop_write_page);
52851 ret = object->cache->ops->write_page(op, page);
52852 fscache_stat_d(&fscache_n_cop_write_page);
52853@@ -860,7 +860,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
52854 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
52855 ASSERT(PageFsCache(page));
52856
52857- fscache_stat(&fscache_n_stores);
52858+ fscache_stat_unchecked(&fscache_n_stores);
52859
52860 if (test_bit(FSCACHE_COOKIE_INVALIDATING, &cookie->flags)) {
52861 _leave(" = -ENOBUFS [invalidating]");
52862@@ -916,7 +916,7 @@ int __fscache_write_page(struct fscache_cookie *cookie,
52863 spin_unlock(&cookie->stores_lock);
52864 spin_unlock(&object->lock);
52865
52866- op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
52867+ op->op.debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
52868 op->store_limit = object->store_limit;
52869
52870 if (fscache_submit_op(object, &op->op) < 0)
52871@@ -924,8 +924,8 @@ int __fscache_write_page(struct fscache_cookie *cookie,
52872
52873 spin_unlock(&cookie->lock);
52874 radix_tree_preload_end();
52875- fscache_stat(&fscache_n_store_ops);
52876- fscache_stat(&fscache_n_stores_ok);
52877+ fscache_stat_unchecked(&fscache_n_store_ops);
52878+ fscache_stat_unchecked(&fscache_n_stores_ok);
52879
52880 /* the work queue now carries its own ref on the object */
52881 fscache_put_operation(&op->op);
52882@@ -933,14 +933,14 @@ int __fscache_write_page(struct fscache_cookie *cookie,
52883 return 0;
52884
52885 already_queued:
52886- fscache_stat(&fscache_n_stores_again);
52887+ fscache_stat_unchecked(&fscache_n_stores_again);
52888 already_pending:
52889 spin_unlock(&cookie->stores_lock);
52890 spin_unlock(&object->lock);
52891 spin_unlock(&cookie->lock);
52892 radix_tree_preload_end();
52893 kfree(op);
52894- fscache_stat(&fscache_n_stores_ok);
52895+ fscache_stat_unchecked(&fscache_n_stores_ok);
52896 _leave(" = 0");
52897 return 0;
52898
52899@@ -959,14 +959,14 @@ nobufs:
52900 spin_unlock(&cookie->lock);
52901 radix_tree_preload_end();
52902 kfree(op);
52903- fscache_stat(&fscache_n_stores_nobufs);
52904+ fscache_stat_unchecked(&fscache_n_stores_nobufs);
52905 _leave(" = -ENOBUFS");
52906 return -ENOBUFS;
52907
52908 nomem_free:
52909 kfree(op);
52910 nomem:
52911- fscache_stat(&fscache_n_stores_oom);
52912+ fscache_stat_unchecked(&fscache_n_stores_oom);
52913 _leave(" = -ENOMEM");
52914 return -ENOMEM;
52915 }
52916@@ -984,7 +984,7 @@ void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
52917 ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
52918 ASSERTCMP(page, !=, NULL);
52919
52920- fscache_stat(&fscache_n_uncaches);
52921+ fscache_stat_unchecked(&fscache_n_uncaches);
52922
52923 /* cache withdrawal may beat us to it */
52924 if (!PageFsCache(page))
52925@@ -1035,7 +1035,7 @@ void fscache_mark_page_cached(struct fscache_retrieval *op, struct page *page)
52926 struct fscache_cookie *cookie = op->op.object->cookie;
52927
52928 #ifdef CONFIG_FSCACHE_STATS
52929- atomic_inc(&fscache_n_marks);
52930+ atomic_inc_unchecked(&fscache_n_marks);
52931 #endif
52932
52933 _debug("- mark %p{%lx}", page, page->index);
52934diff --git a/fs/fscache/stats.c b/fs/fscache/stats.c
52935index 40d13c7..ddf52b9 100644
52936--- a/fs/fscache/stats.c
52937+++ b/fs/fscache/stats.c
52938@@ -18,99 +18,99 @@
52939 /*
52940 * operation counters
52941 */
52942-atomic_t fscache_n_op_pend;
52943-atomic_t fscache_n_op_run;
52944-atomic_t fscache_n_op_enqueue;
52945-atomic_t fscache_n_op_requeue;
52946-atomic_t fscache_n_op_deferred_release;
52947-atomic_t fscache_n_op_release;
52948-atomic_t fscache_n_op_gc;
52949-atomic_t fscache_n_op_cancelled;
52950-atomic_t fscache_n_op_rejected;
52951+atomic_unchecked_t fscache_n_op_pend;
52952+atomic_unchecked_t fscache_n_op_run;
52953+atomic_unchecked_t fscache_n_op_enqueue;
52954+atomic_unchecked_t fscache_n_op_requeue;
52955+atomic_unchecked_t fscache_n_op_deferred_release;
52956+atomic_unchecked_t fscache_n_op_release;
52957+atomic_unchecked_t fscache_n_op_gc;
52958+atomic_unchecked_t fscache_n_op_cancelled;
52959+atomic_unchecked_t fscache_n_op_rejected;
52960
52961-atomic_t fscache_n_attr_changed;
52962-atomic_t fscache_n_attr_changed_ok;
52963-atomic_t fscache_n_attr_changed_nobufs;
52964-atomic_t fscache_n_attr_changed_nomem;
52965-atomic_t fscache_n_attr_changed_calls;
52966+atomic_unchecked_t fscache_n_attr_changed;
52967+atomic_unchecked_t fscache_n_attr_changed_ok;
52968+atomic_unchecked_t fscache_n_attr_changed_nobufs;
52969+atomic_unchecked_t fscache_n_attr_changed_nomem;
52970+atomic_unchecked_t fscache_n_attr_changed_calls;
52971
52972-atomic_t fscache_n_allocs;
52973-atomic_t fscache_n_allocs_ok;
52974-atomic_t fscache_n_allocs_wait;
52975-atomic_t fscache_n_allocs_nobufs;
52976-atomic_t fscache_n_allocs_intr;
52977-atomic_t fscache_n_allocs_object_dead;
52978-atomic_t fscache_n_alloc_ops;
52979-atomic_t fscache_n_alloc_op_waits;
52980+atomic_unchecked_t fscache_n_allocs;
52981+atomic_unchecked_t fscache_n_allocs_ok;
52982+atomic_unchecked_t fscache_n_allocs_wait;
52983+atomic_unchecked_t fscache_n_allocs_nobufs;
52984+atomic_unchecked_t fscache_n_allocs_intr;
52985+atomic_unchecked_t fscache_n_allocs_object_dead;
52986+atomic_unchecked_t fscache_n_alloc_ops;
52987+atomic_unchecked_t fscache_n_alloc_op_waits;
52988
52989-atomic_t fscache_n_retrievals;
52990-atomic_t fscache_n_retrievals_ok;
52991-atomic_t fscache_n_retrievals_wait;
52992-atomic_t fscache_n_retrievals_nodata;
52993-atomic_t fscache_n_retrievals_nobufs;
52994-atomic_t fscache_n_retrievals_intr;
52995-atomic_t fscache_n_retrievals_nomem;
52996-atomic_t fscache_n_retrievals_object_dead;
52997-atomic_t fscache_n_retrieval_ops;
52998-atomic_t fscache_n_retrieval_op_waits;
52999+atomic_unchecked_t fscache_n_retrievals;
53000+atomic_unchecked_t fscache_n_retrievals_ok;
53001+atomic_unchecked_t fscache_n_retrievals_wait;
53002+atomic_unchecked_t fscache_n_retrievals_nodata;
53003+atomic_unchecked_t fscache_n_retrievals_nobufs;
53004+atomic_unchecked_t fscache_n_retrievals_intr;
53005+atomic_unchecked_t fscache_n_retrievals_nomem;
53006+atomic_unchecked_t fscache_n_retrievals_object_dead;
53007+atomic_unchecked_t fscache_n_retrieval_ops;
53008+atomic_unchecked_t fscache_n_retrieval_op_waits;
53009
53010-atomic_t fscache_n_stores;
53011-atomic_t fscache_n_stores_ok;
53012-atomic_t fscache_n_stores_again;
53013-atomic_t fscache_n_stores_nobufs;
53014-atomic_t fscache_n_stores_oom;
53015-atomic_t fscache_n_store_ops;
53016-atomic_t fscache_n_store_calls;
53017-atomic_t fscache_n_store_pages;
53018-atomic_t fscache_n_store_radix_deletes;
53019-atomic_t fscache_n_store_pages_over_limit;
53020+atomic_unchecked_t fscache_n_stores;
53021+atomic_unchecked_t fscache_n_stores_ok;
53022+atomic_unchecked_t fscache_n_stores_again;
53023+atomic_unchecked_t fscache_n_stores_nobufs;
53024+atomic_unchecked_t fscache_n_stores_oom;
53025+atomic_unchecked_t fscache_n_store_ops;
53026+atomic_unchecked_t fscache_n_store_calls;
53027+atomic_unchecked_t fscache_n_store_pages;
53028+atomic_unchecked_t fscache_n_store_radix_deletes;
53029+atomic_unchecked_t fscache_n_store_pages_over_limit;
53030
53031-atomic_t fscache_n_store_vmscan_not_storing;
53032-atomic_t fscache_n_store_vmscan_gone;
53033-atomic_t fscache_n_store_vmscan_busy;
53034-atomic_t fscache_n_store_vmscan_cancelled;
53035-atomic_t fscache_n_store_vmscan_wait;
53036+atomic_unchecked_t fscache_n_store_vmscan_not_storing;
53037+atomic_unchecked_t fscache_n_store_vmscan_gone;
53038+atomic_unchecked_t fscache_n_store_vmscan_busy;
53039+atomic_unchecked_t fscache_n_store_vmscan_cancelled;
53040+atomic_unchecked_t fscache_n_store_vmscan_wait;
53041
53042-atomic_t fscache_n_marks;
53043-atomic_t fscache_n_uncaches;
53044+atomic_unchecked_t fscache_n_marks;
53045+atomic_unchecked_t fscache_n_uncaches;
53046
53047-atomic_t fscache_n_acquires;
53048-atomic_t fscache_n_acquires_null;
53049-atomic_t fscache_n_acquires_no_cache;
53050-atomic_t fscache_n_acquires_ok;
53051-atomic_t fscache_n_acquires_nobufs;
53052-atomic_t fscache_n_acquires_oom;
53053+atomic_unchecked_t fscache_n_acquires;
53054+atomic_unchecked_t fscache_n_acquires_null;
53055+atomic_unchecked_t fscache_n_acquires_no_cache;
53056+atomic_unchecked_t fscache_n_acquires_ok;
53057+atomic_unchecked_t fscache_n_acquires_nobufs;
53058+atomic_unchecked_t fscache_n_acquires_oom;
53059
53060-atomic_t fscache_n_invalidates;
53061-atomic_t fscache_n_invalidates_run;
53062+atomic_unchecked_t fscache_n_invalidates;
53063+atomic_unchecked_t fscache_n_invalidates_run;
53064
53065-atomic_t fscache_n_updates;
53066-atomic_t fscache_n_updates_null;
53067-atomic_t fscache_n_updates_run;
53068+atomic_unchecked_t fscache_n_updates;
53069+atomic_unchecked_t fscache_n_updates_null;
53070+atomic_unchecked_t fscache_n_updates_run;
53071
53072-atomic_t fscache_n_relinquishes;
53073-atomic_t fscache_n_relinquishes_null;
53074-atomic_t fscache_n_relinquishes_waitcrt;
53075-atomic_t fscache_n_relinquishes_retire;
53076+atomic_unchecked_t fscache_n_relinquishes;
53077+atomic_unchecked_t fscache_n_relinquishes_null;
53078+atomic_unchecked_t fscache_n_relinquishes_waitcrt;
53079+atomic_unchecked_t fscache_n_relinquishes_retire;
53080
53081-atomic_t fscache_n_cookie_index;
53082-atomic_t fscache_n_cookie_data;
53083-atomic_t fscache_n_cookie_special;
53084+atomic_unchecked_t fscache_n_cookie_index;
53085+atomic_unchecked_t fscache_n_cookie_data;
53086+atomic_unchecked_t fscache_n_cookie_special;
53087
53088-atomic_t fscache_n_object_alloc;
53089-atomic_t fscache_n_object_no_alloc;
53090-atomic_t fscache_n_object_lookups;
53091-atomic_t fscache_n_object_lookups_negative;
53092-atomic_t fscache_n_object_lookups_positive;
53093-atomic_t fscache_n_object_lookups_timed_out;
53094-atomic_t fscache_n_object_created;
53095-atomic_t fscache_n_object_avail;
53096-atomic_t fscache_n_object_dead;
53097+atomic_unchecked_t fscache_n_object_alloc;
53098+atomic_unchecked_t fscache_n_object_no_alloc;
53099+atomic_unchecked_t fscache_n_object_lookups;
53100+atomic_unchecked_t fscache_n_object_lookups_negative;
53101+atomic_unchecked_t fscache_n_object_lookups_positive;
53102+atomic_unchecked_t fscache_n_object_lookups_timed_out;
53103+atomic_unchecked_t fscache_n_object_created;
53104+atomic_unchecked_t fscache_n_object_avail;
53105+atomic_unchecked_t fscache_n_object_dead;
53106
53107-atomic_t fscache_n_checkaux_none;
53108-atomic_t fscache_n_checkaux_okay;
53109-atomic_t fscache_n_checkaux_update;
53110-atomic_t fscache_n_checkaux_obsolete;
53111+atomic_unchecked_t fscache_n_checkaux_none;
53112+atomic_unchecked_t fscache_n_checkaux_okay;
53113+atomic_unchecked_t fscache_n_checkaux_update;
53114+atomic_unchecked_t fscache_n_checkaux_obsolete;
53115
53116 atomic_t fscache_n_cop_alloc_object;
53117 atomic_t fscache_n_cop_lookup_object;
53118@@ -138,118 +138,118 @@ static int fscache_stats_show(struct seq_file *m, void *v)
53119 seq_puts(m, "FS-Cache statistics\n");
53120
53121 seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
53122- atomic_read(&fscache_n_cookie_index),
53123- atomic_read(&fscache_n_cookie_data),
53124- atomic_read(&fscache_n_cookie_special));
53125+ atomic_read_unchecked(&fscache_n_cookie_index),
53126+ atomic_read_unchecked(&fscache_n_cookie_data),
53127+ atomic_read_unchecked(&fscache_n_cookie_special));
53128
53129 seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
53130- atomic_read(&fscache_n_object_alloc),
53131- atomic_read(&fscache_n_object_no_alloc),
53132- atomic_read(&fscache_n_object_avail),
53133- atomic_read(&fscache_n_object_dead));
53134+ atomic_read_unchecked(&fscache_n_object_alloc),
53135+ atomic_read_unchecked(&fscache_n_object_no_alloc),
53136+ atomic_read_unchecked(&fscache_n_object_avail),
53137+ atomic_read_unchecked(&fscache_n_object_dead));
53138 seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
53139- atomic_read(&fscache_n_checkaux_none),
53140- atomic_read(&fscache_n_checkaux_okay),
53141- atomic_read(&fscache_n_checkaux_update),
53142- atomic_read(&fscache_n_checkaux_obsolete));
53143+ atomic_read_unchecked(&fscache_n_checkaux_none),
53144+ atomic_read_unchecked(&fscache_n_checkaux_okay),
53145+ atomic_read_unchecked(&fscache_n_checkaux_update),
53146+ atomic_read_unchecked(&fscache_n_checkaux_obsolete));
53147
53148 seq_printf(m, "Pages : mrk=%u unc=%u\n",
53149- atomic_read(&fscache_n_marks),
53150- atomic_read(&fscache_n_uncaches));
53151+ atomic_read_unchecked(&fscache_n_marks),
53152+ atomic_read_unchecked(&fscache_n_uncaches));
53153
53154 seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
53155 " oom=%u\n",
53156- atomic_read(&fscache_n_acquires),
53157- atomic_read(&fscache_n_acquires_null),
53158- atomic_read(&fscache_n_acquires_no_cache),
53159- atomic_read(&fscache_n_acquires_ok),
53160- atomic_read(&fscache_n_acquires_nobufs),
53161- atomic_read(&fscache_n_acquires_oom));
53162+ atomic_read_unchecked(&fscache_n_acquires),
53163+ atomic_read_unchecked(&fscache_n_acquires_null),
53164+ atomic_read_unchecked(&fscache_n_acquires_no_cache),
53165+ atomic_read_unchecked(&fscache_n_acquires_ok),
53166+ atomic_read_unchecked(&fscache_n_acquires_nobufs),
53167+ atomic_read_unchecked(&fscache_n_acquires_oom));
53168
53169 seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u tmo=%u\n",
53170- atomic_read(&fscache_n_object_lookups),
53171- atomic_read(&fscache_n_object_lookups_negative),
53172- atomic_read(&fscache_n_object_lookups_positive),
53173- atomic_read(&fscache_n_object_created),
53174- atomic_read(&fscache_n_object_lookups_timed_out));
53175+ atomic_read_unchecked(&fscache_n_object_lookups),
53176+ atomic_read_unchecked(&fscache_n_object_lookups_negative),
53177+ atomic_read_unchecked(&fscache_n_object_lookups_positive),
53178+ atomic_read_unchecked(&fscache_n_object_created),
53179+ atomic_read_unchecked(&fscache_n_object_lookups_timed_out));
53180
53181 seq_printf(m, "Invals : n=%u run=%u\n",
53182- atomic_read(&fscache_n_invalidates),
53183- atomic_read(&fscache_n_invalidates_run));
53184+ atomic_read_unchecked(&fscache_n_invalidates),
53185+ atomic_read_unchecked(&fscache_n_invalidates_run));
53186
53187 seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
53188- atomic_read(&fscache_n_updates),
53189- atomic_read(&fscache_n_updates_null),
53190- atomic_read(&fscache_n_updates_run));
53191+ atomic_read_unchecked(&fscache_n_updates),
53192+ atomic_read_unchecked(&fscache_n_updates_null),
53193+ atomic_read_unchecked(&fscache_n_updates_run));
53194
53195 seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u rtr=%u\n",
53196- atomic_read(&fscache_n_relinquishes),
53197- atomic_read(&fscache_n_relinquishes_null),
53198- atomic_read(&fscache_n_relinquishes_waitcrt),
53199- atomic_read(&fscache_n_relinquishes_retire));
53200+ atomic_read_unchecked(&fscache_n_relinquishes),
53201+ atomic_read_unchecked(&fscache_n_relinquishes_null),
53202+ atomic_read_unchecked(&fscache_n_relinquishes_waitcrt),
53203+ atomic_read_unchecked(&fscache_n_relinquishes_retire));
53204
53205 seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
53206- atomic_read(&fscache_n_attr_changed),
53207- atomic_read(&fscache_n_attr_changed_ok),
53208- atomic_read(&fscache_n_attr_changed_nobufs),
53209- atomic_read(&fscache_n_attr_changed_nomem),
53210- atomic_read(&fscache_n_attr_changed_calls));
53211+ atomic_read_unchecked(&fscache_n_attr_changed),
53212+ atomic_read_unchecked(&fscache_n_attr_changed_ok),
53213+ atomic_read_unchecked(&fscache_n_attr_changed_nobufs),
53214+ atomic_read_unchecked(&fscache_n_attr_changed_nomem),
53215+ atomic_read_unchecked(&fscache_n_attr_changed_calls));
53216
53217 seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u int=%u\n",
53218- atomic_read(&fscache_n_allocs),
53219- atomic_read(&fscache_n_allocs_ok),
53220- atomic_read(&fscache_n_allocs_wait),
53221- atomic_read(&fscache_n_allocs_nobufs),
53222- atomic_read(&fscache_n_allocs_intr));
53223+ atomic_read_unchecked(&fscache_n_allocs),
53224+ atomic_read_unchecked(&fscache_n_allocs_ok),
53225+ atomic_read_unchecked(&fscache_n_allocs_wait),
53226+ atomic_read_unchecked(&fscache_n_allocs_nobufs),
53227+ atomic_read_unchecked(&fscache_n_allocs_intr));
53228 seq_printf(m, "Allocs : ops=%u owt=%u abt=%u\n",
53229- atomic_read(&fscache_n_alloc_ops),
53230- atomic_read(&fscache_n_alloc_op_waits),
53231- atomic_read(&fscache_n_allocs_object_dead));
53232+ atomic_read_unchecked(&fscache_n_alloc_ops),
53233+ atomic_read_unchecked(&fscache_n_alloc_op_waits),
53234+ atomic_read_unchecked(&fscache_n_allocs_object_dead));
53235
53236 seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
53237 " int=%u oom=%u\n",
53238- atomic_read(&fscache_n_retrievals),
53239- atomic_read(&fscache_n_retrievals_ok),
53240- atomic_read(&fscache_n_retrievals_wait),
53241- atomic_read(&fscache_n_retrievals_nodata),
53242- atomic_read(&fscache_n_retrievals_nobufs),
53243- atomic_read(&fscache_n_retrievals_intr),
53244- atomic_read(&fscache_n_retrievals_nomem));
53245+ atomic_read_unchecked(&fscache_n_retrievals),
53246+ atomic_read_unchecked(&fscache_n_retrievals_ok),
53247+ atomic_read_unchecked(&fscache_n_retrievals_wait),
53248+ atomic_read_unchecked(&fscache_n_retrievals_nodata),
53249+ atomic_read_unchecked(&fscache_n_retrievals_nobufs),
53250+ atomic_read_unchecked(&fscache_n_retrievals_intr),
53251+ atomic_read_unchecked(&fscache_n_retrievals_nomem));
53252 seq_printf(m, "Retrvls: ops=%u owt=%u abt=%u\n",
53253- atomic_read(&fscache_n_retrieval_ops),
53254- atomic_read(&fscache_n_retrieval_op_waits),
53255- atomic_read(&fscache_n_retrievals_object_dead));
53256+ atomic_read_unchecked(&fscache_n_retrieval_ops),
53257+ atomic_read_unchecked(&fscache_n_retrieval_op_waits),
53258+ atomic_read_unchecked(&fscache_n_retrievals_object_dead));
53259
53260 seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
53261- atomic_read(&fscache_n_stores),
53262- atomic_read(&fscache_n_stores_ok),
53263- atomic_read(&fscache_n_stores_again),
53264- atomic_read(&fscache_n_stores_nobufs),
53265- atomic_read(&fscache_n_stores_oom));
53266+ atomic_read_unchecked(&fscache_n_stores),
53267+ atomic_read_unchecked(&fscache_n_stores_ok),
53268+ atomic_read_unchecked(&fscache_n_stores_again),
53269+ atomic_read_unchecked(&fscache_n_stores_nobufs),
53270+ atomic_read_unchecked(&fscache_n_stores_oom));
53271 seq_printf(m, "Stores : ops=%u run=%u pgs=%u rxd=%u olm=%u\n",
53272- atomic_read(&fscache_n_store_ops),
53273- atomic_read(&fscache_n_store_calls),
53274- atomic_read(&fscache_n_store_pages),
53275- atomic_read(&fscache_n_store_radix_deletes),
53276- atomic_read(&fscache_n_store_pages_over_limit));
53277+ atomic_read_unchecked(&fscache_n_store_ops),
53278+ atomic_read_unchecked(&fscache_n_store_calls),
53279+ atomic_read_unchecked(&fscache_n_store_pages),
53280+ atomic_read_unchecked(&fscache_n_store_radix_deletes),
53281+ atomic_read_unchecked(&fscache_n_store_pages_over_limit));
53282
53283 seq_printf(m, "VmScan : nos=%u gon=%u bsy=%u can=%u wt=%u\n",
53284- atomic_read(&fscache_n_store_vmscan_not_storing),
53285- atomic_read(&fscache_n_store_vmscan_gone),
53286- atomic_read(&fscache_n_store_vmscan_busy),
53287- atomic_read(&fscache_n_store_vmscan_cancelled),
53288- atomic_read(&fscache_n_store_vmscan_wait));
53289+ atomic_read_unchecked(&fscache_n_store_vmscan_not_storing),
53290+ atomic_read_unchecked(&fscache_n_store_vmscan_gone),
53291+ atomic_read_unchecked(&fscache_n_store_vmscan_busy),
53292+ atomic_read_unchecked(&fscache_n_store_vmscan_cancelled),
53293+ atomic_read_unchecked(&fscache_n_store_vmscan_wait));
53294
53295 seq_printf(m, "Ops : pend=%u run=%u enq=%u can=%u rej=%u\n",
53296- atomic_read(&fscache_n_op_pend),
53297- atomic_read(&fscache_n_op_run),
53298- atomic_read(&fscache_n_op_enqueue),
53299- atomic_read(&fscache_n_op_cancelled),
53300- atomic_read(&fscache_n_op_rejected));
53301+ atomic_read_unchecked(&fscache_n_op_pend),
53302+ atomic_read_unchecked(&fscache_n_op_run),
53303+ atomic_read_unchecked(&fscache_n_op_enqueue),
53304+ atomic_read_unchecked(&fscache_n_op_cancelled),
53305+ atomic_read_unchecked(&fscache_n_op_rejected));
53306 seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
53307- atomic_read(&fscache_n_op_deferred_release),
53308- atomic_read(&fscache_n_op_release),
53309- atomic_read(&fscache_n_op_gc));
53310+ atomic_read_unchecked(&fscache_n_op_deferred_release),
53311+ atomic_read_unchecked(&fscache_n_op_release),
53312+ atomic_read_unchecked(&fscache_n_op_gc));
53313
53314 seq_printf(m, "CacheOp: alo=%d luo=%d luc=%d gro=%d\n",
53315 atomic_read(&fscache_n_cop_alloc_object),
53316diff --git a/fs/fuse/cuse.c b/fs/fuse/cuse.c
53317index aef34b1..59bfd7b 100644
53318--- a/fs/fuse/cuse.c
53319+++ b/fs/fuse/cuse.c
53320@@ -600,10 +600,12 @@ static int __init cuse_init(void)
53321 INIT_LIST_HEAD(&cuse_conntbl[i]);
53322
53323 /* inherit and extend fuse_dev_operations */
53324- cuse_channel_fops = fuse_dev_operations;
53325- cuse_channel_fops.owner = THIS_MODULE;
53326- cuse_channel_fops.open = cuse_channel_open;
53327- cuse_channel_fops.release = cuse_channel_release;
53328+ pax_open_kernel();
53329+ memcpy((void *)&cuse_channel_fops, &fuse_dev_operations, sizeof(fuse_dev_operations));
53330+ *(void **)&cuse_channel_fops.owner = THIS_MODULE;
53331+ *(void **)&cuse_channel_fops.open = cuse_channel_open;
53332+ *(void **)&cuse_channel_fops.release = cuse_channel_release;
53333+ pax_close_kernel();
53334
53335 cuse_class = class_create(THIS_MODULE, "cuse");
53336 if (IS_ERR(cuse_class))
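The cuse hunk deals with PaX constification: under that hardening, structures made up entirely of function pointers (file_operations included) are placed in read-only memory, so cuse can no longer build cuse_channel_fops by plain field assignment. The patch instead opens a write window with pax_open_kernel(), copies fuse_dev_operations wholesale, pokes the three overridden members through void-pointer casts (to get past the const qualifier), and closes the window again. On x86 the window is — roughly, and as an assumption based on PaX's usual approach — a preemption-disabled toggle of the CR0 write-protect bit:

	/* condensed sketch of the x86 mechanism; not the verbatim patch code */
	static inline unsigned long pax_open_kernel(void)
	{
		unsigned long cr0;

		preempt_disable();
		barrier();
		cr0 = read_cr0() ^ X86_CR0_WP;	/* clear WP: kernel may write RO pages */
		write_cr0(cr0);
		return cr0;
	}

	static inline unsigned long pax_close_kernel(void)
	{
		unsigned long cr0;

		cr0 = read_cr0() ^ X86_CR0_WP;	/* restore WP */
		write_cr0(cr0);
		barrier();
		preempt_enable();
		return cr0;
	}

Keeping the window as small as possible matters: between the two calls, every read-only kernel page is writable on the local CPU.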
53337diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
53338index 1d55f94..088da65 100644
53339--- a/fs/fuse/dev.c
53340+++ b/fs/fuse/dev.c
53341@@ -1339,7 +1339,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
53342 ret = 0;
53343 pipe_lock(pipe);
53344
53345- if (!pipe->readers) {
53346+ if (!atomic_read(&pipe->readers)) {
53347 send_sig(SIGPIPE, current, 0);
53348 if (!ret)
53349 ret = -EPIPE;
53350@@ -1364,7 +1364,7 @@ static ssize_t fuse_dev_splice_read(struct file *in, loff_t *ppos,
53351 page_nr++;
53352 ret += buf->len;
53353
53354- if (pipe->files)
53355+ if (atomic_read(&pipe->files))
53356 do_wakeup = 1;
53357 }
53358
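These two fuse hunks follow from grsecurity turning the reader/writer/file counts in struct pipe_inode_info into atomics (hardening their updates against refcount races), after which every open-coded read must go through atomic_read(). Sketched shape of the changed structure, inferred from the conversions here rather than quoted from the patch:

	struct pipe_inode_info {
		/* ... */
		atomic_t readers;	/* was: unsigned int readers */
		atomic_t writers;	/* was: unsigned int writers */
		atomic_t files;		/* was: unsigned int files   */
		/* ... */
	};
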
53359diff --git a/fs/fuse/dir.c b/fs/fuse/dir.c
53360index f3f783d..bf11a8e 100644
53361--- a/fs/fuse/dir.c
53362+++ b/fs/fuse/dir.c
53363@@ -1415,7 +1415,7 @@ static char *read_link(struct dentry *dentry)
53364 return link;
53365 }
53366
53367-static void free_link(char *link)
53368+static void free_link(const char *link)
53369 {
53370 if (!IS_ERR(link))
53371 free_page((unsigned long) link);
53372diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
53373index 62b484e..0f9a140 100644
53374--- a/fs/gfs2/inode.c
53375+++ b/fs/gfs2/inode.c
53376@@ -1441,7 +1441,7 @@ out:
53377
53378 static void gfs2_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
53379 {
53380- char *s = nd_get_link(nd);
53381+ const char *s = nd_get_link(nd);
53382 if (!IS_ERR(s))
53383 kfree(s);
53384 }
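The fuse and gfs2 hunks above are const-correctness companions to a wider change in this patch set that has the symlink machinery (nd_get_link() and friends) hand back const char *: the follower may free the buffer but must never write through it. The stricter type is free at the call sites because the kernel's deallocators already accept it:

	/* kfree() is declared as taking const void *, and free_page()
	 * takes an unsigned long, so no casts away from const are needed */
	static void free_link(const char *link)
	{
		if (!IS_ERR(link))
			free_page((unsigned long)link);
	}
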
53385diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
53386index a3f868a..bb308ae 100644
53387--- a/fs/hugetlbfs/inode.c
53388+++ b/fs/hugetlbfs/inode.c
53389@@ -152,6 +152,7 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
53390 struct mm_struct *mm = current->mm;
53391 struct vm_area_struct *vma;
53392 struct hstate *h = hstate_file(file);
53393+ unsigned long offset = gr_rand_threadstack_offset(mm, file, flags);
53394 struct vm_unmapped_area_info info;
53395
53396 if (len & ~huge_page_mask(h))
53397@@ -165,17 +166,26 @@ hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
53398 return addr;
53399 }
53400
53401+#ifdef CONFIG_PAX_RANDMMAP
53402+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
53403+#endif
53404+
53405 if (addr) {
53406 addr = ALIGN(addr, huge_page_size(h));
53407 vma = find_vma(mm, addr);
53408- if (TASK_SIZE - len >= addr &&
53409- (!vma || addr + len <= vma->vm_start))
53410+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
53411 return addr;
53412 }
53413
53414 info.flags = 0;
53415 info.length = len;
53416 info.low_limit = TASK_UNMAPPED_BASE;
53417+
53418+#ifdef CONFIG_PAX_RANDMMAP
53419+ if (mm->pax_flags & MF_PAX_RANDMMAP)
53420+ info.low_limit += mm->delta_mmap;
53421+#endif
53422+
53423 info.high_limit = TASK_SIZE;
53424 info.align_mask = PAGE_MASK & ~huge_page_mask(h);
53425 info.align_offset = 0;
53426@@ -898,7 +908,7 @@ static struct file_system_type hugetlbfs_fs_type = {
53427 };
53428 MODULE_ALIAS_FS("hugetlbfs");
53429
53430-static struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
53431+struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
53432
53433 static int can_do_hugetlb_shm(void)
53434 {
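The hugetlbfs hunk wires huge-page mappings into grsecurity's mmap randomization. Three things change: a per-call random offset is obtained from gr_rand_threadstack_offset(); the open-coded "does the hint fit below the next vma" test becomes check_heap_stack_gap(), which also enforces a guard gap in front of the neighbouring vma; and under PAX_RANDMMAP the caller's address hint is ignored and the search floor is raised by the per-mm delta (mm->delta_mmap). A sketch of the gap check's semantics, with an illustrative body that is an assumption rather than the patch's exact logic:

	/* accept [addr, addr+len) only if it clears the next vma plus the
	 * randomized gap kept in front of it; real code must also guard
	 * against the addition wrapping */
	static bool check_heap_stack_gap(const struct vm_area_struct *vma,
					 unsigned long addr, unsigned long len,
					 unsigned long offset)
	{
		if (!vma)
			return true;	/* nothing mapped above: it fits */
		return addr + len + offset <= vma->vm_start;
	}

(The same hunk also drops the static from hugetlbfs_vfsmount, presumably so other grsecurity code can reference the mount table directly.)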
53435diff --git a/fs/inode.c b/fs/inode.c
53436index 00d5fc3..98ce7d7 100644
53437--- a/fs/inode.c
53438+++ b/fs/inode.c
53439@@ -878,8 +878,8 @@ unsigned int get_next_ino(void)
53440
53441 #ifdef CONFIG_SMP
53442 if (unlikely((res & (LAST_INO_BATCH-1)) == 0)) {
53443- static atomic_t shared_last_ino;
53444- int next = atomic_add_return(LAST_INO_BATCH, &shared_last_ino);
53445+ static atomic_unchecked_t shared_last_ino;
53446+ int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
53447
53448 res = next - LAST_INO_BATCH;
53449 }
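get_next_ino() is the canonical example of a counter that must be allowed to wrap: it hands out inode numbers for pseudo filesystems in per-CPU batches of LAST_INO_BATCH, so with a batch size of 1024 the first CPU to refill takes next = 1024 and serves 0..1023, the second takes 2048 and serves 1024..2047, and after about four billion inodes the numbers simply repeat by design. A REFCOUNT trap on that wrap would be a false positive, hence the unchecked counter:

	/* each CPU refills from the shared counter once per batch */
	static atomic_unchecked_t shared_last_ino;
	int next = atomic_add_return_unchecked(LAST_INO_BATCH, &shared_last_ino);
	int res  = next - LAST_INO_BATCH;	/* first ino of this CPU's batch */
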
53450diff --git a/fs/jffs2/erase.c b/fs/jffs2/erase.c
53451index 4a6cf28..d3a29d3 100644
53452--- a/fs/jffs2/erase.c
53453+++ b/fs/jffs2/erase.c
53454@@ -452,7 +452,8 @@ static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseb
53455 struct jffs2_unknown_node marker = {
53456 .magic = cpu_to_je16(JFFS2_MAGIC_BITMASK),
53457 .nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
53458- .totlen = cpu_to_je32(c->cleanmarker_size)
53459+ .totlen = cpu_to_je32(c->cleanmarker_size),
53460+ .hdr_crc = cpu_to_je32(0)
53461 };
53462
53463 jffs2_prealloc_raw_node_refs(c, jeb, 1);
53464diff --git a/fs/jffs2/wbuf.c b/fs/jffs2/wbuf.c
53465index a6597d6..41b30ec 100644
53466--- a/fs/jffs2/wbuf.c
53467+++ b/fs/jffs2/wbuf.c
53468@@ -1023,7 +1023,8 @@ static const struct jffs2_unknown_node oob_cleanmarker =
53469 {
53470 .magic = constant_cpu_to_je16(JFFS2_MAGIC_BITMASK),
53471 .nodetype = constant_cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
53472- .totlen = constant_cpu_to_je32(8)
53473+ .totlen = constant_cpu_to_je32(8),
53474+ .hdr_crc = constant_cpu_to_je32(0)
53475 };
53476
53477 /*
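Both jffs2 hunks add an explicit .hdr_crc = 0 to cleanmarker initializers. At runtime this is a no-op: C99 6.7.8p21 already zero-fills members omitted from a designated initializer. Making the zero explicit leaves no partially-specified on-flash structure for grsecurity's stricter compile-time checking to object to (the patch gives no rationale, so that reading is an inference). The two forms are byte-for-byte identical:

	struct jffs2_unknown_node a = {
		.magic    = cpu_to_je16(JFFS2_MAGIC_BITMASK),
		.nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
		.totlen   = cpu_to_je32(c->cleanmarker_size),
	};	/* .hdr_crc implicitly zero */

	struct jffs2_unknown_node b = {
		.magic    = cpu_to_je16(JFFS2_MAGIC_BITMASK),
		.nodetype = cpu_to_je16(JFFS2_NODETYPE_CLEANMARKER),
		.totlen   = cpu_to_je32(c->cleanmarker_size),
		.hdr_crc  = cpu_to_je32(0),	/* now explicit */
	};
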
53478diff --git a/fs/jfs/super.c b/fs/jfs/super.c
53479index 788e0a9..8433098 100644
53480--- a/fs/jfs/super.c
53481+++ b/fs/jfs/super.c
53482@@ -878,7 +878,7 @@ static int __init init_jfs_fs(void)
53483
53484 jfs_inode_cachep =
53485 kmem_cache_create("jfs_ip", sizeof(struct jfs_inode_info), 0,
53486- SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD,
53487+ SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_USERCOPY,
53488 init_once);
53489 if (jfs_inode_cachep == NULL)
53490 return -ENOMEM;
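The jfs change is a PAX_USERCOPY whitelist entry: under that feature, copy_to_user()/copy_from_user() refuse to move slab objects unless the object's cache was created with SLAB_USERCOPY, and jfs_inode_info contains inline data that legitimately crosses the boundary (short symlink targets, for one), so its cache opts in. A hypothetical sketch of the gate on the copy path — the helper names here are illustrative, not the patch's:

	/* hypothetical helper: true if ptr's cache carries SLAB_USERCOPY */
	extern bool slab_usercopy_allowed(const void *ptr);

	static inline void usercopy_check(const void *ptr)
	{
		struct page *page = virt_to_head_page(ptr);

		if (PageSlab(page) && !slab_usercopy_allowed(ptr))
			BUG();	/* object's cache never opted in to usercopy */
	}
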
53491diff --git a/fs/libfs.c b/fs/libfs.c
53492index 916da8c..1588998 100644
53493--- a/fs/libfs.c
53494+++ b/fs/libfs.c
53495@@ -165,6 +165,9 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
53496
53497 for (p=q->next; p != &dentry->d_subdirs; p=p->next) {
53498 struct dentry *next;
53499+ char d_name[sizeof(next->d_iname)];
53500+ const unsigned char *name;
53501+
53502 next = list_entry(p, struct dentry, d_u.d_child);
53503 spin_lock_nested(&next->d_lock, DENTRY_D_LOCK_NESTED);
53504 if (!simple_positive(next)) {
53505@@ -174,7 +177,12 @@ int dcache_readdir(struct file * filp, void * dirent, filldir_t filldir)
53506
53507 spin_unlock(&next->d_lock);
53508 spin_unlock(&dentry->d_lock);
53509- if (filldir(dirent, next->d_name.name,
53510+ name = next->d_name.name;
53511+ if (name == next->d_iname) {
53512+ memcpy(d_name, name, next->d_name.len);
53513+ name = d_name;
53514+ }
53515+ if (filldir(dirent, name,
53516 next->d_name.len, filp->f_pos,
53517 next->d_inode->i_ino,
53518 dt_type(next->d_inode)) < 0)
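
The dcache_readdir() change snapshots short names before handing them to filldir(). Names up to the inline limit are stored in d_iname inside the dentry itself, and once next->d_lock is dropped a concurrent rename can rewrite those bytes mid-copy; taking a stack copy appears to make the buffer stable for the duration of the callback. Longer names are separately allocated and stay valid for the dentry's lifetime, which is why only the inline case is copied. The same snapshot pattern in a userspace sketch (struct layout is illustrative):

    #include <stdio.h>
    #include <string.h>

    #define DNAME_INLINE_LEN 32

    struct fake_dentry {
            const char  *name;                     /* points at iname or heap */
            unsigned int len;
            char         iname[DNAME_INLINE_LEN];  /* inline short-name storage */
    };

    /* Snapshot the name only when it lives in the dentry's mutable
     * inline storage; heap-backed names are stable for our purposes. */
    static const char *stable_name(const struct fake_dentry *d,
                                   char buf[DNAME_INLINE_LEN])
    {
            if (d->name == d->iname) {
                    memcpy(buf, d->name, d->len);
                    return buf;
            }
            return d->name;
    }

    int main(void)
    {
            struct fake_dentry d;
            char snap[DNAME_INLINE_LEN];

            strcpy(d.iname, "short");
            d.name = d.iname;
            d.len = 5;
            printf("%.*s\n", (int)d.len, stable_name(&d, snap));
            return 0;
    }
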
53519diff --git a/fs/lockd/clntproc.c b/fs/lockd/clntproc.c
53520index 9760ecb..9b838ef 100644
53521--- a/fs/lockd/clntproc.c
53522+++ b/fs/lockd/clntproc.c
53523@@ -36,11 +36,11 @@ static const struct rpc_call_ops nlmclnt_cancel_ops;
53524 /*
53525 * Cookie counter for NLM requests
53526 */
53527-static atomic_t nlm_cookie = ATOMIC_INIT(0x1234);
53528+static atomic_unchecked_t nlm_cookie = ATOMIC_INIT(0x1234);
53529
53530 void nlmclnt_next_cookie(struct nlm_cookie *c)
53531 {
53532- u32 cookie = atomic_inc_return(&nlm_cookie);
53533+ u32 cookie = atomic_inc_return_unchecked(&nlm_cookie);
53534
53535 memcpy(c->data, &cookie, 4);
53536 c->len=4;
53537diff --git a/fs/lockd/svc.c b/fs/lockd/svc.c
53538index a2aa97d..10d6c41 100644
53539--- a/fs/lockd/svc.c
53540+++ b/fs/lockd/svc.c
53541@@ -305,7 +305,7 @@ static int lockd_start_svc(struct svc_serv *serv)
53542 svc_sock_update_bufs(serv);
53543 serv->sv_maxconn = nlm_max_connections;
53544
53545- nlmsvc_task = kthread_run(lockd, nlmsvc_rqst, serv->sv_name);
53546+ nlmsvc_task = kthread_run(lockd, nlmsvc_rqst, "%s", serv->sv_name);
53547 if (IS_ERR(nlmsvc_task)) {
53548 error = PTR_ERR(nlmsvc_task);
53549 printk(KERN_WARNING
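
The lockd change is a format-string fix: kthread_run() passes its name argument through a printf-style formatter, so handing it serv->sv_name directly would let any '%' in the service name be parsed as a conversion specifier. Routing the string through "%s" makes it inert data; the identical one-liner appears again below in fs/nfs/callback.c and fs/nfs/nfs4state.c. A userspace reminder of the idiom:

    #include <stdio.h>

    int main(void)
    {
            const char *name = "lockd %x %x";   /* hostile-looking input */

            /* printf(name) would interpret the %x conversions and read
             * whatever happens to be in the variadic slots; undefined
             * behavior. printf("%s", name) prints the bytes verbatim. */
            printf("%s\n", name);
            return 0;
    }
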
53550diff --git a/fs/locks.c b/fs/locks.c
53551index cb424a4..850e4dd 100644
53552--- a/fs/locks.c
53553+++ b/fs/locks.c
53554@@ -2064,16 +2064,16 @@ void locks_remove_flock(struct file *filp)
53555 return;
53556
53557 if (filp->f_op && filp->f_op->flock) {
53558- struct file_lock fl = {
53559+ struct file_lock flock = {
53560 .fl_pid = current->tgid,
53561 .fl_file = filp,
53562 .fl_flags = FL_FLOCK,
53563 .fl_type = F_UNLCK,
53564 .fl_end = OFFSET_MAX,
53565 };
53566- filp->f_op->flock(filp, F_SETLKW, &fl);
53567- if (fl.fl_ops && fl.fl_ops->fl_release_private)
53568- fl.fl_ops->fl_release_private(&fl);
53569+ filp->f_op->flock(filp, F_SETLKW, &flock);
53570+ if (flock.fl_ops && flock.fl_ops->fl_release_private)
53571+ flock.fl_ops->fl_release_private(&flock);
53572 }
53573
53574 lock_flocks();
53575diff --git a/fs/namei.c b/fs/namei.c
53576index 9ed9361..2b72db1 100644
53577--- a/fs/namei.c
53578+++ b/fs/namei.c
53579@@ -319,16 +319,32 @@ int generic_permission(struct inode *inode, int mask)
53580 if (ret != -EACCES)
53581 return ret;
53582
53583+#ifdef CONFIG_GRKERNSEC
53584+ /* we'll block if we have to log due to a denied capability use */
53585+ if (mask & MAY_NOT_BLOCK)
53586+ return -ECHILD;
53587+#endif
53588+
53589 if (S_ISDIR(inode->i_mode)) {
53590 /* DACs are overridable for directories */
53591- if (inode_capable(inode, CAP_DAC_OVERRIDE))
53592- return 0;
53593 if (!(mask & MAY_WRITE))
53594- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
53595+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
53596+ inode_capable(inode, CAP_DAC_READ_SEARCH))
53597 return 0;
53598+ if (inode_capable(inode, CAP_DAC_OVERRIDE))
53599+ return 0;
53600 return -EACCES;
53601 }
53602 /*
53603+ * Searching includes executable on directories, else just read.
53604+ */
53605+ mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
53606+ if (mask == MAY_READ)
53607+ if (inode_capable_nolog(inode, CAP_DAC_OVERRIDE) ||
53608+ inode_capable(inode, CAP_DAC_READ_SEARCH))
53609+ return 0;
53610+
53611+ /*
53612 * Read/write DACs are always overridable.
53613 * Executable DACs are overridable when there is
53614 * at least one exec bit set.
53615@@ -337,14 +353,6 @@ int generic_permission(struct inode *inode, int mask)
53616 if (inode_capable(inode, CAP_DAC_OVERRIDE))
53617 return 0;
53618
53619- /*
53620- * Searching includes executable on directories, else just read.
53621- */
53622- mask &= MAY_READ | MAY_WRITE | MAY_EXEC;
53623- if (mask == MAY_READ)
53624- if (inode_capable(inode, CAP_DAC_READ_SEARCH))
53625- return 0;
53626-
53627 return -EACCES;
53628 }
53629
53630@@ -820,7 +828,7 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
53631 {
53632 struct dentry *dentry = link->dentry;
53633 int error;
53634- char *s;
53635+ const char *s;
53636
53637 BUG_ON(nd->flags & LOOKUP_RCU);
53638
53639@@ -841,6 +849,12 @@ follow_link(struct path *link, struct nameidata *nd, void **p)
53640 if (error)
53641 goto out_put_nd_path;
53642
53643+ if (gr_handle_follow_link(dentry->d_parent->d_inode,
53644+ dentry->d_inode, dentry, nd->path.mnt)) {
53645+ error = -EACCES;
53646+ goto out_put_nd_path;
53647+ }
53648+
53649 nd->last_type = LAST_BIND;
53650 *p = dentry->d_inode->i_op->follow_link(dentry, nd);
53651 error = PTR_ERR(*p);
53652@@ -1588,6 +1602,8 @@ static inline int nested_symlink(struct path *path, struct nameidata *nd)
53653 if (res)
53654 break;
53655 res = walk_component(nd, path, LOOKUP_FOLLOW);
53656+ if (res >= 0 && gr_handle_symlink_owner(&link, nd->inode))
53657+ res = -EACCES;
53658 put_link(nd, &link, cookie);
53659 } while (res > 0);
53660
53661@@ -1686,7 +1702,7 @@ EXPORT_SYMBOL(full_name_hash);
53662 static inline unsigned long hash_name(const char *name, unsigned int *hashp)
53663 {
53664 unsigned long a, b, adata, bdata, mask, hash, len;
53665- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
53666+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
53667
53668 hash = a = 0;
53669 len = -sizeof(unsigned long);
53670@@ -1968,6 +1984,8 @@ static int path_lookupat(int dfd, const char *name,
53671 if (err)
53672 break;
53673 err = lookup_last(nd, &path);
53674+ if (!err && gr_handle_symlink_owner(&link, nd->inode))
53675+ err = -EACCES;
53676 put_link(nd, &link, cookie);
53677 }
53678 }
53679@@ -1975,6 +1993,13 @@ static int path_lookupat(int dfd, const char *name,
53680 if (!err)
53681 err = complete_walk(nd);
53682
53683+ if (!err && !(nd->flags & LOOKUP_PARENT)) {
53684+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
53685+ path_put(&nd->path);
53686+ err = -ENOENT;
53687+ }
53688+ }
53689+
53690 if (!err && nd->flags & LOOKUP_DIRECTORY) {
53691 if (!can_lookup(nd->inode)) {
53692 path_put(&nd->path);
53693@@ -2002,8 +2027,15 @@ static int filename_lookup(int dfd, struct filename *name,
53694 retval = path_lookupat(dfd, name->name,
53695 flags | LOOKUP_REVAL, nd);
53696
53697- if (likely(!retval))
53698+ if (likely(!retval)) {
53699 audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
53700+ if (name->name[0] != '/' && nd->path.dentry && nd->inode) {
53701+ if (!gr_chroot_fchdir(nd->path.dentry, nd->path.mnt)) {
53702+ path_put(&nd->path);
53703+ return -ENOENT;
53704+ }
53705+ }
53706+ }
53707 return retval;
53708 }
53709
53710@@ -2381,6 +2413,13 @@ static int may_open(struct path *path, int acc_mode, int flag)
53711 if (flag & O_NOATIME && !inode_owner_or_capable(inode))
53712 return -EPERM;
53713
53714+ if (gr_handle_rofs_blockwrite(dentry, path->mnt, acc_mode))
53715+ return -EPERM;
53716+ if (gr_handle_rawio(inode))
53717+ return -EPERM;
53718+ if (!gr_acl_handle_open(dentry, path->mnt, acc_mode))
53719+ return -EACCES;
53720+
53721 return 0;
53722 }
53723
53724@@ -2602,7 +2641,7 @@ looked_up:
53725 * cleared otherwise prior to returning.
53726 */
53727 static int lookup_open(struct nameidata *nd, struct path *path,
53728- struct file *file,
53729+ struct path *link, struct file *file,
53730 const struct open_flags *op,
53731 bool got_write, int *opened)
53732 {
53733@@ -2637,6 +2676,17 @@ static int lookup_open(struct nameidata *nd, struct path *path,
53734 /* Negative dentry, just create the file */
53735 if (!dentry->d_inode && (op->open_flag & O_CREAT)) {
53736 umode_t mode = op->mode;
53737+
53738+ if (link && gr_handle_symlink_owner(link, dir->d_inode)) {
53739+ error = -EACCES;
53740+ goto out_dput;
53741+ }
53742+
53743+ if (!gr_acl_handle_creat(dentry, dir, nd->path.mnt, op->open_flag, op->acc_mode, mode)) {
53744+ error = -EACCES;
53745+ goto out_dput;
53746+ }
53747+
53748 if (!IS_POSIXACL(dir->d_inode))
53749 mode &= ~current_umask();
53750 /*
53751@@ -2658,6 +2708,8 @@ static int lookup_open(struct nameidata *nd, struct path *path,
53752 nd->flags & LOOKUP_EXCL);
53753 if (error)
53754 goto out_dput;
53755+ else
53756+ gr_handle_create(dentry, nd->path.mnt);
53757 }
53758 out_no_open:
53759 path->dentry = dentry;
53760@@ -2672,7 +2724,7 @@ out_dput:
53761 /*
53762 * Handle the last step of open()
53763 */
53764-static int do_last(struct nameidata *nd, struct path *path,
53765+static int do_last(struct nameidata *nd, struct path *path, struct path *link,
53766 struct file *file, const struct open_flags *op,
53767 int *opened, struct filename *name)
53768 {
53769@@ -2701,16 +2753,32 @@ static int do_last(struct nameidata *nd, struct path *path,
53770 error = complete_walk(nd);
53771 if (error)
53772 return error;
53773+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
53774+ error = -ENOENT;
53775+ goto out;
53776+ }
53777 audit_inode(name, nd->path.dentry, 0);
53778 if (open_flag & O_CREAT) {
53779 error = -EISDIR;
53780 goto out;
53781 }
53782+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
53783+ error = -EACCES;
53784+ goto out;
53785+ }
53786 goto finish_open;
53787 case LAST_BIND:
53788 error = complete_walk(nd);
53789 if (error)
53790 return error;
53791+ if (!gr_acl_handle_hidden_file(dir, nd->path.mnt)) {
53792+ error = -ENOENT;
53793+ goto out;
53794+ }
53795+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
53796+ error = -EACCES;
53797+ goto out;
53798+ }
53799 audit_inode(name, dir, 0);
53800 goto finish_open;
53801 }
53802@@ -2759,7 +2827,7 @@ retry_lookup:
53803 */
53804 }
53805 mutex_lock(&dir->d_inode->i_mutex);
53806- error = lookup_open(nd, path, file, op, got_write, opened);
53807+ error = lookup_open(nd, path, link, file, op, got_write, opened);
53808 mutex_unlock(&dir->d_inode->i_mutex);
53809
53810 if (error <= 0) {
53811@@ -2783,11 +2851,28 @@ retry_lookup:
53812 goto finish_open_created;
53813 }
53814
53815+ if (!gr_acl_handle_hidden_file(path->dentry, nd->path.mnt)) {
53816+ error = -ENOENT;
53817+ goto exit_dput;
53818+ }
53819+ if (link && gr_handle_symlink_owner(link, path->dentry->d_inode)) {
53820+ error = -EACCES;
53821+ goto exit_dput;
53822+ }
53823+
53824 /*
53825 * create/update audit record if it already exists.
53826 */
53827- if (path->dentry->d_inode)
53828+ if (path->dentry->d_inode) {
53829+	/* this check applies only when O_CREAT is specified; all other
53830+	   checks belong in may_open */
53831+ if (gr_handle_fifo(path->dentry, path->mnt, dir, open_flag, acc_mode)) {
53832+ error = -EACCES;
53833+ goto exit_dput;
53834+ }
53835+
53836 audit_inode(name, path->dentry, 0);
53837+ }
53838
53839 /*
53840 * If atomic_open() acquired write access it is dropped now due to
53841@@ -2828,6 +2913,11 @@ finish_lookup:
53842 }
53843 }
53844 BUG_ON(inode != path->dentry->d_inode);
53845+ /* if we're resolving a symlink to another symlink */
53846+ if (link && gr_handle_symlink_owner(link, inode)) {
53847+ error = -EACCES;
53848+ goto out;
53849+ }
53850 return 1;
53851 }
53852
53853@@ -2837,7 +2927,6 @@ finish_lookup:
53854 save_parent.dentry = nd->path.dentry;
53855 save_parent.mnt = mntget(path->mnt);
53856 nd->path.dentry = path->dentry;
53857-
53858 }
53859 nd->inode = inode;
53860 /* Why this, you ask? _Now_ we might have grown LOOKUP_JUMPED... */
53861@@ -2846,6 +2935,16 @@ finish_lookup:
53862 path_put(&save_parent);
53863 return error;
53864 }
53865+
53866+ if (!gr_acl_handle_hidden_file(nd->path.dentry, nd->path.mnt)) {
53867+ error = -ENOENT;
53868+ goto out;
53869+ }
53870+ if (link && gr_handle_symlink_owner(link, nd->inode)) {
53871+ error = -EACCES;
53872+ goto out;
53873+ }
53874+
53875 error = -EISDIR;
53876 if ((open_flag & O_CREAT) && S_ISDIR(nd->inode->i_mode))
53877 goto out;
53878@@ -2944,7 +3043,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
53879 if (unlikely(error))
53880 goto out;
53881
53882- error = do_last(nd, &path, file, op, &opened, pathname);
53883+ error = do_last(nd, &path, NULL, file, op, &opened, pathname);
53884 while (unlikely(error > 0)) { /* trailing symlink */
53885 struct path link = path;
53886 void *cookie;
53887@@ -2962,7 +3061,7 @@ static struct file *path_openat(int dfd, struct filename *pathname,
53888 error = follow_link(&link, nd, &cookie);
53889 if (unlikely(error))
53890 break;
53891- error = do_last(nd, &path, file, op, &opened, pathname);
53892+ error = do_last(nd, &path, &link, file, op, &opened, pathname);
53893 put_link(nd, &link, cookie);
53894 }
53895 out:
53896@@ -3062,8 +3161,12 @@ struct dentry *kern_path_create(int dfd, const char *pathname,
53897 goto unlock;
53898
53899 error = -EEXIST;
53900- if (dentry->d_inode)
53901+ if (dentry->d_inode) {
53902+ if (!gr_acl_handle_hidden_file(dentry, nd.path.mnt)) {
53903+ error = -ENOENT;
53904+ }
53905 goto fail;
53906+ }
53907 /*
53908 * Special case - lookup gave negative, but... we had foo/bar/
53909 * From the vfs_mknod() POV we just have a negative dentry -
53910@@ -3115,6 +3218,20 @@ struct dentry *user_path_create(int dfd, const char __user *pathname,
53911 }
53912 EXPORT_SYMBOL(user_path_create);
53913
53914+static struct dentry *user_path_create_with_name(int dfd, const char __user *pathname, struct path *path, struct filename **to, unsigned int lookup_flags)
53915+{
53916+ struct filename *tmp = getname(pathname);
53917+ struct dentry *res;
53918+ if (IS_ERR(tmp))
53919+ return ERR_CAST(tmp);
53920+ res = kern_path_create(dfd, tmp->name, path, lookup_flags);
53921+ if (IS_ERR(res))
53922+ putname(tmp);
53923+ else
53924+ *to = tmp;
53925+ return res;
53926+}
53927+
53928 int vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t dev)
53929 {
53930 int error = may_create(dir, dentry);
53931@@ -3177,6 +3294,17 @@ retry:
53932
53933 if (!IS_POSIXACL(path.dentry->d_inode))
53934 mode &= ~current_umask();
53935+
53936+ if (gr_handle_chroot_mknod(dentry, path.mnt, mode)) {
53937+ error = -EPERM;
53938+ goto out;
53939+ }
53940+
53941+ if (!gr_acl_handle_mknod(dentry, path.dentry, path.mnt, mode)) {
53942+ error = -EACCES;
53943+ goto out;
53944+ }
53945+
53946 error = security_path_mknod(&path, dentry, mode, dev);
53947 if (error)
53948 goto out;
53949@@ -3193,6 +3321,8 @@ retry:
53950 break;
53951 }
53952 out:
53953+ if (!error)
53954+ gr_handle_create(dentry, path.mnt);
53955 done_path_create(&path, dentry);
53956 if (retry_estale(error, lookup_flags)) {
53957 lookup_flags |= LOOKUP_REVAL;
53958@@ -3245,9 +3375,16 @@ retry:
53959
53960 if (!IS_POSIXACL(path.dentry->d_inode))
53961 mode &= ~current_umask();
53962+ if (!gr_acl_handle_mkdir(dentry, path.dentry, path.mnt)) {
53963+ error = -EACCES;
53964+ goto out;
53965+ }
53966 error = security_path_mkdir(&path, dentry, mode);
53967 if (!error)
53968 error = vfs_mkdir(path.dentry->d_inode, dentry, mode);
53969+ if (!error)
53970+ gr_handle_create(dentry, path.mnt);
53971+out:
53972 done_path_create(&path, dentry);
53973 if (retry_estale(error, lookup_flags)) {
53974 lookup_flags |= LOOKUP_REVAL;
53975@@ -3328,6 +3465,8 @@ static long do_rmdir(int dfd, const char __user *pathname)
53976 struct filename *name;
53977 struct dentry *dentry;
53978 struct nameidata nd;
53979+ ino_t saved_ino = 0;
53980+ dev_t saved_dev = 0;
53981 unsigned int lookup_flags = 0;
53982 retry:
53983 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
53984@@ -3360,10 +3499,21 @@ retry:
53985 error = -ENOENT;
53986 goto exit3;
53987 }
53988+
53989+ saved_ino = dentry->d_inode->i_ino;
53990+ saved_dev = gr_get_dev_from_dentry(dentry);
53991+
53992+ if (!gr_acl_handle_rmdir(dentry, nd.path.mnt)) {
53993+ error = -EACCES;
53994+ goto exit3;
53995+ }
53996+
53997 error = security_path_rmdir(&nd.path, dentry);
53998 if (error)
53999 goto exit3;
54000 error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
54001+ if (!error && (saved_dev || saved_ino))
54002+ gr_handle_delete(saved_ino, saved_dev);
54003 exit3:
54004 dput(dentry);
54005 exit2:
54006@@ -3429,6 +3579,8 @@ static long do_unlinkat(int dfd, const char __user *pathname)
54007 struct dentry *dentry;
54008 struct nameidata nd;
54009 struct inode *inode = NULL;
54010+ ino_t saved_ino = 0;
54011+ dev_t saved_dev = 0;
54012 unsigned int lookup_flags = 0;
54013 retry:
54014 name = user_path_parent(dfd, pathname, &nd, lookup_flags);
54015@@ -3455,10 +3607,22 @@ retry:
54016 if (!inode)
54017 goto slashes;
54018 ihold(inode);
54019+
54020+ if (inode->i_nlink <= 1) {
54021+ saved_ino = inode->i_ino;
54022+ saved_dev = gr_get_dev_from_dentry(dentry);
54023+ }
54024+ if (!gr_acl_handle_unlink(dentry, nd.path.mnt)) {
54025+ error = -EACCES;
54026+ goto exit2;
54027+ }
54028+
54029 error = security_path_unlink(&nd.path, dentry);
54030 if (error)
54031 goto exit2;
54032 error = vfs_unlink(nd.path.dentry->d_inode, dentry);
54033+ if (!error && (saved_ino || saved_dev))
54034+ gr_handle_delete(saved_ino, saved_dev);
54035 exit2:
54036 dput(dentry);
54037 }
54038@@ -3536,9 +3700,17 @@ retry:
54039 if (IS_ERR(dentry))
54040 goto out_putname;
54041
54042+ if (!gr_acl_handle_symlink(dentry, path.dentry, path.mnt, from)) {
54043+ error = -EACCES;
54044+ goto out;
54045+ }
54046+
54047 error = security_path_symlink(&path, dentry, from->name);
54048 if (!error)
54049 error = vfs_symlink(path.dentry->d_inode, dentry, from->name);
54050+ if (!error)
54051+ gr_handle_create(dentry, path.mnt);
54052+out:
54053 done_path_create(&path, dentry);
54054 if (retry_estale(error, lookup_flags)) {
54055 lookup_flags |= LOOKUP_REVAL;
54056@@ -3612,6 +3784,7 @@ SYSCALL_DEFINE5(linkat, int, olddfd, const char __user *, oldname,
54057 {
54058 struct dentry *new_dentry;
54059 struct path old_path, new_path;
54060+ struct filename *to = NULL;
54061 int how = 0;
54062 int error;
54063
54064@@ -3635,7 +3808,7 @@ retry:
54065 if (error)
54066 return error;
54067
54068- new_dentry = user_path_create(newdfd, newname, &new_path,
54069+ new_dentry = user_path_create_with_name(newdfd, newname, &new_path, &to,
54070 (how & LOOKUP_REVAL));
54071 error = PTR_ERR(new_dentry);
54072 if (IS_ERR(new_dentry))
54073@@ -3647,11 +3820,28 @@ retry:
54074 error = may_linkat(&old_path);
54075 if (unlikely(error))
54076 goto out_dput;
54077+
54078+ if (gr_handle_hardlink(old_path.dentry, old_path.mnt,
54079+ old_path.dentry->d_inode,
54080+ old_path.dentry->d_inode->i_mode, to)) {
54081+ error = -EACCES;
54082+ goto out_dput;
54083+ }
54084+
54085+ if (!gr_acl_handle_link(new_dentry, new_path.dentry, new_path.mnt,
54086+ old_path.dentry, old_path.mnt, to)) {
54087+ error = -EACCES;
54088+ goto out_dput;
54089+ }
54090+
54091 error = security_path_link(old_path.dentry, &new_path, new_dentry);
54092 if (error)
54093 goto out_dput;
54094 error = vfs_link(old_path.dentry, new_path.dentry->d_inode, new_dentry);
54095+ if (!error)
54096+ gr_handle_create(new_dentry, new_path.mnt);
54097 out_dput:
54098+ putname(to);
54099 done_path_create(&new_path, new_dentry);
54100 if (retry_estale(error, how)) {
54101 how |= LOOKUP_REVAL;
54102@@ -3897,12 +4087,21 @@ retry:
54103 if (new_dentry == trap)
54104 goto exit5;
54105
54106+ error = gr_acl_handle_rename(new_dentry, new_dir, newnd.path.mnt,
54107+ old_dentry, old_dir->d_inode, oldnd.path.mnt,
54108+ to);
54109+ if (error)
54110+ goto exit5;
54111+
54112 error = security_path_rename(&oldnd.path, old_dentry,
54113 &newnd.path, new_dentry);
54114 if (error)
54115 goto exit5;
54116 error = vfs_rename(old_dir->d_inode, old_dentry,
54117 new_dir->d_inode, new_dentry);
54118+ if (!error)
54119+ gr_handle_rename(old_dir->d_inode, new_dir->d_inode, old_dentry,
54120+ new_dentry, oldnd.path.mnt, new_dentry->d_inode ? 1 : 0);
54121 exit5:
54122 dput(new_dentry);
54123 exit4:
54124@@ -3934,6 +4133,8 @@ SYSCALL_DEFINE2(rename, const char __user *, oldname, const char __user *, newna
54125
54126 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const char *link)
54127 {
54128+ char tmpbuf[64];
54129+ const char *newlink;
54130 int len;
54131
54132 len = PTR_ERR(link);
54133@@ -3943,7 +4144,14 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen, const c
54134 len = strlen(link);
54135 if (len > (unsigned) buflen)
54136 len = buflen;
54137- if (copy_to_user(buffer, link, len))
54138+
54139+ if (len < sizeof(tmpbuf)) {
54140+ memcpy(tmpbuf, link, len);
54141+ newlink = tmpbuf;
54142+ } else
54143+ newlink = link;
54144+
54145+ if (copy_to_user(buffer, newlink, len))
54146 len = -EFAULT;
54147 out:
54148 return len;
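
The fs/namei.c diff threads three ideas through the lookup and open paths: gr_* policy hooks that can veto an operation (-EACCES, -ENOENT, -EPERM) before the VFS proceeds, a reordering of generic_permission() so the narrower CAP_DAC_READ_SEARCH is consulted before the broader CAP_DAC_OVERRIDE (with _nolog variants keeping routine denials out of the audit log), and a bounce buffer in vfs_readlink(). That last change stages short link bodies in a 64-byte stack buffer before copy_to_user(), plausibly a PAX_USERCOPY accommodation, since link text often lives inline in filesystem-private slab objects that a hardened usercopy check would reject as a copy source. A userspace sketch of the bounce-buffer shape, with copy_out() standing in for copy_to_user():

    #include <stdio.h>
    #include <string.h>

    static void copy_out(char *dst, const char *src, size_t len)
    {
            memcpy(dst, src, len);
    }

    /* Mirror of the patched vfs_readlink(): short payloads are staged
     * in a fixed stack buffer before leaving the subsystem. */
    static size_t read_link(char *user_buf, size_t buflen, const char *link)
    {
            char tmpbuf[64];
            const char *src = link;
            size_t len = strlen(link);

            if (len > buflen)
                    len = buflen;
            if (len < sizeof(tmpbuf)) {
                    memcpy(tmpbuf, link, len);
                    src = tmpbuf;
            }
            copy_out(user_buf, src, len);
            return len;
    }

    int main(void)
    {
            char out[128];
            size_t n = read_link(out, sizeof(out) - 1, "/target/path");
            out[n] = '\0';
            printf("%s\n", out);
            return 0;
    }
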
54149diff --git a/fs/namespace.c b/fs/namespace.c
54150index 7b1ca9b..6faeccf 100644
54151--- a/fs/namespace.c
54152+++ b/fs/namespace.c
54153@@ -1265,6 +1265,9 @@ static int do_umount(struct mount *mnt, int flags)
54154 if (!(sb->s_flags & MS_RDONLY))
54155 retval = do_remount_sb(sb, MS_RDONLY, NULL, 0);
54156 up_write(&sb->s_umount);
54157+
54158+ gr_log_remount(mnt->mnt_devname, retval);
54159+
54160 return retval;
54161 }
54162
54163@@ -1283,6 +1286,9 @@ static int do_umount(struct mount *mnt, int flags)
54164 }
54165 br_write_unlock(&vfsmount_lock);
54166 namespace_unlock();
54167+
54168+ gr_log_unmount(mnt->mnt_devname, retval);
54169+
54170 return retval;
54171 }
54172
54173@@ -1302,7 +1308,7 @@ static inline bool may_mount(void)
54174 * unixes. Our API is identical to OSF/1 to avoid making a mess of AMD
54175 */
54176
54177-SYSCALL_DEFINE2(umount, char __user *, name, int, flags)
54178+SYSCALL_DEFINE2(umount, const char __user *, name, int, flags)
54179 {
54180 struct path path;
54181 struct mount *mnt;
54182@@ -1342,7 +1348,7 @@ out:
54183 /*
54184 * The 2.0 compatible umount. No flags.
54185 */
54186-SYSCALL_DEFINE1(oldumount, char __user *, name)
54187+SYSCALL_DEFINE1(oldumount, const char __user *, name)
54188 {
54189 return sys_umount(name, 0);
54190 }
54191@@ -2313,6 +2319,16 @@ long do_mount(const char *dev_name, const char *dir_name,
54192 MS_NOATIME | MS_NODIRATIME | MS_RELATIME| MS_KERNMOUNT |
54193 MS_STRICTATIME);
54194
54195+ if (gr_handle_rofs_mount(path.dentry, path.mnt, mnt_flags)) {
54196+ retval = -EPERM;
54197+ goto dput_out;
54198+ }
54199+
54200+ if (gr_handle_chroot_mount(path.dentry, path.mnt, dev_name)) {
54201+ retval = -EPERM;
54202+ goto dput_out;
54203+ }
54204+
54205 if (flags & MS_REMOUNT)
54206 retval = do_remount(&path, flags & ~MS_REMOUNT, mnt_flags,
54207 data_page);
54208@@ -2327,6 +2343,9 @@ long do_mount(const char *dev_name, const char *dir_name,
54209 dev_name, data_page);
54210 dput_out:
54211 path_put(&path);
54212+
54213+ gr_log_mount(dev_name, dir_name, retval);
54214+
54215 return retval;
54216 }
54217
54218@@ -2500,8 +2519,8 @@ struct dentry *mount_subtree(struct vfsmount *mnt, const char *name)
54219 }
54220 EXPORT_SYMBOL(mount_subtree);
54221
54222-SYSCALL_DEFINE5(mount, char __user *, dev_name, char __user *, dir_name,
54223- char __user *, type, unsigned long, flags, void __user *, data)
54224+SYSCALL_DEFINE5(mount, const char __user *, dev_name, const char __user *, dir_name,
54225+ const char __user *, type, unsigned long, flags, void __user *, data)
54226 {
54227 int ret;
54228 char *kernel_type;
54229@@ -2614,6 +2633,11 @@ SYSCALL_DEFINE2(pivot_root, const char __user *, new_root,
54230 if (error)
54231 goto out2;
54232
54233+ if (gr_handle_chroot_pivot()) {
54234+ error = -EPERM;
54235+ goto out2;
54236+ }
54237+
54238 get_fs_root(current->fs, &root);
54239 old_mp = lock_mount(&old);
54240 error = PTR_ERR(old_mp);
54241@@ -2864,7 +2888,7 @@ static int mntns_install(struct nsproxy *nsproxy, void *ns)
54242 !nsown_capable(CAP_SYS_ADMIN))
54243 return -EPERM;
54244
54245- if (fs->users != 1)
54246+ if (atomic_read(&fs->users) != 1)
54247 return -EINVAL;
54248
54249 get_mnt_ns(mnt_ns);
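
do_mount() and do_umount() pick up the hook shape used throughout this patch: grsecurity predicates that can veto the operation up front (gr_handle_rofs_mount(), gr_handle_chroot_mount(), each returning -EPERM) plus gr_log_* audit calls on the way out, while the syscall prototypes are constified since the name strings are only read. A generic userspace sketch of such a veto chain; the hook names here are invented for the example:

    #include <errno.h>
    #include <stdio.h>

    typedef int (*policy_hook)(const char *path, int mode);

    static int deny_hidden(const char *path, int mode)
    {
            (void)mode;
            return path[0] == '.' ? -ENOENT : 0;
    }

    static int deny_write(const char *path, int mode)
    {
            (void)path;
            return (mode & 2) ? -EACCES : 0;
    }

    static int vfs_op(const char *path, int mode)
    {
            static const policy_hook hooks[] = { deny_hidden, deny_write };

            for (unsigned i = 0; i < sizeof(hooks) / sizeof(hooks[0]); i++) {
                    int err = hooks[i](path, mode);
                    if (err)
                            return err;     /* first veto wins */
            }
            return 0;                       /* proceed with the real work */
    }

    int main(void)
    {
            printf("%d %d\n", vfs_op("file", 0), vfs_op(".hidden", 0));
            return 0;
    }
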
54250diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
54251index cff089a..4c3d57a 100644
54252--- a/fs/nfs/callback.c
54253+++ b/fs/nfs/callback.c
54254@@ -211,7 +211,6 @@ static int nfs_callback_start_svc(int minorversion, struct rpc_xprt *xprt,
54255 struct svc_rqst *rqstp;
54256 int (*callback_svc)(void *vrqstp);
54257 struct nfs_callback_data *cb_info = &nfs_callback_info[minorversion];
54258- char svc_name[12];
54259 int ret;
54260
54261 nfs_callback_bc_serv(minorversion, xprt, serv);
54262@@ -235,10 +234,9 @@ static int nfs_callback_start_svc(int minorversion, struct rpc_xprt *xprt,
54263
54264 svc_sock_update_bufs(serv);
54265
54266- sprintf(svc_name, "nfsv4.%u-svc", minorversion);
54267 cb_info->serv = serv;
54268 cb_info->rqst = rqstp;
54269- cb_info->task = kthread_run(callback_svc, cb_info->rqst, svc_name);
54270+ cb_info->task = kthread_run(callback_svc, cb_info->rqst, "nfsv4.%u-svc", minorversion);
54271 if (IS_ERR(cb_info->task)) {
54272 ret = PTR_ERR(cb_info->task);
54273 svc_exit_thread(cb_info->rqst);
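
Besides the format-string hygiene, dropping svc_name[12] removes a latent overflow: "nfsv4.%u-svc" can expand to 20 characters plus the NUL for a worst-case 32-bit minorversion, which sprintf() would write past a 12-byte buffer. Letting kthread_run() do the formatting sidesteps the sizing question entirely. The arithmetic, checked with snprintf's length-reporting mode:

    #include <stdio.h>

    int main(void)
    {
            unsigned int minorversion = 4294967295u;   /* worst case for %u */

            /* snprintf with size 0 only reports the length needed. */
            int need = snprintf(NULL, 0, "nfsv4.%u-svc", minorversion);
            printf("needs %d bytes plus NUL; char svc_name[12] is too small\n",
                   need);
            return 0;
    }
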
54274diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
54275index a35582c..ebbdcd5 100644
54276--- a/fs/nfs/callback_xdr.c
54277+++ b/fs/nfs/callback_xdr.c
54278@@ -51,7 +51,7 @@ struct callback_op {
54279 callback_decode_arg_t decode_args;
54280 callback_encode_res_t encode_res;
54281 long res_maxsize;
54282-};
54283+} __do_const;
54284
54285 static struct callback_op callback_ops[];
54286
54287diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
54288index c1c7a9d..7afa0b8 100644
54289--- a/fs/nfs/inode.c
54290+++ b/fs/nfs/inode.c
54291@@ -1043,16 +1043,16 @@ static int nfs_size_need_update(const struct inode *inode, const struct nfs_fatt
54292 return nfs_size_to_loff_t(fattr->size) > i_size_read(inode);
54293 }
54294
54295-static atomic_long_t nfs_attr_generation_counter;
54296+static atomic_long_unchecked_t nfs_attr_generation_counter;
54297
54298 static unsigned long nfs_read_attr_generation_counter(void)
54299 {
54300- return atomic_long_read(&nfs_attr_generation_counter);
54301+ return atomic_long_read_unchecked(&nfs_attr_generation_counter);
54302 }
54303
54304 unsigned long nfs_inc_attr_generation_counter(void)
54305 {
54306- return atomic_long_inc_return(&nfs_attr_generation_counter);
54307+ return atomic_long_inc_return_unchecked(&nfs_attr_generation_counter);
54308 }
54309
54310 void nfs_fattr_init(struct nfs_fattr *fattr)
54311diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
54312index 2c37442..9b9538b 100644
54313--- a/fs/nfs/nfs4state.c
54314+++ b/fs/nfs/nfs4state.c
54315@@ -1193,7 +1193,7 @@ void nfs4_schedule_state_manager(struct nfs_client *clp)
54316 snprintf(buf, sizeof(buf), "%s-manager",
54317 rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_ADDR));
54318 rcu_read_unlock();
54319- task = kthread_run(nfs4_run_state_manager, clp, buf);
54320+ task = kthread_run(nfs4_run_state_manager, clp, "%s", buf);
54321 if (IS_ERR(task)) {
54322 printk(KERN_ERR "%s: kthread_run: %ld\n",
54323 __func__, PTR_ERR(task));
54324diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
54325index 27d74a2..c4c2a73 100644
54326--- a/fs/nfsd/nfs4proc.c
54327+++ b/fs/nfsd/nfs4proc.c
54328@@ -1126,7 +1126,7 @@ struct nfsd4_operation {
54329 nfsd4op_rsize op_rsize_bop;
54330 stateid_getter op_get_currentstateid;
54331 stateid_setter op_set_currentstateid;
54332-};
54333+} __do_const;
54334
54335 static struct nfsd4_operation nfsd4_ops[];
54336
54337diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
54338index 582321a..0224663 100644
54339--- a/fs/nfsd/nfs4xdr.c
54340+++ b/fs/nfsd/nfs4xdr.c
54341@@ -1458,7 +1458,7 @@ nfsd4_decode_notsupp(struct nfsd4_compoundargs *argp, void *p)
54342
54343 typedef __be32(*nfsd4_dec)(struct nfsd4_compoundargs *argp, void *);
54344
54345-static nfsd4_dec nfsd4_dec_ops[] = {
54346+static const nfsd4_dec nfsd4_dec_ops[] = {
54347 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
54348 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
54349 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
54350@@ -1498,7 +1498,7 @@ static nfsd4_dec nfsd4_dec_ops[] = {
54351 [OP_RELEASE_LOCKOWNER] = (nfsd4_dec)nfsd4_decode_release_lockowner,
54352 };
54353
54354-static nfsd4_dec nfsd41_dec_ops[] = {
54355+static const nfsd4_dec nfsd41_dec_ops[] = {
54356 [OP_ACCESS] = (nfsd4_dec)nfsd4_decode_access,
54357 [OP_CLOSE] = (nfsd4_dec)nfsd4_decode_close,
54358 [OP_COMMIT] = (nfsd4_dec)nfsd4_decode_commit,
54359@@ -1560,7 +1560,7 @@ static nfsd4_dec nfsd41_dec_ops[] = {
54360 };
54361
54362 struct nfsd4_minorversion_ops {
54363- nfsd4_dec *decoders;
54364+ const nfsd4_dec *decoders;
54365 int nops;
54366 };
54367
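
The __do_const annotations on struct callback_op and struct nfsd4_operation, together with the const added to the nfsd4_dec_ops tables above, are the constify plugin at work: function-pointer dispatch tables are a classic target for an attacker holding an arbitrary-write primitive, and placing them in read-only memory removes that target. The same idea in plain C, where const placement lets the table live in .rodata:

    #include <stdio.h>

    static int op_inc(int x) { return x + 1; }
    static int op_dbl(int x) { return x * 2; }

    /* A const dispatch table: the pointers themselves are immutable,
     * so a stray write cannot retarget the handlers. */
    static int (* const ops[])(int) = { op_inc, op_dbl };

    int main(void)
    {
            printf("%d %d\n", ops[0](41), ops[1](21));
            return 0;
    }
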
54368diff --git a/fs/nfsd/nfscache.c b/fs/nfsd/nfscache.c
54369index e76244e..9fe8f2f1 100644
54370--- a/fs/nfsd/nfscache.c
54371+++ b/fs/nfsd/nfscache.c
54372@@ -526,14 +526,17 @@ nfsd_cache_update(struct svc_rqst *rqstp, int cachetype, __be32 *statp)
54373 {
54374 struct svc_cacherep *rp = rqstp->rq_cacherep;
54375 struct kvec *resv = &rqstp->rq_res.head[0], *cachv;
54376- int len;
54377+ long len;
54378 size_t bufsize = 0;
54379
54380 if (!rp)
54381 return;
54382
54383- len = resv->iov_len - ((char*)statp - (char*)resv->iov_base);
54384- len >>= 2;
54385+ if (statp) {
54386+ len = (char*)statp - (char*)resv->iov_base;
54387+ len = resv->iov_len - len;
54388+ len >>= 2;
54389+ }
54390
54391 /* Don't cache excessive amounts of data and XDR failures */
54392 if (!statp || len > (256 >> 2)) {
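
nfsd_cache_update() previously computed len from statp before testing it: when statp is NULL, the subtraction (char *)statp - (char *)resv->iov_base is undefined pointer arithmetic even though the later !statp test discards the value. The patched version derives len only from a non-NULL statp and widens it to long so the intermediate difference cannot truncate. The guarded-difference shape in userspace C:

    #include <stddef.h>
    #include <stdio.h>

    /* Only subtract when the pointer is known to lie inside the buffer. */
    static long reply_words(const char *base, size_t iov_len, const char *statp)
    {
            long len = 0;

            if (statp) {
                    ptrdiff_t off = statp - base;  /* defined: same object */
                    len = (long)iov_len - off;
                    len >>= 2;                     /* bytes to 32-bit XDR words */
            }
            return len;
    }

    int main(void)
    {
            char buf[64];

            printf("%ld\n", reply_words(buf, sizeof(buf), buf + 16)); /* 12 */
            printf("%ld\n", reply_words(buf, sizeof(buf), NULL));     /* 0  */
            return 0;
    }
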
54393diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
54394index 84ce601..633d226 100644
54395--- a/fs/nfsd/vfs.c
54396+++ b/fs/nfsd/vfs.c
54397@@ -939,7 +939,7 @@ nfsd_vfs_read(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
54398 } else {
54399 oldfs = get_fs();
54400 set_fs(KERNEL_DS);
54401- host_err = vfs_readv(file, (struct iovec __user *)vec, vlen, &offset);
54402+ host_err = vfs_readv(file, (struct iovec __force_user *)vec, vlen, &offset);
54403 set_fs(oldfs);
54404 }
54405
54406@@ -1026,7 +1026,7 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
54407
54408 /* Write the data. */
54409 oldfs = get_fs(); set_fs(KERNEL_DS);
54410- host_err = vfs_writev(file, (struct iovec __user *)vec, vlen, &pos);
54411+ host_err = vfs_writev(file, (struct iovec __force_user *)vec, vlen, &pos);
54412 set_fs(oldfs);
54413 if (host_err < 0)
54414 goto out_nfserr;
54415@@ -1572,7 +1572,7 @@ nfsd_readlink(struct svc_rqst *rqstp, struct svc_fh *fhp, char *buf, int *lenp)
54416 */
54417
54418 oldfs = get_fs(); set_fs(KERNEL_DS);
54419- host_err = inode->i_op->readlink(path.dentry, (char __user *)buf, *lenp);
54420+ host_err = inode->i_op->readlink(path.dentry, (char __force_user *)buf, *lenp);
54421 set_fs(oldfs);
54422
54423 if (host_err < 0)
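
The nfsd read, write and readlink paths run their copies under set_fs(KERNEL_DS), so the pointers they pass as __user are really kernel addresses for the duration. __force_user is the sparse-style annotation saying the cast is deliberate, which lets a hardened build keep flagging genuinely suspicious kernel/user mixes elsewhere. A compile-time sketch of how such address-space annotations degrade to no-ops outside the checker; the macro definitions below are an approximation, not the kernel's exact ones:

    /* Sparse-style annotations, stripped to nothing in a normal build;
     * __force acknowledges an intentional address-space cast. */
    #ifdef __CHECKER__
    # define __user   __attribute__((noderef, address_space(1)))
    # define __force  __attribute__((force))
    #else
    # define __user
    # define __force
    #endif

    #include <stdio.h>
    #include <string.h>

    static size_t copy_to_user(void __user *dst, const void *src, size_t n)
    {
            memcpy((void __force *)dst, src, n);  /* pretend kernel copy */
            return 0;
    }

    int main(void)
    {
            char buf[8];

            /* buf is a kernel pointer; the cast mirrors the patch's
             * (struct iovec __force_user *) usage. */
            copy_to_user((void __force __user *)buf, "hi", 3);
            printf("%s\n", buf);
            return 0;
    }
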
54424diff --git a/fs/nls/nls_base.c b/fs/nls/nls_base.c
54425index fea6bd5..8ee9d81 100644
54426--- a/fs/nls/nls_base.c
54427+++ b/fs/nls/nls_base.c
54428@@ -234,20 +234,22 @@ EXPORT_SYMBOL(utf16s_to_utf8s);
54429
54430 int register_nls(struct nls_table * nls)
54431 {
54432- struct nls_table ** tmp = &tables;
54433+ struct nls_table *tmp = tables;
54434
54435 if (nls->next)
54436 return -EBUSY;
54437
54438 spin_lock(&nls_lock);
54439- while (*tmp) {
54440- if (nls == *tmp) {
54441+ while (tmp) {
54442+ if (nls == tmp) {
54443 spin_unlock(&nls_lock);
54444 return -EBUSY;
54445 }
54446- tmp = &(*tmp)->next;
54447+ tmp = tmp->next;
54448 }
54449- nls->next = tables;
54450+ pax_open_kernel();
54451+ *(struct nls_table **)&nls->next = tables;
54452+ pax_close_kernel();
54453 tables = nls;
54454 spin_unlock(&nls_lock);
54455 return 0;
54456@@ -255,12 +257,14 @@ int register_nls(struct nls_table * nls)
54457
54458 int unregister_nls(struct nls_table * nls)
54459 {
54460- struct nls_table ** tmp = &tables;
54461+ struct nls_table * const * tmp = &tables;
54462
54463 spin_lock(&nls_lock);
54464 while (*tmp) {
54465 if (nls == *tmp) {
54466- *tmp = nls->next;
54467+ pax_open_kernel();
54468+ *(struct nls_table **)tmp = nls->next;
54469+ pax_close_kernel();
54470 spin_unlock(&nls_lock);
54471 return 0;
54472 }
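
With the constify plugin, nls_table (like the ops structures above) lives in read-only memory, so the registration list can no longer be spliced with ordinary stores: pax_open_kernel()/pax_close_kernel() briefly lift write protection around the one sanctioned write, and the cast through (struct nls_table **) strips compiler-visible constness for exactly that store. register_nls() is also rewritten to walk the list by value, since it no longer takes addresses of next pointers it cannot write directly. A loose userspace analogue of the open/close window using mprotect (this emulates the idea only, not the kernel mechanism, and assumes 4 KiB pages):

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    static char *ro_page;

    static void open_kernel(void)  { mprotect(ro_page, 4096, PROT_READ | PROT_WRITE); }
    static void close_kernel(void) { mprotect(ro_page, 4096, PROT_READ); }

    int main(void)
    {
            ro_page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                           MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            if (ro_page == MAP_FAILED)
                    return 1;
            strcpy(ro_page, "old");
            mprotect(ro_page, 4096, PROT_READ);   /* now effectively const */

            open_kernel();                        /* sanctioned write window */
            strcpy(ro_page, "new");
            close_kernel();

            puts(ro_page);                        /* prints "new" */
            return 0;
    }
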
54473diff --git a/fs/nls/nls_euc-jp.c b/fs/nls/nls_euc-jp.c
54474index 7424929..35f6be5 100644
54475--- a/fs/nls/nls_euc-jp.c
54476+++ b/fs/nls/nls_euc-jp.c
54477@@ -561,8 +561,10 @@ static int __init init_nls_euc_jp(void)
54478 p_nls = load_nls("cp932");
54479
54480 if (p_nls) {
54481- table.charset2upper = p_nls->charset2upper;
54482- table.charset2lower = p_nls->charset2lower;
54483+ pax_open_kernel();
54484+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
54485+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
54486+ pax_close_kernel();
54487 return register_nls(&table);
54488 }
54489
54490diff --git a/fs/nls/nls_koi8-ru.c b/fs/nls/nls_koi8-ru.c
54491index e7bc1d7..06bd4bb 100644
54492--- a/fs/nls/nls_koi8-ru.c
54493+++ b/fs/nls/nls_koi8-ru.c
54494@@ -63,8 +63,10 @@ static int __init init_nls_koi8_ru(void)
54495 p_nls = load_nls("koi8-u");
54496
54497 if (p_nls) {
54498- table.charset2upper = p_nls->charset2upper;
54499- table.charset2lower = p_nls->charset2lower;
54500+ pax_open_kernel();
54501+ *(const unsigned char **)&table.charset2upper = p_nls->charset2upper;
54502+ *(const unsigned char **)&table.charset2lower = p_nls->charset2lower;
54503+ pax_close_kernel();
54504 return register_nls(&table);
54505 }
54506
54507diff --git a/fs/notify/fanotify/fanotify_user.c b/fs/notify/fanotify/fanotify_user.c
54508index 6c80083..a1e6299 100644
54509--- a/fs/notify/fanotify/fanotify_user.c
54510+++ b/fs/notify/fanotify/fanotify_user.c
54511@@ -122,6 +122,7 @@ static int fill_event_metadata(struct fsnotify_group *group,
54512 metadata->event_len = FAN_EVENT_METADATA_LEN;
54513 metadata->metadata_len = FAN_EVENT_METADATA_LEN;
54514 metadata->vers = FANOTIFY_METADATA_VERSION;
54515+ metadata->reserved = 0;
54516 metadata->mask = event->mask & FAN_ALL_OUTGOING_EVENTS;
54517 metadata->pid = pid_vnr(event->tgid);
54518 if (unlikely(event->mask & FAN_Q_OVERFLOW))
54519@@ -252,8 +253,8 @@ static ssize_t copy_event_to_user(struct fsnotify_group *group,
54520
54521 fd = fanotify_event_metadata.fd;
54522 ret = -EFAULT;
54523- if (copy_to_user(buf, &fanotify_event_metadata,
54524- fanotify_event_metadata.event_len))
54525+ if (fanotify_event_metadata.event_len > sizeof fanotify_event_metadata ||
54526+ copy_to_user(buf, &fanotify_event_metadata, fanotify_event_metadata.event_len))
54527 goto out_close_fd;
54528
54529 ret = prepare_for_access_response(group, event, fd);
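
Two userspace-boundary fixes land in fanotify here: fill_event_metadata() now zeroes metadata->reserved, so uninitialized kernel stack bytes in that field can no longer leak out through the event record, and copy_event_to_user() refuses event_len values larger than the on-stack struct, so copy_to_user() can never read past fanotify_event_metadata even if the length field were inconsistent. A sketch of the zero-then-clamp idiom; the struct layout is illustrative:

    #include <stdint.h>
    #include <string.h>

    struct event_metadata {
            uint32_t event_len;
            uint8_t  vers;
            uint8_t  reserved;      /* must be zeroed before export */
            uint16_t metadata_len;
            int32_t  fd;
    };

    /* Export one event record; returns 0 on success, -1 on a bad length. */
    static int copy_event(char *dst, const struct event_metadata *ev)
    {
            if (ev->event_len > sizeof(*ev))  /* never read past the struct */
                    return -1;
            memcpy(dst, ev, ev->event_len);   /* stands in for copy_to_user() */
            return 0;
    }

    int main(void)
    {
            struct event_metadata ev = {0};   /* .reserved == 0, no leak */
            char buf[sizeof(ev)];

            ev.event_len = sizeof(ev);
            return copy_event(buf, &ev);
    }
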
54530diff --git a/fs/notify/notification.c b/fs/notify/notification.c
54531index 7b51b05..5ea5ef6 100644
54532--- a/fs/notify/notification.c
54533+++ b/fs/notify/notification.c
54534@@ -57,7 +57,7 @@ static struct kmem_cache *fsnotify_event_holder_cachep;
54535 * get set to 0 so it will never get 'freed'
54536 */
54537 static struct fsnotify_event *q_overflow_event;
54538-static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
54539+static atomic_unchecked_t fsnotify_sync_cookie = ATOMIC_INIT(0);
54540
54541 /**
54542 * fsnotify_get_cookie - return a unique cookie for use in synchronizing events.
54543@@ -65,7 +65,7 @@ static atomic_t fsnotify_sync_cookie = ATOMIC_INIT(0);
54544 */
54545 u32 fsnotify_get_cookie(void)
54546 {
54547- return atomic_inc_return(&fsnotify_sync_cookie);
54548+ return atomic_inc_return_unchecked(&fsnotify_sync_cookie);
54549 }
54550 EXPORT_SYMBOL_GPL(fsnotify_get_cookie);
54551
54552diff --git a/fs/ntfs/dir.c b/fs/ntfs/dir.c
54553index aa411c3..c260a84 100644
54554--- a/fs/ntfs/dir.c
54555+++ b/fs/ntfs/dir.c
54556@@ -1329,7 +1329,7 @@ find_next_index_buffer:
54557 ia = (INDEX_ALLOCATION*)(kaddr + (ia_pos & ~PAGE_CACHE_MASK &
54558 ~(s64)(ndir->itype.index.block_size - 1)));
54559 /* Bounds checks. */
54560- if (unlikely((u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
54561+ if (unlikely(!kaddr || (u8*)ia < kaddr || (u8*)ia > kaddr + PAGE_CACHE_SIZE)) {
54562 ntfs_error(sb, "Out of bounds check failed. Corrupt directory "
54563 "inode 0x%lx or driver bug.", vdir->i_ino);
54564 goto err_out;
54565diff --git a/fs/ntfs/file.c b/fs/ntfs/file.c
54566index c5670b8..01a3656 100644
54567--- a/fs/ntfs/file.c
54568+++ b/fs/ntfs/file.c
54569@@ -2241,6 +2241,6 @@ const struct inode_operations ntfs_file_inode_ops = {
54570 #endif /* NTFS_RW */
54571 };
54572
54573-const struct file_operations ntfs_empty_file_ops = {};
54574+const struct file_operations ntfs_empty_file_ops __read_only;
54575
54576-const struct inode_operations ntfs_empty_inode_ops = {};
54577+const struct inode_operations ntfs_empty_inode_ops __read_only;
54578diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
54579index aebeacd..0dcdd26 100644
54580--- a/fs/ocfs2/localalloc.c
54581+++ b/fs/ocfs2/localalloc.c
54582@@ -1278,7 +1278,7 @@ static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
54583 goto bail;
54584 }
54585
54586- atomic_inc(&osb->alloc_stats.moves);
54587+ atomic_inc_unchecked(&osb->alloc_stats.moves);
54588
54589 bail:
54590 if (handle)
54591diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
54592index d355e6e..578d905 100644
54593--- a/fs/ocfs2/ocfs2.h
54594+++ b/fs/ocfs2/ocfs2.h
54595@@ -235,11 +235,11 @@ enum ocfs2_vol_state
54596
54597 struct ocfs2_alloc_stats
54598 {
54599- atomic_t moves;
54600- atomic_t local_data;
54601- atomic_t bitmap_data;
54602- atomic_t bg_allocs;
54603- atomic_t bg_extends;
54604+ atomic_unchecked_t moves;
54605+ atomic_unchecked_t local_data;
54606+ atomic_unchecked_t bitmap_data;
54607+ atomic_unchecked_t bg_allocs;
54608+ atomic_unchecked_t bg_extends;
54609 };
54610
54611 enum ocfs2_local_alloc_state
54612diff --git a/fs/ocfs2/suballoc.c b/fs/ocfs2/suballoc.c
54613index b7e74b5..19c6536 100644
54614--- a/fs/ocfs2/suballoc.c
54615+++ b/fs/ocfs2/suballoc.c
54616@@ -872,7 +872,7 @@ static int ocfs2_reserve_suballoc_bits(struct ocfs2_super *osb,
54617 mlog_errno(status);
54618 goto bail;
54619 }
54620- atomic_inc(&osb->alloc_stats.bg_extends);
54621+ atomic_inc_unchecked(&osb->alloc_stats.bg_extends);
54622
54623 /* You should never ask for this much metadata */
54624 BUG_ON(bits_wanted >
54625@@ -2007,7 +2007,7 @@ int ocfs2_claim_metadata(handle_t *handle,
54626 mlog_errno(status);
54627 goto bail;
54628 }
54629- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
54630+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
54631
54632 *suballoc_loc = res.sr_bg_blkno;
54633 *suballoc_bit_start = res.sr_bit_offset;
54634@@ -2171,7 +2171,7 @@ int ocfs2_claim_new_inode_at_loc(handle_t *handle,
54635 trace_ocfs2_claim_new_inode_at_loc((unsigned long long)di_blkno,
54636 res->sr_bits);
54637
54638- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
54639+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
54640
54641 BUG_ON(res->sr_bits != 1);
54642
54643@@ -2213,7 +2213,7 @@ int ocfs2_claim_new_inode(handle_t *handle,
54644 mlog_errno(status);
54645 goto bail;
54646 }
54647- atomic_inc(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
54648+ atomic_inc_unchecked(&OCFS2_SB(ac->ac_inode->i_sb)->alloc_stats.bg_allocs);
54649
54650 BUG_ON(res.sr_bits != 1);
54651
54652@@ -2317,7 +2317,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
54653 cluster_start,
54654 num_clusters);
54655 if (!status)
54656- atomic_inc(&osb->alloc_stats.local_data);
54657+ atomic_inc_unchecked(&osb->alloc_stats.local_data);
54658 } else {
54659 if (min_clusters > (osb->bitmap_cpg - 1)) {
54660 /* The only paths asking for contiguousness
54661@@ -2343,7 +2343,7 @@ int __ocfs2_claim_clusters(handle_t *handle,
54662 ocfs2_desc_bitmap_to_cluster_off(ac->ac_inode,
54663 res.sr_bg_blkno,
54664 res.sr_bit_offset);
54665- atomic_inc(&osb->alloc_stats.bitmap_data);
54666+ atomic_inc_unchecked(&osb->alloc_stats.bitmap_data);
54667 *num_clusters = res.sr_bits;
54668 }
54669 }
54670diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
54671index 01b8516..579c4df 100644
54672--- a/fs/ocfs2/super.c
54673+++ b/fs/ocfs2/super.c
54674@@ -301,11 +301,11 @@ static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
54675 "%10s => GlobalAllocs: %d LocalAllocs: %d "
54676 "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
54677 "Stats",
54678- atomic_read(&osb->alloc_stats.bitmap_data),
54679- atomic_read(&osb->alloc_stats.local_data),
54680- atomic_read(&osb->alloc_stats.bg_allocs),
54681- atomic_read(&osb->alloc_stats.moves),
54682- atomic_read(&osb->alloc_stats.bg_extends));
54683+ atomic_read_unchecked(&osb->alloc_stats.bitmap_data),
54684+ atomic_read_unchecked(&osb->alloc_stats.local_data),
54685+ atomic_read_unchecked(&osb->alloc_stats.bg_allocs),
54686+ atomic_read_unchecked(&osb->alloc_stats.moves),
54687+ atomic_read_unchecked(&osb->alloc_stats.bg_extends));
54688
54689 out += snprintf(buf + out, len - out,
54690 "%10s => State: %u Descriptor: %llu Size: %u bits "
54691@@ -2122,11 +2122,11 @@ static int ocfs2_initialize_super(struct super_block *sb,
54692 spin_lock_init(&osb->osb_xattr_lock);
54693 ocfs2_init_steal_slots(osb);
54694
54695- atomic_set(&osb->alloc_stats.moves, 0);
54696- atomic_set(&osb->alloc_stats.local_data, 0);
54697- atomic_set(&osb->alloc_stats.bitmap_data, 0);
54698- atomic_set(&osb->alloc_stats.bg_allocs, 0);
54699- atomic_set(&osb->alloc_stats.bg_extends, 0);
54700+ atomic_set_unchecked(&osb->alloc_stats.moves, 0);
54701+ atomic_set_unchecked(&osb->alloc_stats.local_data, 0);
54702+ atomic_set_unchecked(&osb->alloc_stats.bitmap_data, 0);
54703+ atomic_set_unchecked(&osb->alloc_stats.bg_allocs, 0);
54704+ atomic_set_unchecked(&osb->alloc_stats.bg_extends, 0);
54705
54706 /* Copy the blockcheck stats from the superblock probe */
54707 osb->osb_ecc_stats = *stats;
54708diff --git a/fs/open.c b/fs/open.c
54709index 8c74100..4239c48 100644
54710--- a/fs/open.c
54711+++ b/fs/open.c
54712@@ -32,6 +32,8 @@
54713 #include <linux/dnotify.h>
54714 #include <linux/compat.h>
54715
54716+#define CREATE_TRACE_POINTS
54717+#include <trace/events/fs.h>
54718 #include "internal.h"
54719
54720 int do_truncate(struct dentry *dentry, loff_t length, unsigned int time_attrs,
54721@@ -102,6 +104,8 @@ long vfs_truncate(struct path *path, loff_t length)
54722 error = locks_verify_truncate(inode, NULL, length);
54723 if (!error)
54724 error = security_path_truncate(path);
54725+ if (!error && !gr_acl_handle_truncate(path->dentry, path->mnt))
54726+ error = -EACCES;
54727 if (!error)
54728 error = do_truncate(path->dentry, length, 0, NULL);
54729
54730@@ -186,6 +190,8 @@ static long do_sys_ftruncate(unsigned int fd, loff_t length, int small)
54731 error = locks_verify_truncate(inode, f.file, length);
54732 if (!error)
54733 error = security_path_truncate(&f.file->f_path);
54734+ if (!error && !gr_acl_handle_truncate(f.file->f_path.dentry, f.file->f_path.mnt))
54735+ error = -EACCES;
54736 if (!error)
54737 error = do_truncate(dentry, length, ATTR_MTIME|ATTR_CTIME, f.file);
54738 sb_end_write(inode->i_sb);
54739@@ -360,6 +366,9 @@ retry:
54740 if (__mnt_is_readonly(path.mnt))
54741 res = -EROFS;
54742
54743+ if (!res && !gr_acl_handle_access(path.dentry, path.mnt, mode))
54744+ res = -EACCES;
54745+
54746 out_path_release:
54747 path_put(&path);
54748 if (retry_estale(res, lookup_flags)) {
54749@@ -391,6 +400,8 @@ retry:
54750 if (error)
54751 goto dput_and_out;
54752
54753+ gr_log_chdir(path.dentry, path.mnt);
54754+
54755 set_fs_pwd(current->fs, &path);
54756
54757 dput_and_out:
54758@@ -420,6 +431,13 @@ SYSCALL_DEFINE1(fchdir, unsigned int, fd)
54759 goto out_putf;
54760
54761 error = inode_permission(inode, MAY_EXEC | MAY_CHDIR);
54762+
54763+ if (!error && !gr_chroot_fchdir(f.file->f_path.dentry, f.file->f_path.mnt))
54764+ error = -EPERM;
54765+
54766+ if (!error)
54767+ gr_log_chdir(f.file->f_path.dentry, f.file->f_path.mnt);
54768+
54769 if (!error)
54770 set_fs_pwd(current->fs, &f.file->f_path);
54771 out_putf:
54772@@ -449,7 +467,13 @@ retry:
54773 if (error)
54774 goto dput_and_out;
54775
54776+ if (gr_handle_chroot_chroot(path.dentry, path.mnt))
54777+ goto dput_and_out;
54778+
54779 set_fs_root(current->fs, &path);
54780+
54781+ gr_handle_chroot_chdir(&path);
54782+
54783 error = 0;
54784 dput_and_out:
54785 path_put(&path);
54786@@ -471,6 +495,16 @@ static int chmod_common(struct path *path, umode_t mode)
54787 if (error)
54788 return error;
54789 mutex_lock(&inode->i_mutex);
54790+
54791+ if (!gr_acl_handle_chmod(path->dentry, path->mnt, &mode)) {
54792+ error = -EACCES;
54793+ goto out_unlock;
54794+ }
54795+ if (gr_handle_chroot_chmod(path->dentry, path->mnt, mode)) {
54796+ error = -EACCES;
54797+ goto out_unlock;
54798+ }
54799+
54800 error = security_path_chmod(path, mode);
54801 if (error)
54802 goto out_unlock;
54803@@ -531,6 +565,9 @@ static int chown_common(struct path *path, uid_t user, gid_t group)
54804 uid = make_kuid(current_user_ns(), user);
54805 gid = make_kgid(current_user_ns(), group);
54806
54807+ if (!gr_acl_handle_chown(path->dentry, path->mnt))
54808+ return -EACCES;
54809+
54810 newattrs.ia_valid = ATTR_CTIME;
54811 if (user != (uid_t) -1) {
54812 if (!uid_valid(uid))
54813@@ -946,6 +983,7 @@ long do_sys_open(int dfd, const char __user *filename, int flags, umode_t mode)
54814 } else {
54815 fsnotify_open(f);
54816 fd_install(fd, f);
54817+ trace_do_sys_open(tmp->name, flags, mode);
54818 }
54819 }
54820 putname(tmp);
54821diff --git a/fs/pipe.c b/fs/pipe.c
54822index d2c45e1..009fe1c 100644
54823--- a/fs/pipe.c
54824+++ b/fs/pipe.c
54825@@ -56,7 +56,7 @@ unsigned int pipe_min_size = PAGE_SIZE;
54826
54827 static void pipe_lock_nested(struct pipe_inode_info *pipe, int subclass)
54828 {
54829- if (pipe->files)
54830+ if (atomic_read(&pipe->files))
54831 mutex_lock_nested(&pipe->mutex, subclass);
54832 }
54833
54834@@ -71,7 +71,7 @@ EXPORT_SYMBOL(pipe_lock);
54835
54836 void pipe_unlock(struct pipe_inode_info *pipe)
54837 {
54838- if (pipe->files)
54839+ if (atomic_read(&pipe->files))
54840 mutex_unlock(&pipe->mutex);
54841 }
54842 EXPORT_SYMBOL(pipe_unlock);
54843@@ -449,9 +449,9 @@ redo:
54844 }
54845 if (bufs) /* More to do? */
54846 continue;
54847- if (!pipe->writers)
54848+ if (!atomic_read(&pipe->writers))
54849 break;
54850- if (!pipe->waiting_writers) {
54851+ if (!atomic_read(&pipe->waiting_writers)) {
54852 /* syscall merging: Usually we must not sleep
54853 * if O_NONBLOCK is set, or if we got some data.
54854 * But if a writer sleeps in kernel space, then
54855@@ -513,7 +513,7 @@ pipe_write(struct kiocb *iocb, const struct iovec *_iov,
54856 ret = 0;
54857 __pipe_lock(pipe);
54858
54859- if (!pipe->readers) {
54860+ if (!atomic_read(&pipe->readers)) {
54861 send_sig(SIGPIPE, current, 0);
54862 ret = -EPIPE;
54863 goto out;
54864@@ -562,7 +562,7 @@ redo1:
54865 for (;;) {
54866 int bufs;
54867
54868- if (!pipe->readers) {
54869+ if (!atomic_read(&pipe->readers)) {
54870 send_sig(SIGPIPE, current, 0);
54871 if (!ret)
54872 ret = -EPIPE;
54873@@ -653,9 +653,9 @@ redo2:
54874 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
54875 do_wakeup = 0;
54876 }
54877- pipe->waiting_writers++;
54878+ atomic_inc(&pipe->waiting_writers);
54879 pipe_wait(pipe);
54880- pipe->waiting_writers--;
54881+ atomic_dec(&pipe->waiting_writers);
54882 }
54883 out:
54884 __pipe_unlock(pipe);
54885@@ -709,7 +709,7 @@ pipe_poll(struct file *filp, poll_table *wait)
54886 mask = 0;
54887 if (filp->f_mode & FMODE_READ) {
54888 mask = (nrbufs > 0) ? POLLIN | POLLRDNORM : 0;
54889- if (!pipe->writers && filp->f_version != pipe->w_counter)
54890+ if (!atomic_read(&pipe->writers) && filp->f_version != pipe->w_counter)
54891 mask |= POLLHUP;
54892 }
54893
54894@@ -719,7 +719,7 @@ pipe_poll(struct file *filp, poll_table *wait)
54895 * Most Unices do not set POLLERR for FIFOs but on Linux they
54896 * behave exactly like pipes for poll().
54897 */
54898- if (!pipe->readers)
54899+ if (!atomic_read(&pipe->readers))
54900 mask |= POLLERR;
54901 }
54902
54903@@ -734,17 +734,17 @@ pipe_release(struct inode *inode, struct file *file)
54904
54905 __pipe_lock(pipe);
54906 if (file->f_mode & FMODE_READ)
54907- pipe->readers--;
54908+ atomic_dec(&pipe->readers);
54909 if (file->f_mode & FMODE_WRITE)
54910- pipe->writers--;
54911+ atomic_dec(&pipe->writers);
54912
54913- if (pipe->readers || pipe->writers) {
54914+ if (atomic_read(&pipe->readers) || atomic_read(&pipe->writers)) {
54915 wake_up_interruptible_sync_poll(&pipe->wait, POLLIN | POLLOUT | POLLRDNORM | POLLWRNORM | POLLERR | POLLHUP);
54916 kill_fasync(&pipe->fasync_readers, SIGIO, POLL_IN);
54917 kill_fasync(&pipe->fasync_writers, SIGIO, POLL_OUT);
54918 }
54919 spin_lock(&inode->i_lock);
54920- if (!--pipe->files) {
54921+ if (atomic_dec_and_test(&pipe->files)) {
54922 inode->i_pipe = NULL;
54923 kill = 1;
54924 }
54925@@ -811,7 +811,7 @@ void free_pipe_info(struct pipe_inode_info *pipe)
54926 kfree(pipe);
54927 }
54928
54929-static struct vfsmount *pipe_mnt __read_mostly;
54930+struct vfsmount *pipe_mnt __read_mostly;
54931
54932 /*
54933 * pipefs_dname() is called from d_path().
54934@@ -841,8 +841,9 @@ static struct inode * get_pipe_inode(void)
54935 goto fail_iput;
54936
54937 inode->i_pipe = pipe;
54938- pipe->files = 2;
54939- pipe->readers = pipe->writers = 1;
54940+ atomic_set(&pipe->files, 2);
54941+ atomic_set(&pipe->readers, 1);
54942+ atomic_set(&pipe->writers, 1);
54943 inode->i_fop = &pipefifo_fops;
54944
54945 /*
54946@@ -1022,17 +1023,17 @@ static int fifo_open(struct inode *inode, struct file *filp)
54947 spin_lock(&inode->i_lock);
54948 if (inode->i_pipe) {
54949 pipe = inode->i_pipe;
54950- pipe->files++;
54951+ atomic_inc(&pipe->files);
54952 spin_unlock(&inode->i_lock);
54953 } else {
54954 spin_unlock(&inode->i_lock);
54955 pipe = alloc_pipe_info();
54956 if (!pipe)
54957 return -ENOMEM;
54958- pipe->files = 1;
54959+ atomic_set(&pipe->files, 1);
54960 spin_lock(&inode->i_lock);
54961 if (unlikely(inode->i_pipe)) {
54962- inode->i_pipe->files++;
54963+ atomic_inc(&inode->i_pipe->files);
54964 spin_unlock(&inode->i_lock);
54965 free_pipe_info(pipe);
54966 pipe = inode->i_pipe;
54967@@ -1057,10 +1058,10 @@ static int fifo_open(struct inode *inode, struct file *filp)
54968 * opened, even when there is no process writing the FIFO.
54969 */
54970 pipe->r_counter++;
54971- if (pipe->readers++ == 0)
54972+ if (atomic_inc_return(&pipe->readers) == 1)
54973 wake_up_partner(pipe);
54974
54975- if (!is_pipe && !pipe->writers) {
54976+ if (!is_pipe && !atomic_read(&pipe->writers)) {
54977 if ((filp->f_flags & O_NONBLOCK)) {
54978 /* suppress POLLHUP until we have
54979 * seen a writer */
54980@@ -1079,14 +1080,14 @@ static int fifo_open(struct inode *inode, struct file *filp)
54981 * errno=ENXIO when there is no process reading the FIFO.
54982 */
54983 ret = -ENXIO;
54984- if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !pipe->readers)
54985+ if (!is_pipe && (filp->f_flags & O_NONBLOCK) && !atomic_read(&pipe->readers))
54986 goto err;
54987
54988 pipe->w_counter++;
54989- if (!pipe->writers++)
54990+ if (atomic_inc_return(&pipe->writers) == 1)
54991 wake_up_partner(pipe);
54992
54993- if (!is_pipe && !pipe->readers) {
54994+ if (!is_pipe && !atomic_read(&pipe->readers)) {
54995 if (wait_for_partner(pipe, &pipe->r_counter))
54996 goto err_wr;
54997 }
54998@@ -1100,11 +1101,11 @@ static int fifo_open(struct inode *inode, struct file *filp)
54999 * the process can at least talk to itself.
55000 */
55001
55002- pipe->readers++;
55003- pipe->writers++;
55004+ atomic_inc(&pipe->readers);
55005+ atomic_inc(&pipe->writers);
55006 pipe->r_counter++;
55007 pipe->w_counter++;
55008- if (pipe->readers == 1 || pipe->writers == 1)
55009+ if (atomic_read(&pipe->readers) == 1 || atomic_read(&pipe->writers) == 1)
55010 wake_up_partner(pipe);
55011 break;
55012
55013@@ -1118,20 +1119,20 @@ static int fifo_open(struct inode *inode, struct file *filp)
55014 return 0;
55015
55016 err_rd:
55017- if (!--pipe->readers)
55018+ if (atomic_dec_and_test(&pipe->readers))
55019 wake_up_interruptible(&pipe->wait);
55020 ret = -ERESTARTSYS;
55021 goto err;
55022
55023 err_wr:
55024- if (!--pipe->writers)
55025+ if (atomic_dec_and_test(&pipe->writers))
55026 wake_up_interruptible(&pipe->wait);
55027 ret = -ERESTARTSYS;
55028 goto err;
55029
55030 err:
55031 spin_lock(&inode->i_lock);
55032- if (!--pipe->files) {
55033+ if (atomic_dec_and_test(&pipe->files)) {
55034 inode->i_pipe = NULL;
55035 kill = 1;
55036 }
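
The pipe conversion turns files, readers, writers and waiting_writers into atomic_t wholesale. Several of the converted reads, pipe_poll() for instance, run without the pipe mutex held, so the change appears to make those lockless inspections well-defined while also bringing the counters under REFCOUNT overflow checking. Note the careful translation of the increment idioms: "if (!pipe->writers++)" becomes atomic_inc_return() == 1, and "if (!--pipe->files)" becomes atomic_dec_and_test(). A compact check of the equivalences with C11 atomics:

    #include <assert.h>
    #include <stdatomic.h>

    int main(void)
    {
            atomic_int n = 0;

            /* "if (!n++)" asks: was I the first incrementer?
             * Kernel spelling: atomic_inc_return(&n) == 1 (new value 1). */
            assert(atomic_fetch_add(&n, 1) == 0);

            /* "if (!--n)" asks: was I the last decrementer?
             * Kernel spelling: atomic_dec_and_test(&n). */
            assert(atomic_fetch_sub(&n, 1) - 1 == 0);

            return 0;
    }
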
55037diff --git a/fs/proc/Kconfig b/fs/proc/Kconfig
55038index 15af622..0e9f4467 100644
55039--- a/fs/proc/Kconfig
55040+++ b/fs/proc/Kconfig
55041@@ -30,12 +30,12 @@ config PROC_FS
55042
55043 config PROC_KCORE
55044 bool "/proc/kcore support" if !ARM
55045- depends on PROC_FS && MMU
55046+ depends on PROC_FS && MMU && !GRKERNSEC_PROC_ADD
55047
55048 config PROC_VMCORE
55049 bool "/proc/vmcore support"
55050- depends on PROC_FS && CRASH_DUMP
55051- default y
55052+ depends on PROC_FS && CRASH_DUMP && !GRKERNSEC
55053+ default n
55054 help
55055 Exports the dump image of crashed kernel in ELF format.
55056
55057@@ -59,8 +59,8 @@ config PROC_SYSCTL
55058 limited in memory.
55059
55060 config PROC_PAGE_MONITOR
55061- default y
55062- depends on PROC_FS && MMU
55063+ default n
55064+ depends on PROC_FS && MMU && !GRKERNSEC
55065 bool "Enable /proc page monitoring" if EXPERT
55066 help
55067 Various /proc files exist to monitor process memory utilization:
55068diff --git a/fs/proc/array.c b/fs/proc/array.c
55069index cbd0f1b..adec3f0 100644
55070--- a/fs/proc/array.c
55071+++ b/fs/proc/array.c
55072@@ -60,6 +60,7 @@
55073 #include <linux/tty.h>
55074 #include <linux/string.h>
55075 #include <linux/mman.h>
55076+#include <linux/grsecurity.h>
55077 #include <linux/proc_fs.h>
55078 #include <linux/ioport.h>
55079 #include <linux/uaccess.h>
55080@@ -363,6 +364,21 @@ static void task_cpus_allowed(struct seq_file *m, struct task_struct *task)
55081 seq_putc(m, '\n');
55082 }
55083
55084+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
55085+static inline void task_pax(struct seq_file *m, struct task_struct *p)
55086+{
55087+ if (p->mm)
55088+ seq_printf(m, "PaX:\t%c%c%c%c%c\n",
55089+ p->mm->pax_flags & MF_PAX_PAGEEXEC ? 'P' : 'p',
55090+ p->mm->pax_flags & MF_PAX_EMUTRAMP ? 'E' : 'e',
55091+ p->mm->pax_flags & MF_PAX_MPROTECT ? 'M' : 'm',
55092+ p->mm->pax_flags & MF_PAX_RANDMMAP ? 'R' : 'r',
55093+ p->mm->pax_flags & MF_PAX_SEGMEXEC ? 'S' : 's');
55094+ else
55095+ seq_printf(m, "PaX:\t-----\n");
55096+}
55097+#endif
55098+
55099 int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
55100 struct pid *pid, struct task_struct *task)
55101 {
55102@@ -381,9 +397,24 @@ int proc_pid_status(struct seq_file *m, struct pid_namespace *ns,
55103 task_cpus_allowed(m, task);
55104 cpuset_task_status_allowed(m, task);
55105 task_context_switch_counts(m, task);
55106+
55107+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
55108+ task_pax(m, task);
55109+#endif
55110+
55111+#if defined(CONFIG_GRKERNSEC) && !defined(CONFIG_GRKERNSEC_NO_RBAC)
55112+ task_grsec_rbac(m, task);
55113+#endif
55114+
55115 return 0;
55116 }
55117
55118+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55119+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
55120+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
55121+ _mm->pax_flags & MF_PAX_SEGMEXEC))
55122+#endif
55123+
55124 static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
55125 struct pid *pid, struct task_struct *task, int whole)
55126 {
55127@@ -405,6 +436,13 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
55128 char tcomm[sizeof(task->comm)];
55129 unsigned long flags;
55130
55131+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55132+ if (current->exec_id != m->exec_id) {
55133+ gr_log_badprocpid("stat");
55134+ return 0;
55135+ }
55136+#endif
55137+
55138 state = *get_task_state(task);
55139 vsize = eip = esp = 0;
55140 permitted = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
55141@@ -476,6 +514,19 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
55142 gtime = task_gtime(task);
55143 }
55144
55145+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55146+ if (PAX_RAND_FLAGS(mm)) {
55147+ eip = 0;
55148+ esp = 0;
55149+ wchan = 0;
55150+ }
55151+#endif
55152+#ifdef CONFIG_GRKERNSEC_HIDESYM
55153+ wchan = 0;
55154+	eip = 0;
55155+	esp = 0;
55156+#endif
55157+
55158 /* scale priority and nice values from timeslices to -20..20 */
55159 /* to make it look like a "normal" Unix priority/nice value */
55160 priority = task_prio(task);
55161@@ -512,9 +563,15 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
55162 seq_put_decimal_ull(m, ' ', vsize);
55163 seq_put_decimal_ull(m, ' ', mm ? get_mm_rss(mm) : 0);
55164 seq_put_decimal_ull(m, ' ', rsslim);
55165+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55166+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->start_code : 1) : 0));
55167+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 1 : (mm ? (permitted ? mm->end_code : 1) : 0));
55168+ seq_put_decimal_ull(m, ' ', PAX_RAND_FLAGS(mm) ? 0 : ((permitted && mm) ? mm->start_stack : 0));
55169+#else
55170 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->start_code : 1) : 0);
55171 seq_put_decimal_ull(m, ' ', mm ? (permitted ? mm->end_code : 1) : 0);
55172 seq_put_decimal_ull(m, ' ', (permitted && mm) ? mm->start_stack : 0);
55173+#endif
55174 seq_put_decimal_ull(m, ' ', esp);
55175 seq_put_decimal_ull(m, ' ', eip);
55176 /* The signal information here is obsolete.
55177@@ -536,7 +593,11 @@ static int do_task_stat(struct seq_file *m, struct pid_namespace *ns,
55178 seq_put_decimal_ull(m, ' ', cputime_to_clock_t(gtime));
55179 seq_put_decimal_ll(m, ' ', cputime_to_clock_t(cgtime));
55180
55181- if (mm && permitted) {
55182+ if (mm && permitted
55183+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55184+ && !PAX_RAND_FLAGS(mm)
55185+#endif
55186+ ) {
55187 seq_put_decimal_ull(m, ' ', mm->start_data);
55188 seq_put_decimal_ull(m, ' ', mm->end_data);
55189 seq_put_decimal_ull(m, ' ', mm->start_brk);
55190@@ -574,8 +635,15 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
55191 struct pid *pid, struct task_struct *task)
55192 {
55193 unsigned long size = 0, resident = 0, shared = 0, text = 0, data = 0;
55194- struct mm_struct *mm = get_task_mm(task);
55195+ struct mm_struct *mm;
55196
55197+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55198+ if (current->exec_id != m->exec_id) {
55199+ gr_log_badprocpid("statm");
55200+ return 0;
55201+ }
55202+#endif
55203+ mm = get_task_mm(task);
55204 if (mm) {
55205 size = task_statm(mm, &shared, &text, &data, &resident);
55206 mmput(mm);
55207@@ -598,6 +666,13 @@ int proc_pid_statm(struct seq_file *m, struct pid_namespace *ns,
55208 return 0;
55209 }
55210
55211+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
55212+int proc_pid_ipaddr(struct task_struct *task, char *buffer)
55213+{
55214+ return sprintf(buffer, "%pI4\n", &task->signal->curr_ip);
55215+}
55216+#endif
55217+
55218 #ifdef CONFIG_CHECKPOINT_RESTORE
55219 static struct pid *
55220 get_children_pid(struct inode *inode, struct pid *pid_prev, loff_t pos)
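
%pI4 is the kernel's printf extension for dotted-quad IPv4 output, so the whole ipaddr file is a single line such as "192.168.1.10". Reading it from userspace (sketch; the entry exists only under CONFIG_GRKERNSEC_PROC_IPADDR and is created S_IRUSR, so expect failures when unprivileged):

#include <stdio.h>

/* Print the source IP grsecurity recorded for a task, if readable. */
static int print_task_ip(int pid)
{
	char path[64], ip[64];
	FILE *f;

	snprintf(path, sizeof(path), "/proc/%d/ipaddr", pid);
	f = fopen(path, "r");
	if (!f)
		return -1;	/* no grsec, no permission, or no such pid */
	if (fgets(ip, sizeof(ip), f))
		printf("pid %d: %s", pid, ip);	/* ip keeps its newline */
	fclose(f);
	return 0;
}
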
55221diff --git a/fs/proc/base.c b/fs/proc/base.c
55222index c3834da..b402b2b 100644
55223--- a/fs/proc/base.c
55224+++ b/fs/proc/base.c
55225@@ -113,6 +113,14 @@ struct pid_entry {
55226 union proc_op op;
55227 };
55228
55229+struct getdents_callback {
55230+ struct linux_dirent __user * current_dir;
55231+ struct linux_dirent __user * previous;
55232+ struct file * file;
55233+ int count;
55234+ int error;
55235+};
55236+
55237 #define NOD(NAME, MODE, IOP, FOP, OP) { \
55238 .name = (NAME), \
55239 .len = sizeof(NAME) - 1, \
55240@@ -210,6 +218,9 @@ static int proc_pid_cmdline(struct task_struct *task, char * buffer)
55241 if (!mm->arg_end)
55242 goto out_mm; /* Shh! No looking before we're done */
55243
55244+ if (gr_acl_handle_procpidmem(task))
55245+ goto out_mm;
55246+
55247 len = mm->arg_end - mm->arg_start;
55248
55249 if (len > PAGE_SIZE)
55250@@ -237,12 +248,28 @@ out:
55251 return res;
55252 }
55253
55254+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55255+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
55256+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
55257+ _mm->pax_flags & MF_PAX_SEGMEXEC))
55258+#endif
55259+
55260 static int proc_pid_auxv(struct task_struct *task, char *buffer)
55261 {
55262 struct mm_struct *mm = mm_access(task, PTRACE_MODE_READ);
55263 int res = PTR_ERR(mm);
55264 if (mm && !IS_ERR(mm)) {
55265 unsigned int nwords = 0;
55266+
55267+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55268+ /* allow if we're currently ptracing this task */
55269+ if (PAX_RAND_FLAGS(mm) &&
55270+ (!(task->ptrace & PT_PTRACED) || (task->parent != current))) {
55271+ mmput(mm);
55272+ return 0;
55273+ }
55274+#endif
55275+
55276 do {
55277 nwords += 2;
55278 } while (mm->saved_auxv[nwords - 2] != 0); /* AT_NULL */
55279@@ -256,7 +283,7 @@ static int proc_pid_auxv(struct task_struct *task, char *buffer)
55280 }
55281
55282
55283-#ifdef CONFIG_KALLSYMS
55284+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
55285 /*
55286 * Provides a wchan file via kallsyms in a proper one-value-per-file format.
55287 * Returns the resolved symbol. If that fails, simply return the address.
55288@@ -295,7 +322,7 @@ static void unlock_trace(struct task_struct *task)
55289 mutex_unlock(&task->signal->cred_guard_mutex);
55290 }
55291
55292-#ifdef CONFIG_STACKTRACE
55293+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
55294
55295 #define MAX_STACK_TRACE_DEPTH 64
55296
55297@@ -518,7 +545,7 @@ static int proc_pid_limits(struct task_struct *task, char *buffer)
55298 return count;
55299 }
55300
55301-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
55302+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
55303 static int proc_pid_syscall(struct task_struct *task, char *buffer)
55304 {
55305 long nr;
55306@@ -547,7 +574,7 @@ static int proc_pid_syscall(struct task_struct *task, char *buffer)
55307 /************************************************************************/
55308
55309 /* permission checks */
55310-static int proc_fd_access_allowed(struct inode *inode)
55311+static int proc_fd_access_allowed(struct inode *inode, unsigned int log)
55312 {
55313 struct task_struct *task;
55314 int allowed = 0;
55315@@ -557,7 +584,10 @@ static int proc_fd_access_allowed(struct inode *inode)
55316 */
55317 task = get_proc_task(inode);
55318 if (task) {
55319- allowed = ptrace_may_access(task, PTRACE_MODE_READ);
55320+ if (log)
55321+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
55322+ else
55323+ allowed = ptrace_may_access(task, PTRACE_MODE_READ | PTRACE_MODE_NOAUDIT);
55324 put_task_struct(task);
55325 }
55326 return allowed;
55327@@ -588,10 +618,35 @@ static bool has_pid_permissions(struct pid_namespace *pid,
55328 struct task_struct *task,
55329 int hide_pid_min)
55330 {
55331+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
55332+ return false;
55333+
55334+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55335+ rcu_read_lock();
55336+ {
55337+ const struct cred *tmpcred = current_cred();
55338+ const struct cred *cred = __task_cred(task);
55339+
55340+ if (uid_eq(tmpcred->uid, GLOBAL_ROOT_UID) || uid_eq(tmpcred->uid, cred->uid)
55341+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
55342+ || in_group_p(grsec_proc_gid)
55343+#endif
55344+ ) {
55345+ rcu_read_unlock();
55346+ return true;
55347+ }
55348+ }
55349+ rcu_read_unlock();
55350+
55351+ if (!pid->hide_pid)
55352+ return false;
55353+#endif
55354+
55355 if (pid->hide_pid < hide_pid_min)
55356 return true;
55357 if (in_group_p(pid->pid_gid))
55358 return true;
55359+
55360 return ptrace_may_access(task, PTRACE_MODE_READ);
55361 }
55362
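
The net effect of the block above: chrooted and hidden tasks are never visible; root, the task's owner and (with GRKERNSEC_PROC_USERGROUP) members of grsec_proc_gid always are; any other caller is denied outright unless the mount's hide_pid is set, in which case the stock hide_pid/ptrace checks below still get their say. Restated as a decision table (illustrative C, not the patch itself):

/* Condensed decision order of the patched has_pid_permissions(). */
enum pid_visibility { PID_HIDDEN, PID_VISIBLE, PID_FALLTHROUGH };

static enum pid_visibility grsec_pid_visibility(int chrooted, int hidden,
						int is_root, int is_owner,
						int in_proc_group,
						int hide_pid)
{
	if (chrooted || hidden)
		return PID_HIDDEN;
	if (is_root || is_owner || in_proc_group)
		return PID_VISIBLE;
	if (!hide_pid)
		return PID_HIDDEN;	/* grsec restriction applies anyway */
	return PID_FALLTHROUGH;		/* defer to stock hide_pid checks */
}
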
55363@@ -609,7 +664,11 @@ static int proc_pid_permission(struct inode *inode, int mask)
55364 put_task_struct(task);
55365
55366 if (!has_perms) {
55367+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55368+ {
55369+#else
55370 if (pid->hide_pid == 2) {
55371+#endif
55372 /*
55373 * Let's make getdents(), stat(), and open()
55374 * consistent with each other. If a process
55375@@ -707,6 +766,11 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
55376 if (!task)
55377 return -ESRCH;
55378
55379+ if (gr_acl_handle_procpidmem(task)) {
55380+ put_task_struct(task);
55381+ return -EPERM;
55382+ }
55383+
55384 mm = mm_access(task, mode);
55385 put_task_struct(task);
55386
55387@@ -722,6 +786,10 @@ static int __mem_open(struct inode *inode, struct file *file, unsigned int mode)
55388
55389 file->private_data = mm;
55390
55391+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55392+ file->f_version = current->exec_id;
55393+#endif
55394+
55395 return 0;
55396 }
55397
55398@@ -743,6 +811,17 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
55399 ssize_t copied;
55400 char *page;
55401
55402+#ifdef CONFIG_GRKERNSEC
55403+ if (write)
55404+ return -EPERM;
55405+#endif
55406+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55407+ if (file->f_version != current->exec_id) {
55408+ gr_log_badprocpid("mem");
55409+ return 0;
55410+ }
55411+#endif
55412+
55413 if (!mm)
55414 return 0;
55415
55416@@ -755,7 +834,7 @@ static ssize_t mem_rw(struct file *file, char __user *buf,
55417 goto free;
55418
55419 while (count > 0) {
55420- int this_len = min_t(int, count, PAGE_SIZE);
55421+ ssize_t this_len = min_t(ssize_t, count, PAGE_SIZE);
55422
55423 if (write && copy_from_user(page, buf, this_len)) {
55424 copied = -EFAULT;
55425@@ -847,6 +926,13 @@ static ssize_t environ_read(struct file *file, char __user *buf,
55426 if (!mm)
55427 return 0;
55428
55429+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
55430+ if (file->f_version != current->exec_id) {
55431+ gr_log_badprocpid("environ");
55432+ return 0;
55433+ }
55434+#endif
55435+
55436 page = (char *)__get_free_page(GFP_TEMPORARY);
55437 if (!page)
55438 return -ENOMEM;
55439@@ -856,7 +942,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
55440 goto free;
55441 while (count > 0) {
55442 size_t this_len, max_len;
55443- int retval;
55444+ ssize_t retval;
55445
55446 if (src >= (mm->env_end - mm->env_start))
55447 break;
55448@@ -1461,7 +1547,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
55449 int error = -EACCES;
55450
55451 /* Are we allowed to snoop on the tasks file descriptors? */
55452- if (!proc_fd_access_allowed(inode))
55453+ if (!proc_fd_access_allowed(inode, 0))
55454 goto out;
55455
55456 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
55457@@ -1505,8 +1591,18 @@ static int proc_pid_readlink(struct dentry * dentry, char __user * buffer, int b
55458 struct path path;
55459
55460 /* Are we allowed to snoop on the tasks file descriptors? */
55461- if (!proc_fd_access_allowed(inode))
55462- goto out;
55463+ /* logging this is needed for learning mode on chromium to work
55464+ properly, but we don't want to flood the logs from 'ps', which
55465+ does a readlink on /proc/<pid>/fd/2 for every task in the listing,
55466+ nor do we want 'ps' to learn CAP_SYS_PTRACE, as it isn't necessary
55467+ for its basic functionality */
55468+ if (dentry->d_name.name[0] == '2' && dentry->d_name.name[1] == '\0') {
55469+ if (!proc_fd_access_allowed(inode, 0))
55470+ goto out;
55471+ } else {
55472+ if (!proc_fd_access_allowed(inode, 1))
55473+ goto out;
55474+ }
55475
55476 error = PROC_I(inode)->op.proc_get_link(dentry, &path);
55477 if (error)
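
The special case exists because 'ps' resolves /proc/<pid>/fd/2 for every task it lists, so that one name is checked quietly (PTRACE_MODE_NOAUDIT via the 0 argument) while all other fd links keep the audited path that feeds RBAC learning. What 'ps' effectively does per task (userspace sketch):

#include <stdio.h>
#include <unistd.h>

/* Resolve where a task's stderr points, the readlink 'ps' issues. */
static void show_stderr_target(int pid)
{
	char path[64], target[256];
	ssize_t n;

	snprintf(path, sizeof(path), "/proc/%d/fd/2", pid);
	n = readlink(path, target, sizeof(target) - 1);
	if (n < 0)
		return;		/* denied or the task is gone */
	target[n] = '\0';
	printf("%d -> %s\n", pid, target);
}
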
55478@@ -1556,7 +1652,11 @@ struct inode *proc_pid_make_inode(struct super_block * sb, struct task_struct *t
55479 rcu_read_lock();
55480 cred = __task_cred(task);
55481 inode->i_uid = cred->euid;
55482+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
55483+ inode->i_gid = grsec_proc_gid;
55484+#else
55485 inode->i_gid = cred->egid;
55486+#endif
55487 rcu_read_unlock();
55488 }
55489 security_task_to_inode(task, inode);
55490@@ -1592,10 +1692,19 @@ int pid_getattr(struct vfsmount *mnt, struct dentry *dentry, struct kstat *stat)
55491 return -ENOENT;
55492 }
55493 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
55494+#ifdef CONFIG_GRKERNSEC_PROC_USER
55495+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
55496+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55497+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
55498+#endif
55499 task_dumpable(task)) {
55500 cred = __task_cred(task);
55501 stat->uid = cred->euid;
55502+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
55503+ stat->gid = grsec_proc_gid;
55504+#else
55505 stat->gid = cred->egid;
55506+#endif
55507 }
55508 }
55509 rcu_read_unlock();
55510@@ -1633,11 +1742,20 @@ int pid_revalidate(struct dentry *dentry, unsigned int flags)
55511
55512 if (task) {
55513 if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
55514+#ifdef CONFIG_GRKERNSEC_PROC_USER
55515+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IXUSR)) ||
55516+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55517+ (inode->i_mode == (S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP)) ||
55518+#endif
55519 task_dumpable(task)) {
55520 rcu_read_lock();
55521 cred = __task_cred(task);
55522 inode->i_uid = cred->euid;
55523+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
55524+ inode->i_gid = grsec_proc_gid;
55525+#else
55526 inode->i_gid = cred->egid;
55527+#endif
55528 rcu_read_unlock();
55529 } else {
55530 inode->i_uid = GLOBAL_ROOT_UID;
55531@@ -2196,6 +2314,9 @@ static struct dentry *proc_pident_lookup(struct inode *dir,
55532 if (!task)
55533 goto out_no_task;
55534
55535+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
55536+ goto out;
55537+
55538 /*
55539 * Yes, it does not scale. And it should not. Don't add
55540 * new entries into /proc/<tgid>/ without very good reasons.
55541@@ -2240,6 +2361,9 @@ static int proc_pident_readdir(struct file *filp,
55542 if (!task)
55543 goto out_no_task;
55544
55545+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
55546+ goto out;
55547+
55548 ret = 0;
55549 i = filp->f_pos;
55550 switch (i) {
55551@@ -2653,7 +2777,7 @@ static const struct pid_entry tgid_base_stuff[] = {
55552 REG("autogroup", S_IRUGO|S_IWUSR, proc_pid_sched_autogroup_operations),
55553 #endif
55554 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
55555-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
55556+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
55557 INF("syscall", S_IRUGO, proc_pid_syscall),
55558 #endif
55559 INF("cmdline", S_IRUGO, proc_pid_cmdline),
55560@@ -2678,10 +2802,10 @@ static const struct pid_entry tgid_base_stuff[] = {
55561 #ifdef CONFIG_SECURITY
55562 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
55563 #endif
55564-#ifdef CONFIG_KALLSYMS
55565+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
55566 INF("wchan", S_IRUGO, proc_pid_wchan),
55567 #endif
55568-#ifdef CONFIG_STACKTRACE
55569+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
55570 ONE("stack", S_IRUGO, proc_pid_stack),
55571 #endif
55572 #ifdef CONFIG_SCHEDSTATS
55573@@ -2715,6 +2839,9 @@ static const struct pid_entry tgid_base_stuff[] = {
55574 #ifdef CONFIG_HARDWALL
55575 INF("hardwall", S_IRUGO, proc_pid_hardwall),
55576 #endif
55577+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
55578+ INF("ipaddr", S_IRUSR, proc_pid_ipaddr),
55579+#endif
55580 #ifdef CONFIG_USER_NS
55581 REG("uid_map", S_IRUGO|S_IWUSR, proc_uid_map_operations),
55582 REG("gid_map", S_IRUGO|S_IWUSR, proc_gid_map_operations),
55583@@ -2847,7 +2974,14 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
55584 if (!inode)
55585 goto out;
55586
55587+#ifdef CONFIG_GRKERNSEC_PROC_USER
55588+ inode->i_mode = S_IFDIR|S_IRUSR|S_IXUSR;
55589+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55590+ inode->i_gid = grsec_proc_gid;
55591+ inode->i_mode = S_IFDIR|S_IRUSR|S_IRGRP|S_IXUSR|S_IXGRP;
55592+#else
55593 inode->i_mode = S_IFDIR|S_IRUGO|S_IXUGO;
55594+#endif
55595 inode->i_op = &proc_tgid_base_inode_operations;
55596 inode->i_fop = &proc_tgid_base_operations;
55597 inode->i_flags|=S_IMMUTABLE;
55598@@ -2885,7 +3019,11 @@ struct dentry *proc_pid_lookup(struct inode *dir, struct dentry * dentry, unsign
55599 if (!task)
55600 goto out;
55601
55602+ if (gr_pid_is_chrooted(task) || gr_check_hidden_task(task))
55603+ goto out_put_task;
55604+
55605 result = proc_pid_instantiate(dir, dentry, task, NULL);
55606+out_put_task:
55607 put_task_struct(task);
55608 out:
55609 return result;
55610@@ -2948,6 +3086,8 @@ static int proc_pid_fill_cache(struct file *filp, void *dirent, filldir_t filldi
55611 static int fake_filldir(void *buf, const char *name, int namelen,
55612 loff_t offset, u64 ino, unsigned d_type)
55613 {
55614+ struct getdents_callback * __buf = (struct getdents_callback *) buf;
55615+ __buf->error = -EINVAL;
55616 return 0;
55617 }
55618
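
fake_filldir() is the callback used to consume directory entries without returning them (a hidden task, for example); setting buf->error makes the enclosing getdents() report -EINVAL instead of silently handing back a truncated listing. This is also why a private copy of struct getdents_callback appears near the top of this file: it must stay layout-compatible with the one in fs/readdir.c, a maintenance hazard worth flagging. The filldir contract the change leans on, in miniature (illustrative only):

/* Model of the contract: return 0 to keep iterating; anything the
 * caller needs to learn travels out of band in the callback buffer. */
struct cb_model { int error; };

static int fake_filldir_model(void *buf, const char *name, int namlen)
{
	struct cb_model *b = buf;

	(void)name;
	(void)namlen;
	b->error = -22;		/* -EINVAL, surfaced by the syscall later */
	return 0;		/* keep consuming entries regardless */
}
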
55619@@ -3007,7 +3147,7 @@ static const struct pid_entry tid_base_stuff[] = {
55620 REG("sched", S_IRUGO|S_IWUSR, proc_pid_sched_operations),
55621 #endif
55622 REG("comm", S_IRUGO|S_IWUSR, proc_pid_set_comm_operations),
55623-#ifdef CONFIG_HAVE_ARCH_TRACEHOOK
55624+#if defined(CONFIG_HAVE_ARCH_TRACEHOOK) && !defined(CONFIG_GRKERNSEC_PROC_MEMMAP)
55625 INF("syscall", S_IRUGO, proc_pid_syscall),
55626 #endif
55627 INF("cmdline", S_IRUGO, proc_pid_cmdline),
55628@@ -3034,10 +3174,10 @@ static const struct pid_entry tid_base_stuff[] = {
55629 #ifdef CONFIG_SECURITY
55630 DIR("attr", S_IRUGO|S_IXUGO, proc_attr_dir_inode_operations, proc_attr_dir_operations),
55631 #endif
55632-#ifdef CONFIG_KALLSYMS
55633+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
55634 INF("wchan", S_IRUGO, proc_pid_wchan),
55635 #endif
55636-#ifdef CONFIG_STACKTRACE
55637+#if defined(CONFIG_STACKTRACE) && !defined(CONFIG_GRKERNSEC_HIDESYM)
55638 ONE("stack", S_IRUGO, proc_pid_stack),
55639 #endif
55640 #ifdef CONFIG_SCHEDSTATS
55641diff --git a/fs/proc/cmdline.c b/fs/proc/cmdline.c
55642index 82676e3..5f8518a 100644
55643--- a/fs/proc/cmdline.c
55644+++ b/fs/proc/cmdline.c
55645@@ -23,7 +23,11 @@ static const struct file_operations cmdline_proc_fops = {
55646
55647 static int __init proc_cmdline_init(void)
55648 {
55649+#ifdef CONFIG_GRKERNSEC_PROC_ADD
55650+ proc_create_grsec("cmdline", 0, NULL, &cmdline_proc_fops);
55651+#else
55652 proc_create("cmdline", 0, NULL, &cmdline_proc_fops);
55653+#endif
55654 return 0;
55655 }
55656 module_init(proc_cmdline_init);
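
proc_create_grsec() (added elsewhere in this patch) is presumably proc_create() plus the ownership/mode restrictions of the GRKERNSEC_PROC_* options, so unprivileged readers of /proc/cmdline can start seeing EACCES. Consumers should degrade gracefully (userspace sketch):

#include <errno.h>
#include <stdio.h>

/* Read the kernel command line, tolerating a permission-restricted
 * entry as produced under CONFIG_GRKERNSEC_PROC_ADD. */
static int read_cmdline(char *out, size_t len)
{
	FILE *f = fopen("/proc/cmdline", "r");

	if (!f) {
		if (errno == EACCES)
			snprintf(out, len, "(restricted)");
		return -1;
	}
	if (!fgets(out, len, f))
		out[0] = '\0';
	fclose(f);
	return 0;
}
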
55657diff --git a/fs/proc/devices.c b/fs/proc/devices.c
55658index b143471..bb105e5 100644
55659--- a/fs/proc/devices.c
55660+++ b/fs/proc/devices.c
55661@@ -64,7 +64,11 @@ static const struct file_operations proc_devinfo_operations = {
55662
55663 static int __init proc_devices_init(void)
55664 {
55665+#ifdef CONFIG_GRKERNSEC_PROC_ADD
55666+ proc_create_grsec("devices", 0, NULL, &proc_devinfo_operations);
55667+#else
55668 proc_create("devices", 0, NULL, &proc_devinfo_operations);
55669+#endif
55670 return 0;
55671 }
55672 module_init(proc_devices_init);
55673diff --git a/fs/proc/fd.c b/fs/proc/fd.c
55674index d7a4a28..0201742 100644
55675--- a/fs/proc/fd.c
55676+++ b/fs/proc/fd.c
55677@@ -25,7 +25,8 @@ static int seq_show(struct seq_file *m, void *v)
55678 if (!task)
55679 return -ENOENT;
55680
55681- files = get_files_struct(task);
55682+ if (!gr_acl_handle_procpidmem(task))
55683+ files = get_files_struct(task);
55684 put_task_struct(task);
55685
55686 if (files) {
55687@@ -302,11 +303,21 @@ static struct dentry *proc_lookupfd(struct inode *dir, struct dentry *dentry,
55688 */
55689 int proc_fd_permission(struct inode *inode, int mask)
55690 {
55691+ struct task_struct *task;
55692 int rv = generic_permission(inode, mask);
55693- if (rv == 0)
55694- return 0;
55695+
55696 if (task_pid(current) == proc_pid(inode))
55697 rv = 0;
55698+
55699+ task = get_proc_task(inode);
55700+ if (task == NULL)
55701+ return rv;
55702+
55703+ if (gr_acl_handle_procpidmem(task))
55704+ rv = -EACCES;
55705+
55706+ put_task_struct(task);
55707+
55708 return rv;
55709 }
55710
55711diff --git a/fs/proc/inode.c b/fs/proc/inode.c
55712index 073aea6..0630370 100644
55713--- a/fs/proc/inode.c
55714+++ b/fs/proc/inode.c
55715@@ -23,11 +23,17 @@
55716 #include <linux/slab.h>
55717 #include <linux/mount.h>
55718 #include <linux/magic.h>
55719+#include <linux/grsecurity.h>
55720
55721 #include <asm/uaccess.h>
55722
55723 #include "internal.h"
55724
55725+#ifdef CONFIG_PROC_SYSCTL
55726+extern const struct inode_operations proc_sys_inode_operations;
55727+extern const struct inode_operations proc_sys_dir_operations;
55728+#endif
55729+
55730 static void proc_evict_inode(struct inode *inode)
55731 {
55732 struct proc_dir_entry *de;
55733@@ -55,6 +61,13 @@ static void proc_evict_inode(struct inode *inode)
55734 ns = PROC_I(inode)->ns.ns;
55735 if (ns_ops && ns)
55736 ns_ops->put(ns);
55737+
55738+#ifdef CONFIG_PROC_SYSCTL
55739+ if (inode->i_op == &proc_sys_inode_operations ||
55740+ inode->i_op == &proc_sys_dir_operations)
55741+ gr_handle_delete(inode->i_ino, inode->i_sb->s_dev);
55742+#endif
55743+
55744 }
55745
55746 static struct kmem_cache * proc_inode_cachep;
55747@@ -385,7 +398,11 @@ struct inode *proc_get_inode(struct super_block *sb, struct proc_dir_entry *de)
55748 if (de->mode) {
55749 inode->i_mode = de->mode;
55750 inode->i_uid = de->uid;
55751+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
55752+ inode->i_gid = grsec_proc_gid;
55753+#else
55754 inode->i_gid = de->gid;
55755+#endif
55756 }
55757 if (de->size)
55758 inode->i_size = de->size;
55759diff --git a/fs/proc/internal.h b/fs/proc/internal.h
55760index d600fb0..3b495fe 100644
55761--- a/fs/proc/internal.h
55762+++ b/fs/proc/internal.h
55763@@ -155,6 +155,9 @@ extern int proc_pid_status(struct seq_file *, struct pid_namespace *,
55764 struct pid *, struct task_struct *);
55765 extern int proc_pid_statm(struct seq_file *, struct pid_namespace *,
55766 struct pid *, struct task_struct *);
55767+#ifdef CONFIG_GRKERNSEC_PROC_IPADDR
55768+extern int proc_pid_ipaddr(struct task_struct *task, char *buffer);
55769+#endif
55770
55771 /*
55772 * base.c
55773diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
55774index 0a22194..a9fc8c1 100644
55775--- a/fs/proc/kcore.c
55776+++ b/fs/proc/kcore.c
55777@@ -484,9 +484,10 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
55778 * the addresses in the elf_phdr on our list.
55779 */
55780 start = kc_offset_to_vaddr(*fpos - elf_buflen);
55781- if ((tsz = (PAGE_SIZE - (start & ~PAGE_MASK))) > buflen)
55782+ tsz = PAGE_SIZE - (start & ~PAGE_MASK);
55783+ if (tsz > buflen)
55784 tsz = buflen;
55785-
55786+
55787 while (buflen) {
55788 struct kcore_list *m;
55789
55790@@ -515,20 +516,23 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
55791 kfree(elf_buf);
55792 } else {
55793 if (kern_addr_valid(start)) {
55794- unsigned long n;
55795+ char *elf_buf;
55796+ mm_segment_t oldfs;
55797
55798- n = copy_to_user(buffer, (char *)start, tsz);
55799- /*
55800- * We cannot distinguish between fault on source
55801- * and fault on destination. When this happens
55802- * we clear too and hope it will trigger the
55803- * EFAULT again.
55804- */
55805- if (n) {
55806- if (clear_user(buffer + tsz - n,
55807- n))
55808+ elf_buf = kmalloc(tsz, GFP_KERNEL);
55809+ if (!elf_buf)
55810+ return -ENOMEM;
55811+ oldfs = get_fs();
55812+ set_fs(KERNEL_DS);
55813+ if (!__copy_from_user(elf_buf, (const void __user *)start, tsz)) {
55814+ set_fs(oldfs);
55815+ if (copy_to_user(buffer, elf_buf, tsz)) {
55816+ kfree(elf_buf);
55817 return -EFAULT;
55818+ }
55819 }
55820+ set_fs(oldfs);
55821+ kfree(elf_buf);
55822 } else {
55823 if (clear_user(buffer, tsz))
55824 return -EFAULT;
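
The rewritten branch replaces a direct copy_to_user() from an arbitrary kernel address with a bounce buffer: pull the bytes into a kmalloc() buffer under KERNEL_DS first, then push them out with a normal copy_to_user(), so a fault on the kernel source is distinguishable from a fault on the user destination, which is exactly the ambiguity the deleted comment described. The bare pattern, minus the /proc/kcore plumbing (sketch for kernels of this vintage; set_fs()/get_fs() were removed from mainline much later):

/* Assumes <linux/slab.h> and <linux/uaccess.h>.  Sketch only. */
static long bounce_copy(void __user *dst, unsigned long src, size_t len)
{
	mm_segment_t oldfs;
	char *buf;
	long ret = 0;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	oldfs = get_fs();
	set_fs(KERNEL_DS);
	if (__copy_from_user(buf, (const void __user *)src, len))
		ret = -EFAULT;		/* fault on the kernel source */
	set_fs(oldfs);

	if (!ret && copy_to_user(dst, buf, len))
		ret = -EFAULT;		/* fault on the user destination */

	kfree(buf);
	return ret;
}
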
55825@@ -548,6 +552,9 @@ read_kcore(struct file *file, char __user *buffer, size_t buflen, loff_t *fpos)
55826
55827 static int open_kcore(struct inode *inode, struct file *filp)
55828 {
55829+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
55830+ return -EPERM;
55831+#endif
55832 if (!capable(CAP_SYS_RAWIO))
55833 return -EPERM;
55834 if (kcore_need_update)
55835diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
55836index 5aa847a..f77c8d4 100644
55837--- a/fs/proc/meminfo.c
55838+++ b/fs/proc/meminfo.c
55839@@ -159,7 +159,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
55840 vmi.used >> 10,
55841 vmi.largest_chunk >> 10
55842 #ifdef CONFIG_MEMORY_FAILURE
55843- ,atomic_long_read(&num_poisoned_pages) << (PAGE_SHIFT - 10)
55844+ ,atomic_long_read_unchecked(&num_poisoned_pages) << (PAGE_SHIFT - 10)
55845 #endif
55846 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
55847 ,K(global_page_state(NR_ANON_TRANSPARENT_HUGEPAGES) *
55848diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c
55849index ccfd99b..1b7e255 100644
55850--- a/fs/proc/nommu.c
55851+++ b/fs/proc/nommu.c
55852@@ -66,7 +66,7 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region)
55853 if (len < 1)
55854 len = 1;
55855 seq_printf(m, "%*c", len, ' ');
55856- seq_path(m, &file->f_path, "");
55857+ seq_path(m, &file->f_path, "\n\\");
55858 }
55859
55860 seq_putc(m, '\n');
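
seq_path()'s last argument is the set of characters to escape when emitting the pathname; adding '\n' (and '\\') closes a spoofing hole where a mapped file whose name embeds a newline would inject a forged extra line into /proc output that line-oriented parsers would trust. The hostile input is easy to construct (userspace sketch):

#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

/* Map a file whose NAME embeds a newline, then dump our own maps:
 * without the escape set, the second "line" of the name looks like
 * a genuine map entry. */
int main(void)
{
	const char *name = "x\n08048000-08049000 r-xp 00000000 00:00 0 fake";
	char buf[4096];
	ssize_t n;
	int fd, mf;

	fd = open(name, O_CREAT | O_RDWR, 0600);
	if (fd < 0 || ftruncate(fd, 4096) < 0)
		return 1;
	mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0);

	mf = open("/proc/self/maps", O_RDONLY);
	if (mf < 0)
		return 1;
	while ((n = read(mf, buf, sizeof(buf))) > 0)
		write(1, buf, n);
	unlink(name);
	return 0;
}
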
55861diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
55862index 986e832..6e8e859 100644
55863--- a/fs/proc/proc_net.c
55864+++ b/fs/proc/proc_net.c
55865@@ -23,6 +23,7 @@
55866 #include <linux/nsproxy.h>
55867 #include <net/net_namespace.h>
55868 #include <linux/seq_file.h>
55869+#include <linux/grsecurity.h>
55870
55871 #include "internal.h"
55872
55873@@ -109,6 +110,17 @@ static struct net *get_proc_task_net(struct inode *dir)
55874 struct task_struct *task;
55875 struct nsproxy *ns;
55876 struct net *net = NULL;
55877+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55878+ const struct cred *cred = current_cred();
55879+#endif
55880+
55881+#ifdef CONFIG_GRKERNSEC_PROC_USER
55882+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID))
55883+ return net;
55884+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
55885+ if (!uid_eq(cred->fsuid, GLOBAL_ROOT_UID) && !in_group_p(grsec_proc_gid))
55886+ return net;
55887+#endif
55888
55889 rcu_read_lock();
55890 task = pid_task(proc_pid(dir), PIDTYPE_PID);
55891diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
55892index ac05f33..1e6dc7e 100644
55893--- a/fs/proc/proc_sysctl.c
55894+++ b/fs/proc/proc_sysctl.c
55895@@ -13,11 +13,15 @@
55896 #include <linux/module.h>
55897 #include "internal.h"
55898
55899+extern int gr_handle_chroot_sysctl(const int op);
55900+extern int gr_handle_sysctl_mod(const char *dirname, const char *name,
55901+ const int op);
55902+
55903 static const struct dentry_operations proc_sys_dentry_operations;
55904 static const struct file_operations proc_sys_file_operations;
55905-static const struct inode_operations proc_sys_inode_operations;
55906+const struct inode_operations proc_sys_inode_operations;
55907 static const struct file_operations proc_sys_dir_file_operations;
55908-static const struct inode_operations proc_sys_dir_operations;
55909+const struct inode_operations proc_sys_dir_operations;
55910
55911 void proc_sys_poll_notify(struct ctl_table_poll *poll)
55912 {
55913@@ -467,6 +471,9 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
55914
55915 err = NULL;
55916 d_set_d_op(dentry, &proc_sys_dentry_operations);
55917+
55918+ gr_handle_proc_create(dentry, inode);
55919+
55920 d_add(dentry, inode);
55921
55922 out:
55923@@ -482,6 +489,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
55924 struct inode *inode = file_inode(filp);
55925 struct ctl_table_header *head = grab_header(inode);
55926 struct ctl_table *table = PROC_I(inode)->sysctl_entry;
55927+ int op = write ? MAY_WRITE : MAY_READ;
55928 ssize_t error;
55929 size_t res;
55930
55931@@ -493,7 +501,7 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
55932 * and won't be until we finish.
55933 */
55934 error = -EPERM;
55935- if (sysctl_perm(head, table, write ? MAY_WRITE : MAY_READ))
55936+ if (sysctl_perm(head, table, op))
55937 goto out;
55938
55939 /* if that can happen at all, it should be -EINVAL, not -EISDIR */
55940@@ -501,6 +509,22 @@ static ssize_t proc_sys_call_handler(struct file *filp, void __user *buf,
55941 if (!table->proc_handler)
55942 goto out;
55943
55944+#ifdef CONFIG_GRKERNSEC
55945+ error = -EPERM;
55946+ if (gr_handle_chroot_sysctl(op))
55947+ goto out;
55948+ dget(filp->f_path.dentry);
55949+ if (gr_handle_sysctl_mod(filp->f_path.dentry->d_parent->d_name.name, table->procname, op)) {
55950+ dput(filp->f_path.dentry);
55951+ goto out;
55952+ }
55953+ dput(filp->f_path.dentry);
55954+ if (!gr_acl_handle_open(filp->f_path.dentry, filp->f_path.mnt, op))
55955+ goto out;
55956+ if (write && !capable(CAP_SYS_ADMIN))
55957+ goto out;
55958+#endif
55959+
55960 /* careful: calling conventions are nasty here */
55961 res = count;
55962 error = table->proc_handler(table, write, buf, &res, ppos);
55963@@ -598,6 +622,9 @@ static int proc_sys_fill_cache(struct file *filp, void *dirent,
55964 return -ENOMEM;
55965 } else {
55966 d_set_d_op(child, &proc_sys_dentry_operations);
55967+
55968+ gr_handle_proc_create(child, inode);
55969+
55970 d_add(child, inode);
55971 }
55972 } else {
55973@@ -641,6 +668,9 @@ static int scan(struct ctl_table_header *head, ctl_table *table,
55974 if ((*pos)++ < file->f_pos)
55975 return 0;
55976
55977+ if (!gr_acl_handle_hidden_file(file->f_path.dentry, file->f_path.mnt))
55978+ return 0;
55979+
55980 if (unlikely(S_ISLNK(table->mode)))
55981 res = proc_sys_link_fill_cache(file, dirent, filldir, head, table);
55982 else
55983@@ -751,6 +781,9 @@ static int proc_sys_getattr(struct vfsmount *mnt, struct dentry *dentry, struct
55984 if (IS_ERR(head))
55985 return PTR_ERR(head);
55986
55987+ if (table && !gr_acl_handle_hidden_file(dentry, mnt))
55988+ return -ENOENT;
55989+
55990 generic_fillattr(inode, stat);
55991 if (table)
55992 stat->mode = (stat->mode & S_IFMT) | table->mode;
55993@@ -773,13 +806,13 @@ static const struct file_operations proc_sys_dir_file_operations = {
55994 .llseek = generic_file_llseek,
55995 };
55996
55997-static const struct inode_operations proc_sys_inode_operations = {
55998+const struct inode_operations proc_sys_inode_operations = {
55999 .permission = proc_sys_permission,
56000 .setattr = proc_sys_setattr,
56001 .getattr = proc_sys_getattr,
56002 };
56003
56004-static const struct inode_operations proc_sys_dir_operations = {
56005+const struct inode_operations proc_sys_dir_operations = {
56006 .lookup = proc_sys_lookup,
56007 .permission = proc_sys_permission,
56008 .setattr = proc_sys_setattr,
56009@@ -855,7 +888,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
56010 static struct ctl_dir *new_dir(struct ctl_table_set *set,
56011 const char *name, int namelen)
56012 {
56013- struct ctl_table *table;
56014+ ctl_table_no_const *table;
56015 struct ctl_dir *new;
56016 struct ctl_node *node;
56017 char *new_name;
56018@@ -867,7 +900,7 @@ static struct ctl_dir *new_dir(struct ctl_table_set *set,
56019 return NULL;
56020
56021 node = (struct ctl_node *)(new + 1);
56022- table = (struct ctl_table *)(node + 1);
56023+ table = (ctl_table_no_const *)(node + 1);
56024 new_name = (char *)(table + 2);
56025 memcpy(new_name, name, namelen);
56026 new_name[namelen] = '\0';
56027@@ -1036,7 +1069,8 @@ static int sysctl_check_table(const char *path, struct ctl_table *table)
56028 static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table *table,
56029 struct ctl_table_root *link_root)
56030 {
56031- struct ctl_table *link_table, *entry, *link;
56032+ ctl_table_no_const *link_table, *link;
56033+ struct ctl_table *entry;
56034 struct ctl_table_header *links;
56035 struct ctl_node *node;
56036 char *link_name;
56037@@ -1059,7 +1093,7 @@ static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table
56038 return NULL;
56039
56040 node = (struct ctl_node *)(links + 1);
56041- link_table = (struct ctl_table *)(node + nr_entries);
56042+ link_table = (ctl_table_no_const *)(node + nr_entries);
56043 link_name = (char *)&link_table[nr_entries + 1];
56044
56045 for (link = link_table, entry = table; entry->procname; link++, entry++) {
56046@@ -1307,8 +1341,8 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
56047 struct ctl_table_header ***subheader, struct ctl_table_set *set,
56048 struct ctl_table *table)
56049 {
56050- struct ctl_table *ctl_table_arg = NULL;
56051- struct ctl_table *entry, *files;
56052+ ctl_table_no_const *ctl_table_arg = NULL, *files = NULL;
56053+ struct ctl_table *entry;
56054 int nr_files = 0;
56055 int nr_dirs = 0;
56056 int err = -ENOMEM;
56057@@ -1320,10 +1354,9 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
56058 nr_files++;
56059 }
56060
56061- files = table;
56062 /* If there are mixed files and directories we need a new table */
56063 if (nr_dirs && nr_files) {
56064- struct ctl_table *new;
56065+ ctl_table_no_const *new;
56066 files = kzalloc(sizeof(struct ctl_table) * (nr_files + 1),
56067 GFP_KERNEL);
56068 if (!files)
56069@@ -1341,7 +1374,7 @@ static int register_leaf_sysctl_tables(const char *path, char *pos,
56070 /* Register everything except a directory full of subdirectories */
56071 if (nr_files || !nr_dirs) {
56072 struct ctl_table_header *header;
56073- header = __register_sysctl_table(set, path, files);
56074+ header = __register_sysctl_table(set, path, files ? files : table);
56075 if (!header) {
56076 kfree(ctl_table_arg);
56077 goto out;
56078diff --git a/fs/proc/root.c b/fs/proc/root.c
56079index 41a6ea9..23eaa92 100644
56080--- a/fs/proc/root.c
56081+++ b/fs/proc/root.c
56082@@ -182,7 +182,15 @@ void __init proc_root_init(void)
56083 #ifdef CONFIG_PROC_DEVICETREE
56084 proc_device_tree_init();
56085 #endif
56086+#ifdef CONFIG_GRKERNSEC_PROC_ADD
56087+#ifdef CONFIG_GRKERNSEC_PROC_USER
56088+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR, NULL);
56089+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
56090+ proc_mkdir_mode("bus", S_IRUSR | S_IXUSR | S_IRGRP | S_IXGRP, NULL);
56091+#endif
56092+#else
56093 proc_mkdir("bus", NULL);
56094+#endif
56095 proc_sys_init();
56096 }
56097
56098diff --git a/fs/proc/self.c b/fs/proc/self.c
56099index 6b6a993..807cccc 100644
56100--- a/fs/proc/self.c
56101+++ b/fs/proc/self.c
56102@@ -39,7 +39,7 @@ static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
56103 static void proc_self_put_link(struct dentry *dentry, struct nameidata *nd,
56104 void *cookie)
56105 {
56106- char *s = nd_get_link(nd);
56107+ const char *s = nd_get_link(nd);
56108 if (!IS_ERR(s))
56109 kfree(s);
56110 }
56111diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
56112index 3e636d8..83e3b71 100644
56113--- a/fs/proc/task_mmu.c
56114+++ b/fs/proc/task_mmu.c
56115@@ -11,12 +11,19 @@
56116 #include <linux/rmap.h>
56117 #include <linux/swap.h>
56118 #include <linux/swapops.h>
56119+#include <linux/grsecurity.h>
56120
56121 #include <asm/elf.h>
56122 #include <asm/uaccess.h>
56123 #include <asm/tlbflush.h>
56124 #include "internal.h"
56125
56126+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56127+#define PAX_RAND_FLAGS(_mm) (_mm != NULL && _mm != current->mm && \
56128+ (_mm->pax_flags & MF_PAX_RANDMMAP || \
56129+ _mm->pax_flags & MF_PAX_SEGMEXEC))
56130+#endif
56131+
56132 void task_mem(struct seq_file *m, struct mm_struct *mm)
56133 {
56134 unsigned long data, text, lib, swap;
56135@@ -52,8 +59,13 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
56136 "VmExe:\t%8lu kB\n"
56137 "VmLib:\t%8lu kB\n"
56138 "VmPTE:\t%8lu kB\n"
56139- "VmSwap:\t%8lu kB\n",
56140- hiwater_vm << (PAGE_SHIFT-10),
56141+ "VmSwap:\t%8lu kB\n"
56142+
56143+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
56144+ "CsBase:\t%8lx\nCsLim:\t%8lx\n"
56145+#endif
56146+
56147+ ,hiwater_vm << (PAGE_SHIFT-10),
56148 total_vm << (PAGE_SHIFT-10),
56149 mm->locked_vm << (PAGE_SHIFT-10),
56150 mm->pinned_vm << (PAGE_SHIFT-10),
56151@@ -62,7 +74,19 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
56152 data << (PAGE_SHIFT-10),
56153 mm->stack_vm << (PAGE_SHIFT-10), text, lib,
56154 (PTRS_PER_PTE*sizeof(pte_t)*mm->nr_ptes) >> 10,
56155- swap << (PAGE_SHIFT-10));
56156+ swap << (PAGE_SHIFT-10)
56157+
56158+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
56159+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56160+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_base
56161+ , PAX_RAND_FLAGS(mm) ? 0 : mm->context.user_cs_limit
56162+#else
56163+ , mm->context.user_cs_base
56164+ , mm->context.user_cs_limit
56165+#endif
56166+#endif
56167+
56168+ );
56169 }
56170
56171 unsigned long task_vsize(struct mm_struct *mm)
56172@@ -277,13 +301,13 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
56173 pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT;
56174 }
56175
56176- /* We don't show the stack guard page in /proc/maps */
56177+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56178+ start = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_start;
56179+ end = PAX_RAND_FLAGS(mm) ? 0UL : vma->vm_end;
56180+#else
56181 start = vma->vm_start;
56182- if (stack_guard_page_start(vma, start))
56183- start += PAGE_SIZE;
56184 end = vma->vm_end;
56185- if (stack_guard_page_end(vma, end))
56186- end -= PAGE_SIZE;
56187+#endif
56188
56189 seq_printf(m, "%08lx-%08lx %c%c%c%c %08llx %02x:%02x %lu %n",
56190 start,
56191@@ -292,7 +316,11 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
56192 flags & VM_WRITE ? 'w' : '-',
56193 flags & VM_EXEC ? 'x' : '-',
56194 flags & VM_MAYSHARE ? 's' : 'p',
56195+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56196+ PAX_RAND_FLAGS(mm) ? 0UL : pgoff,
56197+#else
56198 pgoff,
56199+#endif
56200 MAJOR(dev), MINOR(dev), ino, &len);
56201
56202 /*
56203@@ -301,7 +329,7 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
56204 */
56205 if (file) {
56206 pad_len_spaces(m, len);
56207- seq_path(m, &file->f_path, "\n");
56208+ seq_path(m, &file->f_path, "\n\\");
56209 goto done;
56210 }
56211
56212@@ -327,8 +355,9 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid)
56213 * Thread stack in /proc/PID/task/TID/maps or
56214 * the main process stack.
56215 */
56216- if (!is_pid || (vma->vm_start <= mm->start_stack &&
56217- vma->vm_end >= mm->start_stack)) {
56218+ if (!is_pid || (vma->vm_flags & (VM_GROWSDOWN | VM_GROWSUP)) ||
56219+ (vma->vm_start <= mm->start_stack &&
56220+ vma->vm_end >= mm->start_stack)) {
56221 name = "[stack]";
56222 } else {
56223 /* Thread stack in /proc/PID/maps */
56224@@ -352,6 +381,13 @@ static int show_map(struct seq_file *m, void *v, int is_pid)
56225 struct proc_maps_private *priv = m->private;
56226 struct task_struct *task = priv->task;
56227
56228+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56229+ if (current->exec_id != m->exec_id) {
56230+ gr_log_badprocpid("maps");
56231+ return 0;
56232+ }
56233+#endif
56234+
56235 show_map_vma(m, vma, is_pid);
56236
56237 if (m->count < m->size) /* vma is copied successfully */
56238@@ -589,12 +625,23 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
56239 .private = &mss,
56240 };
56241
56242+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56243+ if (current->exec_id != m->exec_id) {
56244+ gr_log_badprocpid("smaps");
56245+ return 0;
56246+ }
56247+#endif
56248 memset(&mss, 0, sizeof mss);
56249- mss.vma = vma;
56250- /* mmap_sem is held in m_start */
56251- if (vma->vm_mm && !is_vm_hugetlb_page(vma))
56252- walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
56253-
56254+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56255+ if (!PAX_RAND_FLAGS(vma->vm_mm)) {
56256+#endif
56257+ mss.vma = vma;
56258+ /* mmap_sem is held in m_start */
56259+ if (vma->vm_mm && !is_vm_hugetlb_page(vma))
56260+ walk_page_range(vma->vm_start, vma->vm_end, &smaps_walk);
56261+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56262+ }
56263+#endif
56264 show_map_vma(m, vma, is_pid);
56265
56266 seq_printf(m,
56267@@ -612,7 +659,11 @@ static int show_smap(struct seq_file *m, void *v, int is_pid)
56268 "KernelPageSize: %8lu kB\n"
56269 "MMUPageSize: %8lu kB\n"
56270 "Locked: %8lu kB\n",
56271+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56272+ PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : (vma->vm_end - vma->vm_start) >> 10,
56273+#else
56274 (vma->vm_end - vma->vm_start) >> 10,
56275+#endif
56276 mss.resident >> 10,
56277 (unsigned long)(mss.pss >> (10 + PSS_SHIFT)),
56278 mss.shared_clean >> 10,
56279@@ -1264,6 +1315,13 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
56280 int n;
56281 char buffer[50];
56282
56283+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56284+ if (current->exec_id != m->exec_id) {
56285+ gr_log_badprocpid("numa_maps");
56286+ return 0;
56287+ }
56288+#endif
56289+
56290 if (!mm)
56291 return 0;
56292
56293@@ -1281,11 +1339,15 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid)
56294 mpol_to_str(buffer, sizeof(buffer), pol);
56295 mpol_cond_put(pol);
56296
56297+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56298+ seq_printf(m, "%08lx %s", PAX_RAND_FLAGS(vma->vm_mm) ? 0UL : vma->vm_start, buffer);
56299+#else
56300 seq_printf(m, "%08lx %s", vma->vm_start, buffer);
56301+#endif
56302
56303 if (file) {
56304 seq_printf(m, " file=");
56305- seq_path(m, &file->f_path, "\n\t= ");
56306+ seq_path(m, &file->f_path, "\n\t\\= ");
56307 } else if (vma->vm_start <= mm->brk && vma->vm_end >= mm->start_brk) {
56308 seq_printf(m, " heap");
56309 } else {
56310diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
56311index 56123a6..5a2f6ec 100644
56312--- a/fs/proc/task_nommu.c
56313+++ b/fs/proc/task_nommu.c
56314@@ -51,7 +51,7 @@ void task_mem(struct seq_file *m, struct mm_struct *mm)
56315 else
56316 bytes += kobjsize(mm);
56317
56318- if (current->fs && current->fs->users > 1)
56319+ if (current->fs && atomic_read(&current->fs->users) > 1)
56320 sbytes += kobjsize(current->fs);
56321 else
56322 bytes += kobjsize(current->fs);
56323@@ -168,7 +168,7 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma,
56324
56325 if (file) {
56326 pad_len_spaces(m, len);
56327- seq_path(m, &file->f_path, "");
56328+ seq_path(m, &file->f_path, "\n\\");
56329 } else if (mm) {
56330 pid_t tid = vm_is_stack(priv->task, vma, is_pid);
56331
56332diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
56333index 17f7e08..e4b1529 100644
56334--- a/fs/proc/vmcore.c
56335+++ b/fs/proc/vmcore.c
56336@@ -99,9 +99,13 @@ static ssize_t read_from_oldmem(char *buf, size_t count,
56337 nr_bytes = count;
56338
56339 /* If pfn is not ram, return zeros for sparse dump files */
56340- if (pfn_is_ram(pfn) == 0)
56341- memset(buf, 0, nr_bytes);
56342- else {
56343+ if (pfn_is_ram(pfn) == 0) {
56344+ if (userbuf) {
56345+ if (clear_user((char __force_user *)buf, nr_bytes))
56346+ return -EFAULT;
56347+ } else
56348+ memset(buf, 0, nr_bytes);
56349+ } else {
56350 tmp = copy_oldmem_page(pfn, buf, nr_bytes,
56351 offset, userbuf);
56352 if (tmp < 0)
56353@@ -186,7 +190,7 @@ static ssize_t read_vmcore(struct file *file, char __user *buffer,
56354 if (tsz > nr_bytes)
56355 tsz = nr_bytes;
56356
56357- tmp = read_from_oldmem(buffer, tsz, &start, 1);
56358+ tmp = read_from_oldmem((char __force_kernel *)buffer, tsz, &start, 1);
56359 if (tmp < 0)
56360 return tmp;
56361 buflen -= tsz;
56362diff --git a/fs/qnx6/qnx6.h b/fs/qnx6/qnx6.h
56363index b00fcc9..e0c6381 100644
56364--- a/fs/qnx6/qnx6.h
56365+++ b/fs/qnx6/qnx6.h
56366@@ -74,7 +74,7 @@ enum {
56367 BYTESEX_BE,
56368 };
56369
56370-static inline __u64 fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
56371+static inline __u64 __intentional_overflow(-1) fs64_to_cpu(struct qnx6_sb_info *sbi, __fs64 n)
56372 {
56373 if (sbi->s_bytesex == BYTESEX_LE)
56374 return le64_to_cpu((__force __le64)n);
56375@@ -90,7 +90,7 @@ static inline __fs64 cpu_to_fs64(struct qnx6_sb_info *sbi, __u64 n)
56376 return (__force __fs64)cpu_to_be64(n);
56377 }
56378
56379-static inline __u32 fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
56380+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct qnx6_sb_info *sbi, __fs32 n)
56381 {
56382 if (sbi->s_bytesex == BYTESEX_LE)
56383 return le32_to_cpu((__force __le32)n);
56384diff --git a/fs/quota/netlink.c b/fs/quota/netlink.c
56385index 16e8abb..2dcf914 100644
56386--- a/fs/quota/netlink.c
56387+++ b/fs/quota/netlink.c
56388@@ -33,7 +33,7 @@ static struct genl_family quota_genl_family = {
56389 void quota_send_warning(struct kqid qid, dev_t dev,
56390 const char warntype)
56391 {
56392- static atomic_t seq;
56393+ static atomic_unchecked_t seq;
56394 struct sk_buff *skb;
56395 void *msg_head;
56396 int ret;
56397@@ -49,7 +49,7 @@ void quota_send_warning(struct kqid qid, dev_t dev,
56398 "VFS: Not enough memory to send quota warning.\n");
56399 return;
56400 }
56401- msg_head = genlmsg_put(skb, 0, atomic_add_return(1, &seq),
56402+ msg_head = genlmsg_put(skb, 0, atomic_add_return_unchecked(1, &seq),
56403 &quota_genl_family, 0, QUOTA_NL_C_WARNING);
56404 if (!msg_head) {
56405 printk(KERN_ERR
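
atomic_unchecked_t is the escape hatch for PaX's REFCOUNT hardening: plain atomic_t gains overflow detection because it almost always backs a reference count, while counters that may legitimately wrap (statistics, or a netlink sequence number like this one) are converted to the _unchecked variant, which behaves exactly like the old atomic_t. Conceptually (sketch; the real mechanism lives in the arch atomic primitives, and nothing below is actually atomic):

#include <limits.h>

typedef struct { int counter; } atomic_checked_model;	/* refcounts */
typedef struct { int counter; } atomic_unchecked_model;	/* stats, seqnos */

static int add_return_checked(atomic_checked_model *v, int i)
{
	if (i > 0 && v->counter > INT_MAX - i)
		__builtin_trap();	/* overflow: stop before misuse */
	return v->counter += i;
}

static int add_return_unchecked(atomic_unchecked_model *v, int i)
{
	return v->counter += i;	/* wrapping is expected and harmless */
}
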
56406diff --git a/fs/read_write.c b/fs/read_write.c
56407index 2cefa41..c7e2fe0 100644
56408--- a/fs/read_write.c
56409+++ b/fs/read_write.c
56410@@ -411,7 +411,7 @@ ssize_t __kernel_write(struct file *file, const char *buf, size_t count, loff_t
56411
56412 old_fs = get_fs();
56413 set_fs(get_ds());
56414- p = (__force const char __user *)buf;
56415+ p = (const char __force_user *)buf;
56416 if (count > MAX_RW_COUNT)
56417 count = MAX_RW_COUNT;
56418 if (file->f_op->write)
56419diff --git a/fs/readdir.c b/fs/readdir.c
56420index fee38e0..12fdf47 100644
56421--- a/fs/readdir.c
56422+++ b/fs/readdir.c
56423@@ -17,6 +17,7 @@
56424 #include <linux/security.h>
56425 #include <linux/syscalls.h>
56426 #include <linux/unistd.h>
56427+#include <linux/namei.h>
56428
56429 #include <asm/uaccess.h>
56430
56431@@ -67,6 +68,7 @@ struct old_linux_dirent {
56432
56433 struct readdir_callback {
56434 struct old_linux_dirent __user * dirent;
56435+ struct file * file;
56436 int result;
56437 };
56438
56439@@ -84,6 +86,10 @@ static int fillonedir(void * __buf, const char * name, int namlen, loff_t offset
56440 buf->result = -EOVERFLOW;
56441 return -EOVERFLOW;
56442 }
56443+
56444+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
56445+ return 0;
56446+
56447 buf->result++;
56448 dirent = buf->dirent;
56449 if (!access_ok(VERIFY_WRITE, dirent,
56450@@ -114,6 +120,7 @@ SYSCALL_DEFINE3(old_readdir, unsigned int, fd,
56451
56452 buf.result = 0;
56453 buf.dirent = dirent;
56454+ buf.file = f.file;
56455
56456 error = vfs_readdir(f.file, fillonedir, &buf);
56457 if (buf.result)
56458@@ -139,6 +146,7 @@ struct linux_dirent {
56459 struct getdents_callback {
56460 struct linux_dirent __user * current_dir;
56461 struct linux_dirent __user * previous;
56462+ struct file * file;
56463 int count;
56464 int error;
56465 };
56466@@ -160,6 +168,10 @@ static int filldir(void * __buf, const char * name, int namlen, loff_t offset,
56467 buf->error = -EOVERFLOW;
56468 return -EOVERFLOW;
56469 }
56470+
56471+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
56472+ return 0;
56473+
56474 dirent = buf->previous;
56475 if (dirent) {
56476 if (__put_user(offset, &dirent->d_off))
56477@@ -205,6 +217,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
56478 buf.previous = NULL;
56479 buf.count = count;
56480 buf.error = 0;
56481+ buf.file = f.file;
56482
56483 error = vfs_readdir(f.file, filldir, &buf);
56484 if (error >= 0)
56485@@ -223,6 +236,7 @@ SYSCALL_DEFINE3(getdents, unsigned int, fd,
56486 struct getdents_callback64 {
56487 struct linux_dirent64 __user * current_dir;
56488 struct linux_dirent64 __user * previous;
56489+ struct file *file;
56490 int count;
56491 int error;
56492 };
56493@@ -238,6 +252,10 @@ static int filldir64(void * __buf, const char * name, int namlen, loff_t offset,
56494 buf->error = -EINVAL; /* only used if we fail.. */
56495 if (reclen > buf->count)
56496 return -EINVAL;
56497+
56498+ if (!gr_acl_handle_filldir(buf->file, name, namlen, ino))
56499+ return 0;
56500+
56501 dirent = buf->previous;
56502 if (dirent) {
56503 if (__put_user(offset, &dirent->d_off))
56504@@ -283,6 +301,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
56505
56506 buf.current_dir = dirent;
56507 buf.previous = NULL;
56508+ buf.file = f.file;
56509 buf.count = count;
56510 buf.error = 0;
56511
56512@@ -291,7 +310,7 @@ SYSCALL_DEFINE3(getdents64, unsigned int, fd,
56513 error = buf.error;
56514 lastdirent = buf.previous;
56515 if (lastdirent) {
56516- typeof(lastdirent->d_off) d_off = f.file->f_pos;
56517+ typeof(((struct linux_dirent64 *)0)->d_off) d_off = f.file->f_pos;
56518 if (__put_user(d_off, &lastdirent->d_off))
56519 error = -EFAULT;
56520 else
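
The typeof change deserves a note: typeof(((struct linux_dirent64 *)0)->d_off) yields the identical type to typeof(lastdirent->d_off) without mentioning the __user-qualified lastdirent, which appears to be what the size_overflow/sparse tooling shipped with this patch set objects to. The null-pointer member-type trick is a reusable idiom (illustrative macro and stand-in struct, not added by the patch):

/* typeof-through-null: name a struct member's type with no object. */
#define member_type(type, member) typeof(((type *)0)->member)

/* usage, with a stand-in struct: */
struct dirent_like { long long d_off; };
static member_type(struct dirent_like, d_off) saved_off = 0;
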
56521diff --git a/fs/reiserfs/do_balan.c b/fs/reiserfs/do_balan.c
56522index 2b7882b..1c5ef48 100644
56523--- a/fs/reiserfs/do_balan.c
56524+++ b/fs/reiserfs/do_balan.c
56525@@ -2051,7 +2051,7 @@ void do_balance(struct tree_balance *tb, /* tree_balance structure */
56526 return;
56527 }
56528
56529- atomic_inc(&(fs_generation(tb->tb_sb)));
56530+ atomic_inc_unchecked(&(fs_generation(tb->tb_sb)));
56531 do_balance_starts(tb);
56532
56533 /* balance leaf returns 0 except if combining L R and S into
56534diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
56535index 33532f7..4846ade 100644
56536--- a/fs/reiserfs/procfs.c
56537+++ b/fs/reiserfs/procfs.c
56538@@ -112,7 +112,7 @@ static int show_super(struct seq_file *m, struct super_block *sb)
56539 "SMALL_TAILS " : "NO_TAILS ",
56540 replay_only(sb) ? "REPLAY_ONLY " : "",
56541 convert_reiserfs(sb) ? "CONV " : "",
56542- atomic_read(&r->s_generation_counter),
56543+ atomic_read_unchecked(&r->s_generation_counter),
56544 SF(s_disk_reads), SF(s_disk_writes), SF(s_fix_nodes),
56545 SF(s_do_balance), SF(s_unneeded_left_neighbor),
56546 SF(s_good_search_by_key_reada), SF(s_bmaps),
56547diff --git a/fs/reiserfs/reiserfs.h b/fs/reiserfs/reiserfs.h
56548index 157e474..65a6114 100644
56549--- a/fs/reiserfs/reiserfs.h
56550+++ b/fs/reiserfs/reiserfs.h
56551@@ -453,7 +453,7 @@ struct reiserfs_sb_info {
56552 /* Comment? -Hans */
56553 wait_queue_head_t s_wait;
56554 /* To be obsoleted soon by per buffer seals.. -Hans */
56555- atomic_t s_generation_counter; // increased by one every time the
56556+ atomic_unchecked_t s_generation_counter; // increased by one every time the
56557 // tree gets re-balanced
56558 unsigned long s_properties; /* File system properties. Currently holds
56559 on-disk FS format */
56560@@ -1978,7 +1978,7 @@ static inline loff_t max_reiserfs_offset(struct inode *inode)
56561 #define REISERFS_USER_MEM 1 /* reiserfs user memory mode */
56562
56563 #define fs_generation(s) (REISERFS_SB(s)->s_generation_counter)
56564-#define get_generation(s) atomic_read (&fs_generation(s))
56565+#define get_generation(s) atomic_read_unchecked (&fs_generation(s))
56566 #define FILESYSTEM_CHANGED_TB(tb) (get_generation((tb)->tb_sb) != (tb)->fs_gen)
56567 #define __fs_changed(gen,s) (gen != get_generation (s))
56568 #define fs_changed(gen,s) \
56569diff --git a/fs/select.c b/fs/select.c
56570index 8c1c96c..a0f9b6d 100644
56571--- a/fs/select.c
56572+++ b/fs/select.c
56573@@ -20,6 +20,7 @@
56574 #include <linux/export.h>
56575 #include <linux/slab.h>
56576 #include <linux/poll.h>
56577+#include <linux/security.h>
56578 #include <linux/personality.h> /* for STICKY_TIMEOUTS */
56579 #include <linux/file.h>
56580 #include <linux/fdtable.h>
56581@@ -827,6 +828,7 @@ int do_sys_poll(struct pollfd __user *ufds, unsigned int nfds,
56582 struct poll_list *walk = head;
56583 unsigned long todo = nfds;
56584
56585+ gr_learn_resource(current, RLIMIT_NOFILE, nfds, 1);
56586 if (nfds > rlimit(RLIMIT_NOFILE))
56587 return -EINVAL;
56588
56589diff --git a/fs/seq_file.c b/fs/seq_file.c
56590index 774c1eb..b67582a 100644
56591--- a/fs/seq_file.c
56592+++ b/fs/seq_file.c
56593@@ -10,6 +10,7 @@
56594 #include <linux/seq_file.h>
56595 #include <linux/slab.h>
56596 #include <linux/cred.h>
56597+#include <linux/sched.h>
56598
56599 #include <asm/uaccess.h>
56600 #include <asm/page.h>
56601@@ -60,6 +61,9 @@ int seq_open(struct file *file, const struct seq_operations *op)
56602 #ifdef CONFIG_USER_NS
56603 p->user_ns = file->f_cred->user_ns;
56604 #endif
56605+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
56606+ p->exec_id = current->exec_id;
56607+#endif
56608
56609 /*
56610 * Wrappers around seq_open(e.g. swaps_open) need to be
56611@@ -96,7 +100,7 @@ static int traverse(struct seq_file *m, loff_t offset)
56612 return 0;
56613 }
56614 if (!m->buf) {
56615- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
56616+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
56617 if (!m->buf)
56618 return -ENOMEM;
56619 }
56620@@ -136,7 +140,7 @@ static int traverse(struct seq_file *m, loff_t offset)
56621 Eoverflow:
56622 m->op->stop(m, p);
56623 kfree(m->buf);
56624- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
56625+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
56626 return !m->buf ? -ENOMEM : -EAGAIN;
56627 }
56628
56629@@ -191,7 +195,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
56630
56631 /* grab buffer if we didn't have one */
56632 if (!m->buf) {
56633- m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL);
56634+ m->buf = kmalloc(m->size = PAGE_SIZE, GFP_KERNEL | GFP_USERCOPY);
56635 if (!m->buf)
56636 goto Enomem;
56637 }
56638@@ -232,7 +236,7 @@ ssize_t seq_read(struct file *file, char __user *buf, size_t size, loff_t *ppos)
56639 goto Fill;
56640 m->op->stop(m, p);
56641 kfree(m->buf);
56642- m->buf = kmalloc(m->size <<= 1, GFP_KERNEL);
56643+ m->buf = kmalloc(m->size <<= 1, GFP_KERNEL | GFP_USERCOPY);
56644 if (!m->buf)
56645 goto Enomem;
56646 m->count = 0;
56647@@ -581,7 +585,7 @@ static void single_stop(struct seq_file *p, void *v)
56648 int single_open(struct file *file, int (*show)(struct seq_file *, void *),
56649 void *data)
56650 {
56651- struct seq_operations *op = kmalloc(sizeof(*op), GFP_KERNEL);
56652+ seq_operations_no_const *op = kzalloc(sizeof(*op), GFP_KERNEL);
56653 int res = -ENOMEM;
56654
56655 if (op) {
56656diff --git a/fs/splice.c b/fs/splice.c
56657index d37431d..81c3044 100644
56658--- a/fs/splice.c
56659+++ b/fs/splice.c
56660@@ -196,7 +196,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
56661 pipe_lock(pipe);
56662
56663 for (;;) {
56664- if (!pipe->readers) {
56665+ if (!atomic_read(&pipe->readers)) {
56666 send_sig(SIGPIPE, current, 0);
56667 if (!ret)
56668 ret = -EPIPE;
56669@@ -219,7 +219,7 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
56670 page_nr++;
56671 ret += buf->len;
56672
56673- if (pipe->files)
56674+ if (atomic_read(&pipe->files))
56675 do_wakeup = 1;
56676
56677 if (!--spd->nr_pages)
56678@@ -250,9 +250,9 @@ ssize_t splice_to_pipe(struct pipe_inode_info *pipe,
56679 do_wakeup = 0;
56680 }
56681
56682- pipe->waiting_writers++;
56683+ atomic_inc(&pipe->waiting_writers);
56684 pipe_wait(pipe);
56685- pipe->waiting_writers--;
56686+ atomic_dec(&pipe->waiting_writers);
56687 }
56688
56689 pipe_unlock(pipe);
56690@@ -565,7 +565,7 @@ static ssize_t kernel_readv(struct file *file, const struct iovec *vec,
56691 old_fs = get_fs();
56692 set_fs(get_ds());
56693 /* The cast to a user pointer is valid due to the set_fs() */
56694- res = vfs_readv(file, (const struct iovec __user *)vec, vlen, &pos);
56695+ res = vfs_readv(file, (const struct iovec __force_user *)vec, vlen, &pos);
56696 set_fs(old_fs);
56697
56698 return res;
56699@@ -580,7 +580,7 @@ ssize_t kernel_write(struct file *file, const char *buf, size_t count,
56700 old_fs = get_fs();
56701 set_fs(get_ds());
56702 /* The cast to a user pointer is valid due to the set_fs() */
56703- res = vfs_write(file, (__force const char __user *)buf, count, &pos);
56704+ res = vfs_write(file, (const char __force_user *)buf, count, &pos);
56705 set_fs(old_fs);
56706
56707 return res;
56708@@ -633,7 +633,7 @@ ssize_t default_file_splice_read(struct file *in, loff_t *ppos,
56709 goto err;
56710
56711 this_len = min_t(size_t, len, PAGE_CACHE_SIZE - offset);
56712- vec[i].iov_base = (void __user *) page_address(page);
56713+ vec[i].iov_base = (void __force_user *) page_address(page);
56714 vec[i].iov_len = this_len;
56715 spd.pages[i] = page;
56716 spd.nr_pages++;
56717@@ -829,7 +829,7 @@ int splice_from_pipe_feed(struct pipe_inode_info *pipe, struct splice_desc *sd,
56718 ops->release(pipe, buf);
56719 pipe->curbuf = (pipe->curbuf + 1) & (pipe->buffers - 1);
56720 pipe->nrbufs--;
56721- if (pipe->files)
56722+ if (atomic_read(&pipe->files))
56723 sd->need_wakeup = true;
56724 }
56725
56726@@ -854,10 +854,10 @@ EXPORT_SYMBOL(splice_from_pipe_feed);
56727 int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_desc *sd)
56728 {
56729 while (!pipe->nrbufs) {
56730- if (!pipe->writers)
56731+ if (!atomic_read(&pipe->writers))
56732 return 0;
56733
56734- if (!pipe->waiting_writers && sd->num_spliced)
56735+ if (!atomic_read(&pipe->waiting_writers) && sd->num_spliced)
56736 return 0;
56737
56738 if (sd->flags & SPLICE_F_NONBLOCK)
56739@@ -1193,7 +1193,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
56740 * out of the pipe right after the splice_to_pipe(). So set
56741 * PIPE_READERS appropriately.
56742 */
56743- pipe->readers = 1;
56744+ atomic_set(&pipe->readers, 1);
56745
56746 current->splice_pipe = pipe;
56747 }
56748@@ -1769,9 +1769,9 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
56749 ret = -ERESTARTSYS;
56750 break;
56751 }
56752- if (!pipe->writers)
56753+ if (!atomic_read(&pipe->writers))
56754 break;
56755- if (!pipe->waiting_writers) {
56756+ if (!atomic_read(&pipe->waiting_writers)) {
56757 if (flags & SPLICE_F_NONBLOCK) {
56758 ret = -EAGAIN;
56759 break;
56760@@ -1803,7 +1803,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
56761 pipe_lock(pipe);
56762
56763 while (pipe->nrbufs >= pipe->buffers) {
56764- if (!pipe->readers) {
56765+ if (!atomic_read(&pipe->readers)) {
56766 send_sig(SIGPIPE, current, 0);
56767 ret = -EPIPE;
56768 break;
56769@@ -1816,9 +1816,9 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags)
56770 ret = -ERESTARTSYS;
56771 break;
56772 }
56773- pipe->waiting_writers++;
56774+ atomic_inc(&pipe->waiting_writers);
56775 pipe_wait(pipe);
56776- pipe->waiting_writers--;
56777+ atomic_dec(&pipe->waiting_writers);
56778 }
56779
56780 pipe_unlock(pipe);
56781@@ -1854,14 +1854,14 @@ retry:
56782 pipe_double_lock(ipipe, opipe);
56783
56784 do {
56785- if (!opipe->readers) {
56786+ if (!atomic_read(&opipe->readers)) {
56787 send_sig(SIGPIPE, current, 0);
56788 if (!ret)
56789 ret = -EPIPE;
56790 break;
56791 }
56792
56793- if (!ipipe->nrbufs && !ipipe->writers)
56794+ if (!ipipe->nrbufs && !atomic_read(&ipipe->writers))
56795 break;
56796
56797 /*
56798@@ -1958,7 +1958,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
56799 pipe_double_lock(ipipe, opipe);
56800
56801 do {
56802- if (!opipe->readers) {
56803+ if (!atomic_read(&opipe->readers)) {
56804 send_sig(SIGPIPE, current, 0);
56805 if (!ret)
56806 ret = -EPIPE;
56807@@ -2003,7 +2003,7 @@ static int link_pipe(struct pipe_inode_info *ipipe,
56808 * return EAGAIN if we have the potential of some data in the
56809 * future, otherwise just return 0
56810 */
56811- if (!ret && ipipe->waiting_writers && (flags & SPLICE_F_NONBLOCK))
56812+ if (!ret && atomic_read(&ipipe->waiting_writers) && (flags & SPLICE_F_NONBLOCK))
56813 ret = -EAGAIN;
56814
56815 pipe_unlock(ipipe);
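The fs/splice.c hunks above all apply a single hardening pattern: the pipe reader/writer counters move from plain integers to atomic_t, so the PaX REFCOUNT instrumentation can trap on overflow and all reads and updates go through the atomic_* API. A minimal sketch of the before/after access pattern (illustrative only; field names follow struct pipe_inode_info):

	/* before: plain integer counters, serialized only by pipe_lock() */
	pipe->waiting_writers++;
	pipe_wait(pipe);
	pipe->waiting_writers--;

	/* after: atomic counters */
	atomic_inc(&pipe->waiting_writers);
	pipe_wait(pipe);
	atomic_dec(&pipe->waiting_writers);

	if (!atomic_read(&pipe->writers))	/* was: if (!pipe->writers) */
		return 0;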
56816diff --git a/fs/stat.c b/fs/stat.c
56817index 04ce1ac..a13dd1e 100644
56818--- a/fs/stat.c
56819+++ b/fs/stat.c
56820@@ -28,8 +28,13 @@ void generic_fillattr(struct inode *inode, struct kstat *stat)
56821 stat->gid = inode->i_gid;
56822 stat->rdev = inode->i_rdev;
56823 stat->size = i_size_read(inode);
56824- stat->atime = inode->i_atime;
56825- stat->mtime = inode->i_mtime;
56826+ if (is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
56827+ stat->atime = inode->i_ctime;
56828+ stat->mtime = inode->i_ctime;
56829+ } else {
56830+ stat->atime = inode->i_atime;
56831+ stat->mtime = inode->i_mtime;
56832+ }
56833 stat->ctime = inode->i_ctime;
56834 stat->blksize = (1 << inode->i_blkbits);
56835 stat->blocks = inode->i_blocks;
56836@@ -46,8 +51,14 @@ int vfs_getattr(struct path *path, struct kstat *stat)
56837 if (retval)
56838 return retval;
56839
56840- if (inode->i_op->getattr)
56841- return inode->i_op->getattr(path->mnt, path->dentry, stat);
56842+ if (inode->i_op->getattr) {
56843+ retval = inode->i_op->getattr(path->mnt, path->dentry, stat);
56844+ if (!retval && is_sidechannel_device(inode) && !capable_nolog(CAP_MKNOD)) {
56845+ stat->atime = stat->ctime;
56846+ stat->mtime = stat->ctime;
56847+ }
56848+ return retval;
56849+ }
56850
56851 generic_fillattr(inode, stat);
56852 return 0;
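The fs/stat.c hunks implement the GRKERNSEC_DEVICE_SIDECHANNEL feature described in the Kconfig entry later in this patch: for callers lacking CAP_MKNOD, a device node's atime and mtime are reported as its ctime, defeating stat-based timing inference on devices such as /dev/ptmx. A small userspace sketch of the observable effect (illustrative; assumes the feature is enabled):

	#include <stdio.h>
	#include <sys/stat.h>

	int main(void)
	{
		struct stat st;

		/* without CAP_MKNOD, atime and mtime should mirror ctime */
		if (stat("/dev/ptmx", &st) == 0)
			printf("atime==ctime: %d, mtime==ctime: %d\n",
			       st.st_atime == st.st_ctime,
			       st.st_mtime == st.st_ctime);
		return 0;
	}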
56853diff --git a/fs/super.c b/fs/super.c
56854index 7465d43..68307c0 100644
56855--- a/fs/super.c
56856+++ b/fs/super.c
56857@@ -336,19 +336,19 @@ EXPORT_SYMBOL(deactivate_super);
56858 * and want to turn it into a full-blown active reference. grab_super()
56859 * is called with sb_lock held and drops it. Returns 1 in case of
56860 * success, 0 if we had failed (superblock contents was already dead or
56861- * dying when grab_super() had been called).
56862+ * dying when grab_super() had been called). Note that this is only
56863+ * called for superblocks not in rundown mode (== ones still on ->fs_supers
56864+ * of their type), so increment of ->s_count is OK here.
56865 */
56866 static int grab_super(struct super_block *s) __releases(sb_lock)
56867 {
56868- if (atomic_inc_not_zero(&s->s_active)) {
56869- spin_unlock(&sb_lock);
56870- return 1;
56871- }
56872- /* it's going away */
56873 s->s_count++;
56874 spin_unlock(&sb_lock);
56875- /* wait for it to die */
56876 down_write(&s->s_umount);
56877+ if ((s->s_flags & MS_BORN) && atomic_inc_not_zero(&s->s_active)) {
56878+ put_super(s);
56879+ return 1;
56880+ }
56881 up_write(&s->s_umount);
56882 put_super(s);
56883 return 0;
56884@@ -463,11 +463,6 @@ retry:
56885 destroy_super(s);
56886 s = NULL;
56887 }
56888- down_write(&old->s_umount);
56889- if (unlikely(!(old->s_flags & MS_BORN))) {
56890- deactivate_locked_super(old);
56891- goto retry;
56892- }
56893 return old;
56894 }
56895 }
56896@@ -660,10 +655,10 @@ restart:
56897 if (hlist_unhashed(&sb->s_instances))
56898 continue;
56899 if (sb->s_bdev == bdev) {
56900- if (grab_super(sb)) /* drops sb_lock */
56901- return sb;
56902- else
56903+ if (!grab_super(sb))
56904 goto restart;
56905+ up_write(&sb->s_umount);
56906+ return sb;
56907 }
56908 }
56909 spin_unlock(&sb_lock);
56910diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
56911index 15c68f9..36a8b3e 100644
56912--- a/fs/sysfs/bin.c
56913+++ b/fs/sysfs/bin.c
56914@@ -235,13 +235,13 @@ static int bin_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf)
56915 return ret;
56916 }
56917
56918-static int bin_access(struct vm_area_struct *vma, unsigned long addr,
56919- void *buf, int len, int write)
56920+static ssize_t bin_access(struct vm_area_struct *vma, unsigned long addr,
56921+ void *buf, size_t len, int write)
56922 {
56923 struct file *file = vma->vm_file;
56924 struct bin_buffer *bb = file->private_data;
56925 struct sysfs_dirent *attr_sd = file->f_path.dentry->d_fsdata;
56926- int ret;
56927+ ssize_t ret;
56928
56929 if (!bb->vm_ops)
56930 return -EINVAL;
56931diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
56932index e8e0e71..79c28ac5 100644
56933--- a/fs/sysfs/dir.c
56934+++ b/fs/sysfs/dir.c
56935@@ -40,7 +40,7 @@ static DEFINE_IDA(sysfs_ino_ida);
56936 *
56937 * Returns 31 bit hash of ns + name (so it fits in an off_t )
56938 */
56939-static unsigned int sysfs_name_hash(const void *ns, const char *name)
56940+static unsigned int sysfs_name_hash(const void *ns, const unsigned char *name)
56941 {
56942 unsigned long hash = init_name_hash();
56943 unsigned int len = strlen(name);
56944@@ -679,6 +679,18 @@ static int create_dir(struct kobject *kobj, struct sysfs_dirent *parent_sd,
56945 struct sysfs_dirent *sd;
56946 int rc;
56947
56948+#ifdef CONFIG_GRKERNSEC_SYSFS_RESTRICT
56949+ const char *parent_name = parent_sd->s_name;
56950+
56951+ mode = S_IFDIR | S_IRWXU;
56952+
56953+ if ((!strcmp(parent_name, "") && (!strcmp(name, "devices") || !strcmp(name, "fs"))) ||
56954+ (!strcmp(parent_name, "devices") && !strcmp(name, "system")) ||
56955+ (!strcmp(parent_name, "fs") && (!strcmp(name, "selinux") || !strcmp(name, "fuse") || !strcmp(name, "ecryptfs"))) ||
56956+ (!strcmp(parent_name, "system") && !strcmp(name, "cpu")))
56957+ mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;
56958+#endif
56959+
56960 /* allocate */
56961 sd = sysfs_new_dirent(name, mode, SYSFS_DIR);
56962 if (!sd)
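Under GRKERNSEC_SYSFS_RESTRICT, the create_dir() hunk above defaults every new sysfs directory to owner-only access and re-opens a small whitelist for compatibility (the same directories listed in the feature's Kconfig help text later in this patch). In permission terms (comment sketch only):

	/* default for new /sys directories under GRKERNSEC_SYSFS_RESTRICT */
	mode = S_IFDIR | S_IRWXU;			/* drwx------ (0700) */

	/* whitelisted back to world-readable: /sys/devices, /sys/fs,
	 * /sys/devices/system, /sys/fs/{selinux,fuse,ecryptfs},
	 * /sys/devices/system/cpu */
	mode = S_IFDIR | S_IRWXU | S_IRUGO | S_IXUGO;	/* drwxr-xr-x (0755) */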
56963diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
56964index 602f56d..6853db8 100644
56965--- a/fs/sysfs/file.c
56966+++ b/fs/sysfs/file.c
56967@@ -37,7 +37,7 @@ static DEFINE_SPINLOCK(sysfs_open_dirent_lock);
56968
56969 struct sysfs_open_dirent {
56970 atomic_t refcnt;
56971- atomic_t event;
56972+ atomic_unchecked_t event;
56973 wait_queue_head_t poll;
56974 struct list_head buffers; /* goes through sysfs_buffer.list */
56975 };
56976@@ -81,7 +81,7 @@ static int fill_read_buffer(struct dentry * dentry, struct sysfs_buffer * buffer
56977 if (!sysfs_get_active(attr_sd))
56978 return -ENODEV;
56979
56980- buffer->event = atomic_read(&attr_sd->s_attr.open->event);
56981+ buffer->event = atomic_read_unchecked(&attr_sd->s_attr.open->event);
56982 count = ops->show(kobj, attr_sd->s_attr.attr, buffer->page);
56983
56984 sysfs_put_active(attr_sd);
56985@@ -287,7 +287,7 @@ static int sysfs_get_open_dirent(struct sysfs_dirent *sd,
56986 return -ENOMEM;
56987
56988 atomic_set(&new_od->refcnt, 0);
56989- atomic_set(&new_od->event, 1);
56990+ atomic_set_unchecked(&new_od->event, 1);
56991 init_waitqueue_head(&new_od->poll);
56992 INIT_LIST_HEAD(&new_od->buffers);
56993 goto retry;
56994@@ -432,7 +432,7 @@ static unsigned int sysfs_poll(struct file *filp, poll_table *wait)
56995
56996 sysfs_put_active(attr_sd);
56997
56998- if (buffer->event != atomic_read(&od->event))
56999+ if (buffer->event != atomic_read_unchecked(&od->event))
57000 goto trigger;
57001
57002 return DEFAULT_POLLMASK;
57003@@ -451,7 +451,7 @@ void sysfs_notify_dirent(struct sysfs_dirent *sd)
57004
57005 od = sd->s_attr.open;
57006 if (od) {
57007- atomic_inc(&od->event);
57008+ atomic_inc_unchecked(&od->event);
57009 wake_up_interruptible(&od->poll);
57010 }
57011
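atomic_unchecked_t, used above for the sysfs poll event counter, is the opt-out type that accompanies PaX REFCOUNT: plain atomic_t operations are instrumented to trap on overflow, while the *_unchecked variants are reserved for counters where wraparound is harmless (an event sequence number here, not an object refcount). A simplified sketch of the assumed shape (the real, architecture-specific definitions are added elsewhere in this patch):

	typedef struct {
		int counter;
	} atomic_unchecked_t;

	/* like atomic_inc()/atomic_read(), minus the overflow trap */
	static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
	{
		v->counter++;
	}

	static inline int atomic_read_unchecked(const atomic_unchecked_t *v)
	{
		return v->counter;
	}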
57012diff --git a/fs/sysfs/symlink.c b/fs/sysfs/symlink.c
57013index 8c940df..25b733e 100644
57014--- a/fs/sysfs/symlink.c
57015+++ b/fs/sysfs/symlink.c
57016@@ -305,7 +305,7 @@ static void *sysfs_follow_link(struct dentry *dentry, struct nameidata *nd)
57017
57018 static void sysfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
57019 {
57020- char *page = nd_get_link(nd);
57021+ const char *page = nd_get_link(nd);
57022 if (!IS_ERR(page))
57023 free_page((unsigned long)page);
57024 }
57025diff --git a/fs/sysv/sysv.h b/fs/sysv/sysv.h
57026index 69d4889..a810bd4 100644
57027--- a/fs/sysv/sysv.h
57028+++ b/fs/sysv/sysv.h
57029@@ -188,7 +188,7 @@ static inline u32 PDP_swab(u32 x)
57030 #endif
57031 }
57032
57033-static inline __u32 fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
57034+static inline __u32 __intentional_overflow(-1) fs32_to_cpu(struct sysv_sb_info *sbi, __fs32 n)
57035 {
57036 if (sbi->s_bytesex == BYTESEX_PDP)
57037 return PDP_swab((__force __u32)n);
57038diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
57039index e18b988..f1d4ad0f 100644
57040--- a/fs/ubifs/io.c
57041+++ b/fs/ubifs/io.c
57042@@ -155,7 +155,7 @@ int ubifs_leb_change(struct ubifs_info *c, int lnum, const void *buf, int len)
57043 return err;
57044 }
57045
57046-int ubifs_leb_unmap(struct ubifs_info *c, int lnum)
57047+int __intentional_overflow(-1) ubifs_leb_unmap(struct ubifs_info *c, int lnum)
57048 {
57049 int err;
57050
57051diff --git a/fs/udf/misc.c b/fs/udf/misc.c
57052index c175b4d..8f36a16 100644
57053--- a/fs/udf/misc.c
57054+++ b/fs/udf/misc.c
57055@@ -289,7 +289,7 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
57056
57057 u8 udf_tag_checksum(const struct tag *t)
57058 {
57059- u8 *data = (u8 *)t;
57060+ const u8 *data = (const u8 *)t;
57061 u8 checksum = 0;
57062 int i;
57063 for (i = 0; i < sizeof(struct tag); ++i)
57064diff --git a/fs/ufs/swab.h b/fs/ufs/swab.h
57065index 8d974c4..b82f6ec 100644
57066--- a/fs/ufs/swab.h
57067+++ b/fs/ufs/swab.h
57068@@ -22,7 +22,7 @@ enum {
57069 BYTESEX_BE
57070 };
57071
57072-static inline u64
57073+static inline u64 __intentional_overflow(-1)
57074 fs64_to_cpu(struct super_block *sbp, __fs64 n)
57075 {
57076 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
57077@@ -40,7 +40,7 @@ cpu_to_fs64(struct super_block *sbp, u64 n)
57078 return (__force __fs64)cpu_to_be64(n);
57079 }
57080
57081-static inline u32
57082+static inline u32 __intentional_overflow(-1)
57083 fs32_to_cpu(struct super_block *sbp, __fs32 n)
57084 {
57085 if (UFS_SB(sbp)->s_bytesex == BYTESEX_LE)
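The __intentional_overflow(-1) annotations added to the fsXX_to_cpu()-style helpers in fs/sysv, fs/ubifs, and fs/ufs above are consumed by the size_overflow GCC plugin: they exempt the marked function's arithmetic (and the use of its return value) from the plugin's overflow instrumentation, since wrapping is expected there. A sketch of the assumed macro definition and its fallback when the plugin is disabled:

	#ifdef SIZE_OVERFLOW_PLUGIN
	#define __intentional_overflow(...) \
		__attribute__((intentional_overflow(__VA_ARGS__)))
	#else
	#define __intentional_overflow(...)
	#endif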
57086diff --git a/fs/utimes.c b/fs/utimes.c
57087index f4fb7ec..3fe03c0 100644
57088--- a/fs/utimes.c
57089+++ b/fs/utimes.c
57090@@ -1,6 +1,7 @@
57091 #include <linux/compiler.h>
57092 #include <linux/file.h>
57093 #include <linux/fs.h>
57094+#include <linux/security.h>
57095 #include <linux/linkage.h>
57096 #include <linux/mount.h>
57097 #include <linux/namei.h>
57098@@ -101,6 +102,12 @@ static int utimes_common(struct path *path, struct timespec *times)
57099 goto mnt_drop_write_and_out;
57100 }
57101 }
57102+
57103+ if (!gr_acl_handle_utime(path->dentry, path->mnt)) {
57104+ error = -EACCES;
57105+ goto mnt_drop_write_and_out;
57106+ }
57107+
57108 mutex_lock(&inode->i_mutex);
57109 error = notify_change(path->dentry, &newattrs);
57110 mutex_unlock(&inode->i_mutex);
57111diff --git a/fs/xattr.c b/fs/xattr.c
57112index 3377dff..4d074d9 100644
57113--- a/fs/xattr.c
57114+++ b/fs/xattr.c
57115@@ -227,6 +227,27 @@ int vfs_xattr_cmp(struct dentry *dentry, const char *xattr_name,
57116 return rc;
57117 }
57118
57119+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
57120+ssize_t
57121+pax_getxattr(struct dentry *dentry, void *value, size_t size)
57122+{
57123+ struct inode *inode = dentry->d_inode;
57124+ ssize_t error;
57125+
57126+ error = inode_permission(inode, MAY_EXEC);
57127+ if (error)
57128+ return error;
57129+
57130+ if (inode->i_op->getxattr)
57131+ error = inode->i_op->getxattr(dentry, XATTR_NAME_PAX_FLAGS, value, size);
57132+ else
57133+ error = -EOPNOTSUPP;
57134+
57135+ return error;
57136+}
57137+EXPORT_SYMBOL(pax_getxattr);
57138+#endif
57139+
57140 ssize_t
57141 vfs_getxattr(struct dentry *dentry, const char *name, void *value, size_t size)
57142 {
57143@@ -319,7 +340,7 @@ EXPORT_SYMBOL_GPL(vfs_removexattr);
57144 * Extended attribute SET operations
57145 */
57146 static long
57147-setxattr(struct dentry *d, const char __user *name, const void __user *value,
57148+setxattr(struct path *path, const char __user *name, const void __user *value,
57149 size_t size, int flags)
57150 {
57151 int error;
57152@@ -355,7 +376,12 @@ setxattr(struct dentry *d, const char __user *name, const void __user *value,
57153 posix_acl_fix_xattr_from_user(kvalue, size);
57154 }
57155
57156- error = vfs_setxattr(d, kname, kvalue, size, flags);
57157+ if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
57158+ error = -EACCES;
57159+ goto out;
57160+ }
57161+
57162+ error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);
57163 out:
57164 if (vvalue)
57165 vfree(vvalue);
57166@@ -377,7 +403,7 @@ retry:
57167 return error;
57168 error = mnt_want_write(path.mnt);
57169 if (!error) {
57170- error = setxattr(path.dentry, name, value, size, flags);
57171+ error = setxattr(&path, name, value, size, flags);
57172 mnt_drop_write(path.mnt);
57173 }
57174 path_put(&path);
57175@@ -401,7 +427,7 @@ retry:
57176 return error;
57177 error = mnt_want_write(path.mnt);
57178 if (!error) {
57179- error = setxattr(path.dentry, name, value, size, flags);
57180+ error = setxattr(&path, name, value, size, flags);
57181 mnt_drop_write(path.mnt);
57182 }
57183 path_put(&path);
57184@@ -416,16 +442,14 @@ SYSCALL_DEFINE5(fsetxattr, int, fd, const char __user *, name,
57185 const void __user *,value, size_t, size, int, flags)
57186 {
57187 struct fd f = fdget(fd);
57188- struct dentry *dentry;
57189 int error = -EBADF;
57190
57191 if (!f.file)
57192 return error;
57193- dentry = f.file->f_path.dentry;
57194- audit_inode(NULL, dentry, 0);
57195+ audit_inode(NULL, f.file->f_path.dentry, 0);
57196 error = mnt_want_write_file(f.file);
57197 if (!error) {
57198- error = setxattr(dentry, name, value, size, flags);
57199+ error = setxattr(&f.file->f_path, name, value, size, flags);
57200 mnt_drop_write_file(f.file);
57201 }
57202 fdput(f);
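The setxattr() refactor above switches the helper's first argument from a bare dentry to a struct path: the RBAC hook needs both the dentry and the vfsmount to resolve the full pathname that policy entries are keyed on. The resulting gate pattern, as used in the hunk (gr_acl_handle_setxattr() is defined elsewhere in this patch and returns nonzero when the operation is permitted):

	if (!gr_acl_handle_setxattr(path->dentry, path->mnt)) {
		error = -EACCES;	/* RBAC policy denied the write */
		goto out;
	}
	error = vfs_setxattr(path->dentry, kname, kvalue, size, flags);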
57203diff --git a/fs/xattr_acl.c b/fs/xattr_acl.c
57204index 9fbea87..6b19972 100644
57205--- a/fs/xattr_acl.c
57206+++ b/fs/xattr_acl.c
57207@@ -76,8 +76,8 @@ struct posix_acl *
57208 posix_acl_from_xattr(struct user_namespace *user_ns,
57209 const void *value, size_t size)
57210 {
57211- posix_acl_xattr_header *header = (posix_acl_xattr_header *)value;
57212- posix_acl_xattr_entry *entry = (posix_acl_xattr_entry *)(header+1), *end;
57213+ const posix_acl_xattr_header *header = (const posix_acl_xattr_header *)value;
57214+ const posix_acl_xattr_entry *entry = (const posix_acl_xattr_entry *)(header+1), *end;
57215 int count;
57216 struct posix_acl *acl;
57217 struct posix_acl_entry *acl_e;
57218diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
57219index 8904284..ee0e14b 100644
57220--- a/fs/xfs/xfs_bmap.c
57221+++ b/fs/xfs/xfs_bmap.c
57222@@ -765,7 +765,7 @@ xfs_bmap_validate_ret(
57223
57224 #else
57225 #define xfs_bmap_check_leaf_extents(cur, ip, whichfork) do { } while (0)
57226-#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap)
57227+#define xfs_bmap_validate_ret(bno,len,flags,mval,onmap,nmap) do { } while (0)
57228 #endif /* DEBUG */
57229
57230 /*
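The one-line xfs_bmap.c change fixes a classic empty-macro pitfall: in !DEBUG builds, xfs_bmap_validate_ret() expanded to nothing, so a call used as the sole body of an if became a bare empty statement (triggering -Wempty-body style warnings and making a stray semicolon easy to miss); do { } while (0) keeps the expansion a single well-formed statement that still consumes its trailing semicolon. Illustrative:

	#define validate_bad(x)				/* expands to nothing */
	#define validate_good(x) do { } while (0)	/* proper null statement */

	if (error)
		validate_bad(ret);	/* becomes "if (error) ;" */

	if (error)
		validate_good(ret);	/* behaves like any other statement */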
57231diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
57232index 6157424..ac98f6d 100644
57233--- a/fs/xfs/xfs_dir2_sf.c
57234+++ b/fs/xfs/xfs_dir2_sf.c
57235@@ -851,7 +851,15 @@ xfs_dir2_sf_getdents(
57236 }
57237
57238 ino = xfs_dir2_sfe_get_ino(sfp, sfep);
57239- if (filldir(dirent, (char *)sfep->name, sfep->namelen,
57240+ if (dp->i_df.if_u1.if_data == dp->i_df.if_u2.if_inline_data) {
57241+ char name[sfep->namelen];
57242+ memcpy(name, sfep->name, sfep->namelen);
57243+ if (filldir(dirent, name, sfep->namelen,
57244+ off & 0x7fffffff, ino, DT_UNKNOWN)) {
57245+ *offset = off & 0x7fffffff;
57246+ return 0;
57247+ }
57248+ } else if (filldir(dirent, (char *)sfep->name, sfep->namelen,
57249 off & 0x7fffffff, ino, DT_UNKNOWN)) {
57250 *offset = off & 0x7fffffff;
57251 return 0;
57252diff --git a/fs/xfs/xfs_ioctl.c b/fs/xfs/xfs_ioctl.c
57253index 5e99968..45bd327 100644
57254--- a/fs/xfs/xfs_ioctl.c
57255+++ b/fs/xfs/xfs_ioctl.c
57256@@ -127,7 +127,7 @@ xfs_find_handle(
57257 }
57258
57259 error = -EFAULT;
57260- if (copy_to_user(hreq->ohandle, &handle, hsize) ||
57261+ if (hsize > sizeof handle || copy_to_user(hreq->ohandle, &handle, hsize) ||
57262 copy_to_user(hreq->ohandlen, &hsize, sizeof(__s32)))
57263 goto out_put;
57264
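The xfs_find_handle() change adds a bound check ahead of copy_to_user(): hsize is derived from caller-controlled handle length fields, so clamping it to sizeof(handle) prevents copying kernel stack bytes beyond the on-stack xfs_handle_t out to userspace. The generic shape of the guard, as a hypothetical helper for illustration only:

	/* hypothetical: copy out at most 'max' bytes of 'src' */
	static long copy_out_bounded(void __user *dst, const void *src,
				     size_t len, size_t max)
	{
		if (len > max)
			return -EFAULT;	/* refuse rather than leak stack memory */
		return copy_to_user(dst, src, len) ? -EFAULT : 0;
	}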
57265diff --git a/fs/xfs/xfs_iops.c b/fs/xfs/xfs_iops.c
57266index ca9ecaa..60100c7 100644
57267--- a/fs/xfs/xfs_iops.c
57268+++ b/fs/xfs/xfs_iops.c
57269@@ -395,7 +395,7 @@ xfs_vn_put_link(
57270 struct nameidata *nd,
57271 void *p)
57272 {
57273- char *s = nd_get_link(nd);
57274+ const char *s = nd_get_link(nd);
57275
57276 if (!IS_ERR(s))
57277 kfree(s);
57278diff --git a/grsecurity/Kconfig b/grsecurity/Kconfig
57279new file mode 100644
57280index 0000000..c9c4ac3
57281--- /dev/null
57282+++ b/grsecurity/Kconfig
57283@@ -0,0 +1,1054 @@
57284+#
57285+# grsecurity configuration
57286+#
57287+menu "Memory Protections"
57288+depends on GRKERNSEC
57289+
57290+config GRKERNSEC_KMEM
57291+ bool "Deny reading/writing to /dev/kmem, /dev/mem, and /dev/port"
57292+ default y if GRKERNSEC_CONFIG_AUTO
57293+ select STRICT_DEVMEM if (X86 || ARM || TILE || S390)
57294+ help
57295+ If you say Y here, /dev/kmem and /dev/mem won't be allowed to
57296+ be written to or read from, preventing modification or leakage of the
57297+ contents of the running kernel. /dev/port will also not be allowed to be
57298+ opened, and support for /dev/cpu/*/msr will be removed. If you have module
57299+ support disabled, enabling this will close up five ways that are
57300+ currently used to insert malicious code into the running kernel.
57301+
57302+ Even with all these features enabled, we still highly recommend that
57303+ you use the RBAC system, as it is still possible for an attacker to
57304+ modify the running kernel through privileged I/O granted by ioperm/iopl.
57305+
57306+ If you are not using XFree86, you may be able to stop this additional
57307+ case by enabling the 'Disable privileged I/O' option. Though nothing
57308+ legitimately writes to /dev/kmem, XFree86 does need to write to /dev/mem,
57309+ but only to video memory, which is the only writing we allow in this
57310+ case. If /dev/kmem or /dev/mem is mmapped without PROT_WRITE, it will
57311+ not be allowed to be mprotected with PROT_WRITE later.
57312+ Enabling this feature will prevent the "cpupower" and "powertop" tools
57313+ from working.
57314+
57315+ It is highly recommended that you say Y here if you meet all the
57316+ conditions above.
57317+
57318+config GRKERNSEC_VM86
57319+ bool "Restrict VM86 mode"
57320+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
57321+ depends on X86_32
57322+
57323+ help
57324+ If you say Y here, only processes with CAP_SYS_RAWIO will be able to
57325+ make use of a special execution mode on 32bit x86 processors called
57326+ Virtual 8086 (VM86) mode. XFree86 may need vm86 mode for certain
57327+ video cards and will still work with this option enabled. The purpose
57328+ of the option is to prevent exploitation of emulation errors in
57329+ virtualization of vm86 mode like the one discovered in VMWare in 2009.
57330+ Nearly all users should be able to enable this option.
57331+
57332+config GRKERNSEC_IO
57333+ bool "Disable privileged I/O"
57334+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
57335+ depends on X86
57336+ select RTC_CLASS
57337+ select RTC_INTF_DEV
57338+ select RTC_DRV_CMOS
57339+
57340+ help
57341+ If you say Y here, all ioperm and iopl calls will return an error.
57342+ Ioperm and iopl can be used to modify the running kernel.
57343+ Unfortunately, some programs need this access to operate properly,
57344+ the most notable of which are XFree86 and hwclock. The hwclock case
57345+ can be remedied by building RTC support into the kernel, so real-time
57346+ clock support is enabled whenever this option is enabled, to ensure
57347+ that hwclock operates correctly. XFree86 still will not
57348+ operate correctly with this option enabled, so DO NOT CHOOSE Y
57349+ IF YOU USE XFree86. If you use XFree86 and you still want to
57350+ protect your kernel against modification, use the RBAC system.
57351+
57352+config GRKERNSEC_JIT_HARDEN
57353+ bool "Harden BPF JIT against spray attacks"
57354+ default y if GRKERNSEC_CONFIG_AUTO
57355+ depends on BPF_JIT
57356+ help
57357+ If you say Y here, the native code generated by the kernel's Berkeley
57358+ Packet Filter (BPF) JIT engine will be hardened against JIT-spraying
57359+ attacks that attempt to fit attacker-beneficial instructions in
57360+ 32bit immediate fields of JIT-generated native instructions. The
57361+ attacker will generally aim to cause an unintended instruction sequence
57362+ of JIT-generated native code to execute by jumping into the middle of
57363+ a generated instruction. This feature effectively randomizes the 32bit
57364+ immediate constants present in the generated code to thwart such attacks.
57365+
57366+ If you're using KERNEXEC, it's recommended that you enable this option
57367+ to supplement the hardening of the kernel.
57368+
57369+config GRKERNSEC_PERF_HARDEN
57370+ bool "Disable unprivileged PERF_EVENTS usage by default"
57371+ default y if GRKERNSEC_CONFIG_AUTO
57372+ depends on PERF_EVENTS
57373+ help
57374+ If you say Y here, the range of acceptable values for the
57375+ /proc/sys/kernel/perf_event_paranoid sysctl will be expanded to allow and
57376+ default to a new value: 3. When the sysctl is set to this value, no
57377+ unprivileged use of the PERF_EVENTS syscall interface will be permitted.
57378+
57379+ Though PERF_EVENTS can be used legitimately for performance monitoring
57380+ and low-level application profiling, it is forced on regardless of
57381+ configuration, has been at fault for several vulnerabilities, and
57382+ creates new opportunities for side channels and other information leaks.
57383+
57384+ This feature puts PERF_EVENTS into a secure default state and permits
57385+ the administrator to change out of it temporarily if unprivileged
57386+ application profiling is needed.
57387+
57388+config GRKERNSEC_RAND_THREADSTACK
57389+ bool "Insert random gaps between thread stacks"
57390+ default y if GRKERNSEC_CONFIG_AUTO
57391+ depends on PAX_RANDMMAP && !PPC
57392+ help
57393+ If you say Y here, a random-sized gap will be enforced between allocated
57394+ thread stacks. Glibc's NPTL and other threading libraries that
57395+ pass MAP_STACK to the kernel for thread stack allocation are supported.
57396+ The implementation currently provides 8 bits of entropy for the gap.
57397+
57398+ Many distributions do not compile threaded remote services with the
57399+ -fstack-check argument to GCC, causing the variable-sized stack-based
57400+ allocator, alloca(), to not probe the stack on allocation. This
57401+ permits an unbounded alloca() to skip over any guard page and potentially
57402+ modify another thread's stack reliably. An enforced random gap
57403+ reduces the reliability of such an attack and increases the chance
57404+ that such a read/write to another thread's stack instead lands in
57405+ an unmapped area, causing a crash and triggering grsecurity's
57406+ anti-bruteforcing logic.
57407+
57408+config GRKERNSEC_PROC_MEMMAP
57409+ bool "Harden ASLR against information leaks and entropy reduction"
57410+ default y if (GRKERNSEC_CONFIG_AUTO || PAX_NOEXEC || PAX_ASLR)
57411+ depends on PAX_NOEXEC || PAX_ASLR
57412+ help
57413+ If you say Y here, the /proc/<pid>/maps and /proc/<pid>/stat files will
57414+ give no information about the addresses of its mappings if
57415+ PaX features that rely on random addresses are enabled on the task.
57416+ In addition to sanitizing this information and disabling other
57417+ dangerous sources of information, this option denies reads of sensitive
57418+ /proc/<pid> entries when the file descriptor was opened in a different
57419+ task than the one performing the read. Such attempts are logged.
57420+ This option also limits argv/env strings for suid/sgid binaries
57421+ to 512KB to prevent a complete exhaustion of the stack entropy provided
57422+ by ASLR. Finally, it places an 8MB stack resource limit on suid/sgid
57423+ binaries to prevent alternative mmap layouts from being abused.
57424+
57425+ If you use PaX it is essential that you say Y here as it closes up
57426+ several holes that make full ASLR useless locally.
57427+
57428+config GRKERNSEC_BRUTE
57429+ bool "Deter exploit bruteforcing"
57430+ default y if GRKERNSEC_CONFIG_AUTO
57431+ help
57432+ If you say Y here, attempts to bruteforce exploits against forking
57433+ daemons such as apache or sshd, as well as against suid/sgid binaries,
57434+ will be deterred. When a child of a forking daemon is killed by PaX
57435+ or crashes due to an illegal instruction or other suspicious signal,
57436+ the parent process will be delayed 30 seconds upon every subsequent
57437+ fork until the administrator is able to assess the situation and
57438+ restart the daemon.
57439+ In the suid/sgid case, the attempt is logged, the user has all their
57440+ existing instances of the suid/sgid binary terminated and will
57441+ be unable to execute any suid/sgid binaries for 15 minutes.
57442+
57443+ It is recommended that you also enable signal logging in the auditing
57444+ section so that logs are generated when a process triggers a suspicious
57445+ signal.
57446+ If the sysctl option is enabled, a sysctl option with name
57447+ "deter_bruteforce" is created.
57448+
57449+
57450+config GRKERNSEC_MODHARDEN
57451+ bool "Harden module auto-loading"
57452+ default y if GRKERNSEC_CONFIG_AUTO
57453+ depends on MODULES
57454+ help
57455+ If you say Y here, module auto-loading in response to use of some
57456+ feature implemented by an unloaded module will be restricted to
57457+ root users. Enabling this option helps defend against attacks
57458+ by unprivileged users who abuse the auto-loading behavior to
57459+ cause a vulnerable module to load that is then exploited.
57460+
57461+ If this option prevents a legitimate use of auto-loading for a
57462+ non-root user, the administrator can execute modprobe manually
57463+ with the exact name of the module mentioned in the alert log.
57464+ Alternatively, the administrator can add the module to the list
57465+ of modules loaded at boot by modifying init scripts.
57466+
57467+ Modification of init scripts will most likely be needed on
57468+ Ubuntu servers with encrypted home directory support enabled,
57469+ as the first non-root user logging in will cause the ecb(aes),
57470+ ecb(aes)-all, cbc(aes), and cbc(aes)-all modules to be loaded.
57471+
57472+config GRKERNSEC_HIDESYM
57473+ bool "Hide kernel symbols"
57474+ default y if GRKERNSEC_CONFIG_AUTO
57475+ select PAX_USERCOPY_SLABS
57476+ help
57477+ If you say Y here, getting information on loaded modules and
57478+ displaying all kernel symbols through a syscall will be restricted
57479+ to users with CAP_SYS_MODULE. For software compatibility reasons,
57480+ /proc/kallsyms will be restricted to the root user. The RBAC
57481+ system can hide that entry even from root.
57482+
57483+ This option also prevents leaking of kernel addresses through
57484+ several /proc entries.
57485+
57486+ Note that this option is only effective provided the following
57487+ conditions are met:
57488+ 1) The kernel using grsecurity is not precompiled by some distribution
57489+ 2) You have also enabled GRKERNSEC_DMESG
57490+ 3) You are using the RBAC system and hiding other files such as your
57491+ kernel image and System.map. Alternatively, enabling this option
57492+ causes the permissions on /boot, /lib/modules, and the kernel
57493+ source directory to change at compile time to prevent
57494+ reading by non-root users.
57495+ If the above conditions are met, this option will aid in providing a
57496+ useful protection against local kernel exploitation of overflows
57497+ and arbitrary read/write vulnerabilities.
57498+
57499+ It is highly recommended that you enable GRKERNSEC_PERF_HARDEN
57500+ in addition to this feature.
57501+
57502+config GRKERNSEC_KERN_LOCKOUT
57503+ bool "Active kernel exploit response"
57504+ default y if GRKERNSEC_CONFIG_AUTO
57505+ depends on X86 || ARM || PPC || SPARC
57506+ help
57507+ If you say Y here, when a PaX alert is triggered due to suspicious
57508+ activity in the kernel (from KERNEXEC/UDEREF/USERCOPY)
57509+ or an OOPS occurs due to bad memory accesses, instead of just
57510+ terminating the offending process (and potentially allowing
57511+ a subsequent exploit from the same user), we will take one of two
57512+ actions:
57513+ If the user was root, we will panic the system
57514+ If the user was non-root, we will log the attempt, terminate
57515+ all processes owned by the user, then prevent them from creating
57516+ any new processes until the system is restarted
57517+ This deters repeated kernel exploitation/bruteforcing attempts
57518+ and is useful for later forensics.
57519+
57520+endmenu
57521+menu "Role Based Access Control Options"
57522+depends on GRKERNSEC
57523+
57524+config GRKERNSEC_RBAC_DEBUG
57525+ bool
57526+
57527+config GRKERNSEC_NO_RBAC
57528+ bool "Disable RBAC system"
57529+ help
57530+ If you say Y here, the /dev/grsec device will be removed from the kernel,
57531+ preventing the RBAC system from being enabled. You should only say Y
57532+ here if you have no intention of using the RBAC system, so as to prevent
57533+ an attacker with root access from misusing the RBAC system to hide files
57534+ and processes when loadable module support and /dev/[k]mem have been
57535+ locked down.
57536+
57537+config GRKERNSEC_ACL_HIDEKERN
57538+ bool "Hide kernel processes"
57539+ help
57540+ If you say Y here, all kernel threads will be hidden from all
57541+ processes but those whose subject has the "view hidden processes"
57542+ flag.
57543+
57544+config GRKERNSEC_ACL_MAXTRIES
57545+ int "Maximum tries before password lockout"
57546+ default 3
57547+ help
57548+ This option enforces the maximum number of times a user can attempt
57549+ to authorize themselves with the grsecurity RBAC system before being
57550+ denied the ability to attempt authorization again for a specified time.
57551+ The lower the number, the harder it will be to brute-force a password.
57552+
57553+config GRKERNSEC_ACL_TIMEOUT
57554+ int "Time to wait after max password tries, in seconds"
57555+ default 30
57556+ help
57557+ This option specifies the time the user must wait after attempting to
57558+ authorize to the RBAC system with the maximum number of invalid
57559+ passwords. The higher the number, the harder it will be to brute-force
57560+ a password.
57561+
57562+endmenu
57563+menu "Filesystem Protections"
57564+depends on GRKERNSEC
57565+
57566+config GRKERNSEC_PROC
57567+ bool "Proc restrictions"
57568+ default y if GRKERNSEC_CONFIG_AUTO
57569+ help
57570+ If you say Y here, the permissions of the /proc filesystem
57571+ will be altered to enhance system security and privacy. You MUST
57572+ choose either a user only restriction or a user and group restriction.
57573+ Depending upon the option you choose, you can either restrict users to
57574+ see only the processes they themselves run, or choose a group whose
57575+ members can view all processes and files that would otherwise be
57576+ restricted to root. NOTE: If you're running identd or
57577+ ntpd as a non-root user, you will have to run it as the group you
57578+ specify here.
57579+
57580+config GRKERNSEC_PROC_USER
57581+ bool "Restrict /proc to user only"
57582+ depends on GRKERNSEC_PROC
57583+ help
57584+ If you say Y here, non-root users will only be able to view their own
57585+ processes, and will be restricted from viewing network-related
57586+ information and kernel symbol and module information.
57587+
57588+config GRKERNSEC_PROC_USERGROUP
57589+ bool "Allow special group"
57590+ default y if GRKERNSEC_CONFIG_AUTO
57591+ depends on GRKERNSEC_PROC && !GRKERNSEC_PROC_USER
57592+ help
57593+ If you say Y here, you will be able to select a group that will be
57594+ able to view all processes and network-related information. If you've
57595+ enabled GRKERNSEC_HIDESYM, kernel and symbol information may still
57596+ remain hidden. This option is useful if you want to run identd as
57597+ a non-root user. The group you select may also be chosen at boot time
57598+ via "grsec_proc_gid=" on the kernel commandline.
57599+
57600+config GRKERNSEC_PROC_GID
57601+ int "GID for special group"
57602+ depends on GRKERNSEC_PROC_USERGROUP
57603+ default 1001
57604+
57605+config GRKERNSEC_PROC_ADD
57606+ bool "Additional restrictions"
57607+ default y if GRKERNSEC_CONFIG_AUTO
57608+ depends on GRKERNSEC_PROC_USER || GRKERNSEC_PROC_USERGROUP
57609+ help
57610+ If you say Y here, additional restrictions will be placed on
57611+ /proc that keep normal users from viewing device information and
57612+ slabinfo information that could be useful for exploits.
57613+
57614+config GRKERNSEC_LINK
57615+ bool "Linking restrictions"
57616+ default y if GRKERNSEC_CONFIG_AUTO
57617+ help
57618+ If you say Y here, /tmp race exploits will be prevented, since users
57619+ will no longer be able to follow symlinks owned by other users in
57620+ world-writable +t directories (e.g. /tmp), unless the owner of the
57621+ symlink is the owner of the directory. Users will also not be
57622+ able to hardlink to files they do not own. If the sysctl option is
57623+ enabled, a sysctl option with name "linking_restrictions" is created.
57624+
57625+config GRKERNSEC_SYMLINKOWN
57626+ bool "Kernel-enforced SymlinksIfOwnerMatch"
57627+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
57628+ help
57629+ Apache's SymlinksIfOwnerMatch option has an inherent race condition
57630+ that prevents it from being used as a security feature. As Apache
57631+ verifies the symlink by performing a stat() against the target of
57632+ the symlink before it is followed, an attacker can set up a symlink
57633+ to point to a same-owned file, then replace the symlink with one
57634+ that targets another user's file just after Apache "validates" the
57635+ symlink -- a classic TOCTOU race. If you say Y here, a complete,
57636+ race-free replacement for Apache's "SymlinksIfOwnerMatch" option
57637+ will be in place for the group you specify. If the sysctl option
57638+ is enabled, a sysctl option with name "enforce_symlinksifowner" is
57639+ created.
57640+
57641+config GRKERNSEC_SYMLINKOWN_GID
57642+ int "GID for users with kernel-enforced SymlinksIfOwnerMatch"
57643+ depends on GRKERNSEC_SYMLINKOWN
57644+ default 1006
57645+ help
57646+ Setting this GID determines what group kernel-enforced
57647+ SymlinksIfOwnerMatch will be enabled for. If the sysctl option
57648+ is enabled, a sysctl option with name "symlinkown_gid" is created.
57649+
57650+config GRKERNSEC_FIFO
57651+ bool "FIFO restrictions"
57652+ default y if GRKERNSEC_CONFIG_AUTO
57653+ help
57654+ If you say Y here, users will not be able to write to FIFOs they don't
57655+ own in world-writable +t directories (e.g. /tmp), unless the owner of
57656+ the FIFO is the same as the owner of the directory it's held in. If the sysctl
57657+ option is enabled, a sysctl option with name "fifo_restrictions" is
57658+ created.
57659+
57660+config GRKERNSEC_SYSFS_RESTRICT
57661+ bool "Sysfs/debugfs restriction"
57662+ default y if (GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER)
57663+ depends on SYSFS
57664+ help
57665+ If you say Y here, sysfs (the pseudo-filesystem mounted at /sys) and
57666+ any filesystem normally mounted under it (e.g. debugfs) will be
57667+ mostly accessible only by root. These filesystems generally provide access
57668+ to hardware and debug information that isn't appropriate for unprivileged
57669+ users of the system. Sysfs and debugfs have also become a large source
57670+ of new vulnerabilities, ranging from infoleaks to local compromise.
57671+ There has been very little oversight with an eye toward security involved
57672+ in adding new exporters of information to these filesystems, so their
57673+ use is discouraged.
57674+ For reasons of compatibility, a few directories have been whitelisted
57675+ for access by non-root users:
57676+ /sys/fs/selinux
57677+ /sys/fs/fuse
57678+ /sys/devices/system/cpu
57679+
57680+config GRKERNSEC_ROFS
57681+ bool "Runtime read-only mount protection"
57682+ help
57683+ If you say Y here, a sysctl option with name "romount_protect" will
57684+ be created. By setting this option to 1 at runtime, filesystems
57685+ will be protected in the following ways:
57686+ * No new writable mounts will be allowed
57687+ * Existing read-only mounts won't be able to be remounted read/write
57688+ * Write operations will be denied on all block devices
57689+ This option acts independently of grsec_lock: once it is set to 1,
57690+ it cannot be turned off. Therefore, please be mindful of the resulting
57691+ behavior if this option is enabled in an init script on a read-only
57692+ filesystem. This feature is mainly intended for secure embedded systems.
57693+
57694+config GRKERNSEC_DEVICE_SIDECHANNEL
57695+ bool "Eliminate stat/notify-based device sidechannels"
57696+ default y if GRKERNSEC_CONFIG_AUTO
57697+ help
57698+ If you say Y here, timing analyses on block or character
57699+ devices like /dev/ptmx using stat or inotify/dnotify/fanotify
57700+ will be thwarted for unprivileged users. If a process without
57701+ CAP_MKNOD stats such a device, the last access and last modify times
57702+ will match the device's create time. No access or modify events
57703+ will be triggered through inotify/dnotify/fanotify for such devices.
57704+ This feature will prevent attacks that, at a minimum, may
57705+ allow an attacker to determine the administrator's password length.
57706+
57707+config GRKERNSEC_CHROOT
57708+ bool "Chroot jail restrictions"
57709+ default y if GRKERNSEC_CONFIG_AUTO
57710+ help
57711+ If you say Y here, you will be able to choose several options that will
57712+ make breaking out of a chrooted jail much more difficult. If you
57713+ encounter no software incompatibilities with the following options, it
57714+ is recommended that you enable each one.
57715+
57716+config GRKERNSEC_CHROOT_MOUNT
57717+ bool "Deny mounts"
57718+ default y if GRKERNSEC_CONFIG_AUTO
57719+ depends on GRKERNSEC_CHROOT
57720+ help
57721+ If you say Y here, processes inside a chroot will not be able to
57722+ mount or remount filesystems. If the sysctl option is enabled, a
57723+ sysctl option with name "chroot_deny_mount" is created.
57724+
57725+config GRKERNSEC_CHROOT_DOUBLE
57726+ bool "Deny double-chroots"
57727+ default y if GRKERNSEC_CONFIG_AUTO
57728+ depends on GRKERNSEC_CHROOT
57729+ help
57730+ If you say Y here, processes inside a chroot will not be able to chroot
57731+ again outside the chroot. This is a widely used method of breaking
57732+ out of a chroot jail and should not be allowed. If the sysctl
57733+ option is enabled, a sysctl option with name
57734+ "chroot_deny_chroot" is created.
57735+
57736+config GRKERNSEC_CHROOT_PIVOT
57737+ bool "Deny pivot_root in chroot"
57738+ default y if GRKERNSEC_CONFIG_AUTO
57739+ depends on GRKERNSEC_CHROOT
57740+ help
57741+ If you say Y here, processes inside a chroot will not be able to use
57742+ a function called pivot_root() that was introduced in Linux 2.3.41. It
57743+ works similarly to chroot in that it changes the root filesystem. This
57744+ function could be misused in a chrooted process to attempt to break out
57745+ of the chroot, and therefore should not be allowed. If the sysctl
57746+ option is enabled, a sysctl option with name "chroot_deny_pivot" is
57747+ created.
57748+
57749+config GRKERNSEC_CHROOT_CHDIR
57750+ bool "Enforce chdir(\"/\") on all chroots"
57751+ default y if GRKERNSEC_CONFIG_AUTO
57752+ depends on GRKERNSEC_CHROOT
57753+ help
57754+ If you say Y here, the current working directory of all newly-chrooted
57755+ applications will be set to the root directory of the chroot.
57756+ The man page on chroot(2) states:
57757+ Note that this call does not change the current working
57758+ directory, so that `.' can be outside the tree rooted at
57759+ `/'. In particular, the super-user can escape from a
57760+ `chroot jail' by doing `mkdir foo; chroot foo; cd ..'.
57761+
57762+ It is recommended that you say Y here, since it's not known to break
57763+ any software. If the sysctl option is enabled, a sysctl option with
57764+ name "chroot_enforce_chdir" is created.
57765+
57766+config GRKERNSEC_CHROOT_CHMOD
57767+ bool "Deny (f)chmod +s"
57768+ default y if GRKERNSEC_CONFIG_AUTO
57769+ depends on GRKERNSEC_CHROOT
57770+ help
57771+ If you say Y here, processes inside a chroot will not be able to chmod
57772+ or fchmod files to make them have suid or sgid bits. This protects
57773+ against another published method of breaking a chroot. If the sysctl
57774+ option is enabled, a sysctl option with name "chroot_deny_chmod" is
57775+ created.
57776+
57777+config GRKERNSEC_CHROOT_FCHDIR
57778+ bool "Deny fchdir out of chroot"
57779+ default y if GRKERNSEC_CONFIG_AUTO
57780+ depends on GRKERNSEC_CHROOT
57781+ help
57782+ If you say Y here, a well-known method of breaking chroots by fchdir'ing
57783+ to a file descriptor of the chrooting process that points to a directory
57784+ outside the chroot will be stopped. If the sysctl option
57785+ is enabled, a sysctl option with name "chroot_deny_fchdir" is created.
57786+
57787+config GRKERNSEC_CHROOT_MKNOD
57788+ bool "Deny mknod"
57789+ default y if GRKERNSEC_CONFIG_AUTO
57790+ depends on GRKERNSEC_CHROOT
57791+ help
57792+ If you say Y here, processes inside a chroot will not be allowed to
57793+ mknod. The problem with using mknod inside a chroot is that it
57794+ would allow an attacker to create a device entry that is the same
57795+ as one on the physical root of your system, which could be
57796+ anything from the console device to a device for your hard drive (which
57797+ they could then use to wipe the drive or steal data). It is recommended
57798+ that you say Y here, unless you run into software incompatibilities.
57799+ If the sysctl option is enabled, a sysctl option with name
57800+ "chroot_deny_mknod" is created.
57801+
57802+config GRKERNSEC_CHROOT_SHMAT
57803+ bool "Deny shmat() out of chroot"
57804+ default y if GRKERNSEC_CONFIG_AUTO
57805+ depends on GRKERNSEC_CHROOT
57806+ help
57807+ If you say Y here, processes inside a chroot will not be able to attach
57808+ to shared memory segments that were created outside of the chroot jail.
57809+ It is recommended that you say Y here. If the sysctl option is enabled,
57810+ a sysctl option with name "chroot_deny_shmat" is created.
57811+
57812+config GRKERNSEC_CHROOT_UNIX
57813+ bool "Deny access to abstract AF_UNIX sockets out of chroot"
57814+ default y if GRKERNSEC_CONFIG_AUTO
57815+ depends on GRKERNSEC_CHROOT
57816+ help
57817+ If you say Y here, processes inside a chroot will not be able to
57818+ connect to abstract (meaning not belonging to a filesystem) Unix
57819+ domain sockets that were bound outside of a chroot. It is recommended
57820+ that you say Y here. If the sysctl option is enabled, a sysctl option
57821+ with name "chroot_deny_unix" is created.
57822+
57823+config GRKERNSEC_CHROOT_FINDTASK
57824+ bool "Protect outside processes"
57825+ default y if GRKERNSEC_CONFIG_AUTO
57826+ depends on GRKERNSEC_CHROOT
57827+ help
57828+ If you say Y here, processes inside a chroot will not be able to
57829+ kill, send signals with fcntl, ptrace, capget, getpgid, setpgid,
57830+ getsid, or view any process outside of the chroot. If the sysctl
57831+ option is enabled, a sysctl option with name "chroot_findtask" is
57832+ created.
57833+
57834+config GRKERNSEC_CHROOT_NICE
57835+ bool "Restrict priority changes"
57836+ default y if GRKERNSEC_CONFIG_AUTO
57837+ depends on GRKERNSEC_CHROOT
57838+ help
57839+ If you say Y here, processes inside a chroot will not be able to raise
57840+ the priority of processes in the chroot, or alter the priority of
57841+ processes outside the chroot. This provides more security than simply
57842+ removing CAP_SYS_NICE from the process' capability set. If the
57843+ sysctl option is enabled, a sysctl option with name "chroot_restrict_nice"
57844+ is created.
57845+
57846+config GRKERNSEC_CHROOT_SYSCTL
57847+ bool "Deny sysctl writes"
57848+ default y if GRKERNSEC_CONFIG_AUTO
57849+ depends on GRKERNSEC_CHROOT
57850+ help
57851+ If you say Y here, an attacker in a chroot will not be able to
57852+ write to sysctl entries, either by sysctl(2) or through a /proc
57853+ interface. It is strongly recommended that you say Y here. If the
57854+ sysctl option is enabled, a sysctl option with name
57855+ "chroot_deny_sysctl" is created.
57856+
57857+config GRKERNSEC_CHROOT_CAPS
57858+ bool "Capability restrictions"
57859+ default y if GRKERNSEC_CONFIG_AUTO
57860+ depends on GRKERNSEC_CHROOT
57861+ help
57862+ If you say Y here, the capabilities on all processes within a
57863+ chroot jail will be lowered to stop module insertion, raw i/o,
57864+ system and net admin tasks, rebooting the system, modifying immutable
57865+ files, modifying IPC owned by another, and changing the system time.
57866+ This is left as an option because it can break some apps. Disable this
57867+ if your chrooted apps are having problems performing those kinds of
57868+ tasks. If the sysctl option is enabled, a sysctl option with
57869+ name "chroot_caps" is created.
57870+
57871+config GRKERNSEC_CHROOT_INITRD
57872+ bool "Exempt initrd tasks from restrictions"
57873+ default y if GRKERNSEC_CONFIG_AUTO
57874+ depends on GRKERNSEC_CHROOT && BLK_DEV_RAM
57875+ help
57876+ If you say Y here, tasks started prior to init will be exempted from
57877+ grsecurity's chroot restrictions. This option is mainly meant to
57878+ resolve Plymouth performing privileged operations unnecessarily
57879+ in a chroot.
57880+
57881+endmenu
57882+menu "Kernel Auditing"
57883+depends on GRKERNSEC
57884+
57885+config GRKERNSEC_AUDIT_GROUP
57886+ bool "Single group for auditing"
57887+ help
57888+ If you say Y here, the exec and chdir logging features will only operate
57889+ on a group you specify. This option is recommended if you only want to
57890+ watch certain users instead of having a large amount of logs from the
57891+ entire system. If the sysctl option is enabled, a sysctl option with
57892+ name "audit_group" is created.
57893+
57894+config GRKERNSEC_AUDIT_GID
57895+ int "GID for auditing"
57896+ depends on GRKERNSEC_AUDIT_GROUP
57897+ default 1007
57898+
57899+config GRKERNSEC_EXECLOG
57900+ bool "Exec logging"
57901+ help
57902+ If you say Y here, all execve() calls will be logged (since the
57903+ other exec*() calls are frontends to execve(), all execution
57904+ will be logged). Useful for shell-servers that like to keep track
57905+ of their users. If the sysctl option is enabled, a sysctl option with
57906+ name "exec_logging" is created.
57907+ WARNING: This option when enabled will produce a LOT of logs, especially
57908+ on an active system.
57909+
57910+config GRKERNSEC_RESLOG
57911+ bool "Resource logging"
57912+ default y if GRKERNSEC_CONFIG_AUTO
57913+ help
57914+ If you say Y here, all attempts to overstep resource limits will
57915+ be logged with the resource name, the requested size, and the current
57916+ limit. It is highly recommended that you say Y here. If the sysctl
57917+ option is enabled, a sysctl option with name "resource_logging" is
57918+ created. If the RBAC system is enabled, the sysctl value is ignored.
57919+
57920+config GRKERNSEC_CHROOT_EXECLOG
57921+ bool "Log execs within chroot"
57922+ help
57923+ If you say Y here, all executions inside a chroot jail will be logged
57924+ to syslog. This can cause a large amount of logs if certain
57925+ applications (e.g. djb's daemontools) are installed on the system, and
57926+ is therefore left as an option. If the sysctl option is enabled, a
57927+ sysctl option with name "chroot_execlog" is created.
57928+
57929+config GRKERNSEC_AUDIT_PTRACE
57930+ bool "Ptrace logging"
57931+ help
57932+ If you say Y here, all attempts to attach to a process via ptrace
57933+ will be logged. If the sysctl option is enabled, a sysctl option
57934+ with name "audit_ptrace" is created.
57935+
57936+config GRKERNSEC_AUDIT_CHDIR
57937+ bool "Chdir logging"
57938+ help
57939+ If you say Y here, all chdir() calls will be logged. If the sysctl
57940+ option is enabled, a sysctl option with name "audit_chdir" is created.
57941+
57942+config GRKERNSEC_AUDIT_MOUNT
57943+ bool "(Un)Mount logging"
57944+ help
57945+ If you say Y here, all mounts and unmounts will be logged. If the
57946+ sysctl option is enabled, a sysctl option with name "audit_mount" is
57947+ created.
57948+
57949+config GRKERNSEC_SIGNAL
57950+ bool "Signal logging"
57951+ default y if GRKERNSEC_CONFIG_AUTO
57952+ help
57953+ If you say Y here, certain important signals will be logged, such as
57954+ SIGSEGV, which will as a result inform you when an error in a program
57955+ occurred, which in some cases could mean a possible exploit attempt.
57956+ If the sysctl option is enabled, a sysctl option with name
57957+ "signal_logging" is created.
57958+
57959+config GRKERNSEC_FORKFAIL
57960+ bool "Fork failure logging"
57961+ help
57962+ If you say Y here, all failed fork() attempts will be logged.
57963+ This could suggest a fork bomb, or someone attempting to overstep
57964+ their process limit. If the sysctl option is enabled, a sysctl option
57965+ with name "forkfail_logging" is created.
57966+
57967+config GRKERNSEC_TIME
57968+ bool "Time change logging"
57969+ default y if GRKERNSEC_CONFIG_AUTO
57970+ help
57971+ If you say Y here, any changes of the system clock will be logged.
57972+ If the sysctl option is enabled, a sysctl option with name
57973+ "timechange_logging" is created.
57974+
57975+config GRKERNSEC_PROC_IPADDR
57976+ bool "/proc/<pid>/ipaddr support"
57977+ default y if GRKERNSEC_CONFIG_AUTO
57978+ help
57979+ If you say Y here, a new entry will be added to each /proc/<pid>
57980+ directory that contains the IP address of the person using the task.
57981+ The IP is carried across local TCP and AF_UNIX stream sockets.
57982+ This information can be useful for IDS/IPSes to perform remote response
57983+ to a local attack. The entry is readable by only the owner of the
57984+ process (and root if he has CAP_DAC_OVERRIDE, which can be removed via
57985+ the RBAC system), and thus does not create privacy concerns.
57986+
57987+config GRKERNSEC_RWXMAP_LOG
57988+ bool 'Denied RWX mmap/mprotect logging'
57989+ default y if GRKERNSEC_CONFIG_AUTO
57990+ depends on PAX_MPROTECT && !PAX_EMUPLT && !PAX_EMUSIGRT
57991+ help
57992+ If you say Y here, calls to mmap() and mprotect() with explicit
57993+ usage of PROT_WRITE and PROT_EXEC together will be logged when
57994+ denied by the PAX_MPROTECT feature. If the sysctl option is
57995+ enabled, a sysctl option with name "rwxmap_logging" is created.
57996+
57997+config GRKERNSEC_AUDIT_TEXTREL
57998+ bool 'ELF text relocations logging (READ HELP)'
57999+ depends on PAX_MPROTECT
58000+ help
58001+ If you say Y here, text relocations will be logged with the filename
58002+ of the offending library or binary. The purpose of the feature is
58003+ to help Linux distribution developers get rid of libraries and
58004+ binaries that need text relocations which hinder the future progress
58005+ of PaX. Only Linux distribution developers should say Y here, and
58006+ never on a production machine, as this option creates an information
58007+ leak that could aid an attacker in defeating the randomization of
58008+ a single memory region. If the sysctl option is enabled, a sysctl
58009+ option with name "audit_textrel" is created.
58010+
58011+endmenu
58012+
58013+menu "Executable Protections"
58014+depends on GRKERNSEC
58015+
58016+config GRKERNSEC_DMESG
58017+ bool "Dmesg(8) restriction"
58018+ default y if GRKERNSEC_CONFIG_AUTO
58019+ help
58020+ If you say Y here, non-root users will not be able to use dmesg(8)
58021+ to view the contents of the kernel's circular log buffer.
58022+ The kernel's log buffer often contains kernel addresses and other
58023+ identifying information useful to an attacker in fingerprinting a
58024+ system for a targeted exploit.
58025+ If the sysctl option is enabled, a sysctl option with name "dmesg" is
58026+ created.
58027+
58028+config GRKERNSEC_HARDEN_PTRACE
58029+ bool "Deter ptrace-based process snooping"
58030+ default y if GRKERNSEC_CONFIG_AUTO
58031+ help
58032+ If you say Y here, TTY sniffers and other malicious monitoring
58033+ programs implemented through ptrace will be defeated. If you
58034+ have been using the RBAC system, this option has already been
58035+ enabled for several years for all users, with the ability to make
58036+ fine-grained exceptions.
58037+
58038+ This option only affects the ability of non-root users to ptrace
58039+ processes that are not a descendant of the ptracing process.
58040+ This means that strace ./binary and gdb ./binary will still work,
58041+ but attaching to arbitrary processes will not. If the sysctl
58042+ option is enabled, a sysctl option with name "harden_ptrace" is
58043+ created.
58044+
58045+config GRKERNSEC_PTRACE_READEXEC
58046+ bool "Require read access to ptrace sensitive binaries"
58047+ default y if GRKERNSEC_CONFIG_AUTO
58048+ help
58049+ If you say Y here, unprivileged users will not be able to ptrace unreadable
58050+ binaries. This option is useful in environments that
58051+ remove the read bits (e.g. file mode 4711) from suid binaries to
58052+ prevent infoleaking of their contents. This option adds
58053+ consistency to the use of that file mode, as the binary's contents
58054+ could otherwise be read out by running it without privileges and ptracing it.
58055+
58056+ If the sysctl option is enabled, a sysctl option with name "ptrace_readexec"
58057+ is created.
58058+
58059+config GRKERNSEC_SETXID
58060+ bool "Enforce consistent multithreaded privileges"
58061+ default y if GRKERNSEC_CONFIG_AUTO
58062+ depends on (X86 || SPARC64 || PPC || ARM || MIPS)
58063+ help
58064+ If you say Y here, a change from a root uid to a non-root uid
58065+ in a multithreaded application will cause the resulting uids,
58066+ gids, supplementary groups, and capabilities in that thread
58067+ to be propagated to the other threads of the process. In most
58068+ cases this is unnecessary, as glibc will emulate this behavior
58069+ on behalf of the application. Other libcs do not act in the
58070+ same way, allowing the other threads of the process to continue
58071+ running with root privileges. If the sysctl option is enabled,
58072+ a sysctl option with name "consistent_setxid" is created.
58073+
58074+config GRKERNSEC_TPE
58075+ bool "Trusted Path Execution (TPE)"
58076+ default y if GRKERNSEC_CONFIG_AUTO && GRKERNSEC_CONFIG_SERVER
58077+ help
58078+ If you say Y here, you will be able to choose a gid to add to the
58079+ supplementary groups of users you want to mark as "untrusted."
58080+ These users will not be able to execute any files that are not in
58081+ root-owned directories writable only by root. If the sysctl option
58082+ is enabled, a sysctl option with name "tpe" is created.
58083+
58084+config GRKERNSEC_TPE_ALL
58085+ bool "Partially restrict all non-root users"
58086+ depends on GRKERNSEC_TPE
58087+ help
58088+ If you say Y here, all non-root users will be covered under
58089+ a weaker TPE restriction. This is separate from, and in addition to,
58090+ the main TPE options that you have selected elsewhere. Thus, if a
58091+ "trusted" GID is chosen, this restriction applies to even that GID.
58092+ Under this restriction, all non-root users will only be allowed to
58093+ execute files in directories they own that are not group or
58094+ world-writable, or in directories owned by root and writable only by
58095+ root. If the sysctl option is enabled, a sysctl option with name
58096+ "tpe_restrict_all" is created.
58097+
58098+config GRKERNSEC_TPE_INVERT
58099+ bool "Invert GID option"
58100+ depends on GRKERNSEC_TPE
58101+ help
58102+ If you say Y here, the group you specify in the TPE configuration will
58103+ decide what group TPE restrictions will be *disabled* for. This
58104+ option is useful if you want TPE restrictions to be applied to most
58105+ users on the system. If the sysctl option is enabled, a sysctl option
58106+ with name "tpe_invert" is created. Unlike other sysctl options, this
58107+ entry will default to on for backward compatibility.
58108+
58109+config GRKERNSEC_TPE_GID
58110+ int
58111+ default GRKERNSEC_TPE_UNTRUSTED_GID if (GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT)
58112+ default GRKERNSEC_TPE_TRUSTED_GID if (GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT)
58113+
58114+config GRKERNSEC_TPE_UNTRUSTED_GID
58115+ int "GID for TPE-untrusted users"
58116+ depends on GRKERNSEC_TPE && !GRKERNSEC_TPE_INVERT
58117+ default 1005
58118+ help
58119+ Setting this GID determines what group TPE restrictions will be
58120+ *enabled* for. If the sysctl option is enabled, a sysctl option
58121+ with name "tpe_gid" is created.
58122+
58123+config GRKERNSEC_TPE_TRUSTED_GID
58124+ int "GID for TPE-trusted users"
58125+ depends on GRKERNSEC_TPE && GRKERNSEC_TPE_INVERT
58126+ default 1005
58127+ help
58128+ Setting this GID determines what group TPE restrictions will be
58129+ *disabled* for. If the sysctl option is enabled, a sysctl option
58130+ with name "tpe_gid" is created.
58131+
58132+endmenu
58133+menu "Network Protections"
58134+depends on GRKERNSEC
58135+
58136+config GRKERNSEC_RANDNET
58137+ bool "Larger entropy pools"
58138+ default y if GRKERNSEC_CONFIG_AUTO
58139+ help
58140+ If you say Y here, the entropy pools used for many features of Linux
58141+ and grsecurity will be doubled in size. Since several grsecurity
58142+ features use additional randomness, it is recommended that you say Y
58143+ here.  Saying Y here has a similar effect to modifying
58144+ /proc/sys/kernel/random/poolsize.
58145+
58146+config GRKERNSEC_BLACKHOLE
58147+ bool "TCP/UDP blackhole and LAST_ACK DoS prevention"
58148+ default y if GRKERNSEC_CONFIG_AUTO
58149+ depends on NET
58150+ help
58151+ If you say Y here, neither TCP resets nor ICMP
58152+ destination-unreachable packets will be sent in response to packets
58153+ sent to ports for which no associated listening process exists.
58154+ This feature supports both IPv4 and IPv6 and exempts the
58155+ loopback interface from blackholing. Enabling this feature
58156+ makes a host more resilient to DoS attacks and reduces network
58157+ visibility against scanners.
58158+
58159+ The blackhole feature as-implemented is equivalent to the FreeBSD
58160+ blackhole feature, as it prevents RST responses to all packets, not
58161+ just SYNs.  For most applications this causes no
58162+ problems, but some (like haproxy) may not close certain
58163+ connections in a way that cleanly terminates them on the remote
58164+ end, leaving the remote host in LAST_ACK state. Because of this
58165+ side-effect and to prevent intentional LAST_ACK DoSes, this
58166+ feature also adds automatic mitigation against such attacks.
58167+ The mitigation drastically reduces the amount of time a socket
58168+ can spend in LAST_ACK state. If you're using haproxy and not
58169+ all servers it connects to have this option enabled, consider
58170+ disabling this feature on the haproxy host.
58171+
58172+ If the sysctl option is enabled, two sysctl options with names
58173+ "ip_blackhole" and "lastack_retries" will be created.
58174+ While "ip_blackhole" takes the standard zero/non-zero on/off
58175+ toggle, "lastack_retries" uses the same kinds of values as
58176+ "tcp_retries1" and "tcp_retries2". The default value of 4
58177+ prevents a socket from lasting more than 45 seconds in LAST_ACK
58178+ state.
58179+
58180+config GRKERNSEC_NO_SIMULT_CONNECT
58181+ bool "Disable TCP Simultaneous Connect"
58182+ default y if GRKERNSEC_CONFIG_AUTO
58183+ depends on NET
58184+ help
58185+ If you say Y here, a feature by Willy Tarreau will be enabled that
58186+ removes a weakness in Linux's strict implementation of TCP that
58187+ allows two clients to connect to each other without either entering
58188+ a listening state. The weakness allows an attacker to easily prevent
58189+ a client from connecting to a known server provided the source port
58190+ for the connection is guessed correctly.
58191+
58192+ As the weakness could be used to prevent an antivirus or IPS from
58193+ fetching updates, or prevent an SSL gateway from fetching a CRL,
58194+ it should be eliminated by enabling this option. Though Linux is
58195+ one of few operating systems supporting simultaneous connect, it
58196+ has no legitimate use in practice and is rarely supported by firewalls.
58197+
58198+config GRKERNSEC_SOCKET
58199+ bool "Socket restrictions"
58200+ depends on NET
58201+ help
58202+ If you say Y here, you will be able to choose from several options.
58203+ If you assign a GID on your system and add it to the supplementary
58204+ groups of users you want to restrict socket access to, this patch
58205+ will perform up to three things, based on the option(s) you choose.
58206+
58207+config GRKERNSEC_SOCKET_ALL
58208+ bool "Deny any sockets to group"
58209+ depends on GRKERNSEC_SOCKET
58210+ help
58211+ If you say Y here, you will be able to choose a GID whose users will
58212+ be unable to connect to other hosts from your machine or run server
58213+ applications from your machine. If the sysctl option is enabled, a
58214+ sysctl option with name "socket_all" is created.
58215+
58216+config GRKERNSEC_SOCKET_ALL_GID
58217+ int "GID to deny all sockets for"
58218+ depends on GRKERNSEC_SOCKET_ALL
58219+ default 1004
58220+ help
58221+ Here you can choose the GID to disable socket access for. Remember to
58222+ add the users you want socket access disabled for to the GID
58223+ specified here. If the sysctl option is enabled, a sysctl option
58224+ with name "socket_all_gid" is created.
58225+
58226+config GRKERNSEC_SOCKET_CLIENT
58227+ bool "Deny client sockets to group"
58228+ depends on GRKERNSEC_SOCKET
58229+ help
58230+ If you say Y here, you will be able to choose a GID whose users will
58231+ be unable to connect to other hosts from your machine, but will be
58232+ able to run servers. If this option is enabled, all users in the group
58233+ you specify will have to use passive mode when initiating ftp transfers
58234+ from the shell on your machine. If the sysctl option is enabled, a
58235+ sysctl option with name "socket_client" is created.
58236+
58237+config GRKERNSEC_SOCKET_CLIENT_GID
58238+ int "GID to deny client sockets for"
58239+ depends on GRKERNSEC_SOCKET_CLIENT
58240+ default 1003
58241+ help
58242+ Here you can choose the GID to disable client socket access for.
58243+ Remember to add the users you want client socket access disabled for to
58244+ the GID specified here. If the sysctl option is enabled, a sysctl
58245+ option with name "socket_client_gid" is created.
58246+
58247+config GRKERNSEC_SOCKET_SERVER
58248+ bool "Deny server sockets to group"
58249+ depends on GRKERNSEC_SOCKET
58250+ help
58251+ If you say Y here, you will be able to choose a GID whose users will
58252+ be unable to run server applications from your machine. If the sysctl
58253+ option is enabled, a sysctl option with name "socket_server" is created.
58254+
58255+config GRKERNSEC_SOCKET_SERVER_GID
58256+ int "GID to deny server sockets for"
58257+ depends on GRKERNSEC_SOCKET_SERVER
58258+ default 1002
58259+ help
58260+ Here you can choose the GID to disable server socket access for.
58261+ Remember to add the users you want server socket access disabled for to
58262+ the GID specified here. If the sysctl option is enabled, a sysctl
58263+ option with name "socket_server_gid" is created.
58264+
58265+endmenu
58266+menu "Sysctl Support"
58267+depends on GRKERNSEC && SYSCTL
58268+
58269+config GRKERNSEC_SYSCTL
58270+ bool "Sysctl support"
58271+ default y if GRKERNSEC_CONFIG_AUTO
58272+ help
58273+ If you say Y here, you will be able to change the options that
58274+ grsecurity runs with at bootup, without having to recompile your
58275+ kernel. You can echo values to files in /proc/sys/kernel/grsecurity
58276+ to enable (1) or disable (0) various features. All the sysctl entries
58277+ are mutable until the "grsec_lock" entry is set to a non-zero value.
58278+ All features enabled in the kernel configuration are disabled at boot
58279+ if you do not say Y to the "Turn on features by default" option.
58280+ All options should be set at startup, and the grsec_lock entry should
58281+ be set to a non-zero value after all the options are set.
58282+ *THIS IS EXTREMELY IMPORTANT*
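+
+  For example, the last step of an init script could be performed by a
+  minimal helper like the following sketch (error handling shortened):
+
+    /* lock_grsec.c - set grsec_lock once all options are configured */
+    #include <stdio.h>
+
+    int main(void)
+    {
+            FILE *f = fopen("/proc/sys/kernel/grsecurity/grsec_lock", "w");
+
+            if (!f) {
+                    perror("grsec_lock");
+                    return 1;
+            }
+            fputs("1\n", f);
+            fclose(f);
+            return 0;
+    }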
58283+
58284+config GRKERNSEC_SYSCTL_DISTRO
58285+ bool "Extra sysctl support for distro makers (READ HELP)"
58286+ depends on GRKERNSEC_SYSCTL && GRKERNSEC_IO
58287+ help
58288+ If you say Y here, additional sysctl options will be created
58289+ for features that affect processes running as root. Therefore,
58290+ it is critical when using this option that the grsec_lock entry be
58291+ enabled after boot.  Only distros that ship prebuilt kernel packages
58292+ with this option enabled, and that can ensure grsec_lock is set
58293+ after boot, should use this option.
58294+ *Failure to set grsec_lock after boot makes all grsec features
58295+ this option covers useless*
58296+
58297+ Currently this option creates the following sysctl entries:
58298+ "Disable Privileged I/O": "disable_priv_io"
58299+
58300+config GRKERNSEC_SYSCTL_ON
58301+ bool "Turn on features by default"
58302+ default y if GRKERNSEC_CONFIG_AUTO
58303+ depends on GRKERNSEC_SYSCTL
58304+ help
58305+ If you say Y here, the features enabled in the kernel configuration
58306+ will start out enabled at boot time rather than disabled.  It is
58307+ recommended you say Y here unless
58308+ there is some reason you would want all sysctl-tunable features to
58309+ be disabled by default. As mentioned elsewhere, it is important
58310+ to enable the grsec_lock entry once you have finished modifying
58311+ the sysctl entries.
58312+
58313+endmenu
58314+menu "Logging Options"
58315+depends on GRKERNSEC
58316+
58317+config GRKERNSEC_FLOODTIME
58318+ int "Seconds in between log messages (minimum)"
58319+ default 10
58320+ help
58321+ This option allows you to enforce the number of seconds between
58322+ grsecurity log messages. The default should be suitable for most
58323+ people; however, if you choose to change it, pick a value small enough
58324+ to allow informative logs to be produced, but large enough to
58325+ prevent flooding.
58326+
58327+config GRKERNSEC_FLOODBURST
58328+ int "Number of messages in a burst (maximum)"
58329+ default 6
58330+ help
58331+ This option allows you to choose the maximum number of messages allowed
58332+ within the flood time interval you chose in a separate option. The
58333+ default should be suitable for most people; however, if you find that
58334+ many of your logs are being interpreted as flooding, you may want to
58335+ raise this value.
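+
+  Together with the flood time option above, this implements a simple
+  fixed-window rate limit; a minimal sketch of the semantics (not the
+  kernel's actual implementation):
+
+    /* flood_limit.c - at most `burst` messages per `floodtime` seconds */
+    #include <stdio.h>
+    #include <time.h>
+
+    static int should_log(int floodtime, int burst)
+    {
+            static time_t window_start;
+            static int count;
+            time_t now = time(NULL);
+
+            if (now - window_start >= floodtime) {
+                    window_start = now;   /* open a new window */
+                    count = 0;
+            }
+            return count++ < burst;
+    }
+
+    int main(void)
+    {
+            int i;
+
+            for (i = 0; i < 10; i++)
+                    printf("msg %d: %s\n", i,
+                           should_log(10, 6) ? "logged" : "suppressed");
+            return 0;
+    }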
58336+
58337+endmenu
58338diff --git a/grsecurity/Makefile b/grsecurity/Makefile
58339new file mode 100644
58340index 0000000..1b9afa9
58341--- /dev/null
58342+++ b/grsecurity/Makefile
58343@@ -0,0 +1,38 @@
58344+# grsecurity's ACL system was originally written in 2001 by Michael Dalton
58345+# during 2001-2009 it was completely redesigned by Brad Spengler
58346+# into an RBAC system
58347+#
58348+# All code in this directory and various hooks inserted throughout the kernel
58349+# are copyright Brad Spengler - Open Source Security, Inc., and released
58350+# under the GPL v2 or higher
58351+
58352+KBUILD_CFLAGS += -Werror
58353+
58354+obj-y = grsec_chdir.o grsec_chroot.o grsec_exec.o grsec_fifo.o grsec_fork.o \
58355+ grsec_mount.o grsec_sig.o grsec_sysctl.o \
58356+ grsec_time.o grsec_tpe.o grsec_link.o grsec_pax.o grsec_ptrace.o
58357+
58358+obj-$(CONFIG_GRKERNSEC) += grsec_init.o grsum.o gracl.o gracl_segv.o \
58359+ gracl_cap.o gracl_alloc.o gracl_shm.o grsec_mem.o gracl_fs.o \
58360+ gracl_learn.o grsec_log.o
58361+obj-$(CONFIG_GRKERNSEC_RESLOG) += gracl_res.o
58362+
58363+ifdef CONFIG_NET
58364+obj-y += grsec_sock.o
58365+obj-$(CONFIG_GRKERNSEC) += gracl_ip.o
58366+endif
58367+
58368+ifndef CONFIG_GRKERNSEC
58369+obj-y += grsec_disabled.o
58370+endif
58371+
58372+ifdef CONFIG_GRKERNSEC_HIDESYM
58373+extra-y := grsec_hidesym.o
58374+$(obj)/grsec_hidesym.o:
58375+ @-chmod -f 500 /boot
58376+ @-chmod -f 500 /lib/modules
58377+ @-chmod -f 500 /lib64/modules
58378+ @-chmod -f 500 /lib32/modules
58379+ @-chmod -f 700 .
58380+ @echo ' grsec: protected kernel image paths'
58381+endif
58382diff --git a/grsecurity/gracl.c b/grsecurity/gracl.c
58383new file mode 100644
58384index 0000000..0d5c602
58385--- /dev/null
58386+++ b/grsecurity/gracl.c
58387@@ -0,0 +1,4073 @@
58388+#include <linux/kernel.h>
58389+#include <linux/module.h>
58390+#include <linux/sched.h>
58391+#include <linux/mm.h>
58392+#include <linux/file.h>
58393+#include <linux/fs.h>
58394+#include <linux/namei.h>
58395+#include <linux/mount.h>
58396+#include <linux/tty.h>
58397+#include <linux/proc_fs.h>
58398+#include <linux/lglock.h>
58399+#include <linux/slab.h>
58400+#include <linux/vmalloc.h>
58401+#include <linux/types.h>
58402+#include <linux/sysctl.h>
58403+#include <linux/netdevice.h>
58404+#include <linux/ptrace.h>
58405+#include <linux/gracl.h>
58406+#include <linux/gralloc.h>
58407+#include <linux/security.h>
58408+#include <linux/grinternal.h>
58409+#include <linux/pid_namespace.h>
58410+#include <linux/stop_machine.h>
58411+#include <linux/fdtable.h>
58412+#include <linux/percpu.h>
58414+#include <linux/hugetlb.h>
58415+#include <linux/posix-timers.h>
58416+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
58417+#include <linux/magic.h>
58418+#include <linux/pagemap.h>
58419+#include "../fs/btrfs/async-thread.h"
58420+#include "../fs/btrfs/ctree.h"
58421+#include "../fs/btrfs/btrfs_inode.h"
58422+#endif
58423+#include "../fs/mount.h"
58424+
58425+#include <asm/uaccess.h>
58426+#include <asm/errno.h>
58427+#include <asm/mman.h>
58428+
58429+extern struct lglock vfsmount_lock;
58430+
58431+static struct acl_role_db acl_role_set;
58432+static struct name_db name_set;
58433+static struct inodev_db inodev_set;
58434+
58435+/* for keeping track of userspace pointers used for subjects, so we
58436+ can share references in the kernel as well
58437+*/
58438+
58439+static struct path real_root;
58440+
58441+static struct acl_subj_map_db subj_map_set;
58442+
58443+static struct acl_role_label *default_role;
58444+
58445+static struct acl_role_label *role_list;
58446+
58447+static u16 acl_sp_role_value;
58448+
58449+extern char *gr_shared_page[4];
58450+static DEFINE_MUTEX(gr_dev_mutex);
58451+DEFINE_RWLOCK(gr_inode_lock);
58452+
58453+struct gr_arg *gr_usermode;
58454+
58455+static unsigned int gr_status __read_only = GR_STATUS_INIT;
58456+
58457+extern int chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum);
58458+extern void gr_clear_learn_entries(void);
58459+
58460+unsigned char *gr_system_salt;
58461+unsigned char *gr_system_sum;
58462+
58463+static struct sprole_pw **acl_special_roles = NULL;
58464+static __u16 num_sprole_pws = 0;
58465+
58466+static struct acl_role_label *kernel_role = NULL;
58467+
58468+static unsigned int gr_auth_attempts = 0;
58469+static unsigned long gr_auth_expires = 0UL;
58470+
58471+#ifdef CONFIG_NET
58472+extern struct vfsmount *sock_mnt;
58473+#endif
58474+
58475+extern struct vfsmount *pipe_mnt;
58476+extern struct vfsmount *shm_mnt;
58477+
58478+#ifdef CONFIG_HUGETLBFS
58479+extern struct vfsmount *hugetlbfs_vfsmount[HUGE_MAX_HSTATE];
58480+#endif
58481+
58482+static struct acl_object_label *fakefs_obj_rw;
58483+static struct acl_object_label *fakefs_obj_rwx;
58484+
58485+extern int gr_init_uidset(void);
58486+extern void gr_free_uidset(void);
58487+extern void gr_remove_uid(uid_t uid);
58488+extern int gr_find_uid(uid_t uid);
58489+
58490+__inline__ int
58491+gr_acl_is_enabled(void)
58492+{
58493+ return (gr_status & GR_READY);
58494+}
58495+
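+/* note: on btrfs every subvolume shares its superblock's s_dev, so the
+ * device number userspace sees (and that policies are matched against)
+ * comes from the per-root anon_dev; __get_dev() below mirrors what
+ * stat() reports for files on btrfs. */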
58496+static inline dev_t __get_dev(const struct dentry *dentry)
58497+{
58498+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
58499+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
58500+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
58501+ else
58502+#endif
58503+ return dentry->d_sb->s_dev;
58504+}
58505+
58506+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
58507+{
58508+ return __get_dev(dentry);
58509+}
58510+
58511+static char gr_task_roletype_to_char(struct task_struct *task)
58512+{
58513+ switch (task->role->roletype &
58514+ (GR_ROLE_DEFAULT | GR_ROLE_USER | GR_ROLE_GROUP |
58515+ GR_ROLE_SPECIAL)) {
58516+ case GR_ROLE_DEFAULT:
58517+ return 'D';
58518+ case GR_ROLE_USER:
58519+ return 'U';
58520+ case GR_ROLE_GROUP:
58521+ return 'G';
58522+ case GR_ROLE_SPECIAL:
58523+ return 'S';
58524+ }
58525+
58526+ return 'X';
58527+}
58528+
58529+char gr_roletype_to_char(void)
58530+{
58531+ return gr_task_roletype_to_char(current);
58532+}
58533+
58534+__inline__ int
58535+gr_acl_tpe_check(void)
58536+{
58537+ if (unlikely(!(gr_status & GR_READY)))
58538+ return 0;
58539+ if (current->role->roletype & GR_ROLE_TPE)
58540+ return 1;
58541+ else
58542+ return 0;
58543+}
58544+
58545+int
58546+gr_handle_rawio(const struct inode *inode)
58547+{
58548+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
58549+ if (inode && S_ISBLK(inode->i_mode) &&
58550+ grsec_enable_chroot_caps && proc_is_chrooted(current) &&
58551+ !capable(CAP_SYS_RAWIO))
58552+ return 1;
58553+#endif
58554+ return 0;
58555+}
58556+
58557+static int
58558+gr_streq(const char *a, const char *b, const unsigned int lena, const unsigned int lenb)
58559+{
58560+ if (likely(lena != lenb))
58561+ return 0;
58562+
58563+ return !memcmp(a, b, lena);
58564+}
58565+
58566+static int prepend(char **buffer, int *buflen, const char *str, int namelen)
58567+{
58568+ *buflen -= namelen;
58569+ if (*buflen < 0)
58570+ return -ENAMETOOLONG;
58571+ *buffer -= namelen;
58572+ memcpy(*buffer, str, namelen);
58573+ return 0;
58574+}
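+/* worked example: with char buf[16], buflen = 16 and res = buf + 16,
+ * prepend(&res, &buflen, "\0", 1) followed by
+ * prepend(&res, &buflen, "bar", 3) leaves res == buf + 12 pointing at
+ * "bar\0" in the tail of the buffer -- paths are assembled right to
+ * left, which is why the callers below walk from the dentry up toward
+ * the root. */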
58575+
58576+static int prepend_name(char **buffer, int *buflen, struct qstr *name)
58577+{
58578+ return prepend(buffer, buflen, name->name, name->len);
58579+}
58580+
58581+static int prepend_path(const struct path *path, struct path *root,
58582+ char **buffer, int *buflen)
58583+{
58584+ struct dentry *dentry = path->dentry;
58585+ struct vfsmount *vfsmnt = path->mnt;
58586+ struct mount *mnt = real_mount(vfsmnt);
58587+ bool slash = false;
58588+ int error = 0;
58589+
58590+ while (dentry != root->dentry || vfsmnt != root->mnt) {
58591+ struct dentry * parent;
58592+
58593+ if (dentry == vfsmnt->mnt_root || IS_ROOT(dentry)) {
58594+ /* Global root? */
58595+ if (!mnt_has_parent(mnt)) {
58596+ goto out;
58597+ }
58598+ dentry = mnt->mnt_mountpoint;
58599+ mnt = mnt->mnt_parent;
58600+ vfsmnt = &mnt->mnt;
58601+ continue;
58602+ }
58603+ parent = dentry->d_parent;
58604+ prefetch(parent);
58605+ spin_lock(&dentry->d_lock);
58606+ error = prepend_name(buffer, buflen, &dentry->d_name);
58607+ spin_unlock(&dentry->d_lock);
58608+ if (!error)
58609+ error = prepend(buffer, buflen, "/", 1);
58610+ if (error)
58611+ break;
58612+
58613+ slash = true;
58614+ dentry = parent;
58615+ }
58616+
58617+out:
58618+ if (!error && !slash)
58619+ error = prepend(buffer, buflen, "/", 1);
58620+
58621+ return error;
58622+}
58623+
58624+/* this must be called with vfsmount_lock and rename_lock held */
58625+
58626+static char *__our_d_path(const struct path *path, struct path *root,
58627+ char *buf, int buflen)
58628+{
58629+ char *res = buf + buflen;
58630+ int error;
58631+
58632+ prepend(&res, &buflen, "\0", 1);
58633+ error = prepend_path(path, root, &res, &buflen);
58634+ if (error)
58635+ return ERR_PTR(error);
58636+
58637+ return res;
58638+}
58639+
58640+static char *
58641+gen_full_path(struct path *path, struct path *root, char *buf, int buflen)
58642+{
58643+ char *retval;
58644+
58645+ retval = __our_d_path(path, root, buf, buflen);
58646+ if (unlikely(IS_ERR(retval)))
58647+ retval = strcpy(buf, "<path too long>");
58648+ else if (unlikely(retval[1] == '/' && retval[2] == '\0'))
58649+ retval[1] = '\0';
58650+
58651+ return retval;
58652+}
58653+
58654+static char *
58655+__d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
58656+ char *buf, int buflen)
58657+{
58658+ struct path path;
58659+ char *res;
58660+
58661+ path.dentry = (struct dentry *)dentry;
58662+ path.mnt = (struct vfsmount *)vfsmnt;
58663+
58664+ /* we can use real_root.dentry, real_root.mnt, because this is only called
58665+ by the RBAC system */
58666+ res = gen_full_path(&path, &real_root, buf, buflen);
58667+
58668+ return res;
58669+}
58670+
58671+static char *
58672+d_real_path(const struct dentry *dentry, const struct vfsmount *vfsmnt,
58673+ char *buf, int buflen)
58674+{
58675+ char *res;
58676+ struct path path;
58677+ struct path root;
58678+ struct task_struct *reaper = init_pid_ns.child_reaper;
58679+
58680+ path.dentry = (struct dentry *)dentry;
58681+ path.mnt = (struct vfsmount *)vfsmnt;
58682+
58683+ /* we can't use real_root.dentry, real_root.mnt, because they belong only to the RBAC system */
58684+ get_fs_root(reaper->fs, &root);
58685+
58686+ br_read_lock(&vfsmount_lock);
58687+ write_seqlock(&rename_lock);
58688+ res = gen_full_path(&path, &root, buf, buflen);
58689+ write_sequnlock(&rename_lock);
58690+ br_read_unlock(&vfsmount_lock);
58691+
58692+ path_put(&root);
58693+ return res;
58694+}
58695+
58696+static char *
58697+gr_to_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
58698+{
58699+ char *ret;
58700+ br_read_lock(&vfsmount_lock);
58701+ write_seqlock(&rename_lock);
58702+ ret = __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
58703+ PAGE_SIZE);
58704+ write_sequnlock(&rename_lock);
58705+ br_read_unlock(&vfsmount_lock);
58706+ return ret;
58707+}
58708+
58709+static char *
58710+gr_to_proc_filename_rbac(const struct dentry *dentry, const struct vfsmount *mnt)
58711+{
58712+ char *ret;
58713+ char *buf;
58714+ int buflen;
58715+
58716+ br_read_lock(&vfsmount_lock);
58717+ write_seqlock(&rename_lock);
58718+ buf = per_cpu_ptr(gr_shared_page[0], smp_processor_id());
58719+ ret = __d_real_path(dentry, mnt, buf, PAGE_SIZE - 6);
58720+ buflen = (int)(ret - buf);
58721+ if (buflen >= 5)
58722+ prepend(&ret, &buflen, "/proc", 5);
58723+ else
58724+ ret = strcpy(buf, "<path too long>");
58725+ write_sequnlock(&rename_lock);
58726+ br_read_unlock(&vfsmount_lock);
58727+ return ret;
58728+}
58729+
58730+char *
58731+gr_to_filename_nolock(const struct dentry *dentry, const struct vfsmount *mnt)
58732+{
58733+ return __d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0],smp_processor_id()),
58734+ PAGE_SIZE);
58735+}
58736+
58737+char *
58738+gr_to_filename(const struct dentry *dentry, const struct vfsmount *mnt)
58739+{
58740+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
58741+ PAGE_SIZE);
58742+}
58743+
58744+char *
58745+gr_to_filename1(const struct dentry *dentry, const struct vfsmount *mnt)
58746+{
58747+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[1], smp_processor_id()),
58748+ PAGE_SIZE);
58749+}
58750+
58751+char *
58752+gr_to_filename2(const struct dentry *dentry, const struct vfsmount *mnt)
58753+{
58754+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[2], smp_processor_id()),
58755+ PAGE_SIZE);
58756+}
58757+
58758+char *
58759+gr_to_filename3(const struct dentry *dentry, const struct vfsmount *mnt)
58760+{
58761+ return d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[3], smp_processor_id()),
58762+ PAGE_SIZE);
58763+}
58764+
58765+__inline__ __u32
58766+to_gr_audit(const __u32 reqmode)
58767+{
58768+ /* masks off auditable permission flags, then shifts them to create
58769+ auditing flags, and adds the special case of append auditing if
58770+ we're requesting write */
58771+ return (((reqmode & ~GR_AUDITS) << 10) | ((reqmode & GR_WRITE) ? GR_AUDIT_APPEND : 0));
58772+}
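+/* example (assuming the usual flag layout in which each GR_AUDIT_* bit
+ * sits ten bits above its GR_* counterpart): reqmode = GR_READ | GR_WRITE
+ * becomes GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND -- append
+ * auditing rides along because a writable object may also be appended to. */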
58773+
58774+struct acl_subject_label *
58775+lookup_subject_map(const struct acl_subject_label *userp)
58776+{
58777+ unsigned int index = gr_shash(userp, subj_map_set.s_size);
58778+ struct subject_map *match;
58779+
58780+ match = subj_map_set.s_hash[index];
58781+
58782+ while (match && match->user != userp)
58783+ match = match->next;
58784+
58785+ if (match != NULL)
58786+ return match->kernel;
58787+ else
58788+ return NULL;
58789+}
58790+
58791+static void
58792+insert_subj_map_entry(struct subject_map *subjmap)
58793+{
58794+ unsigned int index = gr_shash(subjmap->user, subj_map_set.s_size);
58795+ struct subject_map **curr;
58796+
58797+ subjmap->prev = NULL;
58798+
58799+ curr = &subj_map_set.s_hash[index];
58800+ if (*curr != NULL)
58801+ (*curr)->prev = subjmap;
58802+
58803+ subjmap->next = *curr;
58804+ *curr = subjmap;
58805+
58806+ return;
58807+}
58808+
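+/* role lookup order, as implemented below: first a user role (or a user
+ * domain containing this uid), then a group role or domain for the gid,
+ * and finally the default role.  A candidate carrying an allowed_ips
+ * list is returned only if the task's source IP matches one of its
+ * netmasks; otherwise the search falls through to the next stage. */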
58809+static struct acl_role_label *
58810+lookup_acl_role_label(const struct task_struct *task, const uid_t uid,
58811+ const gid_t gid)
58812+{
58813+ unsigned int index = gr_rhash(uid, GR_ROLE_USER, acl_role_set.r_size);
58814+ struct acl_role_label *match;
58815+ struct role_allowed_ip *ipp;
58816+ unsigned int x;
58817+ u32 curr_ip = task->signal->curr_ip;
58818+
58819+ task->signal->saved_ip = curr_ip;
58820+
58821+ match = acl_role_set.r_hash[index];
58822+
58823+ while (match) {
58824+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_USER)) == (GR_ROLE_DOMAIN | GR_ROLE_USER)) {
58825+ for (x = 0; x < match->domain_child_num; x++) {
58826+ if (match->domain_children[x] == uid)
58827+ goto found;
58828+ }
58829+ } else if (match->uidgid == uid && match->roletype & GR_ROLE_USER)
58830+ break;
58831+ match = match->next;
58832+ }
58833+found:
58834+ if (match == NULL) {
58835+ try_group:
58836+ index = gr_rhash(gid, GR_ROLE_GROUP, acl_role_set.r_size);
58837+ match = acl_role_set.r_hash[index];
58838+
58839+ while (match) {
58840+ if ((match->roletype & (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) == (GR_ROLE_DOMAIN | GR_ROLE_GROUP)) {
58841+ for (x = 0; x < match->domain_child_num; x++) {
58842+ if (match->domain_children[x] == gid)
58843+ goto found2;
58844+ }
58845+ } else if (match->uidgid == gid && match->roletype & GR_ROLE_GROUP)
58846+ break;
58847+ match = match->next;
58848+ }
58849+found2:
58850+ if (match == NULL)
58851+ match = default_role;
58852+ if (match->allowed_ips == NULL)
58853+ return match;
58854+ else {
58855+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
58856+ if (likely
58857+ ((ntohl(curr_ip) & ipp->netmask) ==
58858+ (ntohl(ipp->addr) & ipp->netmask)))
58859+ return match;
58860+ }
58861+ match = default_role;
58862+ }
58863+ } else if (match->allowed_ips == NULL) {
58864+ return match;
58865+ } else {
58866+ for (ipp = match->allowed_ips; ipp; ipp = ipp->next) {
58867+ if (likely
58868+ ((ntohl(curr_ip) & ipp->netmask) ==
58869+ (ntohl(ipp->addr) & ipp->netmask)))
58870+ return match;
58871+ }
58872+ goto try_group;
58873+ }
58874+
58875+ return match;
58876+}
58877+
58878+struct acl_subject_label *
58879+lookup_acl_subj_label(const ino_t ino, const dev_t dev,
58880+ const struct acl_role_label *role)
58881+{
58882+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
58883+ struct acl_subject_label *match;
58884+
58885+ match = role->subj_hash[index];
58886+
58887+ while (match && (match->inode != ino || match->device != dev ||
58888+ (match->mode & GR_DELETED))) {
58889+ match = match->next;
58890+ }
58891+
58892+ if (match && !(match->mode & GR_DELETED))
58893+ return match;
58894+ else
58895+ return NULL;
58896+}
58897+
58898+struct acl_subject_label *
58899+lookup_acl_subj_label_deleted(const ino_t ino, const dev_t dev,
58900+ const struct acl_role_label *role)
58901+{
58902+ unsigned int index = gr_fhash(ino, dev, role->subj_hash_size);
58903+ struct acl_subject_label *match;
58904+
58905+ match = role->subj_hash[index];
58906+
58907+ while (match && (match->inode != ino || match->device != dev ||
58908+ !(match->mode & GR_DELETED))) {
58909+ match = match->next;
58910+ }
58911+
58912+ if (match && (match->mode & GR_DELETED))
58913+ return match;
58914+ else
58915+ return NULL;
58916+}
58917+
58918+static struct acl_object_label *
58919+lookup_acl_obj_label(const ino_t ino, const dev_t dev,
58920+ const struct acl_subject_label *subj)
58921+{
58922+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
58923+ struct acl_object_label *match;
58924+
58925+ match = subj->obj_hash[index];
58926+
58927+ while (match && (match->inode != ino || match->device != dev ||
58928+ (match->mode & GR_DELETED))) {
58929+ match = match->next;
58930+ }
58931+
58932+ if (match && !(match->mode & GR_DELETED))
58933+ return match;
58934+ else
58935+ return NULL;
58936+}
58937+
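+/* the _create lookups prefer an entry flagged GR_DELETED: when a path the
+ * policy knows about was unlinked and is now being recreated, the stale
+ * label is reused for the new file; only when no deleted entry exists do
+ * they fall back to a live one.  lookup_name_entry_create() further down
+ * applies the same two-pass pattern keyed by pathname. */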
58938+static struct acl_object_label *
58939+lookup_acl_obj_label_create(const ino_t ino, const dev_t dev,
58940+ const struct acl_subject_label *subj)
58941+{
58942+ unsigned int index = gr_fhash(ino, dev, subj->obj_hash_size);
58943+ struct acl_object_label *match;
58944+
58945+ match = subj->obj_hash[index];
58946+
58947+ while (match && (match->inode != ino || match->device != dev ||
58948+ !(match->mode & GR_DELETED))) {
58949+ match = match->next;
58950+ }
58951+
58952+ if (match && (match->mode & GR_DELETED))
58953+ return match;
58954+
58955+ match = subj->obj_hash[index];
58956+
58957+ while (match && (match->inode != ino || match->device != dev ||
58958+ (match->mode & GR_DELETED))) {
58959+ match = match->next;
58960+ }
58961+
58962+ if (match && !(match->mode & GR_DELETED))
58963+ return match;
58964+ else
58965+ return NULL;
58966+}
58967+
58968+static struct name_entry *
58969+lookup_name_entry(const char *name)
58970+{
58971+ unsigned int len = strlen(name);
58972+ unsigned int key = full_name_hash(name, len);
58973+ unsigned int index = key % name_set.n_size;
58974+ struct name_entry *match;
58975+
58976+ match = name_set.n_hash[index];
58977+
58978+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len)))
58979+ match = match->next;
58980+
58981+ return match;
58982+}
58983+
58984+static struct name_entry *
58985+lookup_name_entry_create(const char *name)
58986+{
58987+ unsigned int len = strlen(name);
58988+ unsigned int key = full_name_hash(name, len);
58989+ unsigned int index = key % name_set.n_size;
58990+ struct name_entry *match;
58991+
58992+ match = name_set.n_hash[index];
58993+
58994+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
58995+ !match->deleted))
58996+ match = match->next;
58997+
58998+ if (match && match->deleted)
58999+ return match;
59000+
59001+ match = name_set.n_hash[index];
59002+
59003+ while (match && (match->key != key || !gr_streq(match->name, name, match->len, len) ||
59004+ match->deleted))
59005+ match = match->next;
59006+
59007+ if (match && !match->deleted)
59008+ return match;
59009+ else
59010+ return NULL;
59011+}
59012+
59013+static struct inodev_entry *
59014+lookup_inodev_entry(const ino_t ino, const dev_t dev)
59015+{
59016+ unsigned int index = gr_fhash(ino, dev, inodev_set.i_size);
59017+ struct inodev_entry *match;
59018+
59019+ match = inodev_set.i_hash[index];
59020+
59021+ while (match && (match->nentry->inode != ino || match->nentry->device != dev))
59022+ match = match->next;
59023+
59024+ return match;
59025+}
59026+
59027+static void
59028+insert_inodev_entry(struct inodev_entry *entry)
59029+{
59030+ unsigned int index = gr_fhash(entry->nentry->inode, entry->nentry->device,
59031+ inodev_set.i_size);
59032+ struct inodev_entry **curr;
59033+
59034+ entry->prev = NULL;
59035+
59036+ curr = &inodev_set.i_hash[index];
59037+ if (*curr != NULL)
59038+ (*curr)->prev = entry;
59039+
59040+ entry->next = *curr;
59041+ *curr = entry;
59042+
59043+ return;
59044+}
59045+
59046+static void
59047+__insert_acl_role_label(struct acl_role_label *role, uid_t uidgid)
59048+{
59049+ unsigned int index =
59050+ gr_rhash(uidgid, role->roletype & (GR_ROLE_USER | GR_ROLE_GROUP), acl_role_set.r_size);
59051+ struct acl_role_label **curr;
59052+ struct acl_role_label *tmp, *tmp2;
59053+
59054+ curr = &acl_role_set.r_hash[index];
59055+
59056+ /* simple case, slot is empty, just set it to our role */
59057+ if (*curr == NULL) {
59058+ *curr = role;
59059+ } else {
59060+ /* example:
59061+ 1 -> 2 -> 3 (adding 2 -> 3 to here)
59062+ 2 -> 3
59063+ */
59064+ /* first check to see if we can already be reached via this slot */
59065+ tmp = *curr;
59066+ while (tmp && tmp != role)
59067+ tmp = tmp->next;
59068+ if (tmp == role) {
59069+ /* we don't need to add ourselves to this slot's chain */
59070+ return;
59071+ }
59072+ /* we need to add ourselves to this chain, two cases */
59073+ if (role->next == NULL) {
59074+ /* simple case, append the current chain to our role */
59075+ role->next = *curr;
59076+ *curr = role;
59077+ } else {
59078+ /* 1 -> 2 -> 3 -> 4
59079+ 2 -> 3 -> 4
59080+ 3 -> 4 (adding 1 -> 2 -> 3 -> 4 to here)
59081+ */
59082+ /* trickier case: walk our role's chain until we find
59083+ the role for the start of the current slot's chain */
59084+ tmp = role;
59085+ tmp2 = *curr;
59086+ while (tmp->next && tmp->next != tmp2)
59087+ tmp = tmp->next;
59088+ if (tmp->next == tmp2) {
59089+ /* from example above, we found 3, so just
59090+ replace this slot's chain with ours */
59091+ *curr = role;
59092+ } else {
59093+ /* we didn't find a subset of our role's chain
59094+ in the current slot's chain, so append their
59095+ chain to ours, and set us as the first role in
59096+ the slot's chain
59097+
59098+ we could fold this case with the case above,
59099+ but making it explicit for clarity
59100+ */
59101+ tmp->next = tmp2;
59102+ *curr = role;
59103+ }
59104+ }
59105+ }
59106+
59107+ return;
59108+}
59109+
59110+static void
59111+insert_acl_role_label(struct acl_role_label *role)
59112+{
59113+ int i;
59114+
59115+ if (role_list == NULL) {
59116+ role_list = role;
59117+ role->prev = NULL;
59118+ } else {
59119+ role->prev = role_list;
59120+ role_list = role;
59121+ }
59122+
59123+ /* used for hash chains */
59124+ role->next = NULL;
59125+
59126+ if (role->roletype & GR_ROLE_DOMAIN) {
59127+ for (i = 0; i < role->domain_child_num; i++)
59128+ __insert_acl_role_label(role, role->domain_children[i]);
59129+ } else
59130+ __insert_acl_role_label(role, role->uidgid);
59131+}
59132+
59133+static int
59134+insert_name_entry(char *name, const ino_t inode, const dev_t device, __u8 deleted)
59135+{
59136+ struct name_entry **curr, *nentry;
59137+ struct inodev_entry *ientry;
59138+ unsigned int len = strlen(name);
59139+ unsigned int key = full_name_hash(name, len);
59140+ unsigned int index = key % name_set.n_size;
59141+
59142+ curr = &name_set.n_hash[index];
59143+
59144+ while (*curr && ((*curr)->key != key || !gr_streq((*curr)->name, name, (*curr)->len, len)))
59145+ curr = &((*curr)->next);
59146+
59147+ if (*curr != NULL)
59148+ return 1;
59149+
59150+ nentry = acl_alloc(sizeof (struct name_entry));
59151+ if (nentry == NULL)
59152+ return 0;
59153+ ientry = acl_alloc(sizeof (struct inodev_entry));
59154+ if (ientry == NULL)
59155+ return 0;
59156+ ientry->nentry = nentry;
59157+
59158+ nentry->key = key;
59159+ nentry->name = name;
59160+ nentry->inode = inode;
59161+ nentry->device = device;
59162+ nentry->len = len;
59163+ nentry->deleted = deleted;
59164+
59165+ nentry->prev = NULL;
59166+ curr = &name_set.n_hash[index];
59167+ if (*curr != NULL)
59168+ (*curr)->prev = nentry;
59169+ nentry->next = *curr;
59170+ *curr = nentry;
59171+
59172+ /* insert us into the table searchable by inode/dev */
59173+ insert_inodev_entry(ientry);
59174+
59175+ return 1;
59176+}
59177+
59178+static void
59179+insert_acl_obj_label(struct acl_object_label *obj,
59180+ struct acl_subject_label *subj)
59181+{
59182+ unsigned int index =
59183+ gr_fhash(obj->inode, obj->device, subj->obj_hash_size);
59184+ struct acl_object_label **curr;
59185+
59186+
59187+ obj->prev = NULL;
59188+
59189+ curr = &subj->obj_hash[index];
59190+ if (*curr != NULL)
59191+ (*curr)->prev = obj;
59192+
59193+ obj->next = *curr;
59194+ *curr = obj;
59195+
59196+ return;
59197+}
59198+
59199+static void
59200+insert_acl_subj_label(struct acl_subject_label *obj,
59201+ struct acl_role_label *role)
59202+{
59203+ unsigned int index = gr_fhash(obj->inode, obj->device, role->subj_hash_size);
59204+ struct acl_subject_label **curr;
59205+
59206+ obj->prev = NULL;
59207+
59208+ curr = &role->subj_hash[index];
59209+ if (*curr != NULL)
59210+ (*curr)->prev = obj;
59211+
59212+ obj->next = *curr;
59213+ *curr = obj;
59214+
59215+ return;
59216+}
59217+
59218+/* allocating chained hash tables, so optimal size is where lambda ~ 1 */
59219+
59220+static void *
59221+create_table(__u32 * len, int elementsize)
59222+{
59223+ unsigned int table_sizes[] = {
59224+ 7, 13, 31, 61, 127, 251, 509, 1021, 2039, 4093, 8191, 16381,
59225+ 32749, 65521, 131071, 262139, 524287, 1048573, 2097143,
59226+ 4194301, 8388593, 16777213, 33554393, 67108859
59227+ };
59228+ void *newtable = NULL;
59229+ unsigned int pwr = 0;
59230+
59231+ while ((pwr < ((sizeof (table_sizes) / sizeof (table_sizes[0])) - 1)) &&
59232+ table_sizes[pwr] <= *len)
59233+ pwr++;
59234+
59235+ if (table_sizes[pwr] <= *len || (table_sizes[pwr] > ULONG_MAX / elementsize))
59236+ return newtable;
59237+
59238+ if ((table_sizes[pwr] * elementsize) <= PAGE_SIZE)
59239+ newtable =
59240+ kmalloc(table_sizes[pwr] * elementsize, GFP_KERNEL);
59241+ else
59242+ newtable = vmalloc(table_sizes[pwr] * elementsize);
59243+
59244+ *len = table_sizes[pwr];
59245+
59246+ return newtable;
59247+}
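+/* worked example: a request for *len == 100 advances past 7, 13, 31 and 61
+ * and stops at 127, the first prime greater than 100, so the caller gets a
+ * 127-slot table and *len is updated to 127 -- keeping the load factor
+ * (lambda) at or below roughly 1 as the comment above intends. */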
59248+
59249+static int
59250+init_variables(const struct gr_arg *arg)
59251+{
59252+ struct task_struct *reaper = init_pid_ns.child_reaper;
59253+ unsigned int stacksize;
59254+
59255+ subj_map_set.s_size = arg->role_db.num_subjects;
59256+ acl_role_set.r_size = arg->role_db.num_roles + arg->role_db.num_domain_children;
59257+ name_set.n_size = arg->role_db.num_objects;
59258+ inodev_set.i_size = arg->role_db.num_objects;
59259+
59260+ if (!subj_map_set.s_size || !acl_role_set.r_size ||
59261+ !name_set.n_size || !inodev_set.i_size)
59262+ return 1;
59263+
59264+ if (!gr_init_uidset())
59265+ return 1;
59266+
59267+ /* set up the stack that holds allocation info */
59268+
59269+ stacksize = arg->role_db.num_pointers + 5;
59270+
59271+ if (!acl_alloc_stack_init(stacksize))
59272+ return 1;
59273+
59274+ /* grab reference for the real root dentry and vfsmount */
59275+ get_fs_root(reaper->fs, &real_root);
59276+
59277+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
59278+ printk(KERN_ALERT "Obtained real root device=%d, inode=%lu\n", __get_dev(real_root.dentry), real_root.dentry->d_inode->i_ino);
59279+#endif
59280+
59281+ fakefs_obj_rw = acl_alloc(sizeof(struct acl_object_label));
59282+ if (fakefs_obj_rw == NULL)
59283+ return 1;
59284+ fakefs_obj_rw->mode = GR_FIND | GR_READ | GR_WRITE;
59285+
59286+ fakefs_obj_rwx = acl_alloc(sizeof(struct acl_object_label));
59287+ if (fakefs_obj_rwx == NULL)
59288+ return 1;
59289+ fakefs_obj_rwx->mode = GR_FIND | GR_READ | GR_WRITE | GR_EXEC;
59290+
59291+ subj_map_set.s_hash =
59292+ (struct subject_map **) create_table(&subj_map_set.s_size, sizeof(void *));
59293+ acl_role_set.r_hash =
59294+ (struct acl_role_label **) create_table(&acl_role_set.r_size, sizeof(void *));
59295+ name_set.n_hash = (struct name_entry **) create_table(&name_set.n_size, sizeof(void *));
59296+ inodev_set.i_hash =
59297+ (struct inodev_entry **) create_table(&inodev_set.i_size, sizeof(void *));
59298+
59299+ if (!subj_map_set.s_hash || !acl_role_set.r_hash ||
59300+ !name_set.n_hash || !inodev_set.i_hash)
59301+ return 1;
59302+
59303+ memset(subj_map_set.s_hash, 0,
59304+ sizeof(struct subject_map *) * subj_map_set.s_size);
59305+ memset(acl_role_set.r_hash, 0,
59306+ sizeof (struct acl_role_label *) * acl_role_set.r_size);
59307+ memset(name_set.n_hash, 0,
59308+ sizeof (struct name_entry *) * name_set.n_size);
59309+ memset(inodev_set.i_hash, 0,
59310+ sizeof (struct inodev_entry *) * inodev_set.i_size);
59311+
59312+ return 0;
59313+}
59314+
59315+/* free information not needed after startup
59316+ currently contains user->kernel pointer mappings for subjects
59317+*/
59318+
59319+static void
59320+free_init_variables(void)
59321+{
59322+ __u32 i;
59323+
59324+ if (subj_map_set.s_hash) {
59325+ for (i = 0; i < subj_map_set.s_size; i++) {
59326+ if (subj_map_set.s_hash[i]) {
59327+ kfree(subj_map_set.s_hash[i]);
59328+ subj_map_set.s_hash[i] = NULL;
59329+ }
59330+ }
59331+
59332+ if ((subj_map_set.s_size * sizeof (struct subject_map *)) <=
59333+ PAGE_SIZE)
59334+ kfree(subj_map_set.s_hash);
59335+ else
59336+ vfree(subj_map_set.s_hash);
59337+ }
59338+
59339+ return;
59340+}
59341+
59342+static void
59343+free_variables(void)
59344+{
59345+ struct acl_subject_label *s;
59346+ struct acl_role_label *r;
59347+ struct task_struct *task, *task2;
59348+ unsigned int x;
59349+
59350+ gr_clear_learn_entries();
59351+
59352+ read_lock(&tasklist_lock);
59353+ do_each_thread(task2, task) {
59354+ task->acl_sp_role = 0;
59355+ task->acl_role_id = 0;
59356+ task->acl = NULL;
59357+ task->role = NULL;
59358+ } while_each_thread(task2, task);
59359+ read_unlock(&tasklist_lock);
59360+
59361+ /* release the reference to the real root dentry and vfsmount */
59362+ path_put(&real_root);
59363+ memset(&real_root, 0, sizeof(real_root));
59364+
59365+ /* free all object hash tables */
59366+
59367+ FOR_EACH_ROLE_START(r)
59368+ if (r->subj_hash == NULL)
59369+ goto next_role;
59370+ FOR_EACH_SUBJECT_START(r, s, x)
59371+ if (s->obj_hash == NULL)
59372+ break;
59373+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
59374+ kfree(s->obj_hash);
59375+ else
59376+ vfree(s->obj_hash);
59377+ FOR_EACH_SUBJECT_END(s, x)
59378+ FOR_EACH_NESTED_SUBJECT_START(r, s)
59379+ if (s->obj_hash == NULL)
59380+ break;
59381+ if ((s->obj_hash_size * sizeof (struct acl_object_label *)) <= PAGE_SIZE)
59382+ kfree(s->obj_hash);
59383+ else
59384+ vfree(s->obj_hash);
59385+ FOR_EACH_NESTED_SUBJECT_END(s)
59386+ if ((r->subj_hash_size * sizeof (struct acl_subject_label *)) <= PAGE_SIZE)
59387+ kfree(r->subj_hash);
59388+ else
59389+ vfree(r->subj_hash);
59390+ r->subj_hash = NULL;
59391+next_role:
59392+ FOR_EACH_ROLE_END(r)
59393+
59394+ acl_free_all();
59395+
59396+ if (acl_role_set.r_hash) {
59397+ if ((acl_role_set.r_size * sizeof (struct acl_role_label *)) <=
59398+ PAGE_SIZE)
59399+ kfree(acl_role_set.r_hash);
59400+ else
59401+ vfree(acl_role_set.r_hash);
59402+ }
59403+ if (name_set.n_hash) {
59404+ if ((name_set.n_size * sizeof (struct name_entry *)) <=
59405+ PAGE_SIZE)
59406+ kfree(name_set.n_hash);
59407+ else
59408+ vfree(name_set.n_hash);
59409+ }
59410+
59411+ if (inodev_set.i_hash) {
59412+ if ((inodev_set.i_size * sizeof (struct inodev_entry *)) <=
59413+ PAGE_SIZE)
59414+ kfree(inodev_set.i_hash);
59415+ else
59416+ vfree(inodev_set.i_hash);
59417+ }
59418+
59419+ gr_free_uidset();
59420+
59421+ memset(&name_set, 0, sizeof (struct name_db));
59422+ memset(&inodev_set, 0, sizeof (struct inodev_db));
59423+ memset(&acl_role_set, 0, sizeof (struct acl_role_db));
59424+ memset(&subj_map_set, 0, sizeof (struct acl_subj_map_db));
59425+
59426+ default_role = NULL;
59427+ kernel_role = NULL;
59428+ role_list = NULL;
59429+
59430+ return;
59431+}
59432+
59433+static __u32
59434+count_user_objs(struct acl_object_label *userp)
59435+{
59436+ struct acl_object_label o_tmp;
59437+ __u32 num = 0;
59438+
59439+ while (userp) {
59440+ if (copy_from_user(&o_tmp, userp,
59441+ sizeof (struct acl_object_label)))
59442+ break;
59443+
59444+ userp = o_tmp.prev;
59445+ num++;
59446+ }
59447+
59448+ return num;
59449+}
59450+
59451+static struct acl_subject_label *
59452+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied);
59453+
59454+static int
59455+copy_user_glob(struct acl_object_label *obj)
59456+{
59457+ struct acl_object_label *g_tmp, **guser;
59458+ unsigned int len;
59459+ char *tmp;
59460+
59461+ if (obj->globbed == NULL)
59462+ return 0;
59463+
59464+ guser = &obj->globbed;
59465+ while (*guser) {
59466+ g_tmp = (struct acl_object_label *)
59467+ acl_alloc(sizeof (struct acl_object_label));
59468+ if (g_tmp == NULL)
59469+ return -ENOMEM;
59470+
59471+ if (copy_from_user(g_tmp, *guser,
59472+ sizeof (struct acl_object_label)))
59473+ return -EFAULT;
59474+
59475+ len = strnlen_user(g_tmp->filename, PATH_MAX);
59476+
59477+ if (!len || len >= PATH_MAX)
59478+ return -EINVAL;
59479+
59480+ if ((tmp = (char *) acl_alloc(len)) == NULL)
59481+ return -ENOMEM;
59482+
59483+ if (copy_from_user(tmp, g_tmp->filename, len))
59484+ return -EFAULT;
59485+ tmp[len-1] = '\0';
59486+ g_tmp->filename = tmp;
59487+
59488+ *guser = g_tmp;
59489+ guser = &(g_tmp->next);
59490+ }
59491+
59492+ return 0;
59493+}
59494+
59495+static int
59496+copy_user_objs(struct acl_object_label *userp, struct acl_subject_label *subj,
59497+ struct acl_role_label *role)
59498+{
59499+ struct acl_object_label *o_tmp;
59500+ unsigned int len;
59501+ int ret;
59502+ char *tmp;
59503+
59504+ while (userp) {
59505+ if ((o_tmp = (struct acl_object_label *)
59506+ acl_alloc(sizeof (struct acl_object_label))) == NULL)
59507+ return -ENOMEM;
59508+
59509+ if (copy_from_user(o_tmp, userp,
59510+ sizeof (struct acl_object_label)))
59511+ return -EFAULT;
59512+
59513+ userp = o_tmp->prev;
59514+
59515+ len = strnlen_user(o_tmp->filename, PATH_MAX);
59516+
59517+ if (!len || len >= PATH_MAX)
59518+ return -EINVAL;
59519+
59520+ if ((tmp = (char *) acl_alloc(len)) == NULL)
59521+ return -ENOMEM;
59522+
59523+ if (copy_from_user(tmp, o_tmp->filename, len))
59524+ return -EFAULT;
59525+ tmp[len-1] = '\0';
59526+ o_tmp->filename = tmp;
59527+
59528+ insert_acl_obj_label(o_tmp, subj);
59529+ if (!insert_name_entry(o_tmp->filename, o_tmp->inode,
59530+ o_tmp->device, (o_tmp->mode & GR_DELETED) ? 1 : 0))
59531+ return -ENOMEM;
59532+
59533+ ret = copy_user_glob(o_tmp);
59534+ if (ret)
59535+ return ret;
59536+
59537+ if (o_tmp->nested) {
59538+ int already_copied;
59539+
59540+ o_tmp->nested = do_copy_user_subj(o_tmp->nested, role, &already_copied);
59541+ if (IS_ERR(o_tmp->nested))
59542+ return PTR_ERR(o_tmp->nested);
59543+
59544+ /* insert into nested subject list if we haven't copied this one yet
59545+ to prevent duplicate entries */
59546+ if (!already_copied) {
59547+ o_tmp->nested->next = role->hash->first;
59548+ role->hash->first = o_tmp->nested;
59549+ }
59550+ }
59551+ }
59552+
59553+ return 0;
59554+}
59555+
59556+static __u32
59557+count_user_subjs(struct acl_subject_label *userp)
59558+{
59559+ struct acl_subject_label s_tmp;
59560+ __u32 num = 0;
59561+
59562+ while (userp) {
59563+ if (copy_from_user(&s_tmp, userp,
59564+ sizeof (struct acl_subject_label)))
59565+ break;
59566+
59567+ userp = s_tmp.prev;
59568+ }
59569+
59570+ return num;
59571+}
59572+
59573+static int
59574+copy_user_allowedips(struct acl_role_label *rolep)
59575+{
59576+ struct role_allowed_ip *ruserip, *rtmp = NULL, *rlast;
59577+
59578+ ruserip = rolep->allowed_ips;
59579+
59580+ while (ruserip) {
59581+ rlast = rtmp;
59582+
59583+ if ((rtmp = (struct role_allowed_ip *)
59584+ acl_alloc(sizeof (struct role_allowed_ip))) == NULL)
59585+ return -ENOMEM;
59586+
59587+ if (copy_from_user(rtmp, ruserip,
59588+ sizeof (struct role_allowed_ip)))
59589+ return -EFAULT;
59590+
59591+ ruserip = rtmp->prev;
59592+
59593+ if (!rlast) {
59594+ rtmp->prev = NULL;
59595+ rolep->allowed_ips = rtmp;
59596+ } else {
59597+ rlast->next = rtmp;
59598+ rtmp->prev = rlast;
59599+ }
59600+
59601+ if (!ruserip)
59602+ rtmp->next = NULL;
59603+ }
59604+
59605+ return 0;
59606+}
59607+
59608+static int
59609+copy_user_transitions(struct acl_role_label *rolep)
59610+{
59611+ struct role_transition *rusertp, *rtmp = NULL, *rlast;
59612+
59613+ unsigned int len;
59614+ char *tmp;
59615+
59616+ rusertp = rolep->transitions;
59617+
59618+ while (rusertp) {
59619+ rlast = rtmp;
59620+
59621+ if ((rtmp = (struct role_transition *)
59622+ acl_alloc(sizeof (struct role_transition))) == NULL)
59623+ return -ENOMEM;
59624+
59625+ if (copy_from_user(rtmp, rusertp,
59626+ sizeof (struct role_transition)))
59627+ return -EFAULT;
59628+
59629+ rusertp = rtmp->prev;
59630+
59631+ len = strnlen_user(rtmp->rolename, GR_SPROLE_LEN);
59632+
59633+ if (!len || len >= GR_SPROLE_LEN)
59634+ return -EINVAL;
59635+
59636+ if ((tmp = (char *) acl_alloc(len)) == NULL)
59637+ return -ENOMEM;
59638+
59639+ if (copy_from_user(tmp, rtmp->rolename, len))
59640+ return -EFAULT;
59641+ tmp[len-1] = '\0';
59642+ rtmp->rolename = tmp;
59643+
59644+ if (!rlast) {
59645+ rtmp->prev = NULL;
59646+ rolep->transitions = rtmp;
59647+ } else {
59648+ rlast->next = rtmp;
59649+ rtmp->prev = rlast;
59650+ }
59651+
59652+ if (!rusertp)
59653+ rtmp->next = NULL;
59654+ }
59655+
59656+ return 0;
59657+}
59658+
59659+static struct acl_subject_label *
59660+do_copy_user_subj(struct acl_subject_label *userp, struct acl_role_label *role, int *already_copied)
59661+{
59662+ struct acl_subject_label *s_tmp = NULL, *s_tmp2;
59663+ unsigned int len;
59664+ char *tmp;
59665+ __u32 num_objs;
59666+ struct acl_ip_label **i_tmp, *i_utmp2;
59667+ struct gr_hash_struct ghash;
59668+ struct subject_map *subjmap;
59669+ unsigned int i_num;
59670+ int err;
59671+
59672+ if (already_copied != NULL)
59673+ *already_copied = 0;
59674+
59675+ s_tmp = lookup_subject_map(userp);
59676+
59677+ /* we've already copied this subject into the kernel, just return
59678+ the reference to it, and don't copy it over again
59679+ */
59680+ if (s_tmp) {
59681+ if (already_copied != NULL)
59682+ *already_copied = 1;
59683+ return(s_tmp);
59684+ }
59685+
59686+ if ((s_tmp = (struct acl_subject_label *)
59687+ acl_alloc(sizeof (struct acl_subject_label))) == NULL)
59688+ return ERR_PTR(-ENOMEM);
59689+
59690+ subjmap = (struct subject_map *)kmalloc(sizeof (struct subject_map), GFP_KERNEL);
59691+ if (subjmap == NULL)
59692+ return ERR_PTR(-ENOMEM);
59693+
59694+ subjmap->user = userp;
59695+ subjmap->kernel = s_tmp;
59696+ insert_subj_map_entry(subjmap);
59697+
59698+ if (copy_from_user(s_tmp, userp,
59699+ sizeof (struct acl_subject_label)))
59700+ return ERR_PTR(-EFAULT);
59701+
59702+ len = strnlen_user(s_tmp->filename, PATH_MAX);
59703+
59704+ if (!len || len >= PATH_MAX)
59705+ return ERR_PTR(-EINVAL);
59706+
59707+ if ((tmp = (char *) acl_alloc(len)) == NULL)
59708+ return ERR_PTR(-ENOMEM);
59709+
59710+ if (copy_from_user(tmp, s_tmp->filename, len))
59711+ return ERR_PTR(-EFAULT);
59712+ tmp[len-1] = '\0';
59713+ s_tmp->filename = tmp;
59714+
59715+ if (!strcmp(s_tmp->filename, "/"))
59716+ role->root_label = s_tmp;
59717+
59718+ if (copy_from_user(&ghash, s_tmp->hash, sizeof(struct gr_hash_struct)))
59719+ return ERR_PTR(-EFAULT);
59720+
59721+ /* copy user and group transition tables */
59722+
59723+ if (s_tmp->user_trans_num) {
59724+ uid_t *uidlist;
59725+
59726+ uidlist = (uid_t *)acl_alloc_num(s_tmp->user_trans_num, sizeof(uid_t));
59727+ if (uidlist == NULL)
59728+ return ERR_PTR(-ENOMEM);
59729+ if (copy_from_user(uidlist, s_tmp->user_transitions, s_tmp->user_trans_num * sizeof(uid_t)))
59730+ return ERR_PTR(-EFAULT);
59731+
59732+ s_tmp->user_transitions = uidlist;
59733+ }
59734+
59735+ if (s_tmp->group_trans_num) {
59736+ gid_t *gidlist;
59737+
59738+ gidlist = (gid_t *)acl_alloc_num(s_tmp->group_trans_num, sizeof(gid_t));
59739+ if (gidlist == NULL)
59740+ return ERR_PTR(-ENOMEM);
59741+ if (copy_from_user(gidlist, s_tmp->group_transitions, s_tmp->group_trans_num * sizeof(gid_t)))
59742+ return ERR_PTR(-EFAULT);
59743+
59744+ s_tmp->group_transitions = gidlist;
59745+ }
59746+
59747+ /* set up object hash table */
59748+ num_objs = count_user_objs(ghash.first);
59749+
59750+ s_tmp->obj_hash_size = num_objs;
59751+ s_tmp->obj_hash =
59752+ (struct acl_object_label **)
59753+ create_table(&(s_tmp->obj_hash_size), sizeof(void *));
59754+
59755+ if (!s_tmp->obj_hash)
59756+ return ERR_PTR(-ENOMEM);
59757+
59758+ memset(s_tmp->obj_hash, 0,
59759+ s_tmp->obj_hash_size *
59760+ sizeof (struct acl_object_label *));
59761+
59762+ /* add in objects */
59763+ err = copy_user_objs(ghash.first, s_tmp, role);
59764+
59765+ if (err)
59766+ return ERR_PTR(err);
59767+
59768+ /* set pointer for parent subject */
59769+ if (s_tmp->parent_subject) {
59770+ s_tmp2 = do_copy_user_subj(s_tmp->parent_subject, role, NULL);
59771+
59772+ if (IS_ERR(s_tmp2))
59773+ return s_tmp2;
59774+
59775+ s_tmp->parent_subject = s_tmp2;
59776+ }
59777+
59778+ /* add in ip acls */
59779+
59780+ if (!s_tmp->ip_num) {
59781+ s_tmp->ips = NULL;
59782+ goto insert;
59783+ }
59784+
59785+ i_tmp =
59786+ (struct acl_ip_label **) acl_alloc_num(s_tmp->ip_num,
59787+ sizeof (struct acl_ip_label *));
59788+
59789+ if (!i_tmp)
59790+ return ERR_PTR(-ENOMEM);
59791+
59792+ for (i_num = 0; i_num < s_tmp->ip_num; i_num++) {
59793+ *(i_tmp + i_num) =
59794+ (struct acl_ip_label *)
59795+ acl_alloc(sizeof (struct acl_ip_label));
59796+ if (!*(i_tmp + i_num))
59797+ return ERR_PTR(-ENOMEM);
59798+
59799+ if (copy_from_user
59800+ (&i_utmp2, s_tmp->ips + i_num,
59801+ sizeof (struct acl_ip_label *)))
59802+ return ERR_PTR(-EFAULT);
59803+
59804+ if (copy_from_user
59805+ (*(i_tmp + i_num), i_utmp2,
59806+ sizeof (struct acl_ip_label)))
59807+ return ERR_PTR(-EFAULT);
59808+
59809+ if ((*(i_tmp + i_num))->iface == NULL)
59810+ continue;
59811+
59812+ len = strnlen_user((*(i_tmp + i_num))->iface, IFNAMSIZ);
59813+ if (!len || len >= IFNAMSIZ)
59814+ return ERR_PTR(-EINVAL);
59815+ tmp = acl_alloc(len);
59816+ if (tmp == NULL)
59817+ return ERR_PTR(-ENOMEM);
59818+ if (copy_from_user(tmp, (*(i_tmp + i_num))->iface, len))
59819+ return ERR_PTR(-EFAULT);
59820+ (*(i_tmp + i_num))->iface = tmp;
59821+ }
59822+
59823+ s_tmp->ips = i_tmp;
59824+
59825+insert:
59826+ if (!insert_name_entry(s_tmp->filename, s_tmp->inode,
59827+ s_tmp->device, (s_tmp->mode & GR_DELETED) ? 1 : 0))
59828+ return ERR_PTR(-ENOMEM);
59829+
59830+ return s_tmp;
59831+}
59832+
59833+static int
59834+copy_user_subjs(struct acl_subject_label *userp, struct acl_role_label *role)
59835+{
59836+ struct acl_subject_label s_pre;
59837+ struct acl_subject_label * ret;
59838+ int err;
59839+
59840+ while (userp) {
59841+ if (copy_from_user(&s_pre, userp,
59842+ sizeof (struct acl_subject_label)))
59843+ return -EFAULT;
59844+
59845+ ret = do_copy_user_subj(userp, role, NULL);
59846+
59847+ err = PTR_ERR(ret);
59848+ if (IS_ERR(ret))
59849+ return err;
59850+
59851+ insert_acl_subj_label(ret, role);
59852+
59853+ userp = s_pre.prev;
59854+ }
59855+
59856+ return 0;
59857+}
59858+
59859+static int
59860+copy_user_acl(struct gr_arg *arg)
59861+{
59862+ struct acl_role_label *r_tmp = NULL, **r_utmp, *r_utmp2;
59863+ struct acl_subject_label *subj_list;
59864+ struct sprole_pw *sptmp;
59865+ struct gr_hash_struct *ghash;
59866+ uid_t *domainlist;
59867+ unsigned int r_num;
59868+ unsigned int len;
59869+ char *tmp;
59870+ int err = 0;
59871+ __u16 i;
59872+ __u32 num_subjs;
59873+
59874+ /* we need a default and kernel role */
59875+ if (arg->role_db.num_roles < 2)
59876+ return -EINVAL;
59877+
59878+ /* copy special role authentication info from userspace */
59879+
59880+ num_sprole_pws = arg->num_sprole_pws;
59881+ acl_special_roles = (struct sprole_pw **) acl_alloc_num(num_sprole_pws, sizeof(struct sprole_pw *));
59882+
59883+ if (!acl_special_roles && num_sprole_pws)
59884+ return -ENOMEM;
59885+
59886+ for (i = 0; i < num_sprole_pws; i++) {
59887+ sptmp = (struct sprole_pw *) acl_alloc(sizeof(struct sprole_pw));
59888+ if (!sptmp)
59889+ return -ENOMEM;
59890+ if (copy_from_user(sptmp, arg->sprole_pws + i,
59891+ sizeof (struct sprole_pw)))
59892+ return -EFAULT;
59893+
59894+ len = strnlen_user(sptmp->rolename, GR_SPROLE_LEN);
59895+
59896+ if (!len || len >= GR_SPROLE_LEN)
59897+ return -EINVAL;
59898+
59899+ if ((tmp = (char *) acl_alloc(len)) == NULL)
59900+ return -ENOMEM;
59901+
59902+ if (copy_from_user(tmp, sptmp->rolename, len))
59903+ return -EFAULT;
59904+
59905+ tmp[len-1] = '\0';
59906+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
59907+ printk(KERN_ALERT "Copying special role %s\n", tmp);
59908+#endif
59909+ sptmp->rolename = tmp;
59910+ acl_special_roles[i] = sptmp;
59911+ }
59912+
59913+ r_utmp = (struct acl_role_label **) arg->role_db.r_table;
59914+
59915+ for (r_num = 0; r_num < arg->role_db.num_roles; r_num++) {
59916+ r_tmp = acl_alloc(sizeof (struct acl_role_label));
59917+
59918+ if (!r_tmp)
59919+ return -ENOMEM;
59920+
59921+ if (copy_from_user(&r_utmp2, r_utmp + r_num,
59922+ sizeof (struct acl_role_label *)))
59923+ return -EFAULT;
59924+
59925+ if (copy_from_user(r_tmp, r_utmp2,
59926+ sizeof (struct acl_role_label)))
59927+ return -EFAULT;
59928+
59929+ len = strnlen_user(r_tmp->rolename, GR_SPROLE_LEN);
59930+
59931+		if (!len || len >= GR_SPROLE_LEN)
59932+ return -EINVAL;
59933+
59934+ if ((tmp = (char *) acl_alloc(len)) == NULL)
59935+ return -ENOMEM;
59936+
59937+ if (copy_from_user(tmp, r_tmp->rolename, len))
59938+ return -EFAULT;
59939+
59940+ tmp[len-1] = '\0';
59941+ r_tmp->rolename = tmp;
59942+
59943+ if (!strcmp(r_tmp->rolename, "default")
59944+ && (r_tmp->roletype & GR_ROLE_DEFAULT)) {
59945+ default_role = r_tmp;
59946+ } else if (!strcmp(r_tmp->rolename, ":::kernel:::")) {
59947+ kernel_role = r_tmp;
59948+ }
59949+
59950+ if ((ghash = (struct gr_hash_struct *) acl_alloc(sizeof(struct gr_hash_struct))) == NULL)
59951+ return -ENOMEM;
59952+
59953+ if (copy_from_user(ghash, r_tmp->hash, sizeof(struct gr_hash_struct)))
59954+ return -EFAULT;
59955+
59956+ r_tmp->hash = ghash;
59957+
59958+ num_subjs = count_user_subjs(r_tmp->hash->first);
59959+
59960+ r_tmp->subj_hash_size = num_subjs;
59961+ r_tmp->subj_hash =
59962+ (struct acl_subject_label **)
59963+ create_table(&(r_tmp->subj_hash_size), sizeof(void *));
59964+
59965+ if (!r_tmp->subj_hash)
59966+ return -ENOMEM;
59967+
59968+ err = copy_user_allowedips(r_tmp);
59969+ if (err)
59970+ return err;
59971+
59972+ /* copy domain info */
59973+ if (r_tmp->domain_children != NULL) {
59974+ domainlist = acl_alloc_num(r_tmp->domain_child_num, sizeof(uid_t));
59975+ if (domainlist == NULL)
59976+ return -ENOMEM;
59977+
59978+ if (copy_from_user(domainlist, r_tmp->domain_children, r_tmp->domain_child_num * sizeof(uid_t)))
59979+ return -EFAULT;
59980+
59981+ r_tmp->domain_children = domainlist;
59982+ }
59983+
59984+ err = copy_user_transitions(r_tmp);
59985+ if (err)
59986+ return err;
59987+
59988+ memset(r_tmp->subj_hash, 0,
59989+ r_tmp->subj_hash_size *
59990+ sizeof (struct acl_subject_label *));
59991+
59992+ /* acquire the list of subjects, then NULL out
59993+ the list prior to parsing the subjects for this role,
59994+ as during this parsing the list is replaced with a list
59995+ of *nested* subjects for the role
59996+ */
59997+ subj_list = r_tmp->hash->first;
59998+
59999+ /* set nested subject list to null */
60000+ r_tmp->hash->first = NULL;
60001+
60002+ err = copy_user_subjs(subj_list, r_tmp);
60003+
60004+ if (err)
60005+ return err;
60006+
60007+ insert_acl_role_label(r_tmp);
60008+ }
60009+
60010+ if (default_role == NULL || kernel_role == NULL)
60011+ return -EINVAL;
60012+
60013+ return err;
60014+}
60015+
60016+static int
60017+gracl_init(struct gr_arg *args)
60018+{
60019+ int error = 0;
60020+
60021+ memcpy(gr_system_salt, args->salt, GR_SALT_LEN);
60022+ memcpy(gr_system_sum, args->sum, GR_SHA_LEN);
60023+
60024+ if (init_variables(args)) {
60025+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_INITF_ACL_MSG, GR_VERSION);
60026+ error = -ENOMEM;
60027+ free_variables();
60028+ goto out;
60029+ }
60030+
60031+ error = copy_user_acl(args);
60032+ free_init_variables();
60033+ if (error) {
60034+ free_variables();
60035+ goto out;
60036+ }
60037+
60038+ if ((error = gr_set_acls(0))) {
60039+ free_variables();
60040+ goto out;
60041+ }
60042+
60043+ pax_open_kernel();
60044+ gr_status |= GR_READY;
60045+ pax_close_kernel();
60046+
60047+ out:
60048+ return error;
60049+}
60050+
60051+/* derived from glibc fnmatch(); returns 0 on match, 1 on no match */
60052+
60053+static int
60054+glob_match(const char *p, const char *n)
60055+{
60056+ char c;
60057+
60058+ while ((c = *p++) != '\0') {
60059+ switch (c) {
60060+ case '?':
60061+ if (*n == '\0')
60062+ return 1;
60063+ else if (*n == '/')
60064+ return 1;
60065+ break;
60066+ case '\\':
60067+ if (*n != c)
60068+ return 1;
60069+ break;
60070+ case '*':
60071+ for (c = *p++; c == '?' || c == '*'; c = *p++) {
60072+ if (*n == '/')
60073+ return 1;
60074+ else if (c == '?') {
60075+ if (*n == '\0')
60076+ return 1;
60077+ else
60078+ ++n;
60079+ }
60080+ }
60081+ if (c == '\0') {
60082+ return 0;
60083+ } else {
60084+ const char *endp;
60085+
60086+ if ((endp = strchr(n, '/')) == NULL)
60087+ endp = n + strlen(n);
60088+
60089+ if (c == '[') {
60090+ for (--p; n < endp; ++n)
60091+ if (!glob_match(p, n))
60092+ return 0;
60093+ } else if (c == '/') {
60094+ while (*n != '\0' && *n != '/')
60095+ ++n;
60096+ if (*n == '/' && !glob_match(p, n + 1))
60097+ return 0;
60098+ } else {
60099+ for (--p; n < endp; ++n)
60100+ if (*n == c && !glob_match(p, n))
60101+ return 0;
60102+ }
60103+
60104+ return 1;
60105+ }
60106+ case '[':
60107+ {
60108+ int not;
60109+ char cold;
60110+
60111+ if (*n == '\0' || *n == '/')
60112+ return 1;
60113+
60114+ not = (*p == '!' || *p == '^');
60115+ if (not)
60116+ ++p;
60117+
60118+ c = *p++;
60119+ for (;;) {
60120+ unsigned char fn = (unsigned char)*n;
60121+
60122+ if (c == '\0')
60123+ return 1;
60124+ else {
60125+ if (c == fn)
60126+ goto matched;
60127+ cold = c;
60128+ c = *p++;
60129+
60130+ if (c == '-' && *p != ']') {
60131+ unsigned char cend = *p++;
60132+
60133+ if (cend == '\0')
60134+ return 1;
60135+
60136+ if (cold <= fn && fn <= cend)
60137+ goto matched;
60138+
60139+ c = *p++;
60140+ }
60141+ }
60142+
60143+ if (c == ']')
60144+ break;
60145+ }
60146+ if (!not)
60147+ return 1;
60148+ break;
60149+ matched:
60150+ while (c != ']') {
60151+ if (c == '\0')
60152+ return 1;
60153+
60154+ c = *p++;
60155+ }
60156+ if (not)
60157+ return 1;
60158+ }
60159+ break;
60160+ default:
60161+ if (c != *n)
60162+ return 1;
60163+ }
60164+
60165+ ++n;
60166+ }
60167+
60168+ if (*n == '\0')
60169+ return 0;
60170+
60171+ if (*n == '/')
60172+ return 0;
60173+
60174+ return 1;
60175+}
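
Note the inverted convention inherited from fnmatch(): glob_match() returns 0 on a match and 1 on a mismatch, so callers test it with !glob_match(...). A hypothetical userspace harness (not part of the patch; it assumes the glob_match() definition above has been pasted into the same file) illustrating how '?', '*', ranges, and the path separator behave:

#include <stdio.h>

/* paste the glob_match() definition from the patch above this line */
static int glob_match(const char *p, const char *n);

int main(void)
{
	/* 0 means match, 1 means no match */
	printf("%d\n", glob_match("/tmp/*", "/tmp/file"));         /* 0 */
	printf("%d\n", glob_match("/tmp/*", "/tmp/a/b"));          /* 0: a trailing '*' also crosses '/' */
	printf("%d\n", glob_match("/tmp/*/b", "/tmp/a/b"));        /* 0: '/' in the pattern recurses per component */
	printf("%d\n", glob_match("/tmp/*.log", "/tmp/a/x.log"));  /* 1: '*' with a suffix stops at '/' */
	printf("%d\n", glob_match("/tmp/?", "/tmp/ab"));           /* 1: '?' matches exactly one character */
	printf("%d\n", glob_match("/tmp/[a-c]x", "/tmp/bx"));      /* 0: character range */
	return 0;
}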
60176+
60177+static struct acl_object_label *
60178+chk_glob_label(struct acl_object_label *globbed,
60179+ const struct dentry *dentry, const struct vfsmount *mnt, char **path)
60180+{
60181+ struct acl_object_label *tmp;
60182+
60183+ if (*path == NULL)
60184+ *path = gr_to_filename_nolock(dentry, mnt);
60185+
60186+ tmp = globbed;
60187+
60188+ while (tmp) {
60189+ if (!glob_match(tmp->filename, *path))
60190+ return tmp;
60191+ tmp = tmp->next;
60192+ }
60193+
60194+ return NULL;
60195+}
60196+
60197+static struct acl_object_label *
60198+__full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
60199+ const ino_t curr_ino, const dev_t curr_dev,
60200+ const struct acl_subject_label *subj, char **path, const int checkglob)
60201+{
60202+ struct acl_subject_label *tmpsubj;
60203+ struct acl_object_label *retval;
60204+ struct acl_object_label *retval2;
60205+
60206+ tmpsubj = (struct acl_subject_label *) subj;
60207+ read_lock(&gr_inode_lock);
60208+ do {
60209+ retval = lookup_acl_obj_label(curr_ino, curr_dev, tmpsubj);
60210+ if (retval) {
60211+ if (checkglob && retval->globbed) {
60212+ retval2 = chk_glob_label(retval->globbed, orig_dentry, orig_mnt, path);
60213+ if (retval2)
60214+ retval = retval2;
60215+ }
60216+ break;
60217+ }
60218+ } while ((tmpsubj = tmpsubj->parent_subject));
60219+ read_unlock(&gr_inode_lock);
60220+
60221+ return retval;
60222+}
60223+
60224+static __inline__ struct acl_object_label *
60225+full_lookup(const struct dentry *orig_dentry, const struct vfsmount *orig_mnt,
60226+ struct dentry *curr_dentry,
60227+ const struct acl_subject_label *subj, char **path, const int checkglob)
60228+{
60229+ int newglob = checkglob;
60230+ ino_t inode;
60231+ dev_t device;
60232+
60233+	/* if we aren't checking a subdirectory of the original path yet, don't do glob checking,
60234+	   as we don't want a / * rule to match instead of the / object.
60235+	   don't do this for create lookups that call this function, though, since they look up
60236+	   on the parent and thus need globbing checks on all paths
60237+	*/
60238+ if (orig_dentry == curr_dentry && newglob != GR_CREATE_GLOB)
60239+ newglob = GR_NO_GLOB;
60240+
60241+ spin_lock(&curr_dentry->d_lock);
60242+ inode = curr_dentry->d_inode->i_ino;
60243+ device = __get_dev(curr_dentry);
60244+ spin_unlock(&curr_dentry->d_lock);
60245+
60246+ return __full_lookup(orig_dentry, orig_mnt, inode, device, subj, path, newglob);
60247+}
60248+
60249+#ifdef CONFIG_HUGETLBFS
60250+static inline bool
60251+is_hugetlbfs_mnt(const struct vfsmount *mnt)
60252+{
60253+ int i;
60254+ for (i = 0; i < HUGE_MAX_HSTATE; i++) {
60255+ if (unlikely(hugetlbfs_vfsmount[i] == mnt))
60256+ return true;
60257+ }
60258+
60259+ return false;
60260+}
60261+#endif
60262+
60263+static struct acl_object_label *
60264+__chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
60265+ const struct acl_subject_label *subj, char *path, const int checkglob)
60266+{
60267+ struct dentry *dentry = (struct dentry *) l_dentry;
60268+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
60269+ struct mount *real_mnt = real_mount(mnt);
60270+ struct acl_object_label *retval;
60271+ struct dentry *parent;
60272+
60273+ br_read_lock(&vfsmount_lock);
60274+ write_seqlock(&rename_lock);
60275+
60276+ if (unlikely((mnt == shm_mnt && dentry->d_inode->i_nlink == 0) || mnt == pipe_mnt ||
60277+#ifdef CONFIG_NET
60278+ mnt == sock_mnt ||
60279+#endif
60280+#ifdef CONFIG_HUGETLBFS
60281+ (is_hugetlbfs_mnt(mnt) && dentry->d_inode->i_nlink == 0) ||
60282+#endif
60283+ /* ignore Eric Biederman */
60284+ IS_PRIVATE(l_dentry->d_inode))) {
60285+ retval = (subj->mode & GR_SHMEXEC) ? fakefs_obj_rwx : fakefs_obj_rw;
60286+ goto out;
60287+ }
60288+
60289+ for (;;) {
60290+ if (dentry == real_root.dentry && mnt == real_root.mnt)
60291+ break;
60292+
60293+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
60294+ if (!mnt_has_parent(real_mnt))
60295+ break;
60296+
60297+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
60298+ if (retval != NULL)
60299+ goto out;
60300+
60301+ dentry = real_mnt->mnt_mountpoint;
60302+ real_mnt = real_mnt->mnt_parent;
60303+ mnt = &real_mnt->mnt;
60304+ continue;
60305+ }
60306+
60307+ parent = dentry->d_parent;
60308+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
60309+ if (retval != NULL)
60310+ goto out;
60311+
60312+ dentry = parent;
60313+ }
60314+
60315+ retval = full_lookup(l_dentry, l_mnt, dentry, subj, &path, checkglob);
60316+
60317+ /* real_root is pinned so we don't have to hold a reference */
60318+ if (retval == NULL)
60319+ retval = full_lookup(l_dentry, l_mnt, real_root.dentry, subj, &path, checkglob);
60320+out:
60321+ write_sequnlock(&rename_lock);
60322+ br_read_unlock(&vfsmount_lock);
60323+
60324+ BUG_ON(retval == NULL);
60325+
60326+ return retval;
60327+}
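
The loop in __chk_obj_label() walks from the target dentry upward toward the pinned real_root, trying the most specific configured object first and hopping across mounts via mnt_mountpoint when it reaches a filesystem root. A toy sketch of that longest-prefix walk over plain strings, ignoring mounts, hashing, and locking; policy[], lookup(), and match_object() are illustrative names only:

#include <stdio.h>
#include <string.h>

/* toy policy: the most specific configured path wins */
static const char *policy[] = { "/", "/home", "/home/user/secret" };

static const char *lookup(const char *path)
{
	size_t i;
	for (i = 0; i < sizeof(policy) / sizeof(policy[0]); i++)
		if (!strcmp(policy[i], path))
			return policy[i];
	return NULL;
}

/* walk from 'path' toward "/" and return the closest matching label */
static const char *match_object(char *path)
{
	const char *hit;
	char *slash;

	for (;;) {
		hit = lookup(path);
		if (hit)
			return hit;
		slash = strrchr(path, '/');
		if (slash == path) {	/* reached the root component */
			path[1] = '\0';
			return lookup(path);
		}
		*slash = '\0';	/* move to the parent directory */
	}
}

int main(void)
{
	char a[] = "/home/user/secret/file";
	char b[] = "/etc/passwd";

	printf("%s\n", match_object(a)); /* /home/user/secret */
	printf("%s\n", match_object(b)); /* / */
	return 0;
}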
60328+
60329+static __inline__ struct acl_object_label *
60330+chk_obj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
60331+ const struct acl_subject_label *subj)
60332+{
60333+ char *path = NULL;
60334+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_REG_GLOB);
60335+}
60336+
60337+static __inline__ struct acl_object_label *
60338+chk_obj_label_noglob(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
60339+ const struct acl_subject_label *subj)
60340+{
60341+ char *path = NULL;
60342+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_NO_GLOB);
60343+}
60344+
60345+static __inline__ struct acl_object_label *
60346+chk_obj_create_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
60347+ const struct acl_subject_label *subj, char *path)
60348+{
60349+ return __chk_obj_label(l_dentry, l_mnt, subj, path, GR_CREATE_GLOB);
60350+}
60351+
60352+static struct acl_subject_label *
60353+chk_subj_label(const struct dentry *l_dentry, const struct vfsmount *l_mnt,
60354+ const struct acl_role_label *role)
60355+{
60356+ struct dentry *dentry = (struct dentry *) l_dentry;
60357+ struct vfsmount *mnt = (struct vfsmount *) l_mnt;
60358+ struct mount *real_mnt = real_mount(mnt);
60359+ struct acl_subject_label *retval;
60360+ struct dentry *parent;
60361+
60362+ br_read_lock(&vfsmount_lock);
60363+ write_seqlock(&rename_lock);
60364+
60365+ for (;;) {
60366+ if (dentry == real_root.dentry && mnt == real_root.mnt)
60367+ break;
60368+ if (dentry == mnt->mnt_root || IS_ROOT(dentry)) {
60369+ if (!mnt_has_parent(real_mnt))
60370+ break;
60371+
60372+ spin_lock(&dentry->d_lock);
60373+ read_lock(&gr_inode_lock);
60374+ retval =
60375+ lookup_acl_subj_label(dentry->d_inode->i_ino,
60376+ __get_dev(dentry), role);
60377+ read_unlock(&gr_inode_lock);
60378+ spin_unlock(&dentry->d_lock);
60379+ if (retval != NULL)
60380+ goto out;
60381+
60382+ dentry = real_mnt->mnt_mountpoint;
60383+ real_mnt = real_mnt->mnt_parent;
60384+ mnt = &real_mnt->mnt;
60385+ continue;
60386+ }
60387+
60388+ spin_lock(&dentry->d_lock);
60389+ read_lock(&gr_inode_lock);
60390+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
60391+ __get_dev(dentry), role);
60392+ read_unlock(&gr_inode_lock);
60393+ parent = dentry->d_parent;
60394+ spin_unlock(&dentry->d_lock);
60395+
60396+ if (retval != NULL)
60397+ goto out;
60398+
60399+ dentry = parent;
60400+ }
60401+
60402+ spin_lock(&dentry->d_lock);
60403+ read_lock(&gr_inode_lock);
60404+ retval = lookup_acl_subj_label(dentry->d_inode->i_ino,
60405+ __get_dev(dentry), role);
60406+ read_unlock(&gr_inode_lock);
60407+ spin_unlock(&dentry->d_lock);
60408+
60409+ if (unlikely(retval == NULL)) {
60410+ /* real_root is pinned, we don't need to hold a reference */
60411+ read_lock(&gr_inode_lock);
60412+ retval = lookup_acl_subj_label(real_root.dentry->d_inode->i_ino,
60413+ __get_dev(real_root.dentry), role);
60414+ read_unlock(&gr_inode_lock);
60415+ }
60416+out:
60417+ write_sequnlock(&rename_lock);
60418+ br_read_unlock(&vfsmount_lock);
60419+
60420+ BUG_ON(retval == NULL);
60421+
60422+ return retval;
60423+}
60424+
60425+static void
60426+gr_log_learn(const struct dentry *dentry, const struct vfsmount *mnt, const __u32 mode)
60427+{
60428+ struct task_struct *task = current;
60429+ const struct cred *cred = current_cred();
60430+
60431+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename, task->role->roletype,
60432+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
60433+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
60434+ 1UL, 1UL, gr_to_filename(dentry, mnt), (unsigned long) mode, &task->signal->saved_ip);
60435+
60436+ return;
60437+}
60438+
60439+static void
60440+gr_log_learn_uid_change(const kuid_t real, const kuid_t effective, const kuid_t fs)
60441+{
60442+ struct task_struct *task = current;
60443+ const struct cred *cred = current_cred();
60444+
60445+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
60446+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
60447+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
60448+ 'u', GR_GLOBAL_UID(real), GR_GLOBAL_UID(effective), GR_GLOBAL_UID(fs), &task->signal->saved_ip);
60449+
60450+ return;
60451+}
60452+
60453+static void
60454+gr_log_learn_gid_change(const kgid_t real, const kgid_t effective, const kgid_t fs)
60455+{
60456+ struct task_struct *task = current;
60457+ const struct cred *cred = current_cred();
60458+
60459+ security_learn(GR_ID_LEARN_MSG, task->role->rolename, task->role->roletype,
60460+ GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), task->exec_file ? gr_to_filename1(task->exec_file->f_path.dentry,
60461+ task->exec_file->f_path.mnt) : task->acl->filename, task->acl->filename,
60462+ 'g', GR_GLOBAL_GID(real), GR_GLOBAL_GID(effective), GR_GLOBAL_GID(fs), &task->signal->saved_ip);
60463+
60464+ return;
60465+}
60466+
60467+__u32
60468+gr_search_file(const struct dentry * dentry, const __u32 mode,
60469+ const struct vfsmount * mnt)
60470+{
60471+ __u32 retval = mode;
60472+ struct acl_subject_label *curracl;
60473+ struct acl_object_label *currobj;
60474+
60475+ if (unlikely(!(gr_status & GR_READY)))
60476+ return (mode & ~GR_AUDITS);
60477+
60478+ curracl = current->acl;
60479+
60480+ currobj = chk_obj_label(dentry, mnt, curracl);
60481+ retval = currobj->mode & mode;
60482+
60483+ /* if we're opening a specified transfer file for writing
60484+ (e.g. /dev/initctl), then transfer our role to init
60485+ */
60486+ if (unlikely(currobj->mode & GR_INIT_TRANSFER && retval & GR_WRITE &&
60487+ current->role->roletype & GR_ROLE_PERSIST)) {
60488+ struct task_struct *task = init_pid_ns.child_reaper;
60489+
60490+ if (task->role != current->role) {
60491+ task->acl_sp_role = 0;
60492+ task->acl_role_id = current->acl_role_id;
60493+ task->role = current->role;
60494+ rcu_read_lock();
60495+ read_lock(&grsec_exec_file_lock);
60496+ gr_apply_subject_to_task(task);
60497+ read_unlock(&grsec_exec_file_lock);
60498+ rcu_read_unlock();
60499+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_INIT_TRANSFER_MSG);
60500+ }
60501+ }
60502+
60503+ if (unlikely
60504+ ((curracl->mode & (GR_LEARN | GR_INHERITLEARN)) && !(mode & GR_NOPTRACE)
60505+ && (retval != (mode & ~(GR_AUDITS | GR_SUPPRESS))))) {
60506+ __u32 new_mode = mode;
60507+
60508+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
60509+
60510+ retval = new_mode;
60511+
60512+ if (new_mode & GR_EXEC && curracl->mode & GR_INHERITLEARN)
60513+ new_mode |= GR_INHERIT;
60514+
60515+ if (!(mode & GR_NOLEARN))
60516+ gr_log_learn(dentry, mnt, new_mode);
60517+ }
60518+
60519+ return retval;
60520+}
60521+
60522+struct acl_object_label *gr_get_create_object(const struct dentry *new_dentry,
60523+ const struct dentry *parent,
60524+ const struct vfsmount *mnt)
60525+{
60526+ struct name_entry *match;
60527+ struct acl_object_label *matchpo;
60528+ struct acl_subject_label *curracl;
60529+ char *path;
60530+
60531+ if (unlikely(!(gr_status & GR_READY)))
60532+ return NULL;
60533+
60534+ preempt_disable();
60535+ path = gr_to_filename_rbac(new_dentry, mnt);
60536+ match = lookup_name_entry_create(path);
60537+
60538+ curracl = current->acl;
60539+
60540+ if (match) {
60541+ read_lock(&gr_inode_lock);
60542+ matchpo = lookup_acl_obj_label_create(match->inode, match->device, curracl);
60543+ read_unlock(&gr_inode_lock);
60544+
60545+ if (matchpo) {
60546+ preempt_enable();
60547+ return matchpo;
60548+ }
60549+ }
60550+
60551+ // lookup parent
60552+
60553+ matchpo = chk_obj_create_label(parent, mnt, curracl, path);
60554+
60555+ preempt_enable();
60556+ return matchpo;
60557+}
60558+
60559+__u32
60560+gr_check_create(const struct dentry * new_dentry, const struct dentry * parent,
60561+ const struct vfsmount * mnt, const __u32 mode)
60562+{
60563+ struct acl_object_label *matchpo;
60564+ __u32 retval;
60565+
60566+ if (unlikely(!(gr_status & GR_READY)))
60567+ return (mode & ~GR_AUDITS);
60568+
60569+ matchpo = gr_get_create_object(new_dentry, parent, mnt);
60570+
60571+ retval = matchpo->mode & mode;
60572+
60573+ if ((retval != (mode & ~(GR_AUDITS | GR_SUPPRESS)))
60574+ && (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
60575+ __u32 new_mode = mode;
60576+
60577+ new_mode &= ~(GR_AUDITS | GR_SUPPRESS);
60578+
60579+ gr_log_learn(new_dentry, mnt, new_mode);
60580+ return new_mode;
60581+ }
60582+
60583+ return retval;
60584+}
60585+
60586+__u32
60587+gr_check_link(const struct dentry * new_dentry,
60588+ const struct dentry * parent_dentry,
60589+ const struct vfsmount * parent_mnt,
60590+ const struct dentry * old_dentry, const struct vfsmount * old_mnt)
60591+{
60592+ struct acl_object_label *obj;
60593+ __u32 oldmode, newmode;
60594+ __u32 needmode;
60595+ __u32 checkmodes = GR_FIND | GR_APPEND | GR_WRITE | GR_EXEC | GR_SETID | GR_READ |
60596+ GR_DELETE | GR_INHERIT;
60597+
60598+ if (unlikely(!(gr_status & GR_READY)))
60599+ return (GR_CREATE | GR_LINK);
60600+
60601+ obj = chk_obj_label(old_dentry, old_mnt, current->acl);
60602+ oldmode = obj->mode;
60603+
60604+ obj = gr_get_create_object(new_dentry, parent_dentry, parent_mnt);
60605+ newmode = obj->mode;
60606+
60607+ needmode = newmode & checkmodes;
60608+
60609+ // old name for hardlink must have at least the permissions of the new name
60610+ if ((oldmode & needmode) != needmode)
60611+ goto bad;
60612+
60613+ // if old name had restrictions/auditing, make sure the new name does as well
60614+ needmode = oldmode & (GR_NOPTRACE | GR_PTRACERD | GR_INHERIT | GR_AUDITS);
60615+
60616+ // don't allow hardlinking of suid/sgid/fcapped files without permission
60617+ if (is_privileged_binary(old_dentry))
60618+ needmode |= GR_SETID;
60619+
60620+ if ((newmode & needmode) != needmode)
60621+ goto bad;
60622+
60623+ // enforce minimum permissions
60624+ if ((newmode & (GR_CREATE | GR_LINK)) == (GR_CREATE | GR_LINK))
60625+ return newmode;
60626+bad:
60627+ needmode = oldmode;
60628+ if (is_privileged_binary(old_dentry))
60629+ needmode |= GR_SETID;
60630+
60631+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN)) {
60632+ gr_log_learn(old_dentry, old_mnt, needmode | GR_CREATE | GR_LINK);
60633+ return (GR_CREATE | GR_LINK);
60634+ } else if (newmode & GR_SUPPRESS)
60635+ return GR_SUPPRESS;
60636+ else
60637+ return 0;
60638+}
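
The permission comparisons in gr_check_link() all use the same bitmask idiom: a mode grants at least the bits in needmode exactly when (mode & needmode) == needmode. A small standalone illustration of the subset test; the flag names and values here are made up, not the patch's GR_* constants:

#include <stdio.h>

#define F_READ   0x1
#define F_WRITE  0x2
#define F_SETID  0x4

/* returns 1 if 'mode' grants every bit in 'needmode' */
static int grants_all(unsigned int mode, unsigned int needmode)
{
	return (mode & needmode) == needmode;
}

int main(void)
{
	unsigned int oldmode = F_READ | F_WRITE;

	printf("%d\n", grants_all(oldmode, F_READ));           /* 1 */
	printf("%d\n", grants_all(oldmode, F_READ | F_SETID)); /* 0: F_SETID missing */
	return 0;
}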
60639+
60640+int
60641+gr_check_hidden_task(const struct task_struct *task)
60642+{
60643+ if (unlikely(!(gr_status & GR_READY)))
60644+ return 0;
60645+
60646+ if (!(task->acl->mode & GR_PROCFIND) && !(current->acl->mode & GR_VIEW))
60647+ return 1;
60648+
60649+ return 0;
60650+}
60651+
60652+int
60653+gr_check_protected_task(const struct task_struct *task)
60654+{
60655+ if (unlikely(!(gr_status & GR_READY) || !task))
60656+ return 0;
60657+
60658+ if ((task->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
60659+ task->acl != current->acl)
60660+ return 1;
60661+
60662+ return 0;
60663+}
60664+
60665+int
60666+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
60667+{
60668+ struct task_struct *p;
60669+ int ret = 0;
60670+
60671+ if (unlikely(!(gr_status & GR_READY) || !pid))
60672+ return ret;
60673+
60674+ read_lock(&tasklist_lock);
60675+ do_each_pid_task(pid, type, p) {
60676+ if ((p->acl->mode & GR_PROTECTED) && !(current->acl->mode & GR_KILL) &&
60677+ p->acl != current->acl) {
60678+ ret = 1;
60679+ goto out;
60680+ }
60681+ } while_each_pid_task(pid, type, p);
60682+out:
60683+ read_unlock(&tasklist_lock);
60684+
60685+ return ret;
60686+}
60687+
60688+void
60689+gr_copy_label(struct task_struct *tsk)
60690+{
60691+ tsk->signal->used_accept = 0;
60692+ tsk->acl_sp_role = 0;
60693+ tsk->acl_role_id = current->acl_role_id;
60694+ tsk->acl = current->acl;
60695+ tsk->role = current->role;
60696+ tsk->signal->curr_ip = current->signal->curr_ip;
60697+ tsk->signal->saved_ip = current->signal->saved_ip;
60698+ if (current->exec_file)
60699+ get_file(current->exec_file);
60700+ tsk->exec_file = current->exec_file;
60701+ tsk->is_writable = current->is_writable;
60702+ if (unlikely(current->signal->used_accept)) {
60703+ current->signal->curr_ip = 0;
60704+ current->signal->saved_ip = 0;
60705+ }
60706+
60707+ return;
60708+}
60709+
60710+static void
60711+gr_set_proc_res(struct task_struct *task)
60712+{
60713+ struct acl_subject_label *proc;
60714+ unsigned short i;
60715+
60716+ proc = task->acl;
60717+
60718+ if (proc->mode & (GR_LEARN | GR_INHERITLEARN))
60719+ return;
60720+
60721+ for (i = 0; i < RLIM_NLIMITS; i++) {
60722+ if (!(proc->resmask & (1U << i)))
60723+ continue;
60724+
60725+ task->signal->rlim[i].rlim_cur = proc->res[i].rlim_cur;
60726+ task->signal->rlim[i].rlim_max = proc->res[i].rlim_max;
60727+
60728+ if (i == RLIMIT_CPU)
60729+ update_rlimit_cpu(task, proc->res[i].rlim_cur);
60730+ }
60731+
60732+ return;
60733+}
60734+
60735+extern int gr_process_kernel_setuid_ban(struct user_struct *user);
60736+
60737+int
60738+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
60739+{
60740+ unsigned int i;
60741+ __u16 num;
60742+ uid_t *uidlist;
60743+ uid_t curuid;
60744+ int realok = 0;
60745+ int effectiveok = 0;
60746+ int fsok = 0;
60747+ uid_t globalreal, globaleffective, globalfs;
60748+
60749+#if defined(CONFIG_GRKERNSEC_KERN_LOCKOUT)
60750+ struct user_struct *user;
60751+
60752+ if (!uid_valid(real))
60753+ goto skipit;
60754+
60755+ /* find user based on global namespace */
60756+
60757+ globalreal = GR_GLOBAL_UID(real);
60758+
60759+ user = find_user(make_kuid(&init_user_ns, globalreal));
60760+ if (user == NULL)
60761+ goto skipit;
60762+
60763+ if (gr_process_kernel_setuid_ban(user)) {
60764+ /* for find_user */
60765+ free_uid(user);
60766+ return 1;
60767+ }
60768+
60769+ /* for find_user */
60770+ free_uid(user);
60771+
60772+skipit:
60773+#endif
60774+
60775+ if (unlikely(!(gr_status & GR_READY)))
60776+ return 0;
60777+
60778+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
60779+ gr_log_learn_uid_change(real, effective, fs);
60780+
60781+ num = current->acl->user_trans_num;
60782+ uidlist = current->acl->user_transitions;
60783+
60784+ if (uidlist == NULL)
60785+ return 0;
60786+
60787+ if (!uid_valid(real)) {
60788+ realok = 1;
60789+ globalreal = (uid_t)-1;
60790+ } else {
60791+ globalreal = GR_GLOBAL_UID(real);
60792+ }
60793+ if (!uid_valid(effective)) {
60794+ effectiveok = 1;
60795+ globaleffective = (uid_t)-1;
60796+ } else {
60797+ globaleffective = GR_GLOBAL_UID(effective);
60798+ }
60799+ if (!uid_valid(fs)) {
60800+ fsok = 1;
60801+ globalfs = (uid_t)-1;
60802+ } else {
60803+ globalfs = GR_GLOBAL_UID(fs);
60804+ }
60805+
60806+ if (current->acl->user_trans_type & GR_ID_ALLOW) {
60807+ for (i = 0; i < num; i++) {
60808+ curuid = uidlist[i];
60809+ if (globalreal == curuid)
60810+ realok = 1;
60811+ if (globaleffective == curuid)
60812+ effectiveok = 1;
60813+ if (globalfs == curuid)
60814+ fsok = 1;
60815+ }
60816+ } else if (current->acl->user_trans_type & GR_ID_DENY) {
60817+ for (i = 0; i < num; i++) {
60818+ curuid = uidlist[i];
60819+ if (globalreal == curuid)
60820+ break;
60821+ if (globaleffective == curuid)
60822+ break;
60823+ if (globalfs == curuid)
60824+ break;
60825+ }
60826+ /* not in deny list */
60827+ if (i == num) {
60828+ realok = 1;
60829+ effectiveok = 1;
60830+ fsok = 1;
60831+ }
60832+ }
60833+
60834+ if (realok && effectiveok && fsok)
60835+ return 0;
60836+ else {
60837+ gr_log_int(GR_DONT_AUDIT, GR_USRCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
60838+ return 1;
60839+ }
60840+}
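
gr_check_user_change() treats the per-subject transition list in one of two modes: with GR_ID_ALLOW, each requested id must appear in the list; with GR_ID_DENY, the change is rejected if any requested id appears. A minimal userspace sketch of that decision, slightly simplified to a single id per call; the enum, names, and values are illustrative, not from the patch:

#include <stdio.h>

enum trans_type { ID_ALLOW, ID_DENY };

/* returns 0 if the uid change is permitted, 1 if it must be rejected */
static int check_transition(enum trans_type type, const unsigned int *list,
			    unsigned int num, unsigned int uid)
{
	unsigned int i;

	if (type == ID_ALLOW) {
		for (i = 0; i < num; i++)
			if (list[i] == uid)
				return 0;	/* explicitly allowed */
		return 1;
	}
	/* ID_DENY: reject only ids that are listed */
	for (i = 0; i < num; i++)
		if (list[i] == uid)
			return 1;
	return 0;
}

int main(void)
{
	unsigned int listed[] = { 33, 1000 };

	printf("%d\n", check_transition(ID_ALLOW, listed, 2, 1000)); /* 0 */
	printf("%d\n", check_transition(ID_ALLOW, listed, 2, 0));    /* 1 */
	printf("%d\n", check_transition(ID_DENY, listed, 2, 0));     /* 0 */
	return 0;
}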
60841+
60842+int
60843+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
60844+{
60845+ unsigned int i;
60846+ __u16 num;
60847+ gid_t *gidlist;
60848+ gid_t curgid;
60849+ int realok = 0;
60850+ int effectiveok = 0;
60851+ int fsok = 0;
60852+ gid_t globalreal, globaleffective, globalfs;
60853+
60854+ if (unlikely(!(gr_status & GR_READY)))
60855+ return 0;
60856+
60857+ if (current->acl->mode & (GR_LEARN | GR_INHERITLEARN))
60858+ gr_log_learn_gid_change(real, effective, fs);
60859+
60860+ num = current->acl->group_trans_num;
60861+ gidlist = current->acl->group_transitions;
60862+
60863+ if (gidlist == NULL)
60864+ return 0;
60865+
60866+ if (!gid_valid(real)) {
60867+ realok = 1;
60868+ globalreal = (gid_t)-1;
60869+ } else {
60870+ globalreal = GR_GLOBAL_GID(real);
60871+ }
60872+ if (!gid_valid(effective)) {
60873+ effectiveok = 1;
60874+ globaleffective = (gid_t)-1;
60875+ } else {
60876+ globaleffective = GR_GLOBAL_GID(effective);
60877+ }
60878+ if (!gid_valid(fs)) {
60879+ fsok = 1;
60880+ globalfs = (gid_t)-1;
60881+ } else {
60882+ globalfs = GR_GLOBAL_GID(fs);
60883+ }
60884+
60885+ if (current->acl->group_trans_type & GR_ID_ALLOW) {
60886+ for (i = 0; i < num; i++) {
60887+ curgid = gidlist[i];
60888+ if (globalreal == curgid)
60889+ realok = 1;
60890+ if (globaleffective == curgid)
60891+ effectiveok = 1;
60892+ if (globalfs == curgid)
60893+ fsok = 1;
60894+ }
60895+ } else if (current->acl->group_trans_type & GR_ID_DENY) {
60896+ for (i = 0; i < num; i++) {
60897+ curgid = gidlist[i];
60898+ if (globalreal == curgid)
60899+ break;
60900+ if (globaleffective == curgid)
60901+ break;
60902+ if (globalfs == curgid)
60903+ break;
60904+ }
60905+ /* not in deny list */
60906+ if (i == num) {
60907+ realok = 1;
60908+ effectiveok = 1;
60909+ fsok = 1;
60910+ }
60911+ }
60912+
60913+ if (realok && effectiveok && fsok)
60914+ return 0;
60915+ else {
60916+ gr_log_int(GR_DONT_AUDIT, GR_GRPCHANGE_ACL_MSG, realok ? (effectiveok ? (fsok ? 0 : globalfs) : globaleffective) : globalreal);
60917+ return 1;
60918+ }
60919+}
60920+
60921+extern int gr_acl_is_capable(const int cap);
60922+
60923+void
60924+gr_set_role_label(struct task_struct *task, const kuid_t kuid, const kgid_t kgid)
60925+{
60926+ struct acl_role_label *role = task->role;
60927+ struct acl_subject_label *subj = NULL;
60928+ struct acl_object_label *obj;
60929+ struct file *filp;
60930+ uid_t uid;
60931+ gid_t gid;
60932+
60933+ if (unlikely(!(gr_status & GR_READY)))
60934+ return;
60935+
60936+ uid = GR_GLOBAL_UID(kuid);
60937+ gid = GR_GLOBAL_GID(kgid);
60938+
60939+ filp = task->exec_file;
60940+
60941+ /* kernel process, we'll give them the kernel role */
60942+ if (unlikely(!filp)) {
60943+ task->role = kernel_role;
60944+ task->acl = kernel_role->root_label;
60945+ return;
60946+ } else if (!task->role || !(task->role->roletype & GR_ROLE_SPECIAL))
60947+ role = lookup_acl_role_label(task, uid, gid);
60948+
60949+ /* don't change the role if we're not a privileged process */
60950+ if (role && task->role != role &&
60951+ (((role->roletype & GR_ROLE_USER) && !gr_acl_is_capable(CAP_SETUID)) ||
60952+ ((role->roletype & GR_ROLE_GROUP) && !gr_acl_is_capable(CAP_SETGID))))
60953+ return;
60954+
60955+ /* perform subject lookup in possibly new role
60956+ we can use this result below in the case where role == task->role
60957+ */
60958+ subj = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, role);
60959+
60960+	/* if we changed uid/gid but ended up in the same role
60961+	   and are using inheritance, don't lose the inherited subject:
60962+	   if the current subject differs from what a normal lookup
60963+	   would produce, we arrived at it via inheritance, so keep
60964+	   that subject
60965+	*/
60966+ if (role != task->role || (!(task->acl->mode & GR_INHERITLEARN) &&
60967+ (subj == task->acl)))
60968+ task->acl = subj;
60969+
60970+ task->role = role;
60971+
60972+ task->is_writable = 0;
60973+
60974+ /* ignore additional mmap checks for processes that are writable
60975+ by the default ACL */
60976+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
60977+ if (unlikely(obj->mode & GR_WRITE))
60978+ task->is_writable = 1;
60979+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
60980+ if (unlikely(obj->mode & GR_WRITE))
60981+ task->is_writable = 1;
60982+
60983+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
60984+ printk(KERN_ALERT "Set role label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
60985+#endif
60986+
60987+ gr_set_proc_res(task);
60988+
60989+ return;
60990+}
60991+
60992+int
60993+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
60994+ const int unsafe_flags)
60995+{
60996+ struct task_struct *task = current;
60997+ struct acl_subject_label *newacl;
60998+ struct acl_object_label *obj;
60999+ __u32 retmode;
61000+
61001+ if (unlikely(!(gr_status & GR_READY)))
61002+ return 0;
61003+
61004+ newacl = chk_subj_label(dentry, mnt, task->role);
61005+
61006+	/* special handling for the case where we did an strace -f -p <pid> from an admin role,
61007+	   and <pid> then did an exec
61008+	*/
61009+ rcu_read_lock();
61010+ read_lock(&tasklist_lock);
61011+ if (task->ptrace && task->parent && ((task->parent->role->roletype & GR_ROLE_GOD) ||
61012+ (task->parent->acl->mode & GR_POVERRIDE))) {
61013+ read_unlock(&tasklist_lock);
61014+ rcu_read_unlock();
61015+ goto skip_check;
61016+ }
61017+ read_unlock(&tasklist_lock);
61018+ rcu_read_unlock();
61019+
61020+ if (unsafe_flags && !(task->acl->mode & GR_POVERRIDE) && (task->acl != newacl) &&
61021+ !(task->role->roletype & GR_ROLE_GOD) &&
61022+ !gr_search_file(dentry, GR_PTRACERD, mnt) &&
61023+ !(task->acl->mode & (GR_LEARN | GR_INHERITLEARN))) {
61024+ if (unsafe_flags & LSM_UNSAFE_SHARE)
61025+ gr_log_fs_generic(GR_DONT_AUDIT, GR_UNSAFESHARE_EXEC_ACL_MSG, dentry, mnt);
61026+ else
61027+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_EXEC_ACL_MSG, dentry, mnt);
61028+ return -EACCES;
61029+ }
61030+
61031+skip_check:
61032+
61033+ obj = chk_obj_label(dentry, mnt, task->acl);
61034+ retmode = obj->mode & (GR_INHERIT | GR_AUDIT_INHERIT);
61035+
61036+ if (!(task->acl->mode & GR_INHERITLEARN) &&
61037+ ((newacl->mode & GR_LEARN) || !(retmode & GR_INHERIT))) {
61038+ if (obj->nested)
61039+ task->acl = obj->nested;
61040+ else
61041+ task->acl = newacl;
61042+ } else if (retmode & GR_INHERIT && retmode & GR_AUDIT_INHERIT)
61043+ gr_log_str_fs(GR_DO_AUDIT, GR_INHERIT_ACL_MSG, task->acl->filename, dentry, mnt);
61044+
61045+ task->is_writable = 0;
61046+
61047+ /* ignore additional mmap checks for processes that are writable
61048+ by the default ACL */
61049+ obj = chk_obj_label(dentry, mnt, default_role->root_label);
61050+ if (unlikely(obj->mode & GR_WRITE))
61051+ task->is_writable = 1;
61052+ obj = chk_obj_label(dentry, mnt, task->role->root_label);
61053+ if (unlikely(obj->mode & GR_WRITE))
61054+ task->is_writable = 1;
61055+
61056+ gr_set_proc_res(task);
61057+
61058+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
61059+ printk(KERN_ALERT "Set subject label for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
61060+#endif
61061+ return 0;
61062+}
61063+
61064+/* always called with valid inodev ptr */
61065+static void
61066+do_handle_delete(struct inodev_entry *inodev, const ino_t ino, const dev_t dev)
61067+{
61068+ struct acl_object_label *matchpo;
61069+ struct acl_subject_label *matchps;
61070+ struct acl_subject_label *subj;
61071+ struct acl_role_label *role;
61072+ unsigned int x;
61073+
61074+ FOR_EACH_ROLE_START(role)
61075+ FOR_EACH_SUBJECT_START(role, subj, x)
61076+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
61077+ matchpo->mode |= GR_DELETED;
61078+ FOR_EACH_SUBJECT_END(subj,x)
61079+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
61080+ /* nested subjects aren't in the role's subj_hash table */
61081+ if ((matchpo = lookup_acl_obj_label(ino, dev, subj)) != NULL)
61082+ matchpo->mode |= GR_DELETED;
61083+ FOR_EACH_NESTED_SUBJECT_END(subj)
61084+ if ((matchps = lookup_acl_subj_label(ino, dev, role)) != NULL)
61085+ matchps->mode |= GR_DELETED;
61086+ FOR_EACH_ROLE_END(role)
61087+
61088+ inodev->nentry->deleted = 1;
61089+
61090+ return;
61091+}
61092+
61093+void
61094+gr_handle_delete(const ino_t ino, const dev_t dev)
61095+{
61096+ struct inodev_entry *inodev;
61097+
61098+ if (unlikely(!(gr_status & GR_READY)))
61099+ return;
61100+
61101+ write_lock(&gr_inode_lock);
61102+ inodev = lookup_inodev_entry(ino, dev);
61103+ if (inodev != NULL)
61104+ do_handle_delete(inodev, ino, dev);
61105+ write_unlock(&gr_inode_lock);
61106+
61107+ return;
61108+}
61109+
61110+static void
61111+update_acl_obj_label(const ino_t oldinode, const dev_t olddevice,
61112+ const ino_t newinode, const dev_t newdevice,
61113+ struct acl_subject_label *subj)
61114+{
61115+ unsigned int index = gr_fhash(oldinode, olddevice, subj->obj_hash_size);
61116+ struct acl_object_label *match;
61117+
61118+ match = subj->obj_hash[index];
61119+
61120+ while (match && (match->inode != oldinode ||
61121+ match->device != olddevice ||
61122+ !(match->mode & GR_DELETED)))
61123+ match = match->next;
61124+
61125+ if (match && (match->inode == oldinode)
61126+ && (match->device == olddevice)
61127+ && (match->mode & GR_DELETED)) {
61128+ if (match->prev == NULL) {
61129+ subj->obj_hash[index] = match->next;
61130+ if (match->next != NULL)
61131+ match->next->prev = NULL;
61132+ } else {
61133+ match->prev->next = match->next;
61134+ if (match->next != NULL)
61135+ match->next->prev = match->prev;
61136+ }
61137+ match->prev = NULL;
61138+ match->next = NULL;
61139+ match->inode = newinode;
61140+ match->device = newdevice;
61141+ match->mode &= ~GR_DELETED;
61142+
61143+ insert_acl_obj_label(match, subj);
61144+ }
61145+
61146+ return;
61147+}
61148+
61149+static void
61150+update_acl_subj_label(const ino_t oldinode, const dev_t olddevice,
61151+ const ino_t newinode, const dev_t newdevice,
61152+ struct acl_role_label *role)
61153+{
61154+ unsigned int index = gr_fhash(oldinode, olddevice, role->subj_hash_size);
61155+ struct acl_subject_label *match;
61156+
61157+ match = role->subj_hash[index];
61158+
61159+ while (match && (match->inode != oldinode ||
61160+ match->device != olddevice ||
61161+ !(match->mode & GR_DELETED)))
61162+ match = match->next;
61163+
61164+ if (match && (match->inode == oldinode)
61165+ && (match->device == olddevice)
61166+ && (match->mode & GR_DELETED)) {
61167+ if (match->prev == NULL) {
61168+ role->subj_hash[index] = match->next;
61169+ if (match->next != NULL)
61170+ match->next->prev = NULL;
61171+ } else {
61172+ match->prev->next = match->next;
61173+ if (match->next != NULL)
61174+ match->next->prev = match->prev;
61175+ }
61176+ match->prev = NULL;
61177+ match->next = NULL;
61178+ match->inode = newinode;
61179+ match->device = newdevice;
61180+ match->mode &= ~GR_DELETED;
61181+
61182+ insert_acl_subj_label(match, role);
61183+ }
61184+
61185+ return;
61186+}
61187+
61188+static void
61189+update_inodev_entry(const ino_t oldinode, const dev_t olddevice,
61190+ const ino_t newinode, const dev_t newdevice)
61191+{
61192+ unsigned int index = gr_fhash(oldinode, olddevice, inodev_set.i_size);
61193+ struct inodev_entry *match;
61194+
61195+ match = inodev_set.i_hash[index];
61196+
61197+ while (match && (match->nentry->inode != oldinode ||
61198+ match->nentry->device != olddevice || !match->nentry->deleted))
61199+ match = match->next;
61200+
61201+ if (match && (match->nentry->inode == oldinode)
61202+ && (match->nentry->device == olddevice) &&
61203+ match->nentry->deleted) {
61204+ if (match->prev == NULL) {
61205+ inodev_set.i_hash[index] = match->next;
61206+ if (match->next != NULL)
61207+ match->next->prev = NULL;
61208+ } else {
61209+ match->prev->next = match->next;
61210+ if (match->next != NULL)
61211+ match->next->prev = match->prev;
61212+ }
61213+ match->prev = NULL;
61214+ match->next = NULL;
61215+ match->nentry->inode = newinode;
61216+ match->nentry->device = newdevice;
61217+ match->nentry->deleted = 0;
61218+
61219+ insert_inodev_entry(match);
61220+ }
61221+
61222+ return;
61223+}
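
update_acl_obj_label(), update_acl_subj_label(), and update_inodev_entry() all perform the same operation on different tables: find the chain entry keyed by the old (inode, device) pair, unlink it from its doubly-linked bucket, rewrite the key, and reinsert it so it hashes to the correct bucket. A generic userspace sketch of that relink step, with a trivial modulo hash where the patch uses gr_fhash(); the types and names are illustrative:

#include <stdio.h>

#define NBUCKETS 8

struct entry {
	unsigned long key;
	struct entry *prev, *next;
};

static struct entry *table[NBUCKETS];

static unsigned int hash(unsigned long key)
{
	return key % NBUCKETS;	/* the patch uses gr_fhash() instead */
}

static void insert(struct entry *e)
{
	unsigned int i = hash(e->key);

	e->prev = NULL;
	e->next = table[i];
	if (table[i])
		table[i]->prev = e;
	table[i] = e;
}

/* move the entry for oldkey so it is findable under newkey */
static void rekey(unsigned long oldkey, unsigned long newkey)
{
	unsigned int i = hash(oldkey);
	struct entry *m = table[i];

	while (m && m->key != oldkey)
		m = m->next;
	if (!m)
		return;

	/* unlink from the old bucket's doubly-linked chain */
	if (m->prev == NULL) {
		table[i] = m->next;
		if (m->next)
			m->next->prev = NULL;
	} else {
		m->prev->next = m->next;
		if (m->next)
			m->next->prev = m->prev;
	}

	m->key = newkey;	/* rewrite the key, then rehash */
	insert(m);
}

int main(void)
{
	struct entry e = { 5, NULL, NULL };

	insert(&e);
	rekey(5, 13);
	printf("bucket %u holds key %lu\n", hash(13), table[hash(13)]->key);
	return 0;
}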
61224+
61225+static void
61226+__do_handle_create(const struct name_entry *matchn, ino_t ino, dev_t dev)
61227+{
61228+ struct acl_subject_label *subj;
61229+ struct acl_role_label *role;
61230+ unsigned int x;
61231+
61232+ FOR_EACH_ROLE_START(role)
61233+ update_acl_subj_label(matchn->inode, matchn->device, ino, dev, role);
61234+
61235+ FOR_EACH_NESTED_SUBJECT_START(role, subj)
61236+			if ((subj->inode == matchn->inode) && (subj->device == matchn->device)) {
61237+				subj->inode = ino;
61238+				subj->device = dev;
61239+			}
61240+ /* nested subjects aren't in the role's subj_hash table */
61241+ update_acl_obj_label(matchn->inode, matchn->device,
61242+ ino, dev, subj);
61243+ FOR_EACH_NESTED_SUBJECT_END(subj)
61244+ FOR_EACH_SUBJECT_START(role, subj, x)
61245+ update_acl_obj_label(matchn->inode, matchn->device,
61246+ ino, dev, subj);
61247+ FOR_EACH_SUBJECT_END(subj,x)
61248+ FOR_EACH_ROLE_END(role)
61249+
61250+ update_inodev_entry(matchn->inode, matchn->device, ino, dev);
61251+
61252+ return;
61253+}
61254+
61255+static void
61256+do_handle_create(const struct name_entry *matchn, const struct dentry *dentry,
61257+ const struct vfsmount *mnt)
61258+{
61259+ ino_t ino = dentry->d_inode->i_ino;
61260+ dev_t dev = __get_dev(dentry);
61261+
61262+ __do_handle_create(matchn, ino, dev);
61263+
61264+ return;
61265+}
61266+
61267+void
61268+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
61269+{
61270+ struct name_entry *matchn;
61271+
61272+ if (unlikely(!(gr_status & GR_READY)))
61273+ return;
61274+
61275+ preempt_disable();
61276+ matchn = lookup_name_entry(gr_to_filename_rbac(dentry, mnt));
61277+
61278+ if (unlikely((unsigned long)matchn)) {
61279+ write_lock(&gr_inode_lock);
61280+ do_handle_create(matchn, dentry, mnt);
61281+ write_unlock(&gr_inode_lock);
61282+ }
61283+ preempt_enable();
61284+
61285+ return;
61286+}
61287+
61288+void
61289+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
61290+{
61291+ struct name_entry *matchn;
61292+
61293+ if (unlikely(!(gr_status & GR_READY)))
61294+ return;
61295+
61296+ preempt_disable();
61297+ matchn = lookup_name_entry(gr_to_proc_filename_rbac(dentry, init_pid_ns.proc_mnt));
61298+
61299+ if (unlikely((unsigned long)matchn)) {
61300+ write_lock(&gr_inode_lock);
61301+ __do_handle_create(matchn, inode->i_ino, inode->i_sb->s_dev);
61302+ write_unlock(&gr_inode_lock);
61303+ }
61304+ preempt_enable();
61305+
61306+ return;
61307+}
61308+
61309+void
61310+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
61311+ struct dentry *old_dentry,
61312+ struct dentry *new_dentry,
61313+ struct vfsmount *mnt, const __u8 replace)
61314+{
61315+ struct name_entry *matchn;
61316+ struct inodev_entry *inodev;
61317+ struct inode *inode = new_dentry->d_inode;
61318+ ino_t old_ino = old_dentry->d_inode->i_ino;
61319+ dev_t old_dev = __get_dev(old_dentry);
61320+
61321+ /* vfs_rename swaps the name and parent link for old_dentry and
61322+ new_dentry
61323+ at this point, old_dentry has the new name, parent link, and inode
61324+ for the renamed file
61325+ if a file is being replaced by a rename, new_dentry has the inode
61326+ and name for the replaced file
61327+ */
61328+
61329+ if (unlikely(!(gr_status & GR_READY)))
61330+ return;
61331+
61332+ preempt_disable();
61333+ matchn = lookup_name_entry(gr_to_filename_rbac(old_dentry, mnt));
61334+
61335+ /* we wouldn't have to check d_inode if it weren't for
61336+ NFS silly-renaming
61337+ */
61338+
61339+ write_lock(&gr_inode_lock);
61340+ if (unlikely(replace && inode)) {
61341+ ino_t new_ino = inode->i_ino;
61342+ dev_t new_dev = __get_dev(new_dentry);
61343+
61344+ inodev = lookup_inodev_entry(new_ino, new_dev);
61345+ if (inodev != NULL && ((inode->i_nlink <= 1) || S_ISDIR(inode->i_mode)))
61346+ do_handle_delete(inodev, new_ino, new_dev);
61347+ }
61348+
61349+ inodev = lookup_inodev_entry(old_ino, old_dev);
61350+ if (inodev != NULL && ((old_dentry->d_inode->i_nlink <= 1) || S_ISDIR(old_dentry->d_inode->i_mode)))
61351+ do_handle_delete(inodev, old_ino, old_dev);
61352+
61353+ if (unlikely((unsigned long)matchn))
61354+ do_handle_create(matchn, old_dentry, mnt);
61355+
61356+ write_unlock(&gr_inode_lock);
61357+ preempt_enable();
61358+
61359+ return;
61360+}
61361+
61362+static int
61363+lookup_special_role_auth(__u16 mode, const char *rolename, unsigned char **salt,
61364+ unsigned char **sum)
61365+{
61366+ struct acl_role_label *r;
61367+ struct role_allowed_ip *ipp;
61368+ struct role_transition *trans;
61369+ unsigned int i;
61370+ int found = 0;
61371+ u32 curr_ip = current->signal->curr_ip;
61372+
61373+ current->signal->saved_ip = curr_ip;
61374+
61375+ /* check transition table */
61376+
61377+ for (trans = current->role->transitions; trans; trans = trans->next) {
61378+ if (!strcmp(rolename, trans->rolename)) {
61379+ found = 1;
61380+ break;
61381+ }
61382+ }
61383+
61384+ if (!found)
61385+ return 0;
61386+
61387+ /* handle special roles that do not require authentication
61388+ and check ip */
61389+
61390+ FOR_EACH_ROLE_START(r)
61391+ if (!strcmp(rolename, r->rolename) &&
61392+ (r->roletype & GR_ROLE_SPECIAL)) {
61393+ found = 0;
61394+ if (r->allowed_ips != NULL) {
61395+ for (ipp = r->allowed_ips; ipp; ipp = ipp->next) {
61396+ if ((ntohl(curr_ip) & ipp->netmask) ==
61397+ (ntohl(ipp->addr) & ipp->netmask))
61398+ found = 1;
61399+ }
61400+ } else
61401+ found = 2;
61402+ if (!found)
61403+ return 0;
61404+
61405+ if (((mode == GR_SPROLE) && (r->roletype & GR_ROLE_NOPW)) ||
61406+ ((mode == GR_SPROLEPAM) && (r->roletype & GR_ROLE_PAM))) {
61407+ *salt = NULL;
61408+ *sum = NULL;
61409+ return 1;
61410+ }
61411+ }
61412+ FOR_EACH_ROLE_END(r)
61413+
61414+ for (i = 0; i < num_sprole_pws; i++) {
61415+ if (!strcmp(rolename, acl_special_roles[i]->rolename)) {
61416+ *salt = acl_special_roles[i]->salt;
61417+ *sum = acl_special_roles[i]->sum;
61418+ return 1;
61419+ }
61420+ }
61421+
61422+ return 0;
61423+}
61424+
61425+static void
61426+assign_special_role(char *rolename)
61427+{
61428+ struct acl_object_label *obj;
61429+ struct acl_role_label *r;
61430+ struct acl_role_label *assigned = NULL;
61431+ struct task_struct *tsk;
61432+ struct file *filp;
61433+
61434+ FOR_EACH_ROLE_START(r)
61435+ if (!strcmp(rolename, r->rolename) &&
61436+ (r->roletype & GR_ROLE_SPECIAL)) {
61437+ assigned = r;
61438+ break;
61439+ }
61440+ FOR_EACH_ROLE_END(r)
61441+
61442+ if (!assigned)
61443+ return;
61444+
61445+ read_lock(&tasklist_lock);
61446+ read_lock(&grsec_exec_file_lock);
61447+
61448+ tsk = current->real_parent;
61449+ if (tsk == NULL)
61450+ goto out_unlock;
61451+
61452+ filp = tsk->exec_file;
61453+ if (filp == NULL)
61454+ goto out_unlock;
61455+
61456+ tsk->is_writable = 0;
61457+
61458+ tsk->acl_sp_role = 1;
61459+ tsk->acl_role_id = ++acl_sp_role_value;
61460+ tsk->role = assigned;
61461+ tsk->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role);
61462+
61463+ /* ignore additional mmap checks for processes that are writable
61464+ by the default ACL */
61465+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
61466+ if (unlikely(obj->mode & GR_WRITE))
61467+ tsk->is_writable = 1;
61468+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, tsk->role->root_label);
61469+ if (unlikely(obj->mode & GR_WRITE))
61470+ tsk->is_writable = 1;
61471+
61472+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
61473+ printk(KERN_ALERT "Assigning special role:%s subject:%s to process (%s:%d)\n", tsk->role->rolename, tsk->acl->filename, tsk->comm, task_pid_nr(tsk));
61474+#endif
61475+
61476+out_unlock:
61477+ read_unlock(&grsec_exec_file_lock);
61478+ read_unlock(&tasklist_lock);
61479+ return;
61480+}
61481+
61482+int gr_check_secure_terminal(struct task_struct *task)
61483+{
61484+ struct task_struct *p, *p2, *p3;
61485+ struct files_struct *files;
61486+ struct fdtable *fdt;
61487+ struct file *our_file = NULL, *file;
61488+ int i;
61489+
61490+ if (task->signal->tty == NULL)
61491+ return 1;
61492+
61493+ files = get_files_struct(task);
61494+ if (files != NULL) {
61495+ rcu_read_lock();
61496+ fdt = files_fdtable(files);
61497+ for (i=0; i < fdt->max_fds; i++) {
61498+ file = fcheck_files(files, i);
61499+ if (file && (our_file == NULL) && (file->private_data == task->signal->tty)) {
61500+ get_file(file);
61501+ our_file = file;
61502+ }
61503+ }
61504+ rcu_read_unlock();
61505+ put_files_struct(files);
61506+ }
61507+
61508+ if (our_file == NULL)
61509+ return 1;
61510+
61511+ read_lock(&tasklist_lock);
61512+ do_each_thread(p2, p) {
61513+ files = get_files_struct(p);
61514+ if (files == NULL ||
61515+ (p->signal && p->signal->tty == task->signal->tty)) {
61516+ if (files != NULL)
61517+ put_files_struct(files);
61518+ continue;
61519+ }
61520+ rcu_read_lock();
61521+ fdt = files_fdtable(files);
61522+ for (i=0; i < fdt->max_fds; i++) {
61523+ file = fcheck_files(files, i);
61524+ if (file && S_ISCHR(file->f_path.dentry->d_inode->i_mode) &&
61525+ file->f_path.dentry->d_inode->i_rdev == our_file->f_path.dentry->d_inode->i_rdev) {
61526+ p3 = task;
61527+ while (task_pid_nr(p3) > 0) {
61528+ if (p3 == p)
61529+ break;
61530+ p3 = p3->real_parent;
61531+ }
61532+ if (p3 == p)
61533+ break;
61534+ gr_log_ttysniff(GR_DONT_AUDIT_GOOD, GR_TTYSNIFF_ACL_MSG, p);
61535+ gr_handle_alertkill(p);
61536+ rcu_read_unlock();
61537+ put_files_struct(files);
61538+ read_unlock(&tasklist_lock);
61539+ fput(our_file);
61540+ return 0;
61541+ }
61542+ }
61543+ rcu_read_unlock();
61544+ put_files_struct(files);
61545+ } while_each_thread(p2, p);
61546+ read_unlock(&tasklist_lock);
61547+
61548+ fput(our_file);
61549+ return 1;
61550+}
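
The p3 loop inside gr_check_secure_terminal() is an ancestor test: walk real_parent links from the given task toward pid 0 and see whether the suspect process p is encountered, in which case p is an ancestor and exempt from the tty-sniffing check. A userspace sketch of the same walk over a toy process tree; struct toy_task and is_ancestor() are illustrative names, not kernel types:

#include <stdio.h>

struct toy_task {
	int pid;
	struct toy_task *real_parent;
};

/* returns 1 if 'p' is 'task' itself or one of its ancestors */
static int is_ancestor(const struct toy_task *task, const struct toy_task *p)
{
	const struct toy_task *p3 = task;

	while (p3->pid > 0) {	/* pid 0 terminates the walk, as in the patch */
		if (p3 == p)
			return 1;
		p3 = p3->real_parent;
	}
	return p3 == p;
}

int main(void)
{
	struct toy_task idle = { 0, NULL };
	struct toy_task init = { 1, &idle };
	struct toy_task shell = { 100, &init };
	struct toy_task child = { 200, &shell };

	printf("%d\n", is_ancestor(&child, &shell)); /* 1 */
	printf("%d\n", is_ancestor(&shell, &child)); /* 0 */
	return 0;
}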
61551+
61552+static int gr_rbac_disable(void *unused)
61553+{
61554+ pax_open_kernel();
61555+ gr_status &= ~GR_READY;
61556+ pax_close_kernel();
61557+
61558+ return 0;
61559+}
61560+
61561+ssize_t
61562+write_grsec_handler(struct file *file, const char * buf, size_t count, loff_t *ppos)
61563+{
61564+ struct gr_arg_wrapper uwrap;
61565+ unsigned char *sprole_salt = NULL;
61566+ unsigned char *sprole_sum = NULL;
61567+ int error = sizeof (struct gr_arg_wrapper);
61568+ int error2 = 0;
61569+
61570+ mutex_lock(&gr_dev_mutex);
61571+
61572+ if ((gr_status & GR_READY) && !(current->acl->mode & GR_KERNELAUTH)) {
61573+ error = -EPERM;
61574+ goto out;
61575+ }
61576+
61577+ if (count != sizeof (struct gr_arg_wrapper)) {
61578+ gr_log_int_int(GR_DONT_AUDIT_GOOD, GR_DEV_ACL_MSG, (int)count, (int)sizeof(struct gr_arg_wrapper));
61579+ error = -EINVAL;
61580+ goto out;
61581+ }
61582+
61583+
61584+ if (gr_auth_expires && time_after_eq(get_seconds(), gr_auth_expires)) {
61585+ gr_auth_expires = 0;
61586+ gr_auth_attempts = 0;
61587+ }
61588+
61589+ if (copy_from_user(&uwrap, buf, sizeof (struct gr_arg_wrapper))) {
61590+ error = -EFAULT;
61591+ goto out;
61592+ }
61593+
61594+ if ((uwrap.version != GRSECURITY_VERSION) || (uwrap.size != sizeof(struct gr_arg))) {
61595+ error = -EINVAL;
61596+ goto out;
61597+ }
61598+
61599+ if (copy_from_user(gr_usermode, uwrap.arg, sizeof (struct gr_arg))) {
61600+ error = -EFAULT;
61601+ goto out;
61602+ }
61603+
61604+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_SPROLEPAM &&
61605+ gr_auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
61606+ time_after(gr_auth_expires, get_seconds())) {
61607+ error = -EBUSY;
61608+ goto out;
61609+ }
61610+
61611+	/* if a non-root user is trying to do anything other than use a special role,
61612+	   do not attempt authentication and do not count it towards authentication
61613+	   lockout
61614+	*/
61615+
61616+ if (gr_usermode->mode != GR_SPROLE && gr_usermode->mode != GR_STATUS &&
61617+ gr_usermode->mode != GR_UNSPROLE && gr_usermode->mode != GR_SPROLEPAM &&
61618+ gr_is_global_nonroot(current_uid())) {
61619+ error = -EPERM;
61620+ goto out;
61621+ }
61622+
61623+ /* ensure pw and special role name are null terminated */
61624+
61625+ gr_usermode->pw[GR_PW_LEN - 1] = '\0';
61626+ gr_usermode->sp_role[GR_SPROLE_LEN - 1] = '\0';
61627+
61628+	/* Okay.
61629+	 * We have enough of the argument structure (we have yet
61630+	 * to copy_from_user the tables themselves). Copy the tables
61631+	 * only if we need them, i.e. for loading operations. */
61632+
61633+ switch (gr_usermode->mode) {
61634+ case GR_STATUS:
61635+ if (gr_status & GR_READY) {
61636+ error = 1;
61637+ if (!gr_check_secure_terminal(current))
61638+ error = 3;
61639+ } else
61640+ error = 2;
61641+ goto out;
61642+ case GR_SHUTDOWN:
61643+ if ((gr_status & GR_READY)
61644+ && !(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
61645+ stop_machine(gr_rbac_disable, NULL, NULL);
61646+ free_variables();
61647+ memset(gr_usermode, 0, sizeof (struct gr_arg));
61648+ memset(gr_system_salt, 0, GR_SALT_LEN);
61649+ memset(gr_system_sum, 0, GR_SHA_LEN);
61650+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTS_ACL_MSG);
61651+ } else if (gr_status & GR_READY) {
61652+ gr_log_noargs(GR_DONT_AUDIT, GR_SHUTF_ACL_MSG);
61653+ error = -EPERM;
61654+ } else {
61655+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SHUTI_ACL_MSG);
61656+ error = -EAGAIN;
61657+ }
61658+ break;
61659+ case GR_ENABLE:
61660+ if (!(gr_status & GR_READY) && !(error2 = gracl_init(gr_usermode)))
61661+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_ENABLE_ACL_MSG, GR_VERSION);
61662+ else {
61663+ if (gr_status & GR_READY)
61664+ error = -EAGAIN;
61665+ else
61666+ error = error2;
61667+ gr_log_str(GR_DONT_AUDIT, GR_ENABLEF_ACL_MSG, GR_VERSION);
61668+ }
61669+ break;
61670+ case GR_RELOAD:
61671+ if (!(gr_status & GR_READY)) {
61672+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOADI_ACL_MSG, GR_VERSION);
61673+ error = -EAGAIN;
61674+ } else if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
61675+ stop_machine(gr_rbac_disable, NULL, NULL);
61676+ free_variables();
61677+ error2 = gracl_init(gr_usermode);
61678+ if (!error2)
61679+ gr_log_str(GR_DONT_AUDIT_GOOD, GR_RELOAD_ACL_MSG, GR_VERSION);
61680+ else {
61681+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
61682+ error = error2;
61683+ }
61684+ } else {
61685+ gr_log_str(GR_DONT_AUDIT, GR_RELOADF_ACL_MSG, GR_VERSION);
61686+ error = -EPERM;
61687+ }
61688+ break;
61689+ case GR_SEGVMOD:
61690+ if (unlikely(!(gr_status & GR_READY))) {
61691+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODI_ACL_MSG);
61692+ error = -EAGAIN;
61693+ break;
61694+ }
61695+
61696+ if (!(chkpw(gr_usermode, gr_system_salt, gr_system_sum))) {
61697+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SEGVMODS_ACL_MSG);
61698+ if (gr_usermode->segv_device && gr_usermode->segv_inode) {
61699+ struct acl_subject_label *segvacl;
61700+ segvacl =
61701+ lookup_acl_subj_label(gr_usermode->segv_inode,
61702+ gr_usermode->segv_device,
61703+ current->role);
61704+ if (segvacl) {
61705+ segvacl->crashes = 0;
61706+ segvacl->expires = 0;
61707+ }
61708+ } else if (gr_find_uid(gr_usermode->segv_uid) >= 0) {
61709+ gr_remove_uid(gr_usermode->segv_uid);
61710+ }
61711+ } else {
61712+ gr_log_noargs(GR_DONT_AUDIT, GR_SEGVMODF_ACL_MSG);
61713+ error = -EPERM;
61714+ }
61715+ break;
61716+ case GR_SPROLE:
61717+ case GR_SPROLEPAM:
61718+ if (unlikely(!(gr_status & GR_READY))) {
61719+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_SPROLEI_ACL_MSG);
61720+ error = -EAGAIN;
61721+ break;
61722+ }
61723+
61724+ if (current->role->expires && time_after_eq(get_seconds(), current->role->expires)) {
61725+ current->role->expires = 0;
61726+ current->role->auth_attempts = 0;
61727+ }
61728+
61729+ if (current->role->auth_attempts >= CONFIG_GRKERNSEC_ACL_MAXTRIES &&
61730+ time_after(current->role->expires, get_seconds())) {
61731+ error = -EBUSY;
61732+ goto out;
61733+ }
61734+
61735+ if (lookup_special_role_auth
61736+ (gr_usermode->mode, gr_usermode->sp_role, &sprole_salt, &sprole_sum)
61737+ && ((!sprole_salt && !sprole_sum)
61738+ || !(chkpw(gr_usermode, sprole_salt, sprole_sum)))) {
61739+ char *p = "";
61740+ assign_special_role(gr_usermode->sp_role);
61741+ read_lock(&tasklist_lock);
61742+ if (current->real_parent)
61743+ p = current->real_parent->role->rolename;
61744+ read_unlock(&tasklist_lock);
61745+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLES_ACL_MSG,
61746+ p, acl_sp_role_value);
61747+ } else {
61748+ gr_log_str(GR_DONT_AUDIT, GR_SPROLEF_ACL_MSG, gr_usermode->sp_role);
61749+ error = -EPERM;
61750+ if(!(current->role->auth_attempts++))
61751+ current->role->expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
61752+
61753+ goto out;
61754+ }
61755+ break;
61756+ case GR_UNSPROLE:
61757+ if (unlikely(!(gr_status & GR_READY))) {
61758+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_UNSPROLEI_ACL_MSG);
61759+ error = -EAGAIN;
61760+ break;
61761+ }
61762+
61763+ if (current->role->roletype & GR_ROLE_SPECIAL) {
61764+ char *p = "";
61765+ int i = 0;
61766+
61767+ read_lock(&tasklist_lock);
61768+ if (current->real_parent) {
61769+ p = current->real_parent->role->rolename;
61770+ i = current->real_parent->acl_role_id;
61771+ }
61772+ read_unlock(&tasklist_lock);
61773+
61774+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_UNSPROLES_ACL_MSG, p, i);
61775+ gr_set_acls(1);
61776+ } else {
61777+ error = -EPERM;
61778+ goto out;
61779+ }
61780+ break;
61781+ default:
61782+ gr_log_int(GR_DONT_AUDIT, GR_INVMODE_ACL_MSG, gr_usermode->mode);
61783+ error = -EINVAL;
61784+ break;
61785+ }
61786+
61787+ if (error != -EPERM)
61788+ goto out;
61789+
61790+ if(!(gr_auth_attempts++))
61791+ gr_auth_expires = get_seconds() + CONFIG_GRKERNSEC_ACL_TIMEOUT;
61792+
61793+ out:
61794+ mutex_unlock(&gr_dev_mutex);
61795+ return error;
61796+}
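
The gr_auth_attempts/gr_auth_expires pair at the end of write_grsec_handler() implements a simple lockout window: the first failure arms a timeout, and once the failure count reaches CONFIG_GRKERNSEC_ACL_MAXTRIES, further attempts are refused with -EBUSY until the window expires, at which point both counters reset. A userspace sketch of the same policy; the constants and time source here are illustrative stand-ins:

#include <stdio.h>
#include <time.h>

#define MAXTRIES 3
#define TIMEOUT  30	/* seconds */

static unsigned long auth_expires;
static unsigned int auth_attempts;

/* returns 0 if an attempt may proceed, -1 if locked out */
static int check_lockout(unsigned long now)
{
	/* window elapsed: forget previous failures */
	if (auth_expires && now >= auth_expires) {
		auth_expires = 0;
		auth_attempts = 0;
	}
	if (auth_attempts >= MAXTRIES && now < auth_expires)
		return -1;
	return 0;
}

/* call on every failed attempt; the first failure arms the window */
static void record_failure(unsigned long now)
{
	if (!(auth_attempts++))
		auth_expires = now + TIMEOUT;
}

int main(void)
{
	unsigned long now = (unsigned long)time(NULL);
	int i;

	for (i = 0; i < 5; i++) {
		if (check_lockout(now)) {
			printf("attempt %d: locked out\n", i + 1);
			continue;
		}
		printf("attempt %d: allowed (and fails)\n", i + 1);
		record_failure(now);
	}
	return 0;
}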
61797+
61798+/* must be called with
61799+ rcu_read_lock();
61800+ read_lock(&tasklist_lock);
61801+ read_lock(&grsec_exec_file_lock);
61802+*/
61803+int gr_apply_subject_to_task(struct task_struct *task)
61804+{
61805+ struct acl_object_label *obj;
61806+ char *tmpname;
61807+ struct acl_subject_label *tmpsubj;
61808+ struct file *filp;
61809+ struct name_entry *nmatch;
61810+
61811+ filp = task->exec_file;
61812+ if (filp == NULL)
61813+ return 0;
61814+
61815+	/* the following applies the correct subject
61816+	   to binaries that were already running when
61817+	   the RBAC system was enabled, and that have
61818+	   been replaced or deleted since their execution
61819+	   -----
61820+	   when the RBAC system starts, the inode/dev
61821+	   from exec_file may be one the RBAC system
61822+	   is unaware of.  It only knows the inode/dev
61823+	   of the file currently on disk, or the absence
61824+	   of it.
61825+	*/
61826+ preempt_disable();
61827+ tmpname = gr_to_filename_rbac(filp->f_path.dentry, filp->f_path.mnt);
61828+
61829+ nmatch = lookup_name_entry(tmpname);
61830+ preempt_enable();
61831+ tmpsubj = NULL;
61832+ if (nmatch) {
61833+ if (nmatch->deleted)
61834+ tmpsubj = lookup_acl_subj_label_deleted(nmatch->inode, nmatch->device, task->role);
61835+ else
61836+ tmpsubj = lookup_acl_subj_label(nmatch->inode, nmatch->device, task->role);
61837+ if (tmpsubj != NULL)
61838+ task->acl = tmpsubj;
61839+ }
61840+ if (tmpsubj == NULL)
61841+ task->acl = chk_subj_label(filp->f_path.dentry, filp->f_path.mnt,
61842+ task->role);
61843+ if (task->acl) {
61844+ task->is_writable = 0;
61845+ /* ignore additional mmap checks for processes that are writable
61846+ by the default ACL */
61847+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
61848+ if (unlikely(obj->mode & GR_WRITE))
61849+ task->is_writable = 1;
61850+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, task->role->root_label);
61851+ if (unlikely(obj->mode & GR_WRITE))
61852+ task->is_writable = 1;
61853+
61854+ gr_set_proc_res(task);
61855+
61856+#ifdef CONFIG_GRKERNSEC_RBAC_DEBUG
61857+ printk(KERN_ALERT "gr_set_acls for (%s:%d): role:%s, subject:%s\n", task->comm, task_pid_nr(task), task->role->rolename, task->acl->filename);
61858+#endif
61859+ } else {
61860+ return 1;
61861+ }
61862+
61863+ return 0;
61864+}
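gr_apply_subject_to_task() documents its locking contract in the comment preceding it. As a hedged illustration of what a conforming caller looks like (the wrapper function below is hypothetical; the lock primitives are the real kernel calls named in the comment, taken in the same order gr_set_acls() uses just after this):

/* hypothetical caller, shown only to illustrate the documented lock order */
static int apply_subject_locked(struct task_struct *task)
{
	int ret;

	rcu_read_lock();
	read_lock(&tasklist_lock);
	read_lock(&grsec_exec_file_lock);

	ret = gr_apply_subject_to_task(task);

	read_unlock(&grsec_exec_file_lock);
	read_unlock(&tasklist_lock);
	rcu_read_unlock();

	return ret;
}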
61865+
61866+int
61867+gr_set_acls(const int type)
61868+{
61869+ struct task_struct *task, *task2;
61870+ struct acl_role_label *role = current->role;
61871+ __u16 acl_role_id = current->acl_role_id;
61872+ const struct cred *cred;
61873+ int ret;
61874+
61875+ rcu_read_lock();
61876+ read_lock(&tasklist_lock);
61877+ read_lock(&grsec_exec_file_lock);
61878+ do_each_thread(task2, task) {
61879+		/* check to see if we're called from the exit handler;
61880+		   if so, only replace ACLs that have inherited the
61881+		   admin ACL */
61882+
61883+ if (type && (task->role != role ||
61884+ task->acl_role_id != acl_role_id))
61885+ continue;
61886+
61887+ task->acl_role_id = 0;
61888+ task->acl_sp_role = 0;
61889+
61890+ if (task->exec_file) {
61891+ cred = __task_cred(task);
61892+ task->role = lookup_acl_role_label(task, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid));
61893+ ret = gr_apply_subject_to_task(task);
61894+ if (ret) {
61895+ read_unlock(&grsec_exec_file_lock);
61896+ read_unlock(&tasklist_lock);
61897+ rcu_read_unlock();
61898+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_DEFACL_MSG, task->comm, task_pid_nr(task));
61899+ return ret;
61900+ }
61901+ } else {
61902+ // it's a kernel process
61903+ task->role = kernel_role;
61904+ task->acl = kernel_role->root_label;
61905+#ifdef CONFIG_GRKERNSEC_ACL_HIDEKERN
61906+ task->acl->mode &= ~GR_PROCFIND;
61907+#endif
61908+ }
61909+ } while_each_thread(task2, task);
61910+ read_unlock(&grsec_exec_file_lock);
61911+ read_unlock(&tasklist_lock);
61912+ rcu_read_unlock();
61913+
61914+ return 0;
61915+}
61916+
61917+#if defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC)
61918+static const unsigned long res_learn_bumps[GR_NLIMITS] = {
61919+ [RLIMIT_CPU] = GR_RLIM_CPU_BUMP,
61920+ [RLIMIT_FSIZE] = GR_RLIM_FSIZE_BUMP,
61921+ [RLIMIT_DATA] = GR_RLIM_DATA_BUMP,
61922+ [RLIMIT_STACK] = GR_RLIM_STACK_BUMP,
61923+ [RLIMIT_CORE] = GR_RLIM_CORE_BUMP,
61924+ [RLIMIT_RSS] = GR_RLIM_RSS_BUMP,
61925+ [RLIMIT_NPROC] = GR_RLIM_NPROC_BUMP,
61926+ [RLIMIT_NOFILE] = GR_RLIM_NOFILE_BUMP,
61927+ [RLIMIT_MEMLOCK] = GR_RLIM_MEMLOCK_BUMP,
61928+ [RLIMIT_AS] = GR_RLIM_AS_BUMP,
61929+ [RLIMIT_LOCKS] = GR_RLIM_LOCKS_BUMP,
61930+ [RLIMIT_SIGPENDING] = GR_RLIM_SIGPENDING_BUMP,
61931+ [RLIMIT_MSGQUEUE] = GR_RLIM_MSGQUEUE_BUMP,
61932+ [RLIMIT_NICE] = GR_RLIM_NICE_BUMP,
61933+ [RLIMIT_RTPRIO] = GR_RLIM_RTPRIO_BUMP,
61934+ [RLIMIT_RTTIME] = GR_RLIM_RTTIME_BUMP
61935+};
61936+
61937+void
61938+gr_learn_resource(const struct task_struct *task,
61939+ const int res, const unsigned long wanted, const int gt)
61940+{
61941+ struct acl_subject_label *acl;
61942+ const struct cred *cred;
61943+
61944+ if (unlikely((gr_status & GR_READY) &&
61945+ task->acl && (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))))
61946+ goto skip_reslog;
61947+
61948+ gr_log_resource(task, res, wanted, gt);
61949+skip_reslog:
61950+
61951+ if (unlikely(!(gr_status & GR_READY) || !wanted || res >= GR_NLIMITS))
61952+ return;
61953+
61954+ acl = task->acl;
61955+
61956+ if (likely(!acl || !(acl->mode & (GR_LEARN | GR_INHERITLEARN)) ||
61957+ !(acl->resmask & (1U << (unsigned short) res))))
61958+ return;
61959+
61960+ if (wanted >= acl->res[res].rlim_cur) {
61961+ unsigned long res_add;
61962+
61963+ res_add = wanted + res_learn_bumps[res];
61964+
61965+ acl->res[res].rlim_cur = res_add;
61966+
61967+ if (wanted > acl->res[res].rlim_max)
61968+ acl->res[res].rlim_max = res_add;
61969+
61970+ /* only log the subject filename, since resource logging is supported for
61971+ single-subject learning only */
61972+ rcu_read_lock();
61973+ cred = __task_cred(task);
61974+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
61975+ task->role->roletype, GR_GLOBAL_UID(cred->uid), GR_GLOBAL_GID(cred->gid), acl->filename,
61976+ acl->filename, acl->res[res].rlim_cur, acl->res[res].rlim_max,
61977+ "", (unsigned long) res, &task->signal->saved_ip);
61978+ rcu_read_unlock();
61979+ }
61980+
61981+ return;
61982+}
61983+EXPORT_SYMBOL(gr_learn_resource);
61984+#endif
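gr_learn_resource() grows the learned soft limit to the requested value plus a per-resource bump, and raises the hard limit only when the request actually exceeds it. The update rule in isolation, as a runnable sketch (the bump value is illustrative, standing in for the GR_RLIM_*_BUMP constants):

#include <stdio.h>

struct learned_rlimit {
	unsigned long rlim_cur;
	unsigned long rlim_max;
};

/* mirrors the limit update in gr_learn_resource() */
static void learn_bump(struct learned_rlimit *r, unsigned long wanted,
		       unsigned long bump)
{
	if (wanted >= r->rlim_cur) {
		unsigned long res_add = wanted + bump;

		r->rlim_cur = res_add;
		if (wanted > r->rlim_max)
			r->rlim_max = res_add;
	}
}

int main(void)
{
	struct learned_rlimit r = { .rlim_cur = 1024, .rlim_max = 4096 };

	learn_bump(&r, 2048, 512);	/* below max: only the soft limit grows */
	printf("cur=%lu max=%lu\n", r.rlim_cur, r.rlim_max);	/* 2560 4096 */
	learn_bump(&r, 8192, 512);	/* above max: both limits grow */
	printf("cur=%lu max=%lu\n", r.rlim_cur, r.rlim_max);	/* 8704 8704 */
	return 0;
}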
61985+
61986+#if defined(CONFIG_PAX_HAVE_ACL_FLAGS) && (defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR))
61987+void
61988+pax_set_initial_flags(struct linux_binprm *bprm)
61989+{
61990+ struct task_struct *task = current;
61991+ struct acl_subject_label *proc;
61992+ unsigned long flags;
61993+
61994+ if (unlikely(!(gr_status & GR_READY)))
61995+ return;
61996+
61997+ flags = pax_get_flags(task);
61998+
61999+ proc = task->acl;
62000+
62001+ if (proc->pax_flags & GR_PAX_DISABLE_PAGEEXEC)
62002+ flags &= ~MF_PAX_PAGEEXEC;
62003+ if (proc->pax_flags & GR_PAX_DISABLE_SEGMEXEC)
62004+ flags &= ~MF_PAX_SEGMEXEC;
62005+ if (proc->pax_flags & GR_PAX_DISABLE_RANDMMAP)
62006+ flags &= ~MF_PAX_RANDMMAP;
62007+ if (proc->pax_flags & GR_PAX_DISABLE_EMUTRAMP)
62008+ flags &= ~MF_PAX_EMUTRAMP;
62009+ if (proc->pax_flags & GR_PAX_DISABLE_MPROTECT)
62010+ flags &= ~MF_PAX_MPROTECT;
62011+
62012+ if (proc->pax_flags & GR_PAX_ENABLE_PAGEEXEC)
62013+ flags |= MF_PAX_PAGEEXEC;
62014+ if (proc->pax_flags & GR_PAX_ENABLE_SEGMEXEC)
62015+ flags |= MF_PAX_SEGMEXEC;
62016+ if (proc->pax_flags & GR_PAX_ENABLE_RANDMMAP)
62017+ flags |= MF_PAX_RANDMMAP;
62018+ if (proc->pax_flags & GR_PAX_ENABLE_EMUTRAMP)
62019+ flags |= MF_PAX_EMUTRAMP;
62020+ if (proc->pax_flags & GR_PAX_ENABLE_MPROTECT)
62021+ flags |= MF_PAX_MPROTECT;
62022+
62023+ pax_set_flags(task, flags);
62024+
62025+ return;
62026+}
62027+#endif
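pax_set_initial_flags() applies the subject's PaX policy in two passes: first clear every flag the subject explicitly disables, then set every flag it explicitly enables. A standalone sketch of the same clear-then-set masking (the flag values are illustrative, not the kernel's MF_PAX_* constants):

#include <stdio.h>

/* illustrative stand-ins for MF_PAX_* bits */
#define F_PAGEEXEC 0x01
#define F_MPROTECT 0x02
#define F_RANDMMAP 0x04

static unsigned long apply_policy(unsigned long flags,
				  unsigned long disable, unsigned long enable)
{
	flags &= ~disable;	/* pass 1: honor explicit disables */
	flags |= enable;	/* pass 2: explicit enables are applied last */
	return flags;
}

int main(void)
{
	unsigned long inherited = F_PAGEEXEC | F_MPROTECT;
	/* subject disables MPROTECT and enables RANDMMAP */
	unsigned long result = apply_policy(inherited, F_MPROTECT, F_RANDMMAP);

	printf("0x%lx\n", result);	/* 0x5: PAGEEXEC | RANDMMAP */
	return 0;
}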
62028+
62029+int
62030+gr_handle_proc_ptrace(struct task_struct *task)
62031+{
62032+ struct file *filp;
62033+ struct task_struct *tmp = task;
62034+ struct task_struct *curtemp = current;
62035+ __u32 retmode;
62036+
62037+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
62038+ if (unlikely(!(gr_status & GR_READY)))
62039+ return 0;
62040+#endif
62041+
62042+ read_lock(&tasklist_lock);
62043+ read_lock(&grsec_exec_file_lock);
62044+ filp = task->exec_file;
62045+
62046+ while (task_pid_nr(tmp) > 0) {
62047+ if (tmp == curtemp)
62048+ break;
62049+ tmp = tmp->real_parent;
62050+ }
62051+
62052+ if (!filp || (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
62053+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE))))) {
62054+ read_unlock(&grsec_exec_file_lock);
62055+ read_unlock(&tasklist_lock);
62056+ return 1;
62057+ }
62058+
62059+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
62060+ if (!(gr_status & GR_READY)) {
62061+ read_unlock(&grsec_exec_file_lock);
62062+ read_unlock(&tasklist_lock);
62063+ return 0;
62064+ }
62065+#endif
62066+
62067+ retmode = gr_search_file(filp->f_path.dentry, GR_NOPTRACE, filp->f_path.mnt);
62068+ read_unlock(&grsec_exec_file_lock);
62069+ read_unlock(&tasklist_lock);
62070+
62071+ if (retmode & GR_NOPTRACE)
62072+ return 1;
62073+
62074+ if (!(current->acl->mode & GR_POVERRIDE) && !(current->role->roletype & GR_ROLE_GOD)
62075+ && (current->acl != task->acl || (current->acl != current->role->root_label
62076+ && task_pid_nr(current) != task_pid_nr(task))))
62077+ return 1;
62078+
62079+ return 0;
62080+}
62081+
62082+void task_grsec_rbac(struct seq_file *m, struct task_struct *p)
62083+{
62084+ if (unlikely(!(gr_status & GR_READY)))
62085+ return;
62086+
62087+ if (!(current->role->roletype & GR_ROLE_GOD))
62088+ return;
62089+
62090+ seq_printf(m, "RBAC:\t%.64s:%c:%.950s\n",
62091+ p->role->rolename, gr_task_roletype_to_char(p),
62092+ p->acl->filename);
62093+}
62094+
62095+int
62096+gr_handle_ptrace(struct task_struct *task, const long request)
62097+{
62098+ struct task_struct *tmp = task;
62099+ struct task_struct *curtemp = current;
62100+ __u32 retmode;
62101+
62102+#ifndef CONFIG_GRKERNSEC_HARDEN_PTRACE
62103+ if (unlikely(!(gr_status & GR_READY)))
62104+ return 0;
62105+#endif
62106+ if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
62107+ read_lock(&tasklist_lock);
62108+ while (task_pid_nr(tmp) > 0) {
62109+ if (tmp == curtemp)
62110+ break;
62111+ tmp = tmp->real_parent;
62112+ }
62113+
62114+ if (task_pid_nr(tmp) == 0 && ((grsec_enable_harden_ptrace && gr_is_global_nonroot(current_uid()) && !(gr_status & GR_READY)) ||
62115+ ((gr_status & GR_READY) && !(current->acl->mode & GR_RELAXPTRACE)))) {
62116+ read_unlock(&tasklist_lock);
62117+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
62118+ return 1;
62119+ }
62120+ read_unlock(&tasklist_lock);
62121+ }
62122+
62123+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
62124+ if (!(gr_status & GR_READY))
62125+ return 0;
62126+#endif
62127+
62128+ read_lock(&grsec_exec_file_lock);
62129+ if (unlikely(!task->exec_file)) {
62130+ read_unlock(&grsec_exec_file_lock);
62131+ return 0;
62132+ }
62133+
62134+ retmode = gr_search_file(task->exec_file->f_path.dentry, GR_PTRACERD | GR_NOPTRACE, task->exec_file->f_path.mnt);
62135+ read_unlock(&grsec_exec_file_lock);
62136+
62137+ if (retmode & GR_NOPTRACE) {
62138+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
62139+ return 1;
62140+ }
62141+
62142+ if (retmode & GR_PTRACERD) {
62143+ switch (request) {
62144+ case PTRACE_SEIZE:
62145+ case PTRACE_POKETEXT:
62146+ case PTRACE_POKEDATA:
62147+ case PTRACE_POKEUSR:
62148+#if !defined(CONFIG_PPC32) && !defined(CONFIG_PPC64) && !defined(CONFIG_PARISC) && !defined(CONFIG_ALPHA) && !defined(CONFIG_IA64)
62149+ case PTRACE_SETREGS:
62150+ case PTRACE_SETFPREGS:
62151+#endif
62152+#ifdef CONFIG_X86
62153+ case PTRACE_SETFPXREGS:
62154+#endif
62155+#ifdef CONFIG_ALTIVEC
62156+ case PTRACE_SETVRREGS:
62157+#endif
62158+ return 1;
62159+ default:
62160+ return 0;
62161+ }
62162+ } else if (!(current->acl->mode & GR_POVERRIDE) &&
62163+ !(current->role->roletype & GR_ROLE_GOD) &&
62164+ (current->acl != task->acl)) {
62165+ gr_log_ptrace(GR_DONT_AUDIT, GR_PTRACE_ACL_MSG, task);
62166+ return 1;
62167+ }
62168+
62169+ return 0;
62170+}
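Under an object flagged GR_PTRACERD (read-only ptrace), the switch above denies exactly the requests that can modify the tracee and permits the rest. A userspace approximation using glibc's <sys/ptrace.h> request names (glibc spells the kernel's PTRACE_POKEUSR as PTRACE_POKEUSER, and the arch-specific FPX/VR register cases are omitted here for portability):

#include <stdio.h>
#include <sys/ptrace.h>

/* returns 1 if a read-only-ptrace policy would block the request,
 * mirroring the GR_PTRACERD switch above (arch-specific cases omitted) */
static int blocked_by_readonly_ptrace(long request)
{
	switch (request) {
	case PTRACE_SEIZE:
	case PTRACE_POKETEXT:
	case PTRACE_POKEDATA:
	case PTRACE_POKEUSER:
	case PTRACE_SETREGS:
	case PTRACE_SETFPREGS:
		return 1;
	default:
		return 0;
	}
}

int main(void)
{
	printf("PEEKDATA blocked: %d\n",
	       blocked_by_readonly_ptrace(PTRACE_PEEKDATA));	/* 0 */
	printf("POKEDATA blocked: %d\n",
	       blocked_by_readonly_ptrace(PTRACE_POKEDATA));	/* 1 */
	return 0;
}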
62171+
62172+static int is_writable_mmap(const struct file *filp)
62173+{
62174+ struct task_struct *task = current;
62175+ struct acl_object_label *obj, *obj2;
62176+
62177+ if (gr_status & GR_READY && !(task->acl->mode & GR_OVERRIDE) &&
62178+ !task->is_writable && S_ISREG(filp->f_path.dentry->d_inode->i_mode) && (filp->f_path.mnt != shm_mnt || (filp->f_path.dentry->d_inode->i_nlink > 0))) {
62179+ obj = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt, default_role->root_label);
62180+ obj2 = chk_obj_label(filp->f_path.dentry, filp->f_path.mnt,
62181+ task->role->root_label);
62182+ if (unlikely((obj->mode & GR_WRITE) || (obj2->mode & GR_WRITE))) {
62183+ gr_log_fs_generic(GR_DONT_AUDIT, GR_WRITLIB_ACL_MSG, filp->f_path.dentry, filp->f_path.mnt);
62184+ return 1;
62185+ }
62186+ }
62187+ return 0;
62188+}
62189+
62190+int
62191+gr_acl_handle_mmap(const struct file *file, const unsigned long prot)
62192+{
62193+ __u32 mode;
62194+
62195+ if (unlikely(!file || !(prot & PROT_EXEC)))
62196+ return 1;
62197+
62198+ if (is_writable_mmap(file))
62199+ return 0;
62200+
62201+ mode =
62202+ gr_search_file(file->f_path.dentry,
62203+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
62204+ file->f_path.mnt);
62205+
62206+ if (!gr_tpe_allow(file))
62207+ return 0;
62208+
62209+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
62210+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
62211+ return 0;
62212+ } else if (unlikely(!(mode & GR_EXEC))) {
62213+ return 0;
62214+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
62215+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MMAP_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
62216+ return 1;
62217+ }
62218+
62219+ return 1;
62220+}
62221+
62222+int
62223+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
62224+{
62225+ __u32 mode;
62226+
62227+ if (unlikely(!file || !(prot & PROT_EXEC)))
62228+ return 1;
62229+
62230+ if (is_writable_mmap(file))
62231+ return 0;
62232+
62233+ mode =
62234+ gr_search_file(file->f_path.dentry,
62235+ GR_EXEC | GR_AUDIT_EXEC | GR_SUPPRESS,
62236+ file->f_path.mnt);
62237+
62238+ if (!gr_tpe_allow(file))
62239+ return 0;
62240+
62241+ if (unlikely(!(mode & GR_EXEC) && !(mode & GR_SUPPRESS))) {
62242+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
62243+ return 0;
62244+ } else if (unlikely(!(mode & GR_EXEC))) {
62245+ return 0;
62246+ } else if (unlikely(mode & GR_EXEC && mode & GR_AUDIT_EXEC)) {
62247+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_MPROTECT_ACL_MSG, file->f_path.dentry, file->f_path.mnt);
62248+ return 1;
62249+ }
62250+
62251+ return 1;
62252+}
62253+
62254+void
62255+gr_acl_handle_psacct(struct task_struct *task, const long code)
62256+{
62257+ unsigned long runtime;
62258+ unsigned long cputime;
62259+ unsigned int wday, cday;
62260+ __u8 whr, chr;
62261+ __u8 wmin, cmin;
62262+ __u8 wsec, csec;
62263+	struct timespec ts;
62264+
62265+ if (unlikely(!(gr_status & GR_READY) || !task->acl ||
62266+ !(task->acl->mode & GR_PROCACCT)))
62267+ return;
62268+
62269+	do_posix_clock_monotonic_gettime(&ts);
62270+	runtime = ts.tv_sec - task->start_time.tv_sec;
62271+ wday = runtime / (3600 * 24);
62272+ runtime -= wday * (3600 * 24);
62273+ whr = runtime / 3600;
62274+ runtime -= whr * 3600;
62275+ wmin = runtime / 60;
62276+ runtime -= wmin * 60;
62277+ wsec = runtime;
62278+
62279+ cputime = (task->utime + task->stime) / HZ;
62280+ cday = cputime / (3600 * 24);
62281+ cputime -= cday * (3600 * 24);
62282+ chr = cputime / 3600;
62283+ cputime -= chr * 3600;
62284+ cmin = cputime / 60;
62285+ cputime -= cmin * 60;
62286+ csec = cputime;
62287+
62288+ gr_log_procacct(GR_DO_AUDIT, GR_ACL_PROCACCT_MSG, task, wday, whr, wmin, wsec, cday, chr, cmin, csec, code);
62289+
62290+ return;
62291+}
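The process-accounting handler splits both the wall-clock runtime and the consumed CPU time into days, hours, minutes, and seconds by repeated division and subtraction. The same decomposition as a standalone helper:

#include <stdio.h>

struct dhms { unsigned int day, hr, min, sec; };

/* the day/hour/minute/second split used by gr_acl_handle_psacct() */
static struct dhms split_seconds(unsigned long t)
{
	struct dhms d;

	d.day = t / (3600 * 24);
	t -= d.day * (3600UL * 24);
	d.hr = t / 3600;
	t -= d.hr * 3600UL;
	d.min = t / 60;
	t -= d.min * 60;
	d.sec = t;
	return d;
}

int main(void)
{
	struct dhms d = split_seconds(93784);	/* 1 day, 2 h, 3 min, 4 s */

	printf("%u days %u:%02u:%02u\n", d.day, d.hr, d.min, d.sec);
	return 0;
}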
62292+
62293+void gr_set_kernel_label(struct task_struct *task)
62294+{
62295+ if (gr_status & GR_READY) {
62296+ task->role = kernel_role;
62297+ task->acl = kernel_role->root_label;
62298+ }
62299+ return;
62300+}
62301+
62302+#ifdef CONFIG_TASKSTATS
62303+int gr_is_taskstats_denied(int pid)
62304+{
62305+ struct task_struct *task;
62306+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62307+ const struct cred *cred;
62308+#endif
62309+ int ret = 0;
62310+
62311+ /* restrict taskstats viewing to un-chrooted root users
62312+ who have the 'view' subject flag if the RBAC system is enabled
62313+ */
62314+
62315+ rcu_read_lock();
62316+ read_lock(&tasklist_lock);
62317+ task = find_task_by_vpid(pid);
62318+ if (task) {
62319+#ifdef CONFIG_GRKERNSEC_CHROOT
62320+ if (proc_is_chrooted(task))
62321+ ret = -EACCES;
62322+#endif
62323+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62324+ cred = __task_cred(task);
62325+#ifdef CONFIG_GRKERNSEC_PROC_USER
62326+ if (gr_is_global_nonroot(cred->uid))
62327+ ret = -EACCES;
62328+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
62329+ if (gr_is_global_nonroot(cred->uid) && !groups_search(cred->group_info, grsec_proc_gid))
62330+ ret = -EACCES;
62331+#endif
62332+#endif
62333+ if (gr_status & GR_READY) {
62334+ if (!(task->acl->mode & GR_VIEW))
62335+ ret = -EACCES;
62336+ }
62337+ } else
62338+ ret = -ENOENT;
62339+
62340+ read_unlock(&tasklist_lock);
62341+ rcu_read_unlock();
62342+
62343+ return ret;
62344+}
62345+#endif
62346+
62347+/* AUXV entries are filled via a descendant of search_binary_handler
62348+ after we've already applied the subject for the target
62349+*/
62350+int gr_acl_enable_at_secure(void)
62351+{
62352+ if (unlikely(!(gr_status & GR_READY)))
62353+ return 0;
62354+
62355+ if (current->acl->mode & GR_ATSECURE)
62356+ return 1;
62357+
62358+ return 0;
62359+}
62360+
62361+int gr_acl_handle_filldir(const struct file *file, const char *name, const unsigned int namelen, const ino_t ino)
62362+{
62363+ struct task_struct *task = current;
62364+ struct dentry *dentry = file->f_path.dentry;
62365+ struct vfsmount *mnt = file->f_path.mnt;
62366+ struct acl_object_label *obj, *tmp;
62367+ struct acl_subject_label *subj;
62368+ unsigned int bufsize;
62369+ int is_not_root;
62370+ char *path;
62371+ dev_t dev = __get_dev(dentry);
62372+
62373+ if (unlikely(!(gr_status & GR_READY)))
62374+ return 1;
62375+
62376+ if (task->acl->mode & (GR_LEARN | GR_INHERITLEARN))
62377+ return 1;
62378+
62379+ /* ignore Eric Biederman */
62380+ if (IS_PRIVATE(dentry->d_inode))
62381+ return 1;
62382+
62383+ subj = task->acl;
62384+ read_lock(&gr_inode_lock);
62385+ do {
62386+ obj = lookup_acl_obj_label(ino, dev, subj);
62387+ if (obj != NULL) {
62388+ read_unlock(&gr_inode_lock);
62389+ return (obj->mode & GR_FIND) ? 1 : 0;
62390+ }
62391+ } while ((subj = subj->parent_subject));
62392+ read_unlock(&gr_inode_lock);
62393+
62394+	/* this is purely an optimization: we're looking up an object
62395+	   for the directory we're doing a readdir on.
62396+	   if it's possible for any globbed object to match the entry we're
62397+	   filling into the directory, then the object we find here will be
62398+	   an anchor point with the globbed objects attached to it
62399+	*/
62400+ obj = chk_obj_label_noglob(dentry, mnt, task->acl);
62401+ if (obj->globbed == NULL)
62402+ return (obj->mode & GR_FIND) ? 1 : 0;
62403+
62404+ is_not_root = ((obj->filename[0] == '/') &&
62405+ (obj->filename[1] == '\0')) ? 0 : 1;
62406+ bufsize = PAGE_SIZE - namelen - is_not_root;
62407+
62408+ /* check bufsize > PAGE_SIZE || bufsize == 0 */
62409+ if (unlikely((bufsize - 1) > (PAGE_SIZE - 1)))
62410+ return 1;
62411+
62412+ preempt_disable();
62413+ path = d_real_path(dentry, mnt, per_cpu_ptr(gr_shared_page[0], smp_processor_id()),
62414+ bufsize);
62415+
62416+ bufsize = strlen(path);
62417+
62418+ /* if base is "/", don't append an additional slash */
62419+ if (is_not_root)
62420+ *(path + bufsize) = '/';
62421+ memcpy(path + bufsize + is_not_root, name, namelen);
62422+ *(path + bufsize + namelen + is_not_root) = '\0';
62423+
62424+ tmp = obj->globbed;
62425+ while (tmp) {
62426+ if (!glob_match(tmp->filename, path)) {
62427+ preempt_enable();
62428+ return (tmp->mode & GR_FIND) ? 1 : 0;
62429+ }
62430+ tmp = tmp->next;
62431+ }
62432+ preempt_enable();
62433+ return (obj->mode & GR_FIND) ? 1 : 0;
62434+}
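gr_acl_handle_filldir() builds the candidate pathname by appending the directory entry's name to the directory's own path, inserting a '/' separator only when the base is not the root directory itself. A standalone sketch of that assembly (the helper name and buffer size are illustrative):

#include <stdio.h>
#include <string.h>

/* joins base and name as gr_acl_handle_filldir() does: append '/'
 * only when base is not "/" itself */
static void join_path(char *buf, size_t bufsize,
		      const char *base, const char *name)
{
	int is_not_root = !(base[0] == '/' && base[1] == '\0');

	snprintf(buf, bufsize, "%s%s%s", base, is_not_root ? "/" : "", name);
}

int main(void)
{
	char buf[256];

	join_path(buf, sizeof(buf), "/", "etc");
	printf("%s\n", buf);		/* /etc */
	join_path(buf, sizeof(buf), "/etc", "passwd");
	printf("%s\n", buf);		/* /etc/passwd */
	return 0;
}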
62435+
62436+void gr_put_exec_file(struct task_struct *task)
62437+{
62438+ struct file *filp;
62439+
62440+ write_lock(&grsec_exec_file_lock);
62441+ filp = task->exec_file;
62442+ task->exec_file = NULL;
62443+ write_unlock(&grsec_exec_file_lock);
62444+
62445+ if (filp)
62446+ fput(filp);
62447+
62448+ return;
62449+}
62450+
62451+
62452+#ifdef CONFIG_NETFILTER_XT_MATCH_GRADM_MODULE
62453+EXPORT_SYMBOL(gr_acl_is_enabled);
62454+#endif
62455+EXPORT_SYMBOL(gr_set_kernel_label);
62456+#ifdef CONFIG_SECURITY
62457+EXPORT_SYMBOL(gr_check_user_change);
62458+EXPORT_SYMBOL(gr_check_group_change);
62459+#endif
62460+
62461diff --git a/grsecurity/gracl_alloc.c b/grsecurity/gracl_alloc.c
62462new file mode 100644
62463index 0000000..34fefda
62464--- /dev/null
62465+++ b/grsecurity/gracl_alloc.c
62466@@ -0,0 +1,105 @@
62467+#include <linux/kernel.h>
62468+#include <linux/mm.h>
62469+#include <linux/slab.h>
62470+#include <linux/vmalloc.h>
62471+#include <linux/gracl.h>
62472+#include <linux/grsecurity.h>
62473+
62474+static unsigned long alloc_stack_next = 1;
62475+static unsigned long alloc_stack_size = 1;
62476+static void **alloc_stack;
62477+
62478+static __inline__ int
62479+alloc_pop(void)
62480+{
62481+ if (alloc_stack_next == 1)
62482+ return 0;
62483+
62484+ kfree(alloc_stack[alloc_stack_next - 2]);
62485+
62486+ alloc_stack_next--;
62487+
62488+ return 1;
62489+}
62490+
62491+static __inline__ int
62492+alloc_push(void *buf)
62493+{
62494+ if (alloc_stack_next >= alloc_stack_size)
62495+ return 1;
62496+
62497+ alloc_stack[alloc_stack_next - 1] = buf;
62498+
62499+ alloc_stack_next++;
62500+
62501+ return 0;
62502+}
62503+
62504+void *
62505+acl_alloc(unsigned long len)
62506+{
62507+ void *ret = NULL;
62508+
62509+ if (!len || len > PAGE_SIZE)
62510+ goto out;
62511+
62512+ ret = kmalloc(len, GFP_KERNEL);
62513+
62514+ if (ret) {
62515+ if (alloc_push(ret)) {
62516+ kfree(ret);
62517+ ret = NULL;
62518+ }
62519+ }
62520+
62521+out:
62522+ return ret;
62523+}
62524+
62525+void *
62526+acl_alloc_num(unsigned long num, unsigned long len)
62527+{
62528+ if (!len || (num > (PAGE_SIZE / len)))
62529+ return NULL;
62530+
62531+ return acl_alloc(num * len);
62532+}
62533+
62534+void
62535+acl_free_all(void)
62536+{
62537+ if (gr_acl_is_enabled() || !alloc_stack)
62538+ return;
62539+
62540+	while (alloc_pop());
62541+
62542+ if (alloc_stack) {
62543+ if ((alloc_stack_size * sizeof (void *)) <= PAGE_SIZE)
62544+ kfree(alloc_stack);
62545+ else
62546+ vfree(alloc_stack);
62547+ }
62548+
62549+ alloc_stack = NULL;
62550+ alloc_stack_size = 1;
62551+ alloc_stack_next = 1;
62552+
62553+ return;
62554+}
62555+
62556+int
62557+acl_alloc_stack_init(unsigned long size)
62558+{
62559+ if ((size * sizeof (void *)) <= PAGE_SIZE)
62560+ alloc_stack =
62561+ (void **) kmalloc(size * sizeof (void *), GFP_KERNEL);
62562+ else
62563+ alloc_stack = (void **) vmalloc(size * sizeof (void *));
62564+
62565+ alloc_stack_size = size;
62566+
62567+ if (!alloc_stack)
62568+ return 0;
62569+ else
62570+ return 1;
62571+}
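The allocator above is a simple arena: acl_alloc_stack_init() sizes a stack of pointers (returning 1 on success), acl_alloc() pushes each kmalloc'd buffer onto it, and acl_free_all() pops and frees everything at once. A hedged sketch of the intended call pattern; the load_policy_objects() wrapper and its count are hypothetical, and it assumes the gracl headers for struct acl_object_label and ENOMEM:

/* hypothetical policy-load path, illustrating the arena's lifecycle */
static int load_policy_objects(unsigned long num_objects)
{
	unsigned long i;

	/* one stack slot per allocation we intend to make */
	if (!acl_alloc_stack_init(num_objects))
		return -ENOMEM;

	for (i = 0; i < num_objects; i++) {
		void *obj = acl_alloc(sizeof(struct acl_object_label));

		if (obj == NULL) {
			acl_free_all();	/* pops and kfree()s everything */
			return -ENOMEM;
		}
		/* ... initialize and link obj ... */
	}
	return 0;
}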
62572diff --git a/grsecurity/gracl_cap.c b/grsecurity/gracl_cap.c
62573new file mode 100644
62574index 0000000..bdd51ea
62575--- /dev/null
62576+++ b/grsecurity/gracl_cap.c
62577@@ -0,0 +1,110 @@
62578+#include <linux/kernel.h>
62579+#include <linux/module.h>
62580+#include <linux/sched.h>
62581+#include <linux/gracl.h>
62582+#include <linux/grsecurity.h>
62583+#include <linux/grinternal.h>
62584+
62585+extern const char *captab_log[];
62586+extern int captab_log_entries;
62587+
62588+int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
62589+{
62590+ struct acl_subject_label *curracl;
62591+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
62592+ kernel_cap_t cap_audit = __cap_empty_set;
62593+
62594+ if (!gr_acl_is_enabled())
62595+ return 1;
62596+
62597+ curracl = task->acl;
62598+
62599+ cap_drop = curracl->cap_lower;
62600+ cap_mask = curracl->cap_mask;
62601+ cap_audit = curracl->cap_invert_audit;
62602+
62603+ while ((curracl = curracl->parent_subject)) {
62604+		/* if the cap isn't covered by the mask computed so far but is covered by
62605+		   this parent subject's mask, fold that bit into the computed mask;
62606+		   if this parent subject also lowers the cap, add it to the set of
62607+		   dropped capabilities, and likewise propagate its inverted-audit bit
62608+		*/
62609+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
62610+ cap_raise(cap_mask, cap);
62611+ if (cap_raised(curracl->cap_lower, cap))
62612+ cap_raise(cap_drop, cap);
62613+ if (cap_raised(curracl->cap_invert_audit, cap))
62614+ cap_raise(cap_audit, cap);
62615+ }
62616+ }
62617+
62618+ if (!cap_raised(cap_drop, cap)) {
62619+ if (cap_raised(cap_audit, cap))
62620+ gr_log_cap(GR_DO_AUDIT, GR_CAP_ACL_MSG2, task, captab_log[cap]);
62621+ return 1;
62622+ }
62623+
62624+ curracl = task->acl;
62625+
62626+ if ((curracl->mode & (GR_LEARN | GR_INHERITLEARN))
62627+ && cap_raised(cred->cap_effective, cap)) {
62628+ security_learn(GR_LEARN_AUDIT_MSG, task->role->rolename,
62629+ task->role->roletype, GR_GLOBAL_UID(cred->uid),
62630+ GR_GLOBAL_GID(cred->gid), task->exec_file ?
62631+ gr_to_filename(task->exec_file->f_path.dentry,
62632+ task->exec_file->f_path.mnt) : curracl->filename,
62633+ curracl->filename, 0UL,
62634+ 0UL, "", (unsigned long) cap, &task->signal->saved_ip);
62635+ return 1;
62636+ }
62637+
62638+ if ((cap >= 0) && (cap < captab_log_entries) && cap_raised(cred->cap_effective, cap) && !cap_raised(cap_audit, cap))
62639+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_ACL_MSG, task, captab_log[cap]);
62640+
62641+ return 0;
62642+}
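The loop above walks from the task's subject up through its parent subjects, building a composite view in which the closest subject that covers a capability in its mask decides whether that capability is dropped. A self-contained simulation of the same walk, with a plain unsigned long standing in for kernel_cap_t:

#include <stdio.h>

struct subject {
	unsigned long cap_mask;		/* caps this subject has an opinion on */
	unsigned long cap_lower;	/* caps this subject drops */
	struct subject *parent;
};

/* returns 1 if cap is dropped; mirrors gr_task_acl_is_capable_nolog() */
static int cap_dropped(const struct subject *s, unsigned int cap)
{
	unsigned long bit = 1UL << cap;
	unsigned long mask = s->cap_mask;
	unsigned long drop = s->cap_lower;

	while ((s = s->parent)) {
		/* the closest subject that covers the cap wins */
		if (!(mask & bit) && (s->cap_mask & bit)) {
			mask |= bit;
			if (s->cap_lower & bit)
				drop |= bit;
		}
	}
	return (drop & bit) != 0;
}

int main(void)
{
	struct subject root  = { .cap_mask = 0x3, .cap_lower = 0x1 };
	struct subject child = { .cap_mask = 0x0, .cap_lower = 0x0,
				 .parent = &root };

	printf("cap 0 dropped: %d\n", cap_dropped(&child, 0)); /* 1: root drops it */
	printf("cap 1 dropped: %d\n", cap_dropped(&child, 1)); /* 0: root allows it */
	return 0;
}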
62643+
62644+int
62645+gr_acl_is_capable(const int cap)
62646+{
62647+ return gr_task_acl_is_capable(current, current_cred(), cap);
62648+}
62649+
62650+int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap)
62651+{
62652+ struct acl_subject_label *curracl;
62653+ kernel_cap_t cap_drop = __cap_empty_set, cap_mask = __cap_empty_set;
62654+
62655+ if (!gr_acl_is_enabled())
62656+ return 1;
62657+
62658+ curracl = task->acl;
62659+
62660+ cap_drop = curracl->cap_lower;
62661+ cap_mask = curracl->cap_mask;
62662+
62663+ while ((curracl = curracl->parent_subject)) {
62664+		/* if the cap isn't covered by the mask computed so far but is covered by
62665+		   this parent subject's mask, fold that bit into the computed mask;
62666+		   if this parent subject also lowers the cap, add it to the set of
62667+		   dropped capabilities
62668+		*/
62669+ if (!cap_raised(cap_mask, cap) && cap_raised(curracl->cap_mask, cap)) {
62670+ cap_raise(cap_mask, cap);
62671+ if (cap_raised(curracl->cap_lower, cap))
62672+ cap_raise(cap_drop, cap);
62673+ }
62674+ }
62675+
62676+ if (!cap_raised(cap_drop, cap))
62677+ return 1;
62678+
62679+ return 0;
62680+}
62681+
62682+int
62683+gr_acl_is_capable_nolog(const int cap)
62684+{
62685+ return gr_task_acl_is_capable_nolog(current, cap);
62686+}
62687+
62688diff --git a/grsecurity/gracl_fs.c b/grsecurity/gracl_fs.c
62689new file mode 100644
62690index 0000000..a340c17
62691--- /dev/null
62692+++ b/grsecurity/gracl_fs.c
62693@@ -0,0 +1,431 @@
62694+#include <linux/kernel.h>
62695+#include <linux/sched.h>
62696+#include <linux/types.h>
62697+#include <linux/fs.h>
62698+#include <linux/file.h>
62699+#include <linux/stat.h>
62700+#include <linux/grsecurity.h>
62701+#include <linux/grinternal.h>
62702+#include <linux/gracl.h>
62703+
62704+umode_t
62705+gr_acl_umask(void)
62706+{
62707+ if (unlikely(!gr_acl_is_enabled()))
62708+ return 0;
62709+
62710+ return current->role->umask;
62711+}
62712+
62713+__u32
62714+gr_acl_handle_hidden_file(const struct dentry * dentry,
62715+ const struct vfsmount * mnt)
62716+{
62717+ __u32 mode;
62718+
62719+ if (unlikely(!dentry->d_inode))
62720+ return GR_FIND;
62721+
62722+ mode =
62723+ gr_search_file(dentry, GR_FIND | GR_AUDIT_FIND | GR_SUPPRESS, mnt);
62724+
62725+ if (unlikely(mode & GR_FIND && mode & GR_AUDIT_FIND)) {
62726+ gr_log_fs_rbac_generic(GR_DO_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
62727+ return mode;
62728+ } else if (unlikely(!(mode & GR_FIND) && !(mode & GR_SUPPRESS))) {
62729+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, GR_HIDDEN_ACL_MSG, dentry, mnt);
62730+ return 0;
62731+ } else if (unlikely(!(mode & GR_FIND)))
62732+ return 0;
62733+
62734+ return GR_FIND;
62735+}
62736+
62737+__u32
62738+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
62739+ int acc_mode)
62740+{
62741+ __u32 reqmode = GR_FIND;
62742+ __u32 mode;
62743+
62744+ if (unlikely(!dentry->d_inode))
62745+ return reqmode;
62746+
62747+ if (acc_mode & MAY_APPEND)
62748+ reqmode |= GR_APPEND;
62749+ else if (acc_mode & MAY_WRITE)
62750+ reqmode |= GR_WRITE;
62751+ if ((acc_mode & MAY_READ) && !S_ISDIR(dentry->d_inode->i_mode))
62752+ reqmode |= GR_READ;
62753+
62754+ mode =
62755+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
62756+ mnt);
62757+
62758+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
62759+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
62760+ reqmode & GR_READ ? " reading" : "",
62761+ reqmode & GR_WRITE ? " writing" : reqmode &
62762+ GR_APPEND ? " appending" : "");
62763+ return reqmode;
62764+	} else if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS))) {
62767+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_OPEN_ACL_MSG, dentry, mnt,
62768+ reqmode & GR_READ ? " reading" : "",
62769+ reqmode & GR_WRITE ? " writing" : reqmode &
62770+ GR_APPEND ? " appending" : "");
62771+ return 0;
62772+ } else if (unlikely((mode & reqmode) != reqmode))
62773+ return 0;
62774+
62775+ return reqmode;
62776+}
62777+
62778+__u32
62779+gr_acl_handle_creat(const struct dentry * dentry,
62780+ const struct dentry * p_dentry,
62781+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
62782+ const int imode)
62783+{
62784+ __u32 reqmode = GR_WRITE | GR_CREATE;
62785+ __u32 mode;
62786+
62787+ if (acc_mode & MAY_APPEND)
62788+ reqmode |= GR_APPEND;
62789+ // if a directory was required or the directory already exists, then
62790+ // don't count this open as a read
62791+ if ((acc_mode & MAY_READ) &&
62792+ !((open_flags & O_DIRECTORY) || (dentry->d_inode && S_ISDIR(dentry->d_inode->i_mode))))
62793+ reqmode |= GR_READ;
62794+ if ((open_flags & O_CREAT) &&
62795+ ((imode & S_ISUID) || ((imode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
62796+ reqmode |= GR_SETID;
62797+
62798+ mode =
62799+ gr_check_create(dentry, p_dentry, p_mnt,
62800+ reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
62801+
62802+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
62803+ gr_log_fs_rbac_mode2(GR_DO_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
62804+ reqmode & GR_READ ? " reading" : "",
62805+ reqmode & GR_WRITE ? " writing" : reqmode &
62806+ GR_APPEND ? " appending" : "");
62807+ return reqmode;
62808+	} else if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS))) {
62811+ gr_log_fs_rbac_mode2(GR_DONT_AUDIT, GR_CREATE_ACL_MSG, dentry, p_mnt,
62812+ reqmode & GR_READ ? " reading" : "",
62813+ reqmode & GR_WRITE ? " writing" : reqmode &
62814+ GR_APPEND ? " appending" : "");
62815+ return 0;
62816+ } else if (unlikely((mode & reqmode) != reqmode))
62817+ return 0;
62818+
62819+ return reqmode;
62820+}
62821+
62822+__u32
62823+gr_acl_handle_access(const struct dentry * dentry, const struct vfsmount * mnt,
62824+ const int fmode)
62825+{
62826+ __u32 mode, reqmode = GR_FIND;
62827+
62828+ if ((fmode & S_IXOTH) && !S_ISDIR(dentry->d_inode->i_mode))
62829+ reqmode |= GR_EXEC;
62830+ if (fmode & S_IWOTH)
62831+ reqmode |= GR_WRITE;
62832+ if (fmode & S_IROTH)
62833+ reqmode |= GR_READ;
62834+
62835+ mode =
62836+ gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS,
62837+ mnt);
62838+
62839+ if (unlikely(((mode & reqmode) == reqmode) && mode & GR_AUDITS)) {
62840+ gr_log_fs_rbac_mode3(GR_DO_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
62841+ reqmode & GR_READ ? " reading" : "",
62842+ reqmode & GR_WRITE ? " writing" : "",
62843+ reqmode & GR_EXEC ? " executing" : "");
62844+ return reqmode;
62845+	} else if (unlikely((mode & reqmode) != reqmode && !(mode & GR_SUPPRESS))) {
62848+ gr_log_fs_rbac_mode3(GR_DONT_AUDIT, GR_ACCESS_ACL_MSG, dentry, mnt,
62849+ reqmode & GR_READ ? " reading" : "",
62850+ reqmode & GR_WRITE ? " writing" : "",
62851+ reqmode & GR_EXEC ? " executing" : "");
62852+ return 0;
62853+ } else if (unlikely((mode & reqmode) != reqmode))
62854+ return 0;
62855+
62856+ return reqmode;
62857+}
62858+
62859+static __u32 generic_fs_handler(const struct dentry *dentry, const struct vfsmount *mnt, __u32 reqmode, const char *fmt)
62860+{
62861+ __u32 mode;
62862+
62863+ mode = gr_search_file(dentry, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS, mnt);
62864+
62865+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
62866+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, dentry, mnt);
62867+ return mode;
62868+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
62869+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, dentry, mnt);
62870+ return 0;
62871+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
62872+ return 0;
62873+
62874+ return (reqmode);
62875+}
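generic_fs_handler() reduces every filesystem hook to one decision ladder: grant with an audit log when all requested bits are present and an audit bit fired; deny with a log when bits are missing and suppression wasn't requested; deny silently otherwise; and grant quietly in the remaining case. The ladder in isolation (bit values are illustrative):

#include <stdio.h>

#define M_READ      0x01
#define M_WRITE     0x02
#define M_AUDITS    0x10	/* an audit bit fired */
#define M_SUPPRESS  0x20	/* suppress denial logging */

enum verdict { DENY_SILENT, DENY_LOGGED, GRANT, GRANT_AUDITED };

/* mirrors the grant/deny ladder in generic_fs_handler() */
static enum verdict decide(unsigned int mode, unsigned int reqmode)
{
	if ((mode & reqmode) == reqmode && (mode & M_AUDITS))
		return GRANT_AUDITED;
	if ((mode & reqmode) != reqmode && !(mode & M_SUPPRESS))
		return DENY_LOGGED;
	if ((mode & reqmode) != reqmode)
		return DENY_SILENT;
	return GRANT;
}

int main(void)
{
	printf("%d\n", decide(M_READ | M_WRITE, M_READ | M_WRITE));	/* GRANT */
	printf("%d\n", decide(M_WRITE | M_AUDITS, M_WRITE));		/* GRANT_AUDITED */
	printf("%d\n", decide(M_READ, M_READ | M_WRITE));		/* DENY_LOGGED */
	printf("%d\n", decide(M_READ | M_SUPPRESS, M_READ | M_WRITE));	/* DENY_SILENT */
	return 0;
}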
62876+
62877+__u32
62878+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
62879+{
62880+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_RMDIR_ACL_MSG);
62881+}
62882+
62883+__u32
62884+gr_acl_handle_unlink(const struct dentry *dentry, const struct vfsmount *mnt)
62885+{
62886+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_DELETE , GR_UNLINK_ACL_MSG);
62887+}
62888+
62889+__u32
62890+gr_acl_handle_truncate(const struct dentry *dentry, const struct vfsmount *mnt)
62891+{
62892+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_TRUNCATE_ACL_MSG);
62893+}
62894+
62895+__u32
62896+gr_acl_handle_utime(const struct dentry *dentry, const struct vfsmount *mnt)
62897+{
62898+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_ATIME_ACL_MSG);
62899+}
62900+
62901+__u32
62902+gr_acl_handle_chmod(const struct dentry *dentry, const struct vfsmount *mnt,
62903+ umode_t *modeptr)
62904+{
62905+ umode_t mode;
62906+
62907+ *modeptr &= ~gr_acl_umask();
62908+ mode = *modeptr;
62909+
62910+ if (unlikely(dentry->d_inode && S_ISSOCK(dentry->d_inode->i_mode)))
62911+ return 1;
62912+
62913+ if (unlikely(dentry->d_inode && !S_ISDIR(dentry->d_inode->i_mode) &&
62914+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))) {
62915+ return generic_fs_handler(dentry, mnt, GR_WRITE | GR_SETID,
62916+ GR_CHMOD_ACL_MSG);
62917+ } else {
62918+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHMOD_ACL_MSG);
62919+ }
62920+}
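The setid test used here (and in the creat/mknod handlers above) treats a mode as privilege-granting when it is setuid, or setgid combined with group-execute; setgid without group-execute is the traditional mandatory-locking encoding and deliberately doesn't count. The predicate on its own:

#include <stdio.h>
#include <sys/stat.h>

/* the setid test from gr_acl_handle_chmod()/creat()/mknod() */
static int grants_setid(mode_t mode)
{
	return (mode & S_ISUID) ||
	       ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP));
}

int main(void)
{
	printf("%d\n", grants_setid(04755));	/* 1: setuid */
	printf("%d\n", grants_setid(02755));	/* 1: setgid + g+x */
	printf("%d\n", grants_setid(02644));	/* 0: setgid without g+x */
	printf("%d\n", grants_setid(0755));	/* 0: no setid bits */
	return 0;
}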
62921+
62922+__u32
62923+gr_acl_handle_chown(const struct dentry *dentry, const struct vfsmount *mnt)
62924+{
62925+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_CHOWN_ACL_MSG);
62926+}
62927+
62928+__u32
62929+gr_acl_handle_setxattr(const struct dentry *dentry, const struct vfsmount *mnt)
62930+{
62931+ return generic_fs_handler(dentry, mnt, GR_WRITE, GR_SETXATTR_ACL_MSG);
62932+}
62933+
62934+__u32
62935+gr_acl_handle_execve(const struct dentry *dentry, const struct vfsmount *mnt)
62936+{
62937+ return generic_fs_handler(dentry, mnt, GR_EXEC, GR_EXEC_ACL_MSG);
62938+}
62939+
62940+__u32
62941+gr_acl_handle_unix(const struct dentry *dentry, const struct vfsmount *mnt)
62942+{
62943+ return generic_fs_handler(dentry, mnt, GR_READ | GR_WRITE,
62944+ GR_UNIXCONNECT_ACL_MSG);
62945+}
62946+
62947+/* hardlinks require at minimum create and link permission,
62948+ any additional privilege required is based on the
62949+ privilege of the file being linked to
62950+*/
62951+__u32
62952+gr_acl_handle_link(const struct dentry * new_dentry,
62953+ const struct dentry * parent_dentry,
62954+ const struct vfsmount * parent_mnt,
62955+ const struct dentry * old_dentry,
62956+ const struct vfsmount * old_mnt, const struct filename *to)
62957+{
62958+ __u32 mode;
62959+ __u32 needmode = GR_CREATE | GR_LINK;
62960+ __u32 needaudit = GR_AUDIT_CREATE | GR_AUDIT_LINK;
62961+
62962+ mode =
62963+ gr_check_link(new_dentry, parent_dentry, parent_mnt, old_dentry,
62964+ old_mnt);
62965+
62966+ if (unlikely(((mode & needmode) == needmode) && (mode & needaudit))) {
62967+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
62968+ return mode;
62969+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
62970+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_LINK_ACL_MSG, old_dentry, old_mnt, to->name);
62971+ return 0;
62972+ } else if (unlikely((mode & needmode) != needmode))
62973+ return 0;
62974+
62975+ return 1;
62976+}
62977+
62978+__u32
62979+gr_acl_handle_symlink(const struct dentry * new_dentry,
62980+ const struct dentry * parent_dentry,
62981+ const struct vfsmount * parent_mnt, const struct filename *from)
62982+{
62983+ __u32 needmode = GR_WRITE | GR_CREATE;
62984+ __u32 mode;
62985+
62986+ mode =
62987+ gr_check_create(new_dentry, parent_dentry, parent_mnt,
62988+ GR_CREATE | GR_AUDIT_CREATE |
62989+ GR_WRITE | GR_AUDIT_WRITE | GR_SUPPRESS);
62990+
62991+ if (unlikely(mode & GR_WRITE && mode & GR_AUDITS)) {
62992+ gr_log_fs_str_rbac(GR_DO_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
62993+ return mode;
62994+ } else if (unlikely(((mode & needmode) != needmode) && !(mode & GR_SUPPRESS))) {
62995+ gr_log_fs_str_rbac(GR_DONT_AUDIT, GR_SYMLINK_ACL_MSG, from->name, new_dentry, parent_mnt);
62996+ return 0;
62997+ } else if (unlikely((mode & needmode) != needmode))
62998+ return 0;
62999+
63000+ return (GR_WRITE | GR_CREATE);
63001+}
63002+
63003+static __u32 generic_fs_create_handler(const struct dentry *new_dentry, const struct dentry *parent_dentry, const struct vfsmount *parent_mnt, __u32 reqmode, const char *fmt)
63004+{
63005+ __u32 mode;
63006+
63007+ mode = gr_check_create(new_dentry, parent_dentry, parent_mnt, reqmode | to_gr_audit(reqmode) | GR_SUPPRESS);
63008+
63009+ if (unlikely(((mode & (reqmode)) == (reqmode)) && mode & GR_AUDITS)) {
63010+ gr_log_fs_rbac_generic(GR_DO_AUDIT, fmt, new_dentry, parent_mnt);
63011+ return mode;
63012+ } else if (unlikely((mode & (reqmode)) != (reqmode) && !(mode & GR_SUPPRESS))) {
63013+ gr_log_fs_rbac_generic(GR_DONT_AUDIT, fmt, new_dentry, parent_mnt);
63014+ return 0;
63015+ } else if (unlikely((mode & (reqmode)) != (reqmode)))
63016+ return 0;
63017+
63018+ return (reqmode);
63019+}
63020+
63021+__u32
63022+gr_acl_handle_mknod(const struct dentry * new_dentry,
63023+ const struct dentry * parent_dentry,
63024+ const struct vfsmount * parent_mnt,
63025+ const int mode)
63026+{
63027+ __u32 reqmode = GR_WRITE | GR_CREATE;
63028+ if (unlikely((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))))
63029+ reqmode |= GR_SETID;
63030+
63031+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
63032+ reqmode, GR_MKNOD_ACL_MSG);
63033+}
63034+
63035+__u32
63036+gr_acl_handle_mkdir(const struct dentry *new_dentry,
63037+ const struct dentry *parent_dentry,
63038+ const struct vfsmount *parent_mnt)
63039+{
63040+ return generic_fs_create_handler(new_dentry, parent_dentry, parent_mnt,
63041+ GR_WRITE | GR_CREATE, GR_MKDIR_ACL_MSG);
63042+}
63043+
63044+#define RENAME_CHECK_SUCCESS(old, new) \
63045+ (((old & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)) && \
63046+ ((new & (GR_WRITE | GR_READ)) == (GR_WRITE | GR_READ)))
63047+
63048+int
63049+gr_acl_handle_rename(struct dentry *new_dentry,
63050+ struct dentry *parent_dentry,
63051+ const struct vfsmount *parent_mnt,
63052+ struct dentry *old_dentry,
63053+ struct inode *old_parent_inode,
63054+ struct vfsmount *old_mnt, const struct filename *newname)
63055+{
63056+ __u32 comp1, comp2;
63057+ int error = 0;
63058+
63059+ if (unlikely(!gr_acl_is_enabled()))
63060+ return 0;
63061+
63062+ if (!new_dentry->d_inode) {
63063+ comp1 = gr_check_create(new_dentry, parent_dentry, parent_mnt,
63064+ GR_READ | GR_WRITE | GR_CREATE | GR_AUDIT_READ |
63065+ GR_AUDIT_WRITE | GR_AUDIT_CREATE | GR_SUPPRESS);
63066+ comp2 = gr_search_file(old_dentry, GR_READ | GR_WRITE |
63067+ GR_DELETE | GR_AUDIT_DELETE |
63068+ GR_AUDIT_READ | GR_AUDIT_WRITE |
63069+ GR_SUPPRESS, old_mnt);
63070+ } else {
63071+ comp1 = gr_search_file(new_dentry, GR_READ | GR_WRITE |
63072+ GR_CREATE | GR_DELETE |
63073+ GR_AUDIT_CREATE | GR_AUDIT_DELETE |
63074+ GR_AUDIT_READ | GR_AUDIT_WRITE |
63075+ GR_SUPPRESS, parent_mnt);
63076+ comp2 =
63077+ gr_search_file(old_dentry,
63078+ GR_READ | GR_WRITE | GR_AUDIT_READ |
63079+ GR_DELETE | GR_AUDIT_DELETE |
63080+ GR_AUDIT_WRITE | GR_SUPPRESS, old_mnt);
63081+ }
63082+
63083+ if (RENAME_CHECK_SUCCESS(comp1, comp2) &&
63084+ ((comp1 & GR_AUDITS) || (comp2 & GR_AUDITS)))
63085+ gr_log_fs_rbac_str(GR_DO_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
63086+ else if (!RENAME_CHECK_SUCCESS(comp1, comp2) && !(comp1 & GR_SUPPRESS)
63087+ && !(comp2 & GR_SUPPRESS)) {
63088+ gr_log_fs_rbac_str(GR_DONT_AUDIT, GR_RENAME_ACL_MSG, old_dentry, old_mnt, newname->name);
63089+ error = -EACCES;
63090+ } else if (unlikely(!RENAME_CHECK_SUCCESS(comp1, comp2)))
63091+ error = -EACCES;
63092+
63093+ return error;
63094+}
63095+
63096+void
63097+gr_acl_handle_exit(void)
63098+{
63099+ u16 id;
63100+ char *rolename;
63101+
63102+ if (unlikely(current->acl_sp_role && gr_acl_is_enabled() &&
63103+ !(current->role->roletype & GR_ROLE_PERSIST))) {
63104+ id = current->acl_role_id;
63105+ rolename = current->role->rolename;
63106+ gr_set_acls(1);
63107+ gr_log_str_int(GR_DONT_AUDIT_GOOD, GR_SPROLEL_ACL_MSG, rolename, id);
63108+ }
63109+
63110+ gr_put_exec_file(current);
63111+ return;
63112+}
63113+
63114+int
63115+gr_acl_handle_procpidmem(const struct task_struct *task)
63116+{
63117+ if (unlikely(!gr_acl_is_enabled()))
63118+ return 0;
63119+
63120+ if (task != current && task->acl->mode & GR_PROTPROCFD)
63121+ return -EACCES;
63122+
63123+ return 0;
63124+}
63125diff --git a/grsecurity/gracl_ip.c b/grsecurity/gracl_ip.c
63126new file mode 100644
63127index 0000000..8132048
63128--- /dev/null
63129+++ b/grsecurity/gracl_ip.c
63130@@ -0,0 +1,387 @@
63131+#include <linux/kernel.h>
63132+#include <asm/uaccess.h>
63133+#include <asm/errno.h>
63134+#include <net/sock.h>
63135+#include <linux/file.h>
63136+#include <linux/fs.h>
63137+#include <linux/net.h>
63138+#include <linux/in.h>
63139+#include <linux/skbuff.h>
63140+#include <linux/ip.h>
63141+#include <linux/udp.h>
63142+#include <linux/types.h>
63143+#include <linux/sched.h>
63144+#include <linux/netdevice.h>
63145+#include <linux/inetdevice.h>
63146+#include <linux/gracl.h>
63147+#include <linux/grsecurity.h>
63148+#include <linux/grinternal.h>
63149+
63150+#define GR_BIND 0x01
63151+#define GR_CONNECT 0x02
63152+#define GR_INVERT 0x04
63153+#define GR_BINDOVERRIDE 0x08
63154+#define GR_CONNECTOVERRIDE 0x10
63155+#define GR_SOCK_FAMILY 0x20
63156+
63157+static const char * gr_protocols[IPPROTO_MAX] = {
63158+ "ip", "icmp", "igmp", "ggp", "ipencap", "st", "tcp", "cbt",
63159+ "egp", "igp", "bbn-rcc", "nvp", "pup", "argus", "emcon", "xnet",
63160+ "chaos", "udp", "mux", "dcn", "hmp", "prm", "xns-idp", "trunk-1",
63161+ "trunk-2", "leaf-1", "leaf-2", "rdp", "irtp", "iso-tp4", "netblt", "mfe-nsp",
63162+ "merit-inp", "sep", "3pc", "idpr", "xtp", "ddp", "idpr-cmtp", "tp++",
63163+ "il", "ipv6", "sdrp", "ipv6-route", "ipv6-frag", "idrp", "rsvp", "gre",
63164+ "mhrp", "bna", "ipv6-crypt", "ipv6-auth", "i-nlsp", "swipe", "narp", "mobile",
63165+ "tlsp", "skip", "ipv6-icmp", "ipv6-nonxt", "ipv6-opts", "unknown:61", "cftp", "unknown:63",
63166+ "sat-expak", "kryptolan", "rvd", "ippc", "unknown:68", "sat-mon", "visa", "ipcv",
63167+ "cpnx", "cphb", "wsn", "pvp", "br-sat-mon", "sun-nd", "wb-mon", "wb-expak",
63168+ "iso-ip", "vmtp", "secure-vmtp", "vines", "ttp", "nfsnet-igp", "dgp", "tcf",
63169+ "eigrp", "ospf", "sprite-rpc", "larp", "mtp", "ax.25", "ipip", "micp",
63170+ "scc-sp", "etherip", "encap", "unknown:99", "gmtp", "ifmp", "pnni", "pim",
63171+ "aris", "scps", "qnx", "a/n", "ipcomp", "snp", "compaq-peer", "ipx-in-ip",
63172+ "vrrp", "pgm", "unknown:114", "l2tp", "ddx", "iatp", "stp", "srp",
63173+ "uti", "smp", "sm", "ptp", "isis", "fire", "crtp", "crdup",
63174+	"sscopmce", "iplt", "sps", "pipe", "sctp", "fc", "unknown:134", "unknown:135",
63175+ "unknown:136", "unknown:137", "unknown:138", "unknown:139", "unknown:140", "unknown:141", "unknown:142", "unknown:143",
63176+ "unknown:144", "unknown:145", "unknown:146", "unknown:147", "unknown:148", "unknown:149", "unknown:150", "unknown:151",
63177+ "unknown:152", "unknown:153", "unknown:154", "unknown:155", "unknown:156", "unknown:157", "unknown:158", "unknown:159",
63178+ "unknown:160", "unknown:161", "unknown:162", "unknown:163", "unknown:164", "unknown:165", "unknown:166", "unknown:167",
63179+ "unknown:168", "unknown:169", "unknown:170", "unknown:171", "unknown:172", "unknown:173", "unknown:174", "unknown:175",
63180+ "unknown:176", "unknown:177", "unknown:178", "unknown:179", "unknown:180", "unknown:181", "unknown:182", "unknown:183",
63181+ "unknown:184", "unknown:185", "unknown:186", "unknown:187", "unknown:188", "unknown:189", "unknown:190", "unknown:191",
63182+ "unknown:192", "unknown:193", "unknown:194", "unknown:195", "unknown:196", "unknown:197", "unknown:198", "unknown:199",
63183+ "unknown:200", "unknown:201", "unknown:202", "unknown:203", "unknown:204", "unknown:205", "unknown:206", "unknown:207",
63184+ "unknown:208", "unknown:209", "unknown:210", "unknown:211", "unknown:212", "unknown:213", "unknown:214", "unknown:215",
63185+ "unknown:216", "unknown:217", "unknown:218", "unknown:219", "unknown:220", "unknown:221", "unknown:222", "unknown:223",
63186+ "unknown:224", "unknown:225", "unknown:226", "unknown:227", "unknown:228", "unknown:229", "unknown:230", "unknown:231",
63187+ "unknown:232", "unknown:233", "unknown:234", "unknown:235", "unknown:236", "unknown:237", "unknown:238", "unknown:239",
63188+ "unknown:240", "unknown:241", "unknown:242", "unknown:243", "unknown:244", "unknown:245", "unknown:246", "unknown:247",
63189+ "unknown:248", "unknown:249", "unknown:250", "unknown:251", "unknown:252", "unknown:253", "unknown:254", "unknown:255",
63190+ };
63191+
63192+static const char * gr_socktypes[SOCK_MAX] = {
63193+ "unknown:0", "stream", "dgram", "raw", "rdm", "seqpacket", "unknown:6",
63194+ "unknown:7", "unknown:8", "unknown:9", "packet"
63195+ };
63196+
63197+static const char * gr_sockfamilies[AF_MAX+1] = {
63198+ "unspec", "unix", "inet", "ax25", "ipx", "appletalk", "netrom", "bridge", "atmpvc", "x25",
63199+ "inet6", "rose", "decnet", "netbeui", "security", "key", "netlink", "packet", "ash",
63200+	"econet", "atmsvc", "rds", "sna", "irda", "pppox", "wanpipe", "llc", "fam_27", "fam_28",
63201+	"tipc", "bluetooth", "iucv", "rxrpc", "isdn", "phonet", "ieee802154", "caif"
63202+ };
63203+
63204+const char *
63205+gr_proto_to_name(unsigned char proto)
63206+{
63207+ return gr_protocols[proto];
63208+}
63209+
63210+const char *
63211+gr_socktype_to_name(unsigned char type)
63212+{
63213+ return gr_socktypes[type];
63214+}
63215+
63216+const char *
63217+gr_sockfamily_to_name(unsigned char family)
63218+{
63219+ return gr_sockfamilies[family];
63220+}
63221+
63222+int
63223+gr_search_socket(const int domain, const int type, const int protocol)
63224+{
63225+ struct acl_subject_label *curr;
63226+ const struct cred *cred = current_cred();
63227+
63228+ if (unlikely(!gr_acl_is_enabled()))
63229+ goto exit;
63230+
63231+ if ((domain < 0) || (type < 0) || (protocol < 0) ||
63232+ (domain >= AF_MAX) || (type >= SOCK_MAX) || (protocol >= IPPROTO_MAX))
63233+ goto exit; // let the kernel handle it
63234+
63235+ curr = current->acl;
63236+
63237+ if (curr->sock_families[domain / 32] & (1U << (domain % 32))) {
63238+		/* the family is allowed; if this is PF_INET, allow it only if
63239+		   the extra sock type/protocol checks pass */
63240+ if (domain == PF_INET)
63241+ goto inet_check;
63242+ goto exit;
63243+ } else {
63244+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
63245+ __u32 fakeip = 0;
63246+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
63247+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
63248+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
63249+ gr_to_filename(current->exec_file->f_path.dentry,
63250+ current->exec_file->f_path.mnt) :
63251+ curr->filename, curr->filename,
63252+ &fakeip, domain, 0, 0, GR_SOCK_FAMILY,
63253+ &current->signal->saved_ip);
63254+ goto exit;
63255+ }
63256+ goto exit_fail;
63257+ }
63258+
63259+inet_check:
63260+ /* the rest of this checking is for IPv4 only */
63261+ if (!curr->ips)
63262+ goto exit;
63263+
63264+ if ((curr->ip_type & (1U << type)) &&
63265+ (curr->ip_proto[protocol / 32] & (1U << (protocol % 32))))
63266+ goto exit;
63267+
63268+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
63269+		/* we don't place acls on raw sockets, and sometimes
63270+		   dgram/ip sockets are opened for ioctl and not
63271+		   bind/connect, so we'll fake a connect/bind learn log */
63272+ if (type == SOCK_RAW || type == SOCK_PACKET) {
63273+ __u32 fakeip = 0;
63274+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
63275+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
63276+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
63277+ gr_to_filename(current->exec_file->f_path.dentry,
63278+ current->exec_file->f_path.mnt) :
63279+ curr->filename, curr->filename,
63280+ &fakeip, 0, type,
63281+ protocol, GR_CONNECT, &current->signal->saved_ip);
63282+ } else if ((type == SOCK_DGRAM) && (protocol == IPPROTO_IP)) {
63283+ __u32 fakeip = 0;
63284+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
63285+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
63286+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
63287+ gr_to_filename(current->exec_file->f_path.dentry,
63288+ current->exec_file->f_path.mnt) :
63289+ curr->filename, curr->filename,
63290+ &fakeip, 0, type,
63291+ protocol, GR_BIND, &current->signal->saved_ip);
63292+ }
63293+ /* we'll log when they use connect or bind */
63294+ goto exit;
63295+ }
63296+
63297+exit_fail:
63298+ if (domain == PF_INET)
63299+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(domain),
63300+ gr_socktype_to_name(type), gr_proto_to_name(protocol));
63301+ else
63302+#ifndef CONFIG_IPV6
63303+ if (domain != PF_INET6)
63304+#endif
63305+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(domain),
63306+ gr_socktype_to_name(type), protocol);
63307+
63308+ return 0;
63309+exit:
63310+ return 1;
63311+}
63312+
63313+int check_ip_policy(struct acl_ip_label *ip, __u32 ip_addr, __u16 ip_port, __u8 protocol, const int mode, const int type, __u32 our_addr, __u32 our_netmask)
63314+{
63315+ if ((ip->mode & mode) &&
63316+ (ip_port >= ip->low) &&
63317+ (ip_port <= ip->high) &&
63318+ ((ntohl(ip_addr) & our_netmask) ==
63319+ (ntohl(our_addr) & our_netmask))
63320+ && (ip->proto[protocol / 32] & (1U << (protocol % 32)))
63321+ && (ip->type & (1U << type))) {
63322+ if (ip->mode & GR_INVERT)
63323+ return 2; // specifically denied
63324+ else
63325+ return 1; // allowed
63326+ }
63327+
63328+ return 0; // not specifically allowed, may continue parsing
63329+}
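check_ip_policy() treats an address as matching when it falls in the rule's subnet: both the packet address and the rule address are masked with the rule's netmask and compared, alongside the port range and the protocol/type bitmaps. The subnet comparison alone, as a runnable sketch (the netmask is taken in host byte order, as the masking above assumes):

#include <stdio.h>
#include <stdint.h>
#include <arpa/inet.h>

/* the subnet test from check_ip_policy(): convert both addresses to
 * host byte order, mask with the rule's netmask, and compare */
static int in_subnet(uint32_t ip_addr_be, uint32_t our_addr_be,
		     uint32_t our_netmask)
{
	return (ntohl(ip_addr_be) & our_netmask) ==
	       (ntohl(our_addr_be) & our_netmask);
}

int main(void)
{
	uint32_t rule = inet_addr("192.168.1.0");
	uint32_t mask = 0xffffff00;	/* a /24, host byte order */

	printf("%d\n", in_subnet(inet_addr("192.168.1.42"), rule, mask));	/* 1 */
	printf("%d\n", in_subnet(inet_addr("192.168.2.42"), rule, mask));	/* 0 */
	return 0;
}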
63330+
63331+static int
63332+gr_search_connectbind(const int full_mode, struct sock *sk,
63333+ struct sockaddr_in *addr, const int type)
63334+{
63335+ char iface[IFNAMSIZ] = {0};
63336+ struct acl_subject_label *curr;
63337+ struct acl_ip_label *ip;
63338+ struct inet_sock *isk;
63339+ struct net_device *dev;
63340+ struct in_device *idev;
63341+ unsigned long i;
63342+ int ret;
63343+ int mode = full_mode & (GR_BIND | GR_CONNECT);
63344+ __u32 ip_addr = 0;
63345+ __u32 our_addr;
63346+ __u32 our_netmask;
63347+ char *p;
63348+ __u16 ip_port = 0;
63349+ const struct cred *cred = current_cred();
63350+
63351+ if (unlikely(!gr_acl_is_enabled() || sk->sk_family != PF_INET))
63352+ return 0;
63353+
63354+ curr = current->acl;
63355+ isk = inet_sk(sk);
63356+
63357+	/* INADDR_ANY overriding for binds; inaddr_any_override is already in network order */
63358+ if ((full_mode & GR_BINDOVERRIDE) && addr->sin_addr.s_addr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0)
63359+ addr->sin_addr.s_addr = curr->inaddr_any_override;
63360+ if ((full_mode & GR_CONNECT) && isk->inet_saddr == htonl(INADDR_ANY) && curr->inaddr_any_override != 0) {
63361+ struct sockaddr_in saddr;
63362+ int err;
63363+
63364+ saddr.sin_family = AF_INET;
63365+ saddr.sin_addr.s_addr = curr->inaddr_any_override;
63366+ saddr.sin_port = isk->inet_sport;
63367+
63368+ err = security_socket_bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
63369+ if (err)
63370+ return err;
63371+
63372+ err = sk->sk_socket->ops->bind(sk->sk_socket, (struct sockaddr *)&saddr, sizeof(struct sockaddr_in));
63373+ if (err)
63374+ return err;
63375+ }
63376+
63377+ if (!curr->ips)
63378+ return 0;
63379+
63380+ ip_addr = addr->sin_addr.s_addr;
63381+ ip_port = ntohs(addr->sin_port);
63382+
63383+ if (curr->mode & (GR_LEARN | GR_INHERITLEARN)) {
63384+ security_learn(GR_IP_LEARN_MSG, current->role->rolename,
63385+ current->role->roletype, GR_GLOBAL_UID(cred->uid),
63386+ GR_GLOBAL_GID(cred->gid), current->exec_file ?
63387+ gr_to_filename(current->exec_file->f_path.dentry,
63388+ current->exec_file->f_path.mnt) :
63389+ curr->filename, curr->filename,
63390+ &ip_addr, ip_port, type,
63391+ sk->sk_protocol, mode, &current->signal->saved_ip);
63392+ return 0;
63393+ }
63394+
63395+ for (i = 0; i < curr->ip_num; i++) {
63396+ ip = *(curr->ips + i);
63397+ if (ip->iface != NULL) {
63398+ strncpy(iface, ip->iface, IFNAMSIZ - 1);
63399+ p = strchr(iface, ':');
63400+ if (p != NULL)
63401+ *p = '\0';
63402+ dev = dev_get_by_name(sock_net(sk), iface);
63403+ if (dev == NULL)
63404+ continue;
63405+ idev = in_dev_get(dev);
63406+ if (idev == NULL) {
63407+ dev_put(dev);
63408+ continue;
63409+ }
63410+ rcu_read_lock();
63411+ for_ifa(idev) {
63412+ if (!strcmp(ip->iface, ifa->ifa_label)) {
63413+ our_addr = ifa->ifa_address;
63414+ our_netmask = 0xffffffff;
63415+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
63416+ if (ret == 1) {
63417+ rcu_read_unlock();
63418+ in_dev_put(idev);
63419+ dev_put(dev);
63420+ return 0;
63421+ } else if (ret == 2) {
63422+ rcu_read_unlock();
63423+ in_dev_put(idev);
63424+ dev_put(dev);
63425+ goto denied;
63426+ }
63427+ }
63428+ } endfor_ifa(idev);
63429+ rcu_read_unlock();
63430+ in_dev_put(idev);
63431+ dev_put(dev);
63432+ } else {
63433+ our_addr = ip->addr;
63434+ our_netmask = ip->netmask;
63435+ ret = check_ip_policy(ip, ip_addr, ip_port, sk->sk_protocol, mode, type, our_addr, our_netmask);
63436+ if (ret == 1)
63437+ return 0;
63438+ else if (ret == 2)
63439+ goto denied;
63440+ }
63441+ }
63442+
63443+denied:
63444+ if (mode == GR_BIND)
63445+ gr_log_int5_str2(GR_DONT_AUDIT, GR_BIND_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
63446+ else if (mode == GR_CONNECT)
63447+ gr_log_int5_str2(GR_DONT_AUDIT, GR_CONNECT_ACL_MSG, &ip_addr, ip_port, gr_socktype_to_name(type), gr_proto_to_name(sk->sk_protocol));
63448+
63449+ return -EACCES;
63450+}
63451+
63452+int
63453+gr_search_connect(struct socket *sock, struct sockaddr_in *addr)
63454+{
63455+ /* always allow disconnection of dgram sockets with connect */
63456+ if (addr->sin_family == AF_UNSPEC)
63457+ return 0;
63458+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sock->sk, addr, sock->type);
63459+}
63460+
63461+int
63462+gr_search_bind(struct socket *sock, struct sockaddr_in *addr)
63463+{
63464+ return gr_search_connectbind(GR_BIND | GR_BINDOVERRIDE, sock->sk, addr, sock->type);
63465+}
63466+
63467+int gr_search_listen(struct socket *sock)
63468+{
63469+ struct sock *sk = sock->sk;
63470+ struct sockaddr_in addr;
63471+
63472+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
63473+ addr.sin_port = inet_sk(sk)->inet_sport;
63474+
63475+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
63476+}
63477+
63478+int gr_search_accept(struct socket *sock)
63479+{
63480+ struct sock *sk = sock->sk;
63481+ struct sockaddr_in addr;
63482+
63483+ addr.sin_addr.s_addr = inet_sk(sk)->inet_saddr;
63484+ addr.sin_port = inet_sk(sk)->inet_sport;
63485+
63486+ return gr_search_connectbind(GR_BIND | GR_CONNECTOVERRIDE, sock->sk, &addr, sock->type);
63487+}
63488+
63489+int
63490+gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr)
63491+{
63492+ if (addr)
63493+ return gr_search_connectbind(GR_CONNECT, sk, addr, SOCK_DGRAM);
63494+ else {
63495+ struct sockaddr_in sin;
63496+ const struct inet_sock *inet = inet_sk(sk);
63497+
63498+ sin.sin_addr.s_addr = inet->inet_daddr;
63499+ sin.sin_port = inet->inet_dport;
63500+
63501+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
63502+ }
63503+}
63504+
63505+int
63506+gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb)
63507+{
63508+ struct sockaddr_in sin;
63509+
63510+ if (unlikely(skb->len < sizeof (struct udphdr)))
63511+		return 0; // too short for a UDP header; skip this packet
63512+
63513+ sin.sin_addr.s_addr = ip_hdr(skb)->saddr;
63514+ sin.sin_port = udp_hdr(skb)->source;
63515+
63516+ return gr_search_connectbind(GR_CONNECT | GR_CONNECTOVERRIDE, sk, &sin, SOCK_DGRAM);
63517+}
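
The verdict convention above is: check_ip_policy() returning 1 means an explicit allow (return 0), 2 means an explicit deny (goto denied), and anything else keeps scanning, with the fall-through into denied: acting as a default deny. A minimal user-space sketch of the same walk, with a hypothetical rule type standing in for the ACL structures:

#include <stdio.h>

enum verdict { NO_MATCH = 0, ALLOW = 1, DENY = 2 };

struct rule { unsigned int addr, netmask; enum verdict verdict; };

/* walk the rules in order; first explicit match wins, default is deny */
static int search_rules(const struct rule *rules, int n, unsigned int addr)
{
	int i;

	for (i = 0; i < n; i++) {
		if ((addr & rules[i].netmask) != (rules[i].addr & rules[i].netmask))
			continue;	/* no match: keep scanning */
		if (rules[i].verdict == ALLOW)
			return 0;	/* like "return 0" above */
		if (rules[i].verdict == DENY)
			return -1;	/* like "goto denied" above */
	}
	return -1;		/* fell off the end: default deny */
}

int main(void)
{
	struct rule rules[] = {
		{ 0x7f000001, 0xffffffff, ALLOW },	/* allow 127.0.0.1 */
		{ 0x00000000, 0x00000000, DENY },	/* deny everything else */
	};

	printf("127.0.0.1 -> %d\n", search_rules(rules, 2, 0x7f000001)); /* 0 */
	printf("10.0.0.1  -> %d\n", search_rules(rules, 2, 0x0a000001)); /* -1 */
	return 0;
}
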
63518diff --git a/grsecurity/gracl_learn.c b/grsecurity/gracl_learn.c
63519new file mode 100644
63520index 0000000..25f54ef
63521--- /dev/null
63522+++ b/grsecurity/gracl_learn.c
63523@@ -0,0 +1,207 @@
63524+#include <linux/kernel.h>
63525+#include <linux/mm.h>
63526+#include <linux/sched.h>
63527+#include <linux/poll.h>
63528+#include <linux/string.h>
63529+#include <linux/file.h>
63530+#include <linux/types.h>
63531+#include <linux/vmalloc.h>
63532+#include <linux/grinternal.h>
63533+
63534+extern ssize_t write_grsec_handler(struct file * file, const char __user * buf,
63535+ size_t count, loff_t *ppos);
63536+extern int gr_acl_is_enabled(void);
63537+
63538+static DECLARE_WAIT_QUEUE_HEAD(learn_wait);
63539+static int gr_learn_attached;
63540+
63541+/* use a 512k buffer */
63542+#define LEARN_BUFFER_SIZE (512 * 1024)
63543+
63544+static DEFINE_SPINLOCK(gr_learn_lock);
63545+static DEFINE_MUTEX(gr_learn_user_mutex);
63546+
63547+/* we need to maintain two buffers: the grlearn reader context takes a
63548+   mutex around the copy to userspace, while the other kernel contexts
63549+   take a spinlock when copying into the buffer, since they cannot sleep
63550+*/
63551+static char *learn_buffer;
63552+static char *learn_buffer_user;
63553+static int learn_buffer_len;
63554+static int learn_buffer_user_len;
63555+
63556+static ssize_t
63557+read_learn(struct file *file, char __user * buf, size_t count, loff_t * ppos)
63558+{
63559+ DECLARE_WAITQUEUE(wait, current);
63560+ ssize_t retval = 0;
63561+
63562+ add_wait_queue(&learn_wait, &wait);
63563+ set_current_state(TASK_INTERRUPTIBLE);
63564+ do {
63565+ mutex_lock(&gr_learn_user_mutex);
63566+ spin_lock(&gr_learn_lock);
63567+ if (learn_buffer_len)
63568+ break;
63569+ spin_unlock(&gr_learn_lock);
63570+ mutex_unlock(&gr_learn_user_mutex);
63571+ if (file->f_flags & O_NONBLOCK) {
63572+ retval = -EAGAIN;
63573+ goto out;
63574+ }
63575+ if (signal_pending(current)) {
63576+ retval = -ERESTARTSYS;
63577+ goto out;
63578+ }
63579+
63580+ schedule();
63581+ } while (1);
63582+
63583+ memcpy(learn_buffer_user, learn_buffer, learn_buffer_len);
63584+ learn_buffer_user_len = learn_buffer_len;
63585+ retval = learn_buffer_len;
63586+ learn_buffer_len = 0;
63587+
63588+ spin_unlock(&gr_learn_lock);
63589+
63590+ if (copy_to_user(buf, learn_buffer_user, learn_buffer_user_len))
63591+ retval = -EFAULT;
63592+
63593+ mutex_unlock(&gr_learn_user_mutex);
63594+out:
63595+ set_current_state(TASK_RUNNING);
63596+ remove_wait_queue(&learn_wait, &wait);
63597+ return retval;
63598+}
63599+
63600+static unsigned int
63601+poll_learn(struct file * file, poll_table * wait)
63602+{
63603+ poll_wait(file, &learn_wait, wait);
63604+
63605+ if (learn_buffer_len)
63606+ return (POLLIN | POLLRDNORM);
63607+
63608+ return 0;
63609+}
63610+
63611+void
63612+gr_clear_learn_entries(void)
63613+{
63614+ char *tmp;
63615+
63616+ mutex_lock(&gr_learn_user_mutex);
63617+ spin_lock(&gr_learn_lock);
63618+ tmp = learn_buffer;
63619+ learn_buffer = NULL;
63620+ spin_unlock(&gr_learn_lock);
63621+ if (tmp)
63622+ vfree(tmp);
63623+ if (learn_buffer_user != NULL) {
63624+ vfree(learn_buffer_user);
63625+ learn_buffer_user = NULL;
63626+ }
63627+ learn_buffer_len = 0;
63628+ mutex_unlock(&gr_learn_user_mutex);
63629+
63630+ return;
63631+}
63632+
63633+void
63634+gr_add_learn_entry(const char *fmt, ...)
63635+{
63636+ va_list args;
63637+ unsigned int len;
63638+
63639+ if (!gr_learn_attached)
63640+ return;
63641+
63642+ spin_lock(&gr_learn_lock);
63643+
63644+ /* leave a gap at the end so we know when it's "full" but don't have to
63645+ compute the exact length of the string we're trying to append
63646+ */
63647+ if (learn_buffer_len > LEARN_BUFFER_SIZE - 16384) {
63648+ spin_unlock(&gr_learn_lock);
63649+ wake_up_interruptible(&learn_wait);
63650+ return;
63651+ }
63652+ if (learn_buffer == NULL) {
63653+ spin_unlock(&gr_learn_lock);
63654+ return;
63655+ }
63656+
63657+ va_start(args, fmt);
63658+ len = vsnprintf(learn_buffer + learn_buffer_len, LEARN_BUFFER_SIZE - learn_buffer_len, fmt, args);
63659+ va_end(args);
63660+
63661+ learn_buffer_len += len + 1;
63662+
63663+ spin_unlock(&gr_learn_lock);
63664+ wake_up_interruptible(&learn_wait);
63665+
63666+ return;
63667+}
63668+
63669+static int
63670+open_learn(struct inode *inode, struct file *file)
63671+{
63672+ if (file->f_mode & FMODE_READ && gr_learn_attached)
63673+ return -EBUSY;
63674+ if (file->f_mode & FMODE_READ) {
63675+ int retval = 0;
63676+ mutex_lock(&gr_learn_user_mutex);
63677+ if (learn_buffer == NULL)
63678+ learn_buffer = vmalloc(LEARN_BUFFER_SIZE);
63679+ if (learn_buffer_user == NULL)
63680+ learn_buffer_user = vmalloc(LEARN_BUFFER_SIZE);
63681+ if (learn_buffer == NULL) {
63682+ retval = -ENOMEM;
63683+ goto out_error;
63684+ }
63685+ if (learn_buffer_user == NULL) {
63686+ retval = -ENOMEM;
63687+ goto out_error;
63688+ }
63689+ learn_buffer_len = 0;
63690+ learn_buffer_user_len = 0;
63691+ gr_learn_attached = 1;
63692+out_error:
63693+ mutex_unlock(&gr_learn_user_mutex);
63694+ return retval;
63695+ }
63696+ return 0;
63697+}
63698+
63699+static int
63700+close_learn(struct inode *inode, struct file *file)
63701+{
63702+ if (file->f_mode & FMODE_READ) {
63703+ char *tmp = NULL;
63704+ mutex_lock(&gr_learn_user_mutex);
63705+ spin_lock(&gr_learn_lock);
63706+ tmp = learn_buffer;
63707+ learn_buffer = NULL;
63708+ spin_unlock(&gr_learn_lock);
63709+ if (tmp)
63710+ vfree(tmp);
63711+ if (learn_buffer_user != NULL) {
63712+ vfree(learn_buffer_user);
63713+ learn_buffer_user = NULL;
63714+ }
63715+ learn_buffer_len = 0;
63716+ learn_buffer_user_len = 0;
63717+ gr_learn_attached = 0;
63718+ mutex_unlock(&gr_learn_user_mutex);
63719+ }
63720+
63721+ return 0;
63722+}
63723+
63724+const struct file_operations grsec_fops = {
63725+ .read = read_learn,
63726+ .write = write_grsec_handler,
63727+ .open = open_learn,
63728+ .release = close_learn,
63729+ .poll = poll_learn,
63730+};
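
grsec_fops is what the user-space learning daemon (grlearn) drives: it blocks in poll() until gr_add_learn_entry() wakes learn_wait, then read() drains the buffer. A sketch of such a reader loop, assuming a hypothetical /dev/grsec node wired to these file operations (the actual registration point is elsewhere in the patch):

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

#define BUF_SZ (512 * 1024)	/* matches LEARN_BUFFER_SIZE above */

int main(void)
{
	/* hypothetical device path; registration of grsec_fops is not
	   shown in this file */
	int fd = open("/dev/grsec", O_RDONLY);
	static char buf[BUF_SZ];
	struct pollfd pfd;
	ssize_t n;

	if (fd < 0) {
		perror("open");
		return 1;
	}
	pfd.fd = fd;
	pfd.events = POLLIN;

	for (;;) {
		if (poll(&pfd, 1, -1) < 0) {	/* blocks in poll_learn() */
			perror("poll");
			break;
		}
		n = read(fd, buf, sizeof(buf));	/* drains via read_learn() */
		if (n <= 0)
			break;
		fwrite(buf, 1, (size_t)n, stdout);	/* entries are NUL-separated */
	}
	close(fd);
	return 0;
}
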
63731diff --git a/grsecurity/gracl_res.c b/grsecurity/gracl_res.c
63732new file mode 100644
63733index 0000000..39645c9
63734--- /dev/null
63735+++ b/grsecurity/gracl_res.c
63736@@ -0,0 +1,68 @@
63737+#include <linux/kernel.h>
63738+#include <linux/sched.h>
63739+#include <linux/gracl.h>
63740+#include <linux/grinternal.h>
63741+
63742+static const char *restab_log[] = {
63743+ [RLIMIT_CPU] = "RLIMIT_CPU",
63744+ [RLIMIT_FSIZE] = "RLIMIT_FSIZE",
63745+ [RLIMIT_DATA] = "RLIMIT_DATA",
63746+ [RLIMIT_STACK] = "RLIMIT_STACK",
63747+ [RLIMIT_CORE] = "RLIMIT_CORE",
63748+ [RLIMIT_RSS] = "RLIMIT_RSS",
63749+ [RLIMIT_NPROC] = "RLIMIT_NPROC",
63750+ [RLIMIT_NOFILE] = "RLIMIT_NOFILE",
63751+ [RLIMIT_MEMLOCK] = "RLIMIT_MEMLOCK",
63752+ [RLIMIT_AS] = "RLIMIT_AS",
63753+ [RLIMIT_LOCKS] = "RLIMIT_LOCKS",
63754+ [RLIMIT_SIGPENDING] = "RLIMIT_SIGPENDING",
63755+ [RLIMIT_MSGQUEUE] = "RLIMIT_MSGQUEUE",
63756+ [RLIMIT_NICE] = "RLIMIT_NICE",
63757+ [RLIMIT_RTPRIO] = "RLIMIT_RTPRIO",
63758+ [RLIMIT_RTTIME] = "RLIMIT_RTTIME",
63759+ [GR_CRASH_RES] = "RLIMIT_CRASH"
63760+};
63761+
63762+void
63763+gr_log_resource(const struct task_struct *task,
63764+ const int res, const unsigned long wanted, const int gt)
63765+{
63766+ const struct cred *cred;
63767+ unsigned long rlim;
63768+
63769+ if (!gr_acl_is_enabled() && !grsec_resource_logging)
63770+ return;
63771+
63772+	// resource not yet supported (no entry in restab_log)
63773+ if (unlikely(!restab_log[res]))
63774+ return;
63775+
63776+ if (res == RLIMIT_CPU || res == RLIMIT_RTTIME)
63777+ rlim = task_rlimit_max(task, res);
63778+ else
63779+ rlim = task_rlimit(task, res);
63780+
63781+ if (likely((rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)))
63782+ return;
63783+
63784+ rcu_read_lock();
63785+ cred = __task_cred(task);
63786+
63787+ if (res == RLIMIT_NPROC &&
63788+ (cap_raised(cred->cap_effective, CAP_SYS_ADMIN) ||
63789+ cap_raised(cred->cap_effective, CAP_SYS_RESOURCE)))
63790+ goto out_rcu_unlock;
63791+ else if (res == RLIMIT_MEMLOCK &&
63792+ cap_raised(cred->cap_effective, CAP_IPC_LOCK))
63793+ goto out_rcu_unlock;
63794+ else if (res == RLIMIT_NICE && cap_raised(cred->cap_effective, CAP_SYS_NICE))
63795+ goto out_rcu_unlock;
63796+ rcu_read_unlock();
63797+
63798+ gr_log_res_ulong2_str(GR_DONT_AUDIT, GR_RESOURCE_MSG, task, wanted, restab_log[res], rlim);
63799+
63800+ return;
63801+out_rcu_unlock:
63802+ rcu_read_unlock();
63803+ return;
63804+}
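
The guard `(rlim == RLIM_INFINITY) || (gt && wanted <= rlim) || (!gt && wanted < rlim)` encodes whether the caller compares strictly against the limit: with gt set, hitting the limit exactly is still fine and only exceeding it is logged. Spot-checked stand-alone:

#include <assert.h>
#include <limits.h>

#define RLIM_INF ULONG_MAX	/* stand-in for RLIM_INFINITY */

/* returns nonzero when gr_log_resource() above would emit a log */
static int would_log(unsigned long rlim, unsigned long wanted, int gt)
{
	if (rlim == RLIM_INF || (gt && wanted <= rlim) || (!gt && wanted < rlim))
		return 0;
	return 1;
}

int main(void)
{
	assert(!would_log(RLIM_INF, 123456, 0));	/* unlimited: never log */
	assert(!would_log(100, 100, 1));	/* gt: at the limit is fine */
	assert( would_log(100, 101, 1));	/* gt: over the limit logs */
	assert(!would_log(100,  99, 0));	/* !gt: below the limit is fine */
	assert( would_log(100, 100, 0));	/* !gt: at the limit logs */
	return 0;
}
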
63805diff --git a/grsecurity/gracl_segv.c b/grsecurity/gracl_segv.c
63806new file mode 100644
63807index 0000000..3c38bfe
63808--- /dev/null
63809+++ b/grsecurity/gracl_segv.c
63810@@ -0,0 +1,305 @@
63811+#include <linux/kernel.h>
63812+#include <linux/mm.h>
63813+#include <asm/uaccess.h>
63814+#include <asm/errno.h>
63815+#include <asm/mman.h>
63816+#include <net/sock.h>
63817+#include <linux/file.h>
63818+#include <linux/fs.h>
63819+#include <linux/net.h>
63820+#include <linux/in.h>
63821+#include <linux/slab.h>
63822+#include <linux/types.h>
63823+#include <linux/sched.h>
63824+#include <linux/timer.h>
63825+#include <linux/gracl.h>
63826+#include <linux/grsecurity.h>
63827+#include <linux/grinternal.h>
63828+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
63829+#include <linux/magic.h>
63830+#include <linux/pagemap.h>
63831+#include "../fs/btrfs/async-thread.h"
63832+#include "../fs/btrfs/ctree.h"
63833+#include "../fs/btrfs/btrfs_inode.h"
63834+#endif
63835+
63836+static struct crash_uid *uid_set;
63837+static unsigned short uid_used;
63838+static DEFINE_SPINLOCK(gr_uid_lock);
63839+extern rwlock_t gr_inode_lock;
63840+extern struct acl_subject_label *
63841+ lookup_acl_subj_label(const ino_t inode, const dev_t dev,
63842+ struct acl_role_label *role);
63843+
63844+static inline dev_t __get_dev(const struct dentry *dentry)
63845+{
63846+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE)
63847+ if (dentry->d_sb->s_magic == BTRFS_SUPER_MAGIC)
63848+ return BTRFS_I(dentry->d_inode)->root->anon_dev;
63849+ else
63850+#endif
63851+ return dentry->d_sb->s_dev;
63852+}
63853+
63854+int
63855+gr_init_uidset(void)
63856+{
63857+ uid_set =
63858+ kmalloc(GR_UIDTABLE_MAX * sizeof (struct crash_uid), GFP_KERNEL);
63859+ uid_used = 0;
63860+
63861+ return uid_set ? 1 : 0;
63862+}
63863+
63864+void
63865+gr_free_uidset(void)
63866+{
63867+ if (uid_set)
63868+ kfree(uid_set);
63869+
63870+ return;
63871+}
63872+
63873+int
63874+gr_find_uid(const uid_t uid)
63875+{
63876+ struct crash_uid *tmp = uid_set;
63877+ uid_t buid;
63878+ int low = 0, high = uid_used - 1, mid;
63879+
63880+ while (high >= low) {
63881+ mid = (low + high) >> 1;
63882+ buid = tmp[mid].uid;
63883+ if (buid == uid)
63884+ return mid;
63885+ if (buid > uid)
63886+ high = mid - 1;
63887+ if (buid < uid)
63888+ low = mid + 1;
63889+ }
63890+
63891+ return -1;
63892+}
63893+
63894+static __inline__ void
63895+gr_insertsort(void)
63896+{
63897+ unsigned short i, j;
63898+ struct crash_uid index;
63899+
63900+ for (i = 1; i < uid_used; i++) {
63901+ index = uid_set[i];
63902+ j = i;
63903+ while ((j > 0) && uid_set[j - 1].uid > index.uid) {
63904+ uid_set[j] = uid_set[j - 1];
63905+ j--;
63906+ }
63907+ uid_set[j] = index;
63908+ }
63909+
63910+ return;
63911+}
63912+
63913+static __inline__ void
63914+gr_insert_uid(const kuid_t kuid, const unsigned long expires)
63915+{
63916+ int loc;
63917+ uid_t uid = GR_GLOBAL_UID(kuid);
63918+
63919+ if (uid_used == GR_UIDTABLE_MAX)
63920+ return;
63921+
63922+ loc = gr_find_uid(uid);
63923+
63924+ if (loc >= 0) {
63925+ uid_set[loc].expires = expires;
63926+ return;
63927+ }
63928+
63929+ uid_set[uid_used].uid = uid;
63930+ uid_set[uid_used].expires = expires;
63931+ uid_used++;
63932+
63933+ gr_insertsort();
63934+
63935+ return;
63936+}
63937+
63938+void
63939+gr_remove_uid(const unsigned short loc)
63940+{
63941+ unsigned short i;
63942+
63943+ for (i = loc + 1; i < uid_used; i++)
63944+ uid_set[i - 1] = uid_set[i];
63945+
63946+ uid_used--;
63947+
63948+ return;
63949+}
63950+
63951+int
63952+gr_check_crash_uid(const kuid_t kuid)
63953+{
63954+ int loc;
63955+ int ret = 0;
63956+ uid_t uid;
63957+
63958+ if (unlikely(!gr_acl_is_enabled()))
63959+ return 0;
63960+
63961+ uid = GR_GLOBAL_UID(kuid);
63962+
63963+ spin_lock(&gr_uid_lock);
63964+ loc = gr_find_uid(uid);
63965+
63966+ if (loc < 0)
63967+ goto out_unlock;
63968+
63969+ if (time_before_eq(uid_set[loc].expires, get_seconds()))
63970+ gr_remove_uid(loc);
63971+ else
63972+ ret = 1;
63973+
63974+out_unlock:
63975+ spin_unlock(&gr_uid_lock);
63976+ return ret;
63977+}
63978+
63979+static __inline__ int
63980+proc_is_setxid(const struct cred *cred)
63981+{
63982+ if (!uid_eq(cred->uid, cred->euid) || !uid_eq(cred->uid, cred->suid) ||
63983+ !uid_eq(cred->uid, cred->fsuid))
63984+ return 1;
63985+ if (!gid_eq(cred->gid, cred->egid) || !gid_eq(cred->gid, cred->sgid) ||
63986+ !gid_eq(cred->gid, cred->fsgid))
63987+ return 1;
63988+
63989+ return 0;
63990+}
63991+
63992+extern int gr_fake_force_sig(int sig, struct task_struct *t);
63993+
63994+void
63995+gr_handle_crash(struct task_struct *task, const int sig)
63996+{
63997+ struct acl_subject_label *curr;
63998+ struct task_struct *tsk, *tsk2;
63999+ const struct cred *cred;
64000+ const struct cred *cred2;
64001+
64002+ if (sig != SIGSEGV && sig != SIGKILL && sig != SIGBUS && sig != SIGILL)
64003+ return;
64004+
64005+ if (unlikely(!gr_acl_is_enabled()))
64006+ return;
64007+
64008+ curr = task->acl;
64009+
64010+ if (!(curr->resmask & (1U << GR_CRASH_RES)))
64011+ return;
64012+
64013+ if (time_before_eq(curr->expires, get_seconds())) {
64014+ curr->expires = 0;
64015+ curr->crashes = 0;
64016+ }
64017+
64018+ curr->crashes++;
64019+
64020+ if (!curr->expires)
64021+ curr->expires = get_seconds() + curr->res[GR_CRASH_RES].rlim_max;
64022+
64023+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
64024+ time_after(curr->expires, get_seconds())) {
64025+ rcu_read_lock();
64026+ cred = __task_cred(task);
64027+ if (gr_is_global_nonroot(cred->uid) && proc_is_setxid(cred)) {
64028+ gr_log_crash1(GR_DONT_AUDIT, GR_SEGVSTART_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
64029+ spin_lock(&gr_uid_lock);
64030+ gr_insert_uid(cred->uid, curr->expires);
64031+ spin_unlock(&gr_uid_lock);
64032+ curr->expires = 0;
64033+ curr->crashes = 0;
64034+ read_lock(&tasklist_lock);
64035+ do_each_thread(tsk2, tsk) {
64036+ cred2 = __task_cred(tsk);
64037+ if (tsk != task && uid_eq(cred2->uid, cred->uid))
64038+ gr_fake_force_sig(SIGKILL, tsk);
64039+ } while_each_thread(tsk2, tsk);
64040+ read_unlock(&tasklist_lock);
64041+ } else {
64042+ gr_log_crash2(GR_DONT_AUDIT, GR_SEGVNOSUID_ACL_MSG, task, curr->res[GR_CRASH_RES].rlim_max);
64043+ read_lock(&tasklist_lock);
64044+ read_lock(&grsec_exec_file_lock);
64045+ do_each_thread(tsk2, tsk) {
64046+ if (likely(tsk != task)) {
64047+ // if this thread has the same subject as the one that triggered
64048+ // RES_CRASH and it's the same binary, kill it
64049+ if (tsk->acl == task->acl && gr_is_same_file(tsk->exec_file, task->exec_file))
64050+ gr_fake_force_sig(SIGKILL, tsk);
64051+ }
64052+ } while_each_thread(tsk2, tsk);
64053+ read_unlock(&grsec_exec_file_lock);
64054+ read_unlock(&tasklist_lock);
64055+ }
64056+ rcu_read_unlock();
64057+ }
64058+
64059+ return;
64060+}
64061+
64062+int
64063+gr_check_crash_exec(const struct file *filp)
64064+{
64065+ struct acl_subject_label *curr;
64066+
64067+ if (unlikely(!gr_acl_is_enabled()))
64068+ return 0;
64069+
64070+ read_lock(&gr_inode_lock);
64071+ curr = lookup_acl_subj_label(filp->f_path.dentry->d_inode->i_ino,
64072+ __get_dev(filp->f_path.dentry),
64073+ current->role);
64074+ read_unlock(&gr_inode_lock);
64075+
64076+ if (!curr || !(curr->resmask & (1U << GR_CRASH_RES)) ||
64077+ (!curr->crashes && !curr->expires))
64078+ return 0;
64079+
64080+ if ((curr->crashes >= curr->res[GR_CRASH_RES].rlim_cur) &&
64081+ time_after(curr->expires, get_seconds()))
64082+ return 1;
64083+ else if (time_before_eq(curr->expires, get_seconds())) {
64084+ curr->crashes = 0;
64085+ curr->expires = 0;
64086+ }
64087+
64088+ return 0;
64089+}
64090+
64091+void
64092+gr_handle_alertkill(struct task_struct *task)
64093+{
64094+ struct acl_subject_label *curracl;
64095+ __u32 curr_ip;
64096+ struct task_struct *p, *p2;
64097+
64098+ if (unlikely(!gr_acl_is_enabled()))
64099+ return;
64100+
64101+ curracl = task->acl;
64102+ curr_ip = task->signal->curr_ip;
64103+
64104+ if ((curracl->mode & GR_KILLIPPROC) && curr_ip) {
64105+ read_lock(&tasklist_lock);
64106+ do_each_thread(p2, p) {
64107+ if (p->signal->curr_ip == curr_ip)
64108+ gr_fake_force_sig(SIGKILL, p);
64109+ } while_each_thread(p2, p);
64110+ read_unlock(&tasklist_lock);
64111+ } else if (curracl->mode & GR_KILLPROC)
64112+ gr_fake_force_sig(SIGKILL, task);
64113+
64114+ return;
64115+}
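
uid_set stays sorted so gr_find_uid() can binary-search it, and gr_insert_uid() restores the order with one insertion-sort pass after appending, which costs O(n) when only the last slot is out of place. The same invariant, exercised in isolation:

#include <assert.h>

#define MAX 16

static unsigned int set[MAX];
static int used;

static int find(unsigned int uid)	/* mirrors gr_find_uid() */
{
	int low = 0, high = used - 1, mid;

	while (high >= low) {
		mid = (low + high) / 2;
		if (set[mid] == uid)
			return mid;
		if (set[mid] > uid)
			high = mid - 1;
		else
			low = mid + 1;
	}
	return -1;
}

static void insert(unsigned int uid)	/* mirrors gr_insert_uid() */
{
	int j = used++;

	set[j] = uid;
	while (j > 0 && set[j - 1] > set[j]) {	/* one insertion-sort pass */
		unsigned int tmp = set[j];
		set[j] = set[j - 1];
		set[--j] = tmp;
	}
}

int main(void)
{
	insert(1000); insert(0); insert(500);
	assert(find(0) == 0 && find(500) == 1 && find(1000) == 2);
	assert(find(42) == -1);
	return 0;
}
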
64116diff --git a/grsecurity/gracl_shm.c b/grsecurity/gracl_shm.c
64117new file mode 100644
64118index 0000000..98011b0
64119--- /dev/null
64120+++ b/grsecurity/gracl_shm.c
64121@@ -0,0 +1,40 @@
64122+#include <linux/kernel.h>
64123+#include <linux/mm.h>
64124+#include <linux/sched.h>
64125+#include <linux/file.h>
64126+#include <linux/ipc.h>
64127+#include <linux/gracl.h>
64128+#include <linux/grsecurity.h>
64129+#include <linux/grinternal.h>
64130+
64131+int
64132+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
64133+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
64134+{
64135+ struct task_struct *task;
64136+
64137+ if (!gr_acl_is_enabled())
64138+ return 1;
64139+
64140+ rcu_read_lock();
64141+ read_lock(&tasklist_lock);
64142+
64143+ task = find_task_by_vpid(shm_cprid);
64144+
64145+ if (unlikely(!task))
64146+ task = find_task_by_vpid(shm_lapid);
64147+
64148+ if (unlikely(task && (time_before_eq((unsigned long)task->start_time.tv_sec, (unsigned long)shm_createtime) ||
64149+ (task_pid_nr(task) == shm_lapid)) &&
64150+ (task->acl->mode & GR_PROTSHM) &&
64151+ (task->acl != current->acl))) {
64152+ read_unlock(&tasklist_lock);
64153+ rcu_read_unlock();
64154+ gr_log_int3(GR_DONT_AUDIT, GR_SHMAT_ACL_MSG, GR_GLOBAL_UID(cuid), shm_cprid, shmid);
64155+ return 0;
64156+ }
64157+ read_unlock(&tasklist_lock);
64158+ rcu_read_unlock();
64159+
64160+ return 1;
64161+}
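
The casts to unsigned long feed time_before_eq(), the kernel's wrap-safe time comparison from <linux/jiffies.h>: it subtracts and tests the sign of the difference rather than comparing magnitudes. A simplified form of the idiom, showing why it survives counter wraparound:

#include <assert.h>

/* simplified form of the comparison in <linux/jiffies.h> */
#define time_before_eq(a, b) ((long)((a) - (b)) <= 0)

int main(void)
{
	unsigned long a = 100, b = 200;

	assert(time_before_eq(a, b));	/* 100 <= 200 */
	assert(!time_before_eq(b, a));

	/* near the wrap point: a is "just before" b even though
	   a > b numerically */
	a = ~0UL - 5;
	b = a + 10;		/* wraps around to a small value */
	assert(a > b);		/* a plain comparison gets it wrong */
	assert(time_before_eq(a, b));	/* the wrap-safe form is right */
	return 0;
}
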
64162diff --git a/grsecurity/grsec_chdir.c b/grsecurity/grsec_chdir.c
64163new file mode 100644
64164index 0000000..bc0be01
64165--- /dev/null
64166+++ b/grsecurity/grsec_chdir.c
64167@@ -0,0 +1,19 @@
64168+#include <linux/kernel.h>
64169+#include <linux/sched.h>
64170+#include <linux/fs.h>
64171+#include <linux/file.h>
64172+#include <linux/grsecurity.h>
64173+#include <linux/grinternal.h>
64174+
64175+void
64176+gr_log_chdir(const struct dentry *dentry, const struct vfsmount *mnt)
64177+{
64178+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
64179+ if ((grsec_enable_chdir && grsec_enable_group &&
64180+ in_group_p(grsec_audit_gid)) || (grsec_enable_chdir &&
64181+ !grsec_enable_group)) {
64182+ gr_log_fs_generic(GR_DO_AUDIT, GR_CHDIR_AUDIT_MSG, dentry, mnt);
64183+ }
64184+#endif
64185+ return;
64186+}
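
The audit predicate here (and the identical pattern in gr_handle_exec_args() below) spells out two cases, but it reduces to: feature on, and either group gating off or the caller in the audit group. Verified exhaustively:

#include <assert.h>

/* equivalent to: (on && group && in_group) || (on && !group) */
static int should_audit(int on, int group_gated, int in_group)
{
	return on && (!group_gated || in_group);
}

int main(void)
{
	int on, g, in;

	/* exhaustively check equivalence with the two-case form above */
	for (on = 0; on <= 1; on++)
		for (g = 0; g <= 1; g++)
			for (in = 0; in <= 1; in++)
				assert(should_audit(on, g, in) ==
				       ((on && g && in) || (on && !g)));
	return 0;
}
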
64187diff --git a/grsecurity/grsec_chroot.c b/grsecurity/grsec_chroot.c
64188new file mode 100644
64189index 0000000..bd6e105
64190--- /dev/null
64191+++ b/grsecurity/grsec_chroot.c
64192@@ -0,0 +1,370 @@
64193+#include <linux/kernel.h>
64194+#include <linux/module.h>
64195+#include <linux/sched.h>
64196+#include <linux/file.h>
64197+#include <linux/fs.h>
64198+#include <linux/mount.h>
64199+#include <linux/types.h>
64200+#include "../fs/mount.h"
64201+#include <linux/grsecurity.h>
64202+#include <linux/grinternal.h>
64203+
64204+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
64205+static int gr_init_ran;
64206+#endif
64207+
64208+void gr_set_chroot_entries(struct task_struct *task, const struct path *path)
64209+{
64210+#ifdef CONFIG_GRKERNSEC
64211+ if (task_pid_nr(task) > 1 && path->dentry != init_task.fs->root.dentry &&
64212+ path->dentry != task->nsproxy->mnt_ns->root->mnt.mnt_root
64213+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
64214+ && gr_init_ran
64215+#endif
64216+ )
64217+ task->gr_is_chrooted = 1;
64218+ else {
64219+#ifdef CONFIG_GRKERNSEC_CHROOT_INITRD
64220+ if (task_pid_nr(task) == 1 && !gr_init_ran)
64221+ gr_init_ran = 1;
64222+#endif
64223+ task->gr_is_chrooted = 0;
64224+ }
64225+
64226+ task->gr_chroot_dentry = path->dentry;
64227+#endif
64228+ return;
64229+}
64230+
64231+void gr_clear_chroot_entries(struct task_struct *task)
64232+{
64233+#ifdef CONFIG_GRKERNSEC
64234+ task->gr_is_chrooted = 0;
64235+ task->gr_chroot_dentry = NULL;
64236+#endif
64237+ return;
64238+}
64239+
64240+int
64241+gr_handle_chroot_unix(const pid_t pid)
64242+{
64243+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
64244+ struct task_struct *p;
64245+
64246+ if (unlikely(!grsec_enable_chroot_unix))
64247+ return 1;
64248+
64249+ if (likely(!proc_is_chrooted(current)))
64250+ return 1;
64251+
64252+ rcu_read_lock();
64253+ read_lock(&tasklist_lock);
64254+ p = find_task_by_vpid_unrestricted(pid);
64255+ if (unlikely(p && !have_same_root(current, p))) {
64256+ read_unlock(&tasklist_lock);
64257+ rcu_read_unlock();
64258+ gr_log_noargs(GR_DONT_AUDIT, GR_UNIX_CHROOT_MSG);
64259+ return 0;
64260+ }
64261+ read_unlock(&tasklist_lock);
64262+ rcu_read_unlock();
64263+#endif
64264+ return 1;
64265+}
64266+
64267+int
64268+gr_handle_chroot_nice(void)
64269+{
64270+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
64271+ if (grsec_enable_chroot_nice && proc_is_chrooted(current)) {
64272+ gr_log_noargs(GR_DONT_AUDIT, GR_NICE_CHROOT_MSG);
64273+ return -EPERM;
64274+ }
64275+#endif
64276+ return 0;
64277+}
64278+
64279+int
64280+gr_handle_chroot_setpriority(struct task_struct *p, const int niceval)
64281+{
64282+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
64283+ if (grsec_enable_chroot_nice && (niceval < task_nice(p))
64284+ && proc_is_chrooted(current)) {
64285+ gr_log_str_int(GR_DONT_AUDIT, GR_PRIORITY_CHROOT_MSG, p->comm, task_pid_nr(p));
64286+ return -EACCES;
64287+ }
64288+#endif
64289+ return 0;
64290+}
64291+
64292+int
64293+gr_handle_chroot_fowner(struct pid *pid, enum pid_type type)
64294+{
64295+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
64296+ struct task_struct *p;
64297+ int ret = 0;
64298+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || !pid)
64299+ return ret;
64300+
64301+ read_lock(&tasklist_lock);
64302+ do_each_pid_task(pid, type, p) {
64303+ if (!have_same_root(current, p)) {
64304+ ret = 1;
64305+ goto out;
64306+ }
64307+ } while_each_pid_task(pid, type, p);
64308+out:
64309+ read_unlock(&tasklist_lock);
64310+ return ret;
64311+#endif
64312+ return 0;
64313+}
64314+
64315+int
64316+gr_pid_is_chrooted(struct task_struct *p)
64317+{
64318+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
64319+ if (!grsec_enable_chroot_findtask || !proc_is_chrooted(current) || p == NULL)
64320+ return 0;
64321+
64322+ if ((p->exit_state & (EXIT_ZOMBIE | EXIT_DEAD)) ||
64323+ !have_same_root(current, p)) {
64324+ return 1;
64325+ }
64326+#endif
64327+ return 0;
64328+}
64329+
64330+EXPORT_SYMBOL(gr_pid_is_chrooted);
64331+
64332+#if defined(CONFIG_GRKERNSEC_CHROOT_DOUBLE) || defined(CONFIG_GRKERNSEC_CHROOT_FCHDIR)
64333+int gr_is_outside_chroot(const struct dentry *u_dentry, const struct vfsmount *u_mnt)
64334+{
64335+ struct path path, currentroot;
64336+ int ret = 0;
64337+
64338+ path.dentry = (struct dentry *)u_dentry;
64339+ path.mnt = (struct vfsmount *)u_mnt;
64340+ get_fs_root(current->fs, &currentroot);
64341+ if (path_is_under(&path, &currentroot))
64342+ ret = 1;
64343+ path_put(&currentroot);
64344+
64345+ return ret;
64346+}
64347+#endif
64348+
64349+int
64350+gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt)
64351+{
64352+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
64353+ if (!grsec_enable_chroot_fchdir)
64354+ return 1;
64355+
64356+ if (!proc_is_chrooted(current))
64357+ return 1;
64358+ else if (!gr_is_outside_chroot(u_dentry, u_mnt)) {
64359+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_FCHDIR_MSG, u_dentry, u_mnt);
64360+ return 0;
64361+ }
64362+#endif
64363+ return 1;
64364+}
64365+
64366+int
64367+gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
64368+ const time_t shm_createtime)
64369+{
64370+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
64371+ struct task_struct *p;
64372+ time_t starttime;
64373+
64374+ if (unlikely(!grsec_enable_chroot_shmat))
64375+ return 1;
64376+
64377+ if (likely(!proc_is_chrooted(current)))
64378+ return 1;
64379+
64380+ rcu_read_lock();
64381+ read_lock(&tasklist_lock);
64382+
64383+ if ((p = find_task_by_vpid_unrestricted(shm_cprid))) {
64384+ starttime = p->start_time.tv_sec;
64385+ if (time_before_eq((unsigned long)starttime, (unsigned long)shm_createtime)) {
64386+ if (have_same_root(current, p)) {
64387+ goto allow;
64388+ } else {
64389+ read_unlock(&tasklist_lock);
64390+ rcu_read_unlock();
64391+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
64392+ return 0;
64393+ }
64394+ }
64395+ /* creator exited, pid reuse, fall through to next check */
64396+ }
64397+ if ((p = find_task_by_vpid_unrestricted(shm_lapid))) {
64398+ if (unlikely(!have_same_root(current, p))) {
64399+ read_unlock(&tasklist_lock);
64400+ rcu_read_unlock();
64401+ gr_log_noargs(GR_DONT_AUDIT, GR_SHMAT_CHROOT_MSG);
64402+ return 0;
64403+ }
64404+ }
64405+
64406+allow:
64407+ read_unlock(&tasklist_lock);
64408+ rcu_read_unlock();
64409+#endif
64410+ return 1;
64411+}
64412+
64413+void
64414+gr_log_chroot_exec(const struct dentry *dentry, const struct vfsmount *mnt)
64415+{
64416+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
64417+ if (grsec_enable_chroot_execlog && proc_is_chrooted(current))
64418+ gr_log_fs_generic(GR_DO_AUDIT, GR_EXEC_CHROOT_MSG, dentry, mnt);
64419+#endif
64420+ return;
64421+}
64422+
64423+int
64424+gr_handle_chroot_mknod(const struct dentry *dentry,
64425+ const struct vfsmount *mnt, const int mode)
64426+{
64427+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
64428+ if (grsec_enable_chroot_mknod && !S_ISFIFO(mode) && !S_ISREG(mode) &&
64429+ proc_is_chrooted(current)) {
64430+ gr_log_fs_generic(GR_DONT_AUDIT, GR_MKNOD_CHROOT_MSG, dentry, mnt);
64431+ return -EPERM;
64432+ }
64433+#endif
64434+ return 0;
64435+}
64436+
64437+int
64438+gr_handle_chroot_mount(const struct dentry *dentry,
64439+ const struct vfsmount *mnt, const char *dev_name)
64440+{
64441+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
64442+ if (grsec_enable_chroot_mount && proc_is_chrooted(current)) {
64443+ gr_log_str_fs(GR_DONT_AUDIT, GR_MOUNT_CHROOT_MSG, dev_name ? dev_name : "none", dentry, mnt);
64444+ return -EPERM;
64445+ }
64446+#endif
64447+ return 0;
64448+}
64449+
64450+int
64451+gr_handle_chroot_pivot(void)
64452+{
64453+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
64454+ if (grsec_enable_chroot_pivot && proc_is_chrooted(current)) {
64455+ gr_log_noargs(GR_DONT_AUDIT, GR_PIVOT_CHROOT_MSG);
64456+ return -EPERM;
64457+ }
64458+#endif
64459+ return 0;
64460+}
64461+
64462+int
64463+gr_handle_chroot_chroot(const struct dentry *dentry, const struct vfsmount *mnt)
64464+{
64465+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
64466+ if (grsec_enable_chroot_double && proc_is_chrooted(current) &&
64467+ !gr_is_outside_chroot(dentry, mnt)) {
64468+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHROOT_CHROOT_MSG, dentry, mnt);
64469+ return -EPERM;
64470+ }
64471+#endif
64472+ return 0;
64473+}
64474+
64475+extern const char *captab_log[];
64476+extern int captab_log_entries;
64477+
64478+int
64479+gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
64480+{
64481+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
64482+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
64483+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
64484+ if (cap_raised(chroot_caps, cap)) {
64485+ if (cap_raised(cred->cap_effective, cap) && cap < captab_log_entries) {
64486+ gr_log_cap(GR_DONT_AUDIT, GR_CAP_CHROOT_MSG, task, captab_log[cap]);
64487+ }
64488+ return 0;
64489+ }
64490+ }
64491+#endif
64492+ return 1;
64493+}
64494+
64495+int
64496+gr_chroot_is_capable(const int cap)
64497+{
64498+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
64499+ return gr_task_chroot_is_capable(current, current_cred(), cap);
64500+#endif
64501+ return 1;
64502+}
64503+
64504+int
64505+gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap)
64506+{
64507+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
64508+ if (grsec_enable_chroot_caps && proc_is_chrooted(task)) {
64509+ kernel_cap_t chroot_caps = GR_CHROOT_CAPS;
64510+ if (cap_raised(chroot_caps, cap)) {
64511+ return 0;
64512+ }
64513+ }
64514+#endif
64515+ return 1;
64516+}
64517+
64518+int
64519+gr_chroot_is_capable_nolog(const int cap)
64520+{
64521+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
64522+ return gr_task_chroot_is_capable_nolog(current, cap);
64523+#endif
64524+ return 1;
64525+}
64526+
64527+int
64528+gr_handle_chroot_sysctl(const int op)
64529+{
64530+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
64531+ if (grsec_enable_chroot_sysctl && (op & MAY_WRITE) &&
64532+ proc_is_chrooted(current))
64533+ return -EACCES;
64534+#endif
64535+ return 0;
64536+}
64537+
64538+void
64539+gr_handle_chroot_chdir(const struct path *path)
64540+{
64541+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
64542+ if (grsec_enable_chroot_chdir)
64543+ set_fs_pwd(current->fs, path);
64544+#endif
64545+ return;
64546+}
64547+
64548+int
64549+gr_handle_chroot_chmod(const struct dentry *dentry,
64550+ const struct vfsmount *mnt, const int mode)
64551+{
64552+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
64553+ /* allow chmod +s on directories, but not files */
64554+ if (grsec_enable_chroot_chmod && !S_ISDIR(dentry->d_inode->i_mode) &&
64555+ ((mode & S_ISUID) || ((mode & (S_ISGID | S_IXGRP)) == (S_ISGID | S_IXGRP))) &&
64556+ proc_is_chrooted(current)) {
64557+ gr_log_fs_generic(GR_DONT_AUDIT, GR_CHMOD_CHROOT_MSG, dentry, mnt);
64558+ return -EPERM;
64559+ }
64560+#endif
64561+ return 0;
64562+}
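
gr_is_outside_chroot() relies on the kernel's path_is_under(), which walks dentries under proper locking. User space has no exact equivalent; a rough, race-prone analogue using realpath(3) and a prefix check conveys the idea:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* crude check: is 'path' lexically under 'root' after resolution?
   (racy and symlink-sensitive compared to path_is_under()) */
static int is_under(const char *path, const char *root)
{
	char rpath[PATH_MAX], rroot[PATH_MAX];
	size_t n;

	if (!realpath(path, rpath) || !realpath(root, rroot))
		return 0;
	if (strcmp(rroot, "/") == 0)
		return 1;	/* everything is under "/" */
	n = strlen(rroot);
	return strncmp(rpath, rroot, n) == 0 &&
	       (rpath[n] == '/' || rpath[n] == '\0');
}

int main(int argc, char **argv)
{
	if (argc != 3) {
		fprintf(stderr, "usage: %s <path> <root>\n", argv[0]);
		return 2;
	}
	printf("%s\n", is_under(argv[1], argv[2]) ? "inside" : "outside");
	return 0;
}
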
64563diff --git a/grsecurity/grsec_disabled.c b/grsecurity/grsec_disabled.c
64564new file mode 100644
64565index 0000000..ce65ceb
64566--- /dev/null
64567+++ b/grsecurity/grsec_disabled.c
64568@@ -0,0 +1,434 @@
64569+#include <linux/kernel.h>
64570+#include <linux/module.h>
64571+#include <linux/sched.h>
64572+#include <linux/file.h>
64573+#include <linux/fs.h>
64574+#include <linux/kdev_t.h>
64575+#include <linux/net.h>
64576+#include <linux/in.h>
64577+#include <linux/ip.h>
64578+#include <linux/skbuff.h>
64579+#include <linux/sysctl.h>
64580+
64581+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
64582+void
64583+pax_set_initial_flags(struct linux_binprm *bprm)
64584+{
64585+ return;
64586+}
64587+#endif
64588+
64589+#ifdef CONFIG_SYSCTL
64590+__u32
64591+gr_handle_sysctl(const struct ctl_table * table, const int op)
64592+{
64593+ return 0;
64594+}
64595+#endif
64596+
64597+#ifdef CONFIG_TASKSTATS
64598+int gr_is_taskstats_denied(int pid)
64599+{
64600+ return 0;
64601+}
64602+#endif
64603+
64604+int
64605+gr_acl_is_enabled(void)
64606+{
64607+ return 0;
64608+}
64609+
64610+void
64611+gr_handle_proc_create(const struct dentry *dentry, const struct inode *inode)
64612+{
64613+ return;
64614+}
64615+
64616+int
64617+gr_handle_rawio(const struct inode *inode)
64618+{
64619+ return 0;
64620+}
64621+
64622+void
64623+gr_acl_handle_psacct(struct task_struct *task, const long code)
64624+{
64625+ return;
64626+}
64627+
64628+int
64629+gr_handle_ptrace(struct task_struct *task, const long request)
64630+{
64631+ return 0;
64632+}
64633+
64634+int
64635+gr_handle_proc_ptrace(struct task_struct *task)
64636+{
64637+ return 0;
64638+}
64639+
64640+int
64641+gr_set_acls(const int type)
64642+{
64643+ return 0;
64644+}
64645+
64646+int
64647+gr_check_hidden_task(const struct task_struct *tsk)
64648+{
64649+ return 0;
64650+}
64651+
64652+int
64653+gr_check_protected_task(const struct task_struct *task)
64654+{
64655+ return 0;
64656+}
64657+
64658+int
64659+gr_check_protected_task_fowner(struct pid *pid, enum pid_type type)
64660+{
64661+ return 0;
64662+}
64663+
64664+void
64665+gr_copy_label(struct task_struct *tsk)
64666+{
64667+ return;
64668+}
64669+
64670+void
64671+gr_set_pax_flags(struct task_struct *task)
64672+{
64673+ return;
64674+}
64675+
64676+int
64677+gr_set_proc_label(const struct dentry *dentry, const struct vfsmount *mnt,
64678+ const int unsafe_share)
64679+{
64680+ return 0;
64681+}
64682+
64683+void
64684+gr_handle_delete(const ino_t ino, const dev_t dev)
64685+{
64686+ return;
64687+}
64688+
64689+void
64690+gr_handle_create(const struct dentry *dentry, const struct vfsmount *mnt)
64691+{
64692+ return;
64693+}
64694+
64695+void
64696+gr_handle_crash(struct task_struct *task, const int sig)
64697+{
64698+ return;
64699+}
64700+
64701+int
64702+gr_check_crash_exec(const struct file *filp)
64703+{
64704+ return 0;
64705+}
64706+
64707+int
64708+gr_check_crash_uid(const kuid_t uid)
64709+{
64710+ return 0;
64711+}
64712+
64713+void
64714+gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
64715+ struct dentry *old_dentry,
64716+ struct dentry *new_dentry,
64717+ struct vfsmount *mnt, const __u8 replace)
64718+{
64719+ return;
64720+}
64721+
64722+int
64723+gr_search_socket(const int family, const int type, const int protocol)
64724+{
64725+ return 1;
64726+}
64727+
64728+int
64729+gr_search_connectbind(const int mode, const struct socket *sock,
64730+ const struct sockaddr_in *addr)
64731+{
64732+ return 0;
64733+}
64734+
64735+void
64736+gr_handle_alertkill(struct task_struct *task)
64737+{
64738+ return;
64739+}
64740+
64741+__u32
64742+gr_acl_handle_execve(const struct dentry * dentry, const struct vfsmount * mnt)
64743+{
64744+ return 1;
64745+}
64746+
64747+__u32
64748+gr_acl_handle_hidden_file(const struct dentry * dentry,
64749+ const struct vfsmount * mnt)
64750+{
64751+ return 1;
64752+}
64753+
64754+__u32
64755+gr_acl_handle_open(const struct dentry * dentry, const struct vfsmount * mnt,
64756+ int acc_mode)
64757+{
64758+ return 1;
64759+}
64760+
64761+__u32
64762+gr_acl_handle_rmdir(const struct dentry * dentry, const struct vfsmount * mnt)
64763+{
64764+ return 1;
64765+}
64766+
64767+__u32
64768+gr_acl_handle_unlink(const struct dentry * dentry, const struct vfsmount * mnt)
64769+{
64770+ return 1;
64771+}
64772+
64773+int
64774+gr_acl_handle_mmap(const struct file *file, const unsigned long prot,
64775+ unsigned int *vm_flags)
64776+{
64777+ return 1;
64778+}
64779+
64780+__u32
64781+gr_acl_handle_truncate(const struct dentry * dentry,
64782+ const struct vfsmount * mnt)
64783+{
64784+ return 1;
64785+}
64786+
64787+__u32
64788+gr_acl_handle_utime(const struct dentry * dentry, const struct vfsmount * mnt)
64789+{
64790+ return 1;
64791+}
64792+
64793+__u32
64794+gr_acl_handle_access(const struct dentry * dentry,
64795+ const struct vfsmount * mnt, const int fmode)
64796+{
64797+ return 1;
64798+}
64799+
64800+__u32
64801+gr_acl_handle_chmod(const struct dentry * dentry, const struct vfsmount * mnt,
64802+ umode_t *mode)
64803+{
64804+ return 1;
64805+}
64806+
64807+__u32
64808+gr_acl_handle_chown(const struct dentry * dentry, const struct vfsmount * mnt)
64809+{
64810+ return 1;
64811+}
64812+
64813+__u32
64814+gr_acl_handle_setxattr(const struct dentry * dentry, const struct vfsmount * mnt)
64815+{
64816+ return 1;
64817+}
64818+
64819+void
64820+grsecurity_init(void)
64821+{
64822+ return;
64823+}
64824+
64825+umode_t gr_acl_umask(void)
64826+{
64827+ return 0;
64828+}
64829+
64830+__u32
64831+gr_acl_handle_mknod(const struct dentry * new_dentry,
64832+ const struct dentry * parent_dentry,
64833+ const struct vfsmount * parent_mnt,
64834+ const int mode)
64835+{
64836+ return 1;
64837+}
64838+
64839+__u32
64840+gr_acl_handle_mkdir(const struct dentry * new_dentry,
64841+ const struct dentry * parent_dentry,
64842+ const struct vfsmount * parent_mnt)
64843+{
64844+ return 1;
64845+}
64846+
64847+__u32
64848+gr_acl_handle_symlink(const struct dentry * new_dentry,
64849+ const struct dentry * parent_dentry,
64850+ const struct vfsmount * parent_mnt, const struct filename *from)
64851+{
64852+ return 1;
64853+}
64854+
64855+__u32
64856+gr_acl_handle_link(const struct dentry * new_dentry,
64857+ const struct dentry * parent_dentry,
64858+ const struct vfsmount * parent_mnt,
64859+ const struct dentry * old_dentry,
64860+ const struct vfsmount * old_mnt, const struct filename *to)
64861+{
64862+ return 1;
64863+}
64864+
64865+int
64866+gr_acl_handle_rename(const struct dentry *new_dentry,
64867+ const struct dentry *parent_dentry,
64868+ const struct vfsmount *parent_mnt,
64869+ const struct dentry *old_dentry,
64870+ const struct inode *old_parent_inode,
64871+ const struct vfsmount *old_mnt, const struct filename *newname)
64872+{
64873+ return 0;
64874+}
64875+
64876+int
64877+gr_acl_handle_filldir(const struct file *file, const char *name,
64878+ const int namelen, const ino_t ino)
64879+{
64880+ return 1;
64881+}
64882+
64883+int
64884+gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
64885+ const time_t shm_createtime, const kuid_t cuid, const int shmid)
64886+{
64887+ return 1;
64888+}
64889+
64890+int
64891+gr_search_bind(const struct socket *sock, const struct sockaddr_in *addr)
64892+{
64893+ return 0;
64894+}
64895+
64896+int
64897+gr_search_accept(const struct socket *sock)
64898+{
64899+ return 0;
64900+}
64901+
64902+int
64903+gr_search_listen(const struct socket *sock)
64904+{
64905+ return 0;
64906+}
64907+
64908+int
64909+gr_search_connect(const struct socket *sock, const struct sockaddr_in *addr)
64910+{
64911+ return 0;
64912+}
64913+
64914+__u32
64915+gr_acl_handle_unix(const struct dentry * dentry, const struct vfsmount * mnt)
64916+{
64917+ return 1;
64918+}
64919+
64920+__u32
64921+gr_acl_handle_creat(const struct dentry * dentry,
64922+ const struct dentry * p_dentry,
64923+ const struct vfsmount * p_mnt, int open_flags, int acc_mode,
64924+ const int imode)
64925+{
64926+ return 1;
64927+}
64928+
64929+void
64930+gr_acl_handle_exit(void)
64931+{
64932+ return;
64933+}
64934+
64935+int
64936+gr_acl_handle_mprotect(const struct file *file, const unsigned long prot)
64937+{
64938+ return 1;
64939+}
64940+
64941+void
64942+gr_set_role_label(const kuid_t uid, const kgid_t gid)
64943+{
64944+ return;
64945+}
64946+
64947+int
64948+gr_acl_handle_procpidmem(const struct task_struct *task)
64949+{
64950+ return 0;
64951+}
64952+
64953+int
64954+gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb)
64955+{
64956+ return 0;
64957+}
64958+
64959+int
64960+gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr)
64961+{
64962+ return 0;
64963+}
64964+
64965+void
64966+gr_set_kernel_label(struct task_struct *task)
64967+{
64968+ return;
64969+}
64970+
64971+int
64972+gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs)
64973+{
64974+ return 0;
64975+}
64976+
64977+int
64978+gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs)
64979+{
64980+ return 0;
64981+}
64982+
64983+int gr_acl_enable_at_secure(void)
64984+{
64985+ return 0;
64986+}
64987+
64988+dev_t gr_get_dev_from_dentry(struct dentry *dentry)
64989+{
64990+ return dentry->d_sb->s_dev;
64991+}
64992+
64993+void gr_put_exec_file(struct task_struct *task)
64994+{
64995+ return;
64996+}
64997+
64998+EXPORT_SYMBOL(gr_set_kernel_label);
64999+#ifdef CONFIG_SECURITY
65000+EXPORT_SYMBOL(gr_check_user_change);
65001+EXPORT_SYMBOL(gr_check_group_change);
65002+#endif
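
grsec_disabled.c is the out-of-line form of a common kernel idiom: every hook keeps a single signature, and the build links either the real implementation or a permissive no-op, so call sites never carry #ifdefs. The header-side variant of the same idea, with a hypothetical feature name:

/* hypothetical feature.h-style stub idiom, shown in one file */
#ifdef CONFIG_MYFEATURE
int myfeature_check_access(int object_id);	/* real version elsewhere */
#else
static inline int myfeature_check_access(int object_id)
{
	(void)object_id;
	return 0;	/* permissive default, like the stubs above */
}
#endif

int main(void)
{
	/* callers never need #ifdef CONFIG_MYFEATURE */
	return myfeature_check_access(42);
}

Keeping the stubs in a real translation unit, as grsecurity does here, additionally lets them be exported (see the EXPORT_SYMBOL lines above) and taken by address.
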
65003diff --git a/grsecurity/grsec_exec.c b/grsecurity/grsec_exec.c
65004new file mode 100644
65005index 0000000..387032b
65006--- /dev/null
65007+++ b/grsecurity/grsec_exec.c
65008@@ -0,0 +1,187 @@
65009+#include <linux/kernel.h>
65010+#include <linux/sched.h>
65011+#include <linux/file.h>
65012+#include <linux/binfmts.h>
65013+#include <linux/fs.h>
65014+#include <linux/types.h>
65015+#include <linux/grdefs.h>
65016+#include <linux/grsecurity.h>
65017+#include <linux/grinternal.h>
65018+#include <linux/capability.h>
65019+#include <linux/module.h>
65020+#include <linux/compat.h>
65021+
65022+#include <asm/uaccess.h>
65023+
65024+#ifdef CONFIG_GRKERNSEC_EXECLOG
65025+static char gr_exec_arg_buf[132];
65026+static DEFINE_MUTEX(gr_exec_arg_mutex);
65027+#endif
65028+
65029+struct user_arg_ptr {
65030+#ifdef CONFIG_COMPAT
65031+ bool is_compat;
65032+#endif
65033+ union {
65034+ const char __user *const __user *native;
65035+#ifdef CONFIG_COMPAT
65036+ const compat_uptr_t __user *compat;
65037+#endif
65038+ } ptr;
65039+};
65040+
65041+extern const char __user *get_user_arg_ptr(struct user_arg_ptr argv, int nr);
65042+
65043+void
65044+gr_handle_exec_args(struct linux_binprm *bprm, struct user_arg_ptr argv)
65045+{
65046+#ifdef CONFIG_GRKERNSEC_EXECLOG
65047+ char *grarg = gr_exec_arg_buf;
65048+ unsigned int i, x, execlen = 0;
65049+ char c;
65050+
65051+ if (!((grsec_enable_execlog && grsec_enable_group &&
65052+ in_group_p(grsec_audit_gid))
65053+ || (grsec_enable_execlog && !grsec_enable_group)))
65054+ return;
65055+
65056+ mutex_lock(&gr_exec_arg_mutex);
65057+ memset(grarg, 0, sizeof(gr_exec_arg_buf));
65058+
65059+ for (i = 0; i < bprm->argc && execlen < 128; i++) {
65060+ const char __user *p;
65061+ unsigned int len;
65062+
65063+ p = get_user_arg_ptr(argv, i);
65064+ if (IS_ERR(p))
65065+ goto log;
65066+
65067+ len = strnlen_user(p, 128 - execlen);
65068+ if (len > 128 - execlen)
65069+ len = 128 - execlen;
65070+ else if (len > 0)
65071+ len--;
65072+ if (copy_from_user(grarg + execlen, p, len))
65073+ goto log;
65074+
65075+ /* rewrite unprintable characters */
65076+ for (x = 0; x < len; x++) {
65077+ c = *(grarg + execlen + x);
65078+ if (c < 32 || c > 126)
65079+ *(grarg + execlen + x) = ' ';
65080+ }
65081+
65082+ execlen += len;
65083+ *(grarg + execlen) = ' ';
65084+ *(grarg + execlen + 1) = '\0';
65085+ execlen++;
65086+ }
65087+
65088+ log:
65089+ gr_log_fs_str(GR_DO_AUDIT, GR_EXEC_AUDIT_MSG, bprm->file->f_path.dentry,
65090+ bprm->file->f_path.mnt, grarg);
65091+ mutex_unlock(&gr_exec_arg_mutex);
65092+#endif
65093+ return;
65094+}
65095+
65096+#ifdef CONFIG_GRKERNSEC
65097+extern int gr_acl_is_capable(const int cap);
65098+extern int gr_acl_is_capable_nolog(const int cap);
65099+extern int gr_task_acl_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
65100+extern int gr_task_acl_is_capable_nolog(const struct task_struct *task, const int cap);
65101+extern int gr_chroot_is_capable(const int cap);
65102+extern int gr_chroot_is_capable_nolog(const int cap);
65103+extern int gr_task_chroot_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
65104+extern int gr_task_chroot_is_capable_nolog(const struct task_struct *task, const int cap);
65105+#endif
65106+
65107+const char *captab_log[] = {
65108+ "CAP_CHOWN",
65109+ "CAP_DAC_OVERRIDE",
65110+ "CAP_DAC_READ_SEARCH",
65111+ "CAP_FOWNER",
65112+ "CAP_FSETID",
65113+ "CAP_KILL",
65114+ "CAP_SETGID",
65115+ "CAP_SETUID",
65116+ "CAP_SETPCAP",
65117+ "CAP_LINUX_IMMUTABLE",
65118+ "CAP_NET_BIND_SERVICE",
65119+ "CAP_NET_BROADCAST",
65120+ "CAP_NET_ADMIN",
65121+ "CAP_NET_RAW",
65122+ "CAP_IPC_LOCK",
65123+ "CAP_IPC_OWNER",
65124+ "CAP_SYS_MODULE",
65125+ "CAP_SYS_RAWIO",
65126+ "CAP_SYS_CHROOT",
65127+ "CAP_SYS_PTRACE",
65128+ "CAP_SYS_PACCT",
65129+ "CAP_SYS_ADMIN",
65130+ "CAP_SYS_BOOT",
65131+ "CAP_SYS_NICE",
65132+ "CAP_SYS_RESOURCE",
65133+ "CAP_SYS_TIME",
65134+ "CAP_SYS_TTY_CONFIG",
65135+ "CAP_MKNOD",
65136+ "CAP_LEASE",
65137+ "CAP_AUDIT_WRITE",
65138+ "CAP_AUDIT_CONTROL",
65139+ "CAP_SETFCAP",
65140+ "CAP_MAC_OVERRIDE",
65141+ "CAP_MAC_ADMIN",
65142+ "CAP_SYSLOG",
65143+ "CAP_WAKE_ALARM"
65144+};
65145+
65146+int captab_log_entries = sizeof(captab_log)/sizeof(captab_log[0]);
65147+
65148+int gr_is_capable(const int cap)
65149+{
65150+#ifdef CONFIG_GRKERNSEC
65151+ if (gr_acl_is_capable(cap) && gr_chroot_is_capable(cap))
65152+ return 1;
65153+ return 0;
65154+#else
65155+ return 1;
65156+#endif
65157+}
65158+
65159+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap)
65160+{
65161+#ifdef CONFIG_GRKERNSEC
65162+ if (gr_task_acl_is_capable(task, cred, cap) && gr_task_chroot_is_capable(task, cred, cap))
65163+ return 1;
65164+ return 0;
65165+#else
65166+ return 1;
65167+#endif
65168+}
65169+
65170+int gr_is_capable_nolog(const int cap)
65171+{
65172+#ifdef CONFIG_GRKERNSEC
65173+ if (gr_acl_is_capable_nolog(cap) && gr_chroot_is_capable_nolog(cap))
65174+ return 1;
65175+ return 0;
65176+#else
65177+ return 1;
65178+#endif
65179+}
65180+
65181+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap)
65182+{
65183+#ifdef CONFIG_GRKERNSEC
65184+ if (gr_task_acl_is_capable_nolog(task, cap) && gr_task_chroot_is_capable_nolog(task, cap))
65185+ return 1;
65186+ return 0;
65187+#else
65188+ return 1;
65189+#endif
65190+}
65191+
65192+EXPORT_SYMBOL(gr_is_capable);
65193+EXPORT_SYMBOL(gr_is_capable_nolog);
65194+EXPORT_SYMBOL(gr_task_is_capable);
65195+EXPORT_SYMBOL(gr_task_is_capable_nolog);
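
The exec logger above caps each line at 128 bytes of arguments and rewrites every byte outside printable ASCII to a space before the string reaches the kernel log. That sanitizing loop, lifted into a stand-alone helper:

#include <assert.h>
#include <string.h>

/* replace unprintable bytes (outside 32..126) with ' ', as above */
static void sanitize(char *s, unsigned int len)
{
	unsigned int x;

	for (x = 0; x < len; x++)
		if (s[x] < 32 || s[x] > 126)
			s[x] = ' ';
}

int main(void)
{
	char arg[] = "ls\t-la\x01/tmp";

	sanitize(arg, strlen(arg));
	assert(strcmp(arg, "ls -la /tmp") == 0);
	return 0;
}
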
65196diff --git a/grsecurity/grsec_fifo.c b/grsecurity/grsec_fifo.c
65197new file mode 100644
65198index 0000000..06cc6ea
65199--- /dev/null
65200+++ b/grsecurity/grsec_fifo.c
65201@@ -0,0 +1,24 @@
65202+#include <linux/kernel.h>
65203+#include <linux/sched.h>
65204+#include <linux/fs.h>
65205+#include <linux/file.h>
65206+#include <linux/grinternal.h>
65207+
65208+int
65209+gr_handle_fifo(const struct dentry *dentry, const struct vfsmount *mnt,
65210+ const struct dentry *dir, const int flag, const int acc_mode)
65211+{
65212+#ifdef CONFIG_GRKERNSEC_FIFO
65213+ const struct cred *cred = current_cred();
65214+
65215+ if (grsec_enable_fifo && S_ISFIFO(dentry->d_inode->i_mode) &&
65216+ !(flag & O_EXCL) && (dir->d_inode->i_mode & S_ISVTX) &&
65217+ !uid_eq(dentry->d_inode->i_uid, dir->d_inode->i_uid) &&
65218+ !uid_eq(cred->fsuid, dentry->d_inode->i_uid)) {
65219+ if (!inode_permission(dentry->d_inode, acc_mode))
65220+ gr_log_fs_int2(GR_DONT_AUDIT, GR_FIFO_MSG, dentry, mnt, GR_GLOBAL_UID(dentry->d_inode->i_uid), GR_GLOBAL_GID(dentry->d_inode->i_gid));
65221+ return -EACCES;
65222+ }
65223+#endif
65224+ return 0;
65225+}
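
gr_handle_fifo() targets the classic /tmp spoofing setup: a FIFO planted in a sticky, other-owned directory that neither the directory owner nor the opener owns. The ownership portion of the check as a pure function (the O_EXCL escape and the inode_permission() call are elided):

#include <assert.h>
#include <sys/stat.h>

/* mirrors the ownership/sticky-bit portion of gr_handle_fifo() */
static int fifo_denied(uid_t fifo_uid, uid_t dir_uid, mode_t dir_mode,
		       uid_t fsuid)
{
	return (dir_mode & S_ISVTX) &&
	       fifo_uid != dir_uid &&	/* FIFO not owned by dir owner */
	       fsuid != fifo_uid;	/* ...nor by the opener */
}

int main(void)
{
	/* attacker (uid 1001) plants a FIFO in sticky /tmp (root-owned);
	   victim (uid 1000) opens it: denied */
	assert(fifo_denied(1001, 0, 01777, 1000));
	/* opening your own FIFO is fine */
	assert(!fifo_denied(1000, 0, 01777, 1000));
	/* non-sticky directory: not covered by this check */
	assert(!fifo_denied(1001, 0, 0755, 1000));
	return 0;
}
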
65226diff --git a/grsecurity/grsec_fork.c b/grsecurity/grsec_fork.c
65227new file mode 100644
65228index 0000000..8ca18bf
65229--- /dev/null
65230+++ b/grsecurity/grsec_fork.c
65231@@ -0,0 +1,23 @@
65232+#include <linux/kernel.h>
65233+#include <linux/sched.h>
65234+#include <linux/grsecurity.h>
65235+#include <linux/grinternal.h>
65236+#include <linux/errno.h>
65237+
65238+void
65239+gr_log_forkfail(const int retval)
65240+{
65241+#ifdef CONFIG_GRKERNSEC_FORKFAIL
65242+ if (grsec_enable_forkfail && (retval == -EAGAIN || retval == -ENOMEM)) {
65243+ switch (retval) {
65244+ case -EAGAIN:
65245+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "EAGAIN");
65246+ break;
65247+ case -ENOMEM:
65248+ gr_log_str(GR_DONT_AUDIT, GR_FAILFORK_MSG, "ENOMEM");
65249+ break;
65250+ }
65251+ }
65252+#endif
65253+ return;
65254+}
65255diff --git a/grsecurity/grsec_init.c b/grsecurity/grsec_init.c
65256new file mode 100644
65257index 0000000..a862e9f
65258--- /dev/null
65259+++ b/grsecurity/grsec_init.c
65260@@ -0,0 +1,283 @@
65261+#include <linux/kernel.h>
65262+#include <linux/sched.h>
65263+#include <linux/mm.h>
65264+#include <linux/gracl.h>
65265+#include <linux/slab.h>
65266+#include <linux/vmalloc.h>
65267+#include <linux/percpu.h>
65268+#include <linux/module.h>
65269+
65270+int grsec_enable_ptrace_readexec;
65271+int grsec_enable_setxid;
65272+int grsec_enable_symlinkown;
65273+kgid_t grsec_symlinkown_gid;
65274+int grsec_enable_brute;
65275+int grsec_enable_link;
65276+int grsec_enable_dmesg;
65277+int grsec_enable_harden_ptrace;
65278+int grsec_enable_fifo;
65279+int grsec_enable_execlog;
65280+int grsec_enable_signal;
65281+int grsec_enable_forkfail;
65282+int grsec_enable_audit_ptrace;
65283+int grsec_enable_time;
65284+int grsec_enable_audit_textrel;
65285+int grsec_enable_group;
65286+kgid_t grsec_audit_gid;
65287+int grsec_enable_chdir;
65288+int grsec_enable_mount;
65289+int grsec_enable_rofs;
65290+int grsec_enable_chroot_findtask;
65291+int grsec_enable_chroot_mount;
65292+int grsec_enable_chroot_shmat;
65293+int grsec_enable_chroot_fchdir;
65294+int grsec_enable_chroot_double;
65295+int grsec_enable_chroot_pivot;
65296+int grsec_enable_chroot_chdir;
65297+int grsec_enable_chroot_chmod;
65298+int grsec_enable_chroot_mknod;
65299+int grsec_enable_chroot_nice;
65300+int grsec_enable_chroot_execlog;
65301+int grsec_enable_chroot_caps;
65302+int grsec_enable_chroot_sysctl;
65303+int grsec_enable_chroot_unix;
65304+int grsec_enable_tpe;
65305+kgid_t grsec_tpe_gid;
65306+int grsec_enable_blackhole;
65307+#ifdef CONFIG_IPV6_MODULE
65308+EXPORT_SYMBOL(grsec_enable_blackhole);
65309+#endif
65310+int grsec_lastack_retries;
65311+int grsec_enable_tpe_all;
65312+int grsec_enable_tpe_invert;
65313+int grsec_enable_socket_all;
65314+kgid_t grsec_socket_all_gid;
65315+int grsec_enable_socket_client;
65316+kgid_t grsec_socket_client_gid;
65317+int grsec_enable_socket_server;
65318+kgid_t grsec_socket_server_gid;
65319+int grsec_resource_logging;
65320+int grsec_disable_privio;
65321+int grsec_enable_log_rwxmaps;
65322+int grsec_lock;
65323+
65324+DEFINE_SPINLOCK(grsec_alert_lock);
65325+unsigned long grsec_alert_wtime = 0;
65326+unsigned long grsec_alert_fyet = 0;
65327+
65328+DEFINE_SPINLOCK(grsec_audit_lock);
65329+
65330+DEFINE_RWLOCK(grsec_exec_file_lock);
65331+
65332+char *gr_shared_page[4];
65333+
65334+char *gr_alert_log_fmt;
65335+char *gr_audit_log_fmt;
65336+char *gr_alert_log_buf;
65337+char *gr_audit_log_buf;
65338+
65339+extern struct gr_arg *gr_usermode;
65340+extern unsigned char *gr_system_salt;
65341+extern unsigned char *gr_system_sum;
65342+
65343+void __init
65344+grsecurity_init(void)
65345+{
65346+ int j;
65347+ /* create the per-cpu shared pages */
65348+
65349+#ifdef CONFIG_X86
65350+ memset((char *)(0x41a + PAGE_OFFSET), 0, 36);
65351+#endif
65352+
65353+ for (j = 0; j < 4; j++) {
65354+ gr_shared_page[j] = (char *)__alloc_percpu(PAGE_SIZE, __alignof__(unsigned long long));
65355+ if (gr_shared_page[j] == NULL) {
65356+ panic("Unable to allocate grsecurity shared page");
65357+ return;
65358+ }
65359+ }
65360+
65361+ /* allocate log buffers */
65362+ gr_alert_log_fmt = kmalloc(512, GFP_KERNEL);
65363+ if (!gr_alert_log_fmt) {
65364+ panic("Unable to allocate grsecurity alert log format buffer");
65365+ return;
65366+ }
65367+ gr_audit_log_fmt = kmalloc(512, GFP_KERNEL);
65368+ if (!gr_audit_log_fmt) {
65369+ panic("Unable to allocate grsecurity audit log format buffer");
65370+ return;
65371+ }
65372+ gr_alert_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
65373+ if (!gr_alert_log_buf) {
65374+ panic("Unable to allocate grsecurity alert log buffer");
65375+ return;
65376+ }
65377+ gr_audit_log_buf = (char *) get_zeroed_page(GFP_KERNEL);
65378+ if (!gr_audit_log_buf) {
65379+ panic("Unable to allocate grsecurity audit log buffer");
65380+ return;
65381+ }
65382+
65383+ /* allocate memory for authentication structure */
65384+ gr_usermode = kmalloc(sizeof(struct gr_arg), GFP_KERNEL);
65385+ gr_system_salt = kmalloc(GR_SALT_LEN, GFP_KERNEL);
65386+ gr_system_sum = kmalloc(GR_SHA_LEN, GFP_KERNEL);
65387+
65388+ if (!gr_usermode || !gr_system_salt || !gr_system_sum) {
65389+ panic("Unable to allocate grsecurity authentication structure");
65390+ return;
65391+ }
65392+
65393+
65394+#ifdef CONFIG_GRKERNSEC_IO
65395+#if !defined(CONFIG_GRKERNSEC_SYSCTL_DISTRO)
65396+ grsec_disable_privio = 1;
65397+#elif defined(CONFIG_GRKERNSEC_SYSCTL_ON)
65398+ grsec_disable_privio = 1;
65399+#else
65400+ grsec_disable_privio = 0;
65401+#endif
65402+#endif
65403+
65404+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
65405+ /* for backward compatibility, tpe_invert always defaults to on if
65406+ enabled in the kernel
65407+ */
65408+ grsec_enable_tpe_invert = 1;
65409+#endif
65410+
65411+#if !defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_SYSCTL_ON)
65412+#ifndef CONFIG_GRKERNSEC_SYSCTL
65413+ grsec_lock = 1;
65414+#endif
65415+
65416+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
65417+ grsec_enable_audit_textrel = 1;
65418+#endif
65419+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
65420+ grsec_enable_log_rwxmaps = 1;
65421+#endif
65422+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
65423+ grsec_enable_group = 1;
65424+ grsec_audit_gid = KGIDT_INIT(CONFIG_GRKERNSEC_AUDIT_GID);
65425+#endif
65426+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
65427+ grsec_enable_ptrace_readexec = 1;
65428+#endif
65429+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
65430+ grsec_enable_chdir = 1;
65431+#endif
65432+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
65433+ grsec_enable_harden_ptrace = 1;
65434+#endif
65435+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
65436+ grsec_enable_mount = 1;
65437+#endif
65438+#ifdef CONFIG_GRKERNSEC_LINK
65439+ grsec_enable_link = 1;
65440+#endif
65441+#ifdef CONFIG_GRKERNSEC_BRUTE
65442+ grsec_enable_brute = 1;
65443+#endif
65444+#ifdef CONFIG_GRKERNSEC_DMESG
65445+ grsec_enable_dmesg = 1;
65446+#endif
65447+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
65448+ grsec_enable_blackhole = 1;
65449+ grsec_lastack_retries = 4;
65450+#endif
65451+#ifdef CONFIG_GRKERNSEC_FIFO
65452+ grsec_enable_fifo = 1;
65453+#endif
65454+#ifdef CONFIG_GRKERNSEC_EXECLOG
65455+ grsec_enable_execlog = 1;
65456+#endif
65457+#ifdef CONFIG_GRKERNSEC_SETXID
65458+ grsec_enable_setxid = 1;
65459+#endif
65460+#ifdef CONFIG_GRKERNSEC_SIGNAL
65461+ grsec_enable_signal = 1;
65462+#endif
65463+#ifdef CONFIG_GRKERNSEC_FORKFAIL
65464+ grsec_enable_forkfail = 1;
65465+#endif
65466+#ifdef CONFIG_GRKERNSEC_TIME
65467+ grsec_enable_time = 1;
65468+#endif
65469+#ifdef CONFIG_GRKERNSEC_RESLOG
65470+ grsec_resource_logging = 1;
65471+#endif
65472+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
65473+ grsec_enable_chroot_findtask = 1;
65474+#endif
65475+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
65476+ grsec_enable_chroot_unix = 1;
65477+#endif
65478+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
65479+ grsec_enable_chroot_mount = 1;
65480+#endif
65481+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
65482+ grsec_enable_chroot_fchdir = 1;
65483+#endif
65484+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
65485+ grsec_enable_chroot_shmat = 1;
65486+#endif
65487+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
65488+ grsec_enable_audit_ptrace = 1;
65489+#endif
65490+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
65491+ grsec_enable_chroot_double = 1;
65492+#endif
65493+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
65494+ grsec_enable_chroot_pivot = 1;
65495+#endif
65496+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
65497+ grsec_enable_chroot_chdir = 1;
65498+#endif
65499+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
65500+ grsec_enable_chroot_chmod = 1;
65501+#endif
65502+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
65503+ grsec_enable_chroot_mknod = 1;
65504+#endif
65505+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
65506+ grsec_enable_chroot_nice = 1;
65507+#endif
65508+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
65509+ grsec_enable_chroot_execlog = 1;
65510+#endif
65511+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
65512+ grsec_enable_chroot_caps = 1;
65513+#endif
65514+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
65515+ grsec_enable_chroot_sysctl = 1;
65516+#endif
65517+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
65518+ grsec_enable_symlinkown = 1;
65519+ grsec_symlinkown_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SYMLINKOWN_GID);
65520+#endif
65521+#ifdef CONFIG_GRKERNSEC_TPE
65522+ grsec_enable_tpe = 1;
65523+ grsec_tpe_gid = KGIDT_INIT(CONFIG_GRKERNSEC_TPE_GID);
65524+#ifdef CONFIG_GRKERNSEC_TPE_ALL
65525+ grsec_enable_tpe_all = 1;
65526+#endif
65527+#endif
65528+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
65529+ grsec_enable_socket_all = 1;
65530+ grsec_socket_all_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_ALL_GID);
65531+#endif
65532+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
65533+ grsec_enable_socket_client = 1;
65534+ grsec_socket_client_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_CLIENT_GID);
65535+#endif
65536+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
65537+ grsec_enable_socket_server = 1;
65538+ grsec_socket_server_gid = KGIDT_INIT(CONFIG_GRKERNSEC_SOCKET_SERVER_GID);
65539+#endif
65540+#endif
65541+
65542+ return;
65543+}
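
The kgid_t values assigned above come from KGIDT_INIT(), part of the kernel's typed-ID scheme: uids and gids are wrapped in one-member structs so the compiler rejects accidental mixing, and comparisons go through helpers such as gid_eq(). The idiom in miniature, with hypothetical wrapper names:

#include <assert.h>

/* one-member structs make uid/gid distinct types, as kuid_t/kgid_t do */
typedef struct { unsigned int val; } xuid_t;
typedef struct { unsigned int val; } xgid_t;

#define XUIDT_INIT(v) ((xuid_t){ v })	/* like KUIDT_INIT() */
#define XGIDT_INIT(v) ((xgid_t){ v })	/* like KGIDT_INIT() */

static int xuid_eq(xuid_t a, xuid_t b) { return a.val == b.val; }
static int xgid_eq(xgid_t a, xgid_t b) { return a.val == b.val; }

int main(void)
{
	xuid_t u = XUIDT_INIT(1000);
	xgid_t g = XGIDT_INIT(1000);

	assert(xuid_eq(u, XUIDT_INIT(1000)));
	assert(xgid_eq(g, XGIDT_INIT(1000)));
	/* xuid_eq(u, g) would not compile: the types don't mix */
	return 0;
}
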
65544diff --git a/grsecurity/grsec_link.c b/grsecurity/grsec_link.c
65545new file mode 100644
65546index 0000000..5e05e20
65547--- /dev/null
65548+++ b/grsecurity/grsec_link.c
65549@@ -0,0 +1,58 @@
65550+#include <linux/kernel.h>
65551+#include <linux/sched.h>
65552+#include <linux/fs.h>
65553+#include <linux/file.h>
65554+#include <linux/grinternal.h>
65555+
65556+int gr_handle_symlink_owner(const struct path *link, const struct inode *target)
65557+{
65558+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
65559+ const struct inode *link_inode = link->dentry->d_inode;
65560+
65561+ if (grsec_enable_symlinkown && in_group_p(grsec_symlinkown_gid) &&
65562+ /* ignore root-owned links, e.g. /proc/self */
65563+ gr_is_global_nonroot(link_inode->i_uid) && target &&
65564+ !uid_eq(link_inode->i_uid, target->i_uid)) {
65565+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINKOWNER_MSG, link->dentry, link->mnt, link_inode->i_uid, target->i_uid);
65566+ return 1;
65567+ }
65568+#endif
65569+ return 0;
65570+}
65571+
65572+int
65573+gr_handle_follow_link(const struct inode *parent,
65574+ const struct inode *inode,
65575+ const struct dentry *dentry, const struct vfsmount *mnt)
65576+{
65577+#ifdef CONFIG_GRKERNSEC_LINK
65578+ const struct cred *cred = current_cred();
65579+
65580+ if (grsec_enable_link && S_ISLNK(inode->i_mode) &&
65581+ (parent->i_mode & S_ISVTX) && !uid_eq(parent->i_uid, inode->i_uid) &&
65582+ (parent->i_mode & S_IWOTH) && !uid_eq(cred->fsuid, inode->i_uid)) {
65583+ gr_log_fs_int2(GR_DONT_AUDIT, GR_SYMLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid);
65584+ return -EACCES;
65585+ }
65586+#endif
65587+ return 0;
65588+}
65589+
65590+int
65591+gr_handle_hardlink(const struct dentry *dentry,
65592+ const struct vfsmount *mnt,
65593+ struct inode *inode, const int mode, const struct filename *to)
65594+{
65595+#ifdef CONFIG_GRKERNSEC_LINK
65596+ const struct cred *cred = current_cred();
65597+
65598+ if (grsec_enable_link && !uid_eq(cred->fsuid, inode->i_uid) &&
65599+ (!S_ISREG(mode) || is_privileged_binary(dentry) ||
65600+ (inode_permission(inode, MAY_READ | MAY_WRITE))) &&
65601+ !capable(CAP_FOWNER) && gr_is_global_nonroot(cred->uid)) {
65602+ gr_log_fs_int2_str(GR_DONT_AUDIT, GR_HARDLINK_MSG, dentry, mnt, inode->i_uid, inode->i_gid, to->name);
65603+ return -EPERM;
65604+ }
65605+#endif
65606+ return 0;
65607+}
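
The gr_handle_follow_link() check above is essentially the restriction that mainline Linux later adopted as fs.protected_symlinks: refuse to follow a symlink that lives in a sticky, world-writable directory unless the link is owned by the directory owner or by the following process. A hypothetical userspace sketch of the same predicate (path-based, with geteuid() standing in for the kernel's cred->fsuid; not part of the patch):

#include <libgen.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>
#include <unistd.h>

static int would_deny_follow(const char *path)
{
	struct stat lnk, dir;
	char buf[4096];

	if (lstat(path, &lnk) || !S_ISLNK(lnk.st_mode))
		return 0;			/* not a symlink: unrestricted */

	strncpy(buf, path, sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';		/* dirname() may modify its arg */
	if (stat(dirname(buf), &dir))
		return 0;

	return (dir.st_mode & S_ISVTX) &&	/* sticky parent directory */
	       (dir.st_mode & S_IWOTH) &&	/* world-writable parent */
	       dir.st_uid != lnk.st_uid &&	/* link owner != dir owner */
	       geteuid() != lnk.st_uid;		/* link owner != follower */
}

int main(int argc, char **argv)
{
	if (argc < 2)
		return 1;
	printf("%s: %s\n", argv[1],
	       would_deny_follow(argv[1]) ? "follow would be denied" : "ok");
	return 0;
}
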
65608diff --git a/grsecurity/grsec_log.c b/grsecurity/grsec_log.c
65609new file mode 100644
65610index 0000000..7c06085
65611--- /dev/null
65612+++ b/grsecurity/grsec_log.c
65613@@ -0,0 +1,326 @@
65614+#include <linux/kernel.h>
65615+#include <linux/sched.h>
65616+#include <linux/file.h>
65617+#include <linux/tty.h>
65618+#include <linux/fs.h>
65619+#include <linux/grinternal.h>
65620+
65621+#ifdef CONFIG_TREE_PREEMPT_RCU
65622+#define DISABLE_PREEMPT() preempt_disable()
65623+#define ENABLE_PREEMPT() preempt_enable()
65624+#else
65625+#define DISABLE_PREEMPT()
65626+#define ENABLE_PREEMPT()
65627+#endif
65628+
65629+#define BEGIN_LOCKS(x) \
65630+ DISABLE_PREEMPT(); \
65631+ rcu_read_lock(); \
65632+ read_lock(&tasklist_lock); \
65633+ read_lock(&grsec_exec_file_lock); \
65634+ if (x != GR_DO_AUDIT) \
65635+ spin_lock(&grsec_alert_lock); \
65636+ else \
65637+ spin_lock(&grsec_audit_lock)
65638+
65639+#define END_LOCKS(x) \
65640+ if (x != GR_DO_AUDIT) \
65641+ spin_unlock(&grsec_alert_lock); \
65642+ else \
65643+ spin_unlock(&grsec_audit_lock); \
65644+ read_unlock(&grsec_exec_file_lock); \
65645+ read_unlock(&tasklist_lock); \
65646+ rcu_read_unlock(); \
65647+ ENABLE_PREEMPT(); \
65648+ if (x == GR_DONT_AUDIT) \
65649+ gr_handle_alertkill(current)
65650+
65651+enum {
65652+ FLOODING,
65653+ NO_FLOODING
65654+};
65655+
65656+extern char *gr_alert_log_fmt;
65657+extern char *gr_audit_log_fmt;
65658+extern char *gr_alert_log_buf;
65659+extern char *gr_audit_log_buf;
65660+
65661+static int gr_log_start(int audit)
65662+{
65663+ char *loglevel = (audit == GR_DO_AUDIT) ? KERN_INFO : KERN_ALERT;
65664+ char *fmt = (audit == GR_DO_AUDIT) ? gr_audit_log_fmt : gr_alert_log_fmt;
65665+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
65666+#if (CONFIG_GRKERNSEC_FLOODTIME > 0 && CONFIG_GRKERNSEC_FLOODBURST > 0)
65667+ unsigned long curr_secs = get_seconds();
65668+
65669+ if (audit == GR_DO_AUDIT)
65670+ goto set_fmt;
65671+
65672+ if (!grsec_alert_wtime || time_after(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)) {
65673+ grsec_alert_wtime = curr_secs;
65674+ grsec_alert_fyet = 0;
65675+ } else if (time_before_eq(curr_secs, grsec_alert_wtime + CONFIG_GRKERNSEC_FLOODTIME)
65676+ && (grsec_alert_fyet < CONFIG_GRKERNSEC_FLOODBURST)) {
65677+ grsec_alert_fyet++;
65678+ } else if (grsec_alert_fyet == CONFIG_GRKERNSEC_FLOODBURST) {
65679+ grsec_alert_wtime = curr_secs;
65680+ grsec_alert_fyet++;
65681+ printk(KERN_ALERT "grsec: more alerts, logging disabled for %d seconds\n", CONFIG_GRKERNSEC_FLOODTIME);
65682+ return FLOODING;
65683+ }
65684+ else return FLOODING;
65685+
65686+set_fmt:
65687+#endif
65688+ memset(buf, 0, PAGE_SIZE);
65689+ if (current->signal->curr_ip && gr_acl_is_enabled()) {
65690+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: (%.64s:%c:%.950s) ");
65691+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
65692+ } else if (current->signal->curr_ip) {
65693+ sprintf(fmt, "%s%s", loglevel, "grsec: From %pI4: ");
65694+ snprintf(buf, PAGE_SIZE - 1, fmt, &current->signal->curr_ip);
65695+ } else if (gr_acl_is_enabled()) {
65696+ sprintf(fmt, "%s%s", loglevel, "grsec: (%.64s:%c:%.950s) ");
65697+ snprintf(buf, PAGE_SIZE - 1, fmt, current->role->rolename, gr_roletype_to_char(), current->acl->filename);
65698+ } else {
65699+ sprintf(fmt, "%s%s", loglevel, "grsec: ");
65700+ strcpy(buf, fmt);
65701+ }
65702+
65703+ return NO_FLOODING;
65704+}
65705+
65706+static void gr_log_middle(int audit, const char *msg, va_list ap)
65707+ __attribute__ ((format (printf, 2, 0)));
65708+
65709+static void gr_log_middle(int audit, const char *msg, va_list ap)
65710+{
65711+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
65712+ unsigned int len = strlen(buf);
65713+
65714+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
65715+
65716+ return;
65717+}
65718+
65719+static void gr_log_middle_varargs(int audit, const char *msg, ...)
65720+ __attribute__ ((format (printf, 2, 3)));
65721+
65722+static void gr_log_middle_varargs(int audit, const char *msg, ...)
65723+{
65724+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
65725+ unsigned int len = strlen(buf);
65726+ va_list ap;
65727+
65728+ va_start(ap, msg);
65729+ vsnprintf(buf + len, PAGE_SIZE - len - 1, msg, ap);
65730+ va_end(ap);
65731+
65732+ return;
65733+}
65734+
65735+static void gr_log_end(int audit, int append_default)
65736+{
65737+ char *buf = (audit == GR_DO_AUDIT) ? gr_audit_log_buf : gr_alert_log_buf;
65738+ if (append_default) {
65739+ struct task_struct *task = current;
65740+ struct task_struct *parent = task->real_parent;
65741+ const struct cred *cred = __task_cred(task);
65742+ const struct cred *pcred = __task_cred(parent);
65743+ unsigned int len = strlen(buf);
65744+
65745+ snprintf(buf + len, PAGE_SIZE - len - 1, DEFAULTSECMSG, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
65746+ }
65747+
65748+ printk("%s\n", buf);
65749+
65750+ return;
65751+}
65752+
65753+void gr_log_varargs(int audit, const char *msg, int argtypes, ...)
65754+{
65755+ int logtype;
65756+ char *result = (audit == GR_DO_AUDIT) ? "successful" : "denied";
65757+ char *str1 = NULL, *str2 = NULL, *str3 = NULL;
65758+ void *voidptr = NULL;
65759+ int num1 = 0, num2 = 0;
65760+ unsigned long ulong1 = 0, ulong2 = 0;
65761+ struct dentry *dentry = NULL;
65762+ struct vfsmount *mnt = NULL;
65763+ struct file *file = NULL;
65764+ struct task_struct *task = NULL;
65765+ const struct cred *cred, *pcred;
65766+ va_list ap;
65767+
65768+ BEGIN_LOCKS(audit);
65769+ logtype = gr_log_start(audit);
65770+ if (logtype == FLOODING) {
65771+ END_LOCKS(audit);
65772+ return;
65773+ }
65774+ va_start(ap, argtypes);
65775+ switch (argtypes) {
65776+ case GR_TTYSNIFF:
65777+ task = va_arg(ap, struct task_struct *);
65778+ gr_log_middle_varargs(audit, msg, &task->signal->curr_ip, gr_task_fullpath0(task), task->comm, task_pid_nr(task), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent));
65779+ break;
65780+ case GR_SYSCTL_HIDDEN:
65781+ str1 = va_arg(ap, char *);
65782+ gr_log_middle_varargs(audit, msg, result, str1);
65783+ break;
65784+ case GR_RBAC:
65785+ dentry = va_arg(ap, struct dentry *);
65786+ mnt = va_arg(ap, struct vfsmount *);
65787+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt));
65788+ break;
65789+ case GR_RBAC_STR:
65790+ dentry = va_arg(ap, struct dentry *);
65791+ mnt = va_arg(ap, struct vfsmount *);
65792+ str1 = va_arg(ap, char *);
65793+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1);
65794+ break;
65795+ case GR_STR_RBAC:
65796+ str1 = va_arg(ap, char *);
65797+ dentry = va_arg(ap, struct dentry *);
65798+ mnt = va_arg(ap, struct vfsmount *);
65799+ gr_log_middle_varargs(audit, msg, result, str1, gr_to_filename(dentry, mnt));
65800+ break;
65801+ case GR_RBAC_MODE2:
65802+ dentry = va_arg(ap, struct dentry *);
65803+ mnt = va_arg(ap, struct vfsmount *);
65804+ str1 = va_arg(ap, char *);
65805+ str2 = va_arg(ap, char *);
65806+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2);
65807+ break;
65808+ case GR_RBAC_MODE3:
65809+ dentry = va_arg(ap, struct dentry *);
65810+ mnt = va_arg(ap, struct vfsmount *);
65811+ str1 = va_arg(ap, char *);
65812+ str2 = va_arg(ap, char *);
65813+ str3 = va_arg(ap, char *);
65814+ gr_log_middle_varargs(audit, msg, result, gr_to_filename(dentry, mnt), str1, str2, str3);
65815+ break;
65816+ case GR_FILENAME:
65817+ dentry = va_arg(ap, struct dentry *);
65818+ mnt = va_arg(ap, struct vfsmount *);
65819+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt));
65820+ break;
65821+ case GR_STR_FILENAME:
65822+ str1 = va_arg(ap, char *);
65823+ dentry = va_arg(ap, struct dentry *);
65824+ mnt = va_arg(ap, struct vfsmount *);
65825+ gr_log_middle_varargs(audit, msg, str1, gr_to_filename(dentry, mnt));
65826+ break;
65827+ case GR_FILENAME_STR:
65828+ dentry = va_arg(ap, struct dentry *);
65829+ mnt = va_arg(ap, struct vfsmount *);
65830+ str1 = va_arg(ap, char *);
65831+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), str1);
65832+ break;
65833+ case GR_FILENAME_TWO_INT:
65834+ dentry = va_arg(ap, struct dentry *);
65835+ mnt = va_arg(ap, struct vfsmount *);
65836+ num1 = va_arg(ap, int);
65837+ num2 = va_arg(ap, int);
65838+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2);
65839+ break;
65840+ case GR_FILENAME_TWO_INT_STR:
65841+ dentry = va_arg(ap, struct dentry *);
65842+ mnt = va_arg(ap, struct vfsmount *);
65843+ num1 = va_arg(ap, int);
65844+ num2 = va_arg(ap, int);
65845+ str1 = va_arg(ap, char *);
65846+ gr_log_middle_varargs(audit, msg, gr_to_filename(dentry, mnt), num1, num2, str1);
65847+ break;
65848+ case GR_TEXTREL:
65849+ file = va_arg(ap, struct file *);
65850+ ulong1 = va_arg(ap, unsigned long);
65851+ ulong2 = va_arg(ap, unsigned long);
65852+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>", ulong1, ulong2);
65853+ break;
65854+ case GR_PTRACE:
65855+ task = va_arg(ap, struct task_struct *);
65856+ gr_log_middle_varargs(audit, msg, task->exec_file ? gr_to_filename(task->exec_file->f_path.dentry, task->exec_file->f_path.mnt) : "(none)", task->comm, task_pid_nr(task));
65857+ break;
65858+ case GR_RESOURCE:
65859+ task = va_arg(ap, struct task_struct *);
65860+ cred = __task_cred(task);
65861+ pcred = __task_cred(task->real_parent);
65862+ ulong1 = va_arg(ap, unsigned long);
65863+ str1 = va_arg(ap, char *);
65864+ ulong2 = va_arg(ap, unsigned long);
65865+ gr_log_middle_varargs(audit, msg, ulong1, str1, ulong2, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
65866+ break;
65867+ case GR_CAP:
65868+ task = va_arg(ap, struct task_struct *);
65869+ cred = __task_cred(task);
65870+ pcred = __task_cred(task->real_parent);
65871+ str1 = va_arg(ap, char *);
65872+ gr_log_middle_varargs(audit, msg, str1, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
65873+ break;
65874+ case GR_SIG:
65875+ str1 = va_arg(ap, char *);
65876+ voidptr = va_arg(ap, void *);
65877+ gr_log_middle_varargs(audit, msg, str1, voidptr);
65878+ break;
65879+ case GR_SIG2:
65880+ task = va_arg(ap, struct task_struct *);
65881+ cred = __task_cred(task);
65882+ pcred = __task_cred(task->real_parent);
65883+ num1 = va_arg(ap, int);
65884+ gr_log_middle_varargs(audit, msg, num1, gr_task_fullpath0(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath0(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
65885+ break;
65886+ case GR_CRASH1:
65887+ task = va_arg(ap, struct task_struct *);
65888+ cred = __task_cred(task);
65889+ pcred = __task_cred(task->real_parent);
65890+ ulong1 = va_arg(ap, unsigned long);
65891+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), GR_GLOBAL_UID(cred->uid), ulong1);
65892+ break;
65893+ case GR_CRASH2:
65894+ task = va_arg(ap, struct task_struct *);
65895+ cred = __task_cred(task);
65896+ pcred = __task_cred(task->real_parent);
65897+ ulong1 = va_arg(ap, unsigned long);
65898+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid), ulong1);
65899+ break;
65900+ case GR_RWXMAP:
65901+ file = va_arg(ap, struct file *);
65902+ gr_log_middle_varargs(audit, msg, file ? gr_to_filename(file->f_path.dentry, file->f_path.mnt) : "<anonymous mapping>");
65903+ break;
65904+ case GR_PSACCT:
65905+ {
65906+ unsigned int wday, cday;
65907+ __u8 whr, chr;
65908+ __u8 wmin, cmin;
65909+ __u8 wsec, csec;
65910+ char cur_tty[64] = { 0 };
65911+ char parent_tty[64] = { 0 };
65912+
65913+ task = va_arg(ap, struct task_struct *);
65914+ wday = va_arg(ap, unsigned int);
65915+ cday = va_arg(ap, unsigned int);
65916+ whr = va_arg(ap, int);
65917+ chr = va_arg(ap, int);
65918+ wmin = va_arg(ap, int);
65919+ cmin = va_arg(ap, int);
65920+ wsec = va_arg(ap, int);
65921+ csec = va_arg(ap, int);
65922+ ulong1 = va_arg(ap, unsigned long);
65923+ cred = __task_cred(task);
65924+ pcred = __task_cred(task->real_parent);
65925+
65926+ gr_log_middle_varargs(audit, msg, gr_task_fullpath(task), task->comm, task_pid_nr(task), &task->signal->curr_ip, tty_name(task->signal->tty, cur_tty), GR_GLOBAL_UID(cred->uid), GR_GLOBAL_UID(cred->euid), GR_GLOBAL_GID(cred->gid), GR_GLOBAL_GID(cred->egid), wday, whr, wmin, wsec, cday, chr, cmin, csec, (task->flags & PF_SIGNALED) ? "killed by signal" : "exited", ulong1, gr_parent_task_fullpath(task), task->real_parent->comm, task_pid_nr(task->real_parent), &task->real_parent->signal->curr_ip, tty_name(task->real_parent->signal->tty, parent_tty), GR_GLOBAL_UID(pcred->uid), GR_GLOBAL_UID(pcred->euid), GR_GLOBAL_GID(pcred->gid), GR_GLOBAL_GID(pcred->egid));
65927+ }
65928+ break;
65929+ default:
65930+ gr_log_middle(audit, msg, ap);
65931+ }
65932+ va_end(ap);
65933+ // these don't need DEFAULTSECARGS printed on the end
65934+ if (argtypes == GR_CRASH1 || argtypes == GR_CRASH2)
65935+ gr_log_end(audit, 0);
65936+ else
65937+ gr_log_end(audit, 1);
65938+ END_LOCKS(audit);
65939+}
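
The flood control in gr_log_start() above is a fixed-window limiter: up to CONFIG_GRKERNSEC_FLOODBURST alerts per CONFIG_GRKERNSEC_FLOODTIME seconds, then a single "logging disabled" notice and silence until the window rolls over. A standalone model of that window, with assumed stand-in values of 10 seconds and 6 alerts (illustrative only, not the kernel code):

#include <stdio.h>
#include <time.h>

#define WINDOW 10		/* stand-in for CONFIG_GRKERNSEC_FLOODTIME */
#define BURST   6		/* stand-in for CONFIG_GRKERNSEC_FLOODBURST */

static time_t wtime;		/* start of the current window */
static unsigned int fyet;	/* alerts already counted in the window */

static int alert_allowed(void)
{
	time_t now = time(NULL);

	if (!wtime || now > wtime + WINDOW) {
		wtime = now;		/* window expired: start a new one */
		fyet = 0;		/* (this pass itself is not counted) */
	} else if (fyet < BURST) {
		fyet++;
	} else if (fyet == BURST) {
		wtime = now;		/* suppression runs from this point */
		fyet++;
		printf("more alerts, logging disabled for %d seconds\n",
		       WINDOW);
		return 0;
	} else {
		return 0;		/* already suppressing */
	}
	return 1;
}

int main(void)
{
	int i, shown = 0;

	for (i = 0; i < 10; i++)
		shown += alert_allowed();
	/* the reset pass is uncounted, so BURST + 1 alerts get through */
	printf("%d of 10 rapid alerts logged\n", shown);
	return 0;
}
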
65940diff --git a/grsecurity/grsec_mem.c b/grsecurity/grsec_mem.c
65941new file mode 100644
65942index 0000000..f536303
65943--- /dev/null
65944+++ b/grsecurity/grsec_mem.c
65945@@ -0,0 +1,40 @@
65946+#include <linux/kernel.h>
65947+#include <linux/sched.h>
65948+#include <linux/mm.h>
65949+#include <linux/mman.h>
65950+#include <linux/grinternal.h>
65951+
65952+void
65953+gr_handle_ioperm(void)
65954+{
65955+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPERM_MSG);
65956+ return;
65957+}
65958+
65959+void
65960+gr_handle_iopl(void)
65961+{
65962+ gr_log_noargs(GR_DONT_AUDIT, GR_IOPL_MSG);
65963+ return;
65964+}
65965+
65966+void
65967+gr_handle_mem_readwrite(u64 from, u64 to)
65968+{
65969+ gr_log_two_u64(GR_DONT_AUDIT, GR_MEM_READWRITE_MSG, from, to);
65970+ return;
65971+}
65972+
65973+void
65974+gr_handle_vm86(void)
65975+{
65976+ gr_log_noargs(GR_DONT_AUDIT, GR_VM86_MSG);
65977+ return;
65978+}
65979+
65980+void
65981+gr_log_badprocpid(const char *entry)
65982+{
65983+ gr_log_str(GR_DONT_AUDIT, GR_BADPROCPID_MSG, entry);
65984+ return;
65985+}
65986diff --git a/grsecurity/grsec_mount.c b/grsecurity/grsec_mount.c
65987new file mode 100644
65988index 0000000..2131422
65989--- /dev/null
65990+++ b/grsecurity/grsec_mount.c
65991@@ -0,0 +1,62 @@
65992+#include <linux/kernel.h>
65993+#include <linux/sched.h>
65994+#include <linux/mount.h>
65995+#include <linux/grsecurity.h>
65996+#include <linux/grinternal.h>
65997+
65998+void
65999+gr_log_remount(const char *devname, const int retval)
66000+{
66001+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
66002+ if (grsec_enable_mount && (retval >= 0))
66003+ gr_log_str(GR_DO_AUDIT, GR_REMOUNT_AUDIT_MSG, devname ? devname : "none");
66004+#endif
66005+ return;
66006+}
66007+
66008+void
66009+gr_log_unmount(const char *devname, const int retval)
66010+{
66011+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
66012+ if (grsec_enable_mount && (retval >= 0))
66013+ gr_log_str(GR_DO_AUDIT, GR_UNMOUNT_AUDIT_MSG, devname ? devname : "none");
66014+#endif
66015+ return;
66016+}
66017+
66018+void
66019+gr_log_mount(const char *from, const char *to, const int retval)
66020+{
66021+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
66022+ if (grsec_enable_mount && (retval >= 0))
66023+ gr_log_str_str(GR_DO_AUDIT, GR_MOUNT_AUDIT_MSG, from ? from : "none", to);
66024+#endif
66025+ return;
66026+}
66027+
66028+int
66029+gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags)
66030+{
66031+#ifdef CONFIG_GRKERNSEC_ROFS
66032+ if (grsec_enable_rofs && !(mnt_flags & MNT_READONLY)) {
66033+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_MOUNT_MSG, dentry, mnt);
66034+ return -EPERM;
66035+ } else
66036+ return 0;
66037+#endif
66038+ return 0;
66039+}
66040+
66041+int
66042+gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode)
66043+{
66044+#ifdef CONFIG_GRKERNSEC_ROFS
66045+ if (grsec_enable_rofs && (acc_mode & MAY_WRITE) &&
66046+ dentry->d_inode && S_ISBLK(dentry->d_inode->i_mode)) {
66047+ gr_log_fs_generic(GR_DO_AUDIT, GR_ROFS_BLOCKWRITE_MSG, dentry, mnt);
66048+ return -EPERM;
66049+ } else
66050+ return 0;
66051+#endif
66052+ return 0;
66053+}
66054diff --git a/grsecurity/grsec_pax.c b/grsecurity/grsec_pax.c
66055new file mode 100644
66056index 0000000..a3b12a0
66057--- /dev/null
66058+++ b/grsecurity/grsec_pax.c
66059@@ -0,0 +1,36 @@
66060+#include <linux/kernel.h>
66061+#include <linux/sched.h>
66062+#include <linux/mm.h>
66063+#include <linux/file.h>
66064+#include <linux/grinternal.h>
66065+#include <linux/grsecurity.h>
66066+
66067+void
66068+gr_log_textrel(struct vm_area_struct * vma)
66069+{
66070+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
66071+ if (grsec_enable_audit_textrel)
66072+ gr_log_textrel_ulong_ulong(GR_DO_AUDIT, GR_TEXTREL_AUDIT_MSG, vma->vm_file, vma->vm_start, vma->vm_pgoff);
66073+#endif
66074+ return;
66075+}
66076+
66077+void
66078+gr_log_rwxmmap(struct file *file)
66079+{
66080+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
66081+ if (grsec_enable_log_rwxmaps)
66082+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMMAP_MSG, file);
66083+#endif
66084+ return;
66085+}
66086+
66087+void
66088+gr_log_rwxmprotect(struct file *file)
66089+{
66090+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
66091+ if (grsec_enable_log_rwxmaps)
66092+ gr_log_rwxmap(GR_DONT_AUDIT, GR_RWXMPROTECT_MSG, file);
66093+#endif
66094+ return;
66095+}
66096diff --git a/grsecurity/grsec_ptrace.c b/grsecurity/grsec_ptrace.c
66097new file mode 100644
66098index 0000000..f7f29aa
66099--- /dev/null
66100+++ b/grsecurity/grsec_ptrace.c
66101@@ -0,0 +1,30 @@
66102+#include <linux/kernel.h>
66103+#include <linux/sched.h>
66104+#include <linux/grinternal.h>
66105+#include <linux/security.h>
66106+
66107+void
66108+gr_audit_ptrace(struct task_struct *task)
66109+{
66110+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
66111+ if (grsec_enable_audit_ptrace)
66112+ gr_log_ptrace(GR_DO_AUDIT, GR_PTRACE_AUDIT_MSG, task);
66113+#endif
66114+ return;
66115+}
66116+
66117+int
66118+gr_ptrace_readexec(struct file *file, int unsafe_flags)
66119+{
66120+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
66121+ const struct dentry *dentry = file->f_path.dentry;
66122+ const struct vfsmount *mnt = file->f_path.mnt;
66123+
66124+ if (grsec_enable_ptrace_readexec && (unsafe_flags & LSM_UNSAFE_PTRACE) &&
66125+ (inode_permission(dentry->d_inode, MAY_READ) || !gr_acl_handle_open(dentry, mnt, MAY_READ))) {
66126+ gr_log_fs_generic(GR_DONT_AUDIT, GR_PTRACE_READEXEC_MSG, dentry, mnt);
66127+ return -EACCES;
66128+ }
66129+#endif
66130+ return 0;
66131+}
66132diff --git a/grsecurity/grsec_sig.c b/grsecurity/grsec_sig.c
66133new file mode 100644
66134index 0000000..4e29cc7
66135--- /dev/null
66136+++ b/grsecurity/grsec_sig.c
66137@@ -0,0 +1,246 @@
66138+#include <linux/kernel.h>
66139+#include <linux/sched.h>
66140+#include <linux/fs.h>
66141+#include <linux/delay.h>
66142+#include <linux/grsecurity.h>
66143+#include <linux/grinternal.h>
66144+#include <linux/hardirq.h>
66145+
66146+char *signames[] = {
66147+ [SIGSEGV] = "Segmentation fault",
66148+ [SIGILL] = "Illegal instruction",
66149+ [SIGABRT] = "Abort",
66150+ [SIGBUS] = "Invalid alignment/Bus error"
66151+};
66152+
66153+void
66154+gr_log_signal(const int sig, const void *addr, const struct task_struct *t)
66155+{
66156+#ifdef CONFIG_GRKERNSEC_SIGNAL
66157+ if (grsec_enable_signal && ((sig == SIGSEGV) || (sig == SIGILL) ||
66158+ (sig == SIGABRT) || (sig == SIGBUS))) {
66159+ if (task_pid_nr(t) == task_pid_nr(current)) {
66160+ gr_log_sig_addr(GR_DONT_AUDIT_GOOD, GR_UNISIGLOG_MSG, signames[sig], addr);
66161+ } else {
66162+ gr_log_sig_task(GR_DONT_AUDIT_GOOD, GR_DUALSIGLOG_MSG, t, sig);
66163+ }
66164+ }
66165+#endif
66166+ return;
66167+}
66168+
66169+int
66170+gr_handle_signal(const struct task_struct *p, const int sig)
66171+{
66172+#ifdef CONFIG_GRKERNSEC
66173+ /* ignore the 0 signal for protected task checks */
66174+ if (task_pid_nr(current) > 1 && sig && gr_check_protected_task(p)) {
66175+ gr_log_sig_task(GR_DONT_AUDIT, GR_SIG_ACL_MSG, p, sig);
66176+ return -EPERM;
66177+ } else if (gr_pid_is_chrooted((struct task_struct *)p)) {
66178+ return -EPERM;
66179+ }
66180+#endif
66181+ return 0;
66182+}
66183+
66184+#ifdef CONFIG_GRKERNSEC
66185+extern int specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t);
66186+
66187+int gr_fake_force_sig(int sig, struct task_struct *t)
66188+{
66189+ unsigned long int flags;
66190+ int ret, blocked, ignored;
66191+ struct k_sigaction *action;
66192+
66193+ spin_lock_irqsave(&t->sighand->siglock, flags);
66194+ action = &t->sighand->action[sig-1];
66195+ ignored = action->sa.sa_handler == SIG_IGN;
66196+ blocked = sigismember(&t->blocked, sig);
66197+ if (blocked || ignored) {
66198+ action->sa.sa_handler = SIG_DFL;
66199+ if (blocked) {
66200+ sigdelset(&t->blocked, sig);
66201+ recalc_sigpending_and_wake(t);
66202+ }
66203+ }
66204+ if (action->sa.sa_handler == SIG_DFL)
66205+ t->signal->flags &= ~SIGNAL_UNKILLABLE;
66206+ ret = specific_send_sig_info(sig, SEND_SIG_PRIV, t);
66207+
66208+ spin_unlock_irqrestore(&t->sighand->siglock, flags);
66209+
66210+ return ret;
66211+}
66212+#endif
66213+
66214+#ifdef CONFIG_GRKERNSEC_BRUTE
66215+#define GR_USER_BAN_TIME (15 * 60)
66216+#define GR_DAEMON_BRUTE_TIME (30 * 60)
66217+
66218+static int __get_dumpable(unsigned long mm_flags)
66219+{
66220+ int ret;
66221+
66222+ ret = mm_flags & MMF_DUMPABLE_MASK;
66223+ return (ret >= 2) ? 2 : ret;
66224+}
66225+#endif
66226+
66227+void gr_handle_brute_attach(unsigned long mm_flags)
66228+{
66229+#ifdef CONFIG_GRKERNSEC_BRUTE
66230+ struct task_struct *p = current;
66231+ kuid_t uid = GLOBAL_ROOT_UID;
66232+ int daemon = 0;
66233+
66234+ if (!grsec_enable_brute)
66235+ return;
66236+
66237+ rcu_read_lock();
66238+ read_lock(&tasklist_lock);
66239+ read_lock(&grsec_exec_file_lock);
66240+ if (p->real_parent && gr_is_same_file(p->real_parent->exec_file, p->exec_file)) {
66241+ p->real_parent->brute_expires = get_seconds() + GR_DAEMON_BRUTE_TIME;
66242+ p->real_parent->brute = 1;
66243+ daemon = 1;
66244+ } else {
66245+ const struct cred *cred = __task_cred(p), *cred2;
66246+ struct task_struct *tsk, *tsk2;
66247+
66248+ if (!__get_dumpable(mm_flags) && gr_is_global_nonroot(cred->uid)) {
66249+ struct user_struct *user;
66250+
66251+ uid = cred->uid;
66252+
66253+ /* this is put upon execution past expiration */
66254+ user = find_user(uid);
66255+ if (user == NULL)
66256+ goto unlock;
66257+ user->suid_banned = 1;
66258+ user->suid_ban_expires = get_seconds() + GR_USER_BAN_TIME;
66259+ if (user->suid_ban_expires == ~0UL)
66260+ user->suid_ban_expires--;
66261+
66262+ /* only kill other threads of the same binary, from the same user */
66263+ do_each_thread(tsk2, tsk) {
66264+ cred2 = __task_cred(tsk);
66265+ if (tsk != p && uid_eq(cred2->uid, uid) && gr_is_same_file(tsk->exec_file, p->exec_file))
66266+ gr_fake_force_sig(SIGKILL, tsk);
66267+ } while_each_thread(tsk2, tsk);
66268+ }
66269+ }
66270+unlock:
66271+ read_unlock(&grsec_exec_file_lock);
66272+ read_unlock(&tasklist_lock);
66273+ rcu_read_unlock();
66274+
66275+ if (gr_is_global_nonroot(uid))
66276+ gr_log_fs_int2(GR_DONT_AUDIT, GR_BRUTE_SUID_MSG, p->exec_file->f_path.dentry, p->exec_file->f_path.mnt, GR_GLOBAL_UID(uid), GR_USER_BAN_TIME / 60);
66277+ else if (daemon)
66278+ gr_log_noargs(GR_DONT_AUDIT, GR_BRUTE_DAEMON_MSG);
66279+
66280+#endif
66281+ return;
66282+}
66283+
66284+void gr_handle_brute_check(void)
66285+{
66286+#ifdef CONFIG_GRKERNSEC_BRUTE
66287+ struct task_struct *p = current;
66288+
66289+ if (unlikely(p->brute)) {
66290+ if (!grsec_enable_brute)
66291+ p->brute = 0;
66292+ else if (time_before(get_seconds(), p->brute_expires))
66293+ msleep(30 * 1000);
66294+ }
66295+#endif
66296+ return;
66297+}
66298+
66299+void gr_handle_kernel_exploit(void)
66300+{
66301+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
66302+ const struct cred *cred;
66303+ struct task_struct *tsk, *tsk2;
66304+ struct user_struct *user;
66305+ kuid_t uid;
66306+
66307+ if (in_irq() || in_serving_softirq() || in_nmi())
66308+ panic("grsec: halting the system due to suspicious kernel crash caused in interrupt context");
66309+
66310+ uid = current_uid();
66311+
66312+ if (gr_is_global_root(uid))
66313+ panic("grsec: halting the system due to suspicious kernel crash caused by root");
66314+ else {
66315+ /* kill all the processes of this user, hold a reference
66316+ to their creds struct, and prevent them from creating
66317+ another process until system reset
66318+ */
66319+ printk(KERN_ALERT "grsec: banning user with uid %u until system restart for suspicious kernel crash\n",
66320+ GR_GLOBAL_UID(uid));
66321+ /* we intentionally leak this ref */
66322+ user = get_uid(current->cred->user);
66323+ if (user)
66324+ user->kernel_banned = 1;
66325+
66326+ /* kill all processes of this user */
66327+ read_lock(&tasklist_lock);
66328+ do_each_thread(tsk2, tsk) {
66329+ cred = __task_cred(tsk);
66330+ if (uid_eq(cred->uid, uid))
66331+ gr_fake_force_sig(SIGKILL, tsk);
66332+ } while_each_thread(tsk2, tsk);
66333+ read_unlock(&tasklist_lock);
66334+ }
66335+#endif
66336+}
66337+
66338+#ifdef CONFIG_GRKERNSEC_BRUTE
66339+static bool suid_ban_expired(struct user_struct *user)
66340+{
66341+ if (user->suid_ban_expires != ~0UL && time_after_eq(get_seconds(), user->suid_ban_expires)) {
66342+ user->suid_banned = 0;
66343+ user->suid_ban_expires = 0;
66344+ free_uid(user);
66345+ return true;
66346+ }
66347+
66348+ return false;
66349+}
66350+#endif
66351+
66352+int gr_process_kernel_exec_ban(void)
66353+{
66354+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
66355+ if (unlikely(current->cred->user->kernel_banned))
66356+ return -EPERM;
66357+#endif
66358+ return 0;
66359+}
66360+
66361+int gr_process_kernel_setuid_ban(struct user_struct *user)
66362+{
66363+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
66364+ if (unlikely(user->kernel_banned))
66365+ gr_fake_force_sig(SIGKILL, current);
66366+#endif
66367+ return 0;
66368+}
66369+
66370+int gr_process_suid_exec_ban(const struct linux_binprm *bprm)
66371+{
66372+#ifdef CONFIG_GRKERNSEC_BRUTE
66373+ struct user_struct *user = current->cred->user;
66374+ if (unlikely(user->suid_banned)) {
66375+ if (suid_ban_expired(user))
66376+ return 0;
66377+ /* disallow execution of suid binaries only */
66378+ else if (!uid_eq(bprm->cred->euid, current->cred->uid))
66379+ return -EPERM;
66380+ }
66381+#endif
66382+ return 0;
66383+}
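
The suid-ban bookkeeping above stores an absolute expiry computed from get_seconds() and reserves ~0UL as a "banned until reboot" sentinel, which is why an expiry that happens to land exactly on ~0UL is nudged down one second. A minimal userspace model of the set/expire pair (illustrative sketch, not the kernel code):

#include <stdio.h>
#include <time.h>

#define BAN_TIME (15 * 60)	/* mirrors GR_USER_BAN_TIME */

struct user {
	int suid_banned;
	unsigned long suid_ban_expires;	/* 0 = none, ~0UL = until reboot */
};

static void ban(struct user *u)
{
	u->suid_banned = 1;
	u->suid_ban_expires = (unsigned long)time(NULL) + BAN_TIME;
	if (u->suid_ban_expires == ~0UL)
		u->suid_ban_expires--;	/* keep the permanent sentinel free */
}

static int ban_expired(struct user *u)
{
	unsigned long now = (unsigned long)time(NULL);

	if (u->suid_ban_expires != ~0UL && now >= u->suid_ban_expires) {
		u->suid_banned = 0;	/* window passed: lift the ban */
		u->suid_ban_expires = 0;
		return 1;
	}
	return 0;
}

int main(void)
{
	struct user u = { 0, 0 };

	ban(&u);
	printf("banned=%d, %ld s left, expired=%d\n", u.suid_banned,
	       (long)(u.suid_ban_expires - (unsigned long)time(NULL)),
	       ban_expired(&u));
	return 0;
}
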
66384diff --git a/grsecurity/grsec_sock.c b/grsecurity/grsec_sock.c
66385new file mode 100644
66386index 0000000..4030d57
66387--- /dev/null
66388+++ b/grsecurity/grsec_sock.c
66389@@ -0,0 +1,244 @@
66390+#include <linux/kernel.h>
66391+#include <linux/module.h>
66392+#include <linux/sched.h>
66393+#include <linux/file.h>
66394+#include <linux/net.h>
66395+#include <linux/in.h>
66396+#include <linux/ip.h>
66397+#include <net/sock.h>
66398+#include <net/inet_sock.h>
66399+#include <linux/grsecurity.h>
66400+#include <linux/grinternal.h>
66401+#include <linux/gracl.h>
66402+
66403+extern int gr_search_udp_recvmsg(const struct sock *sk, const struct sk_buff *skb);
66404+extern int gr_search_udp_sendmsg(const struct sock *sk, const struct sockaddr_in *addr);
66405+
66406+EXPORT_SYMBOL(gr_search_udp_recvmsg);
66407+EXPORT_SYMBOL(gr_search_udp_sendmsg);
66408+
66409+#ifdef CONFIG_UNIX_MODULE
66410+EXPORT_SYMBOL(gr_acl_handle_unix);
66411+EXPORT_SYMBOL(gr_acl_handle_mknod);
66412+EXPORT_SYMBOL(gr_handle_chroot_unix);
66413+EXPORT_SYMBOL(gr_handle_create);
66414+#endif
66415+
66416+#ifdef CONFIG_GRKERNSEC
66417+#define gr_conn_table_size 32749
66418+struct conn_table_entry {
66419+ struct conn_table_entry *next;
66420+ struct signal_struct *sig;
66421+};
66422+
66423+struct conn_table_entry *gr_conn_table[gr_conn_table_size];
66424+DEFINE_SPINLOCK(gr_conn_table_lock);
66425+
66426+extern const char * gr_socktype_to_name(unsigned char type);
66427+extern const char * gr_proto_to_name(unsigned char proto);
66428+extern const char * gr_sockfamily_to_name(unsigned char family);
66429+
66430+static __inline__ int
66431+conn_hash(__u32 saddr, __u32 daddr, __u16 sport, __u16 dport, unsigned int size)
66432+{
66433+ return ((daddr + saddr + (sport << 8) + (dport << 16)) % size);
66434+}
66435+
66436+static __inline__ int
66437+conn_match(const struct signal_struct *sig, __u32 saddr, __u32 daddr,
66438+ __u16 sport, __u16 dport)
66439+{
66440+ if (unlikely(sig->gr_saddr == saddr && sig->gr_daddr == daddr &&
66441+ sig->gr_sport == sport && sig->gr_dport == dport))
66442+ return 1;
66443+ else
66444+ return 0;
66445+}
66446+
66447+static void gr_add_to_task_ip_table_nolock(struct signal_struct *sig, struct conn_table_entry *newent)
66448+{
66449+ struct conn_table_entry **match;
66450+ unsigned int index;
66451+
66452+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
66453+ sig->gr_sport, sig->gr_dport,
66454+ gr_conn_table_size);
66455+
66456+ newent->sig = sig;
66457+
66458+ match = &gr_conn_table[index];
66459+ newent->next = *match;
66460+ *match = newent;
66461+
66462+ return;
66463+}
66464+
66465+static void gr_del_task_from_ip_table_nolock(struct signal_struct *sig)
66466+{
66467+ struct conn_table_entry *match, *last = NULL;
66468+ unsigned int index;
66469+
66470+ index = conn_hash(sig->gr_saddr, sig->gr_daddr,
66471+ sig->gr_sport, sig->gr_dport,
66472+ gr_conn_table_size);
66473+
66474+ match = gr_conn_table[index];
66475+ while (match && !conn_match(match->sig,
66476+ sig->gr_saddr, sig->gr_daddr, sig->gr_sport,
66477+ sig->gr_dport)) {
66478+ last = match;
66479+ match = match->next;
66480+ }
66481+
66482+ if (match) {
66483+ if (last)
66484+ last->next = match->next;
66485+ else
66486+ gr_conn_table[index] = NULL;
66487+ kfree(match);
66488+ }
66489+
66490+ return;
66491+}
66492+
66493+static struct signal_struct * gr_lookup_task_ip_table(__u32 saddr, __u32 daddr,
66494+ __u16 sport, __u16 dport)
66495+{
66496+ struct conn_table_entry *match;
66497+ unsigned int index;
66498+
66499+ index = conn_hash(saddr, daddr, sport, dport, gr_conn_table_size);
66500+
66501+ match = gr_conn_table[index];
66502+ while (match && !conn_match(match->sig, saddr, daddr, sport, dport))
66503+ match = match->next;
66504+
66505+ if (match)
66506+ return match->sig;
66507+ else
66508+ return NULL;
66509+}
66510+
66511+#endif
66512+
66513+void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet)
66514+{
66515+#ifdef CONFIG_GRKERNSEC
66516+ struct signal_struct *sig = task->signal;
66517+ struct conn_table_entry *newent;
66518+
66519+ newent = kmalloc(sizeof(struct conn_table_entry), GFP_ATOMIC);
66520+ if (newent == NULL)
66521+ return;
66522+ /* no bh lock needed since we are called with bh disabled */
66523+ spin_lock(&gr_conn_table_lock);
66524+ gr_del_task_from_ip_table_nolock(sig);
66525+ sig->gr_saddr = inet->inet_rcv_saddr;
66526+ sig->gr_daddr = inet->inet_daddr;
66527+ sig->gr_sport = inet->inet_sport;
66528+ sig->gr_dport = inet->inet_dport;
66529+ gr_add_to_task_ip_table_nolock(sig, newent);
66530+ spin_unlock(&gr_conn_table_lock);
66531+#endif
66532+ return;
66533+}
66534+
66535+void gr_del_task_from_ip_table(struct task_struct *task)
66536+{
66537+#ifdef CONFIG_GRKERNSEC
66538+ spin_lock_bh(&gr_conn_table_lock);
66539+ gr_del_task_from_ip_table_nolock(task->signal);
66540+ spin_unlock_bh(&gr_conn_table_lock);
66541+#endif
66542+ return;
66543+}
66544+
66545+void
66546+gr_attach_curr_ip(const struct sock *sk)
66547+{
66548+#ifdef CONFIG_GRKERNSEC
66549+ struct signal_struct *p, *set;
66550+ const struct inet_sock *inet = inet_sk(sk);
66551+
66552+ if (unlikely(sk->sk_protocol != IPPROTO_TCP))
66553+ return;
66554+
66555+ set = current->signal;
66556+
66557+ spin_lock_bh(&gr_conn_table_lock);
66558+ p = gr_lookup_task_ip_table(inet->inet_daddr, inet->inet_rcv_saddr,
66559+ inet->inet_dport, inet->inet_sport);
66560+ if (unlikely(p != NULL)) {
66561+ set->curr_ip = p->curr_ip;
66562+ set->used_accept = 1;
66563+ gr_del_task_from_ip_table_nolock(p);
66564+ spin_unlock_bh(&gr_conn_table_lock);
66565+ return;
66566+ }
66567+ spin_unlock_bh(&gr_conn_table_lock);
66568+
66569+ set->curr_ip = inet->inet_daddr;
66570+ set->used_accept = 1;
66571+#endif
66572+ return;
66573+}
66574+
66575+int
66576+gr_handle_sock_all(const int family, const int type, const int protocol)
66577+{
66578+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
66579+ if (grsec_enable_socket_all && in_group_p(grsec_socket_all_gid) &&
66580+ (family != AF_UNIX)) {
66581+ if (family == AF_INET)
66582+ gr_log_str3(GR_DONT_AUDIT, GR_SOCK_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), gr_proto_to_name(protocol));
66583+ else
66584+ gr_log_str2_int(GR_DONT_AUDIT, GR_SOCK_NOINET_MSG, gr_sockfamily_to_name(family), gr_socktype_to_name(type), protocol);
66585+ return -EACCES;
66586+ }
66587+#endif
66588+ return 0;
66589+}
66590+
66591+int
66592+gr_handle_sock_server(const struct sockaddr *sck)
66593+{
66594+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
66595+ if (grsec_enable_socket_server &&
66596+ in_group_p(grsec_socket_server_gid) &&
66597+ sck && (sck->sa_family != AF_UNIX) &&
66598+ (sck->sa_family != AF_LOCAL)) {
66599+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
66600+ return -EACCES;
66601+ }
66602+#endif
66603+ return 0;
66604+}
66605+
66606+int
66607+gr_handle_sock_server_other(const struct sock *sck)
66608+{
66609+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
66610+ if (grsec_enable_socket_server &&
66611+ in_group_p(grsec_socket_server_gid) &&
66612+ sck && (sck->sk_family != AF_UNIX) &&
66613+ (sck->sk_family != AF_LOCAL)) {
66614+ gr_log_noargs(GR_DONT_AUDIT, GR_BIND_MSG);
66615+ return -EACCES;
66616+ }
66617+#endif
66618+ return 0;
66619+}
66620+
66621+int
66622+gr_handle_sock_client(const struct sockaddr *sck)
66623+{
66624+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
66625+ if (grsec_enable_socket_client && in_group_p(grsec_socket_client_gid) &&
66626+ sck && (sck->sa_family != AF_UNIX) &&
66627+ (sck->sa_family != AF_LOCAL)) {
66628+ gr_log_noargs(GR_DONT_AUDIT, GR_CONNECT_MSG);
66629+ return -EACCES;
66630+ }
66631+#endif
66632+ return 0;
66633+}
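
gr_conn_table above is a chained hash over the TCP 4-tuple, used to hand the peer address of an accepted connection (signal->curr_ip) from the task that created it to the logging code. The table size 32749 is prime, so the simple additive hash taken modulo the size spreads reasonably well. A userspace sketch of the same hash and bucket walk, minus the kernel locking (casts added to keep the shifts well-defined; illustrative only):

#include <stdint.h>
#include <stdio.h>

#define TABLE_SIZE 32749	/* prime, as in gr_conn_table_size */

struct entry {
	struct entry *next;
	uint32_t saddr, daddr;
	uint16_t sport, dport;
};

static struct entry *table[TABLE_SIZE];

static unsigned int conn_hash(uint32_t saddr, uint32_t daddr,
			      uint16_t sport, uint16_t dport)
{
	/* same mixing as the patch: ports shifted into higher bits */
	return (saddr + daddr + ((uint32_t)sport << 8) +
		((uint32_t)dport << 16)) % TABLE_SIZE;
}

static void insert(struct entry *e)
{
	unsigned int i = conn_hash(e->saddr, e->daddr, e->sport, e->dport);

	e->next = table[i];	/* push onto the bucket's chain */
	table[i] = e;
}

static struct entry *lookup(uint32_t saddr, uint32_t daddr,
			    uint16_t sport, uint16_t dport)
{
	struct entry *e = table[conn_hash(saddr, daddr, sport, dport)];

	while (e && !(e->saddr == saddr && e->daddr == daddr &&
		      e->sport == sport && e->dport == dport))
		e = e->next;	/* walk the chain on hash collisions */
	return e;
}

int main(void)
{
	struct entry e = { NULL, 0x0a000001, 0x0a000002, 80, 54321 };

	insert(&e);
	printf("lookup: %s\n",
	       lookup(0x0a000001, 0x0a000002, 80, 54321) ? "hit" : "miss");
	return 0;
}
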
66634diff --git a/grsecurity/grsec_sysctl.c b/grsecurity/grsec_sysctl.c
66635new file mode 100644
66636index 0000000..f55ef0f
66637--- /dev/null
66638+++ b/grsecurity/grsec_sysctl.c
66639@@ -0,0 +1,469 @@
66640+#include <linux/kernel.h>
66641+#include <linux/sched.h>
66642+#include <linux/sysctl.h>
66643+#include <linux/grsecurity.h>
66644+#include <linux/grinternal.h>
66645+
66646+int
66647+gr_handle_sysctl_mod(const char *dirname, const char *name, const int op)
66648+{
66649+#ifdef CONFIG_GRKERNSEC_SYSCTL
66650+ if (dirname == NULL || name == NULL)
66651+ return 0;
66652+ if (!strcmp(dirname, "grsecurity") && grsec_lock && (op & MAY_WRITE)) {
66653+ gr_log_str(GR_DONT_AUDIT, GR_SYSCTL_MSG, name);
66654+ return -EACCES;
66655+ }
66656+#endif
66657+ return 0;
66658+}
66659+
66660+#ifdef CONFIG_GRKERNSEC_ROFS
66661+static int __maybe_unused one = 1;
66662+#endif
66663+
66664+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
66665+struct ctl_table grsecurity_table[] = {
66666+#ifdef CONFIG_GRKERNSEC_SYSCTL
66667+#ifdef CONFIG_GRKERNSEC_SYSCTL_DISTRO
66668+#ifdef CONFIG_GRKERNSEC_IO
66669+ {
66670+ .procname = "disable_priv_io",
66671+ .data = &grsec_disable_privio,
66672+ .maxlen = sizeof(int),
66673+ .mode = 0600,
66674+ .proc_handler = &proc_dointvec,
66675+ },
66676+#endif
66677+#endif
66678+#ifdef CONFIG_GRKERNSEC_LINK
66679+ {
66680+ .procname = "linking_restrictions",
66681+ .data = &grsec_enable_link,
66682+ .maxlen = sizeof(int),
66683+ .mode = 0600,
66684+ .proc_handler = &proc_dointvec,
66685+ },
66686+#endif
66687+#ifdef CONFIG_GRKERNSEC_SYMLINKOWN
66688+ {
66689+ .procname = "enforce_symlinksifowner",
66690+ .data = &grsec_enable_symlinkown,
66691+ .maxlen = sizeof(int),
66692+ .mode = 0600,
66693+ .proc_handler = &proc_dointvec,
66694+ },
66695+ {
66696+ .procname = "symlinkown_gid",
66697+ .data = &grsec_symlinkown_gid,
66698+ .maxlen = sizeof(int),
66699+ .mode = 0600,
66700+ .proc_handler = &proc_dointvec,
66701+ },
66702+#endif
66703+#ifdef CONFIG_GRKERNSEC_BRUTE
66704+ {
66705+ .procname = "deter_bruteforce",
66706+ .data = &grsec_enable_brute,
66707+ .maxlen = sizeof(int),
66708+ .mode = 0600,
66709+ .proc_handler = &proc_dointvec,
66710+ },
66711+#endif
66712+#ifdef CONFIG_GRKERNSEC_FIFO
66713+ {
66714+ .procname = "fifo_restrictions",
66715+ .data = &grsec_enable_fifo,
66716+ .maxlen = sizeof(int),
66717+ .mode = 0600,
66718+ .proc_handler = &proc_dointvec,
66719+ },
66720+#endif
66721+#ifdef CONFIG_GRKERNSEC_PTRACE_READEXEC
66722+ {
66723+ .procname = "ptrace_readexec",
66724+ .data = &grsec_enable_ptrace_readexec,
66725+ .maxlen = sizeof(int),
66726+ .mode = 0600,
66727+ .proc_handler = &proc_dointvec,
66728+ },
66729+#endif
66730+#ifdef CONFIG_GRKERNSEC_SETXID
66731+ {
66732+ .procname = "consistent_setxid",
66733+ .data = &grsec_enable_setxid,
66734+ .maxlen = sizeof(int),
66735+ .mode = 0600,
66736+ .proc_handler = &proc_dointvec,
66737+ },
66738+#endif
66739+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
66740+ {
66741+ .procname = "ip_blackhole",
66742+ .data = &grsec_enable_blackhole,
66743+ .maxlen = sizeof(int),
66744+ .mode = 0600,
66745+ .proc_handler = &proc_dointvec,
66746+ },
66747+ {
66748+ .procname = "lastack_retries",
66749+ .data = &grsec_lastack_retries,
66750+ .maxlen = sizeof(int),
66751+ .mode = 0600,
66752+ .proc_handler = &proc_dointvec,
66753+ },
66754+#endif
66755+#ifdef CONFIG_GRKERNSEC_EXECLOG
66756+ {
66757+ .procname = "exec_logging",
66758+ .data = &grsec_enable_execlog,
66759+ .maxlen = sizeof(int),
66760+ .mode = 0600,
66761+ .proc_handler = &proc_dointvec,
66762+ },
66763+#endif
66764+#ifdef CONFIG_GRKERNSEC_RWXMAP_LOG
66765+ {
66766+ .procname = "rwxmap_logging",
66767+ .data = &grsec_enable_log_rwxmaps,
66768+ .maxlen = sizeof(int),
66769+ .mode = 0600,
66770+ .proc_handler = &proc_dointvec,
66771+ },
66772+#endif
66773+#ifdef CONFIG_GRKERNSEC_SIGNAL
66774+ {
66775+ .procname = "signal_logging",
66776+ .data = &grsec_enable_signal,
66777+ .maxlen = sizeof(int),
66778+ .mode = 0600,
66779+ .proc_handler = &proc_dointvec,
66780+ },
66781+#endif
66782+#ifdef CONFIG_GRKERNSEC_FORKFAIL
66783+ {
66784+ .procname = "forkfail_logging",
66785+ .data = &grsec_enable_forkfail,
66786+ .maxlen = sizeof(int),
66787+ .mode = 0600,
66788+ .proc_handler = &proc_dointvec,
66789+ },
66790+#endif
66791+#ifdef CONFIG_GRKERNSEC_TIME
66792+ {
66793+ .procname = "timechange_logging",
66794+ .data = &grsec_enable_time,
66795+ .maxlen = sizeof(int),
66796+ .mode = 0600,
66797+ .proc_handler = &proc_dointvec,
66798+ },
66799+#endif
66800+#ifdef CONFIG_GRKERNSEC_CHROOT_SHMAT
66801+ {
66802+ .procname = "chroot_deny_shmat",
66803+ .data = &grsec_enable_chroot_shmat,
66804+ .maxlen = sizeof(int),
66805+ .mode = 0600,
66806+ .proc_handler = &proc_dointvec,
66807+ },
66808+#endif
66809+#ifdef CONFIG_GRKERNSEC_CHROOT_UNIX
66810+ {
66811+ .procname = "chroot_deny_unix",
66812+ .data = &grsec_enable_chroot_unix,
66813+ .maxlen = sizeof(int),
66814+ .mode = 0600,
66815+ .proc_handler = &proc_dointvec,
66816+ },
66817+#endif
66818+#ifdef CONFIG_GRKERNSEC_CHROOT_MOUNT
66819+ {
66820+ .procname = "chroot_deny_mount",
66821+ .data = &grsec_enable_chroot_mount,
66822+ .maxlen = sizeof(int),
66823+ .mode = 0600,
66824+ .proc_handler = &proc_dointvec,
66825+ },
66826+#endif
66827+#ifdef CONFIG_GRKERNSEC_CHROOT_FCHDIR
66828+ {
66829+ .procname = "chroot_deny_fchdir",
66830+ .data = &grsec_enable_chroot_fchdir,
66831+ .maxlen = sizeof(int),
66832+ .mode = 0600,
66833+ .proc_handler = &proc_dointvec,
66834+ },
66835+#endif
66836+#ifdef CONFIG_GRKERNSEC_CHROOT_DOUBLE
66837+ {
66838+ .procname = "chroot_deny_chroot",
66839+ .data = &grsec_enable_chroot_double,
66840+ .maxlen = sizeof(int),
66841+ .mode = 0600,
66842+ .proc_handler = &proc_dointvec,
66843+ },
66844+#endif
66845+#ifdef CONFIG_GRKERNSEC_CHROOT_PIVOT
66846+ {
66847+ .procname = "chroot_deny_pivot",
66848+ .data = &grsec_enable_chroot_pivot,
66849+ .maxlen = sizeof(int),
66850+ .mode = 0600,
66851+ .proc_handler = &proc_dointvec,
66852+ },
66853+#endif
66854+#ifdef CONFIG_GRKERNSEC_CHROOT_CHDIR
66855+ {
66856+ .procname = "chroot_enforce_chdir",
66857+ .data = &grsec_enable_chroot_chdir,
66858+ .maxlen = sizeof(int),
66859+ .mode = 0600,
66860+ .proc_handler = &proc_dointvec,
66861+ },
66862+#endif
66863+#ifdef CONFIG_GRKERNSEC_CHROOT_CHMOD
66864+ {
66865+ .procname = "chroot_deny_chmod",
66866+ .data = &grsec_enable_chroot_chmod,
66867+ .maxlen = sizeof(int),
66868+ .mode = 0600,
66869+ .proc_handler = &proc_dointvec,
66870+ },
66871+#endif
66872+#ifdef CONFIG_GRKERNSEC_CHROOT_MKNOD
66873+ {
66874+ .procname = "chroot_deny_mknod",
66875+ .data = &grsec_enable_chroot_mknod,
66876+ .maxlen = sizeof(int),
66877+ .mode = 0600,
66878+ .proc_handler = &proc_dointvec,
66879+ },
66880+#endif
66881+#ifdef CONFIG_GRKERNSEC_CHROOT_NICE
66882+ {
66883+ .procname = "chroot_restrict_nice",
66884+ .data = &grsec_enable_chroot_nice,
66885+ .maxlen = sizeof(int),
66886+ .mode = 0600,
66887+ .proc_handler = &proc_dointvec,
66888+ },
66889+#endif
66890+#ifdef CONFIG_GRKERNSEC_CHROOT_EXECLOG
66891+ {
66892+ .procname = "chroot_execlog",
66893+ .data = &grsec_enable_chroot_execlog,
66894+ .maxlen = sizeof(int),
66895+ .mode = 0600,
66896+ .proc_handler = &proc_dointvec,
66897+ },
66898+#endif
66899+#ifdef CONFIG_GRKERNSEC_CHROOT_CAPS
66900+ {
66901+ .procname = "chroot_caps",
66902+ .data = &grsec_enable_chroot_caps,
66903+ .maxlen = sizeof(int),
66904+ .mode = 0600,
66905+ .proc_handler = &proc_dointvec,
66906+ },
66907+#endif
66908+#ifdef CONFIG_GRKERNSEC_CHROOT_SYSCTL
66909+ {
66910+ .procname = "chroot_deny_sysctl",
66911+ .data = &grsec_enable_chroot_sysctl,
66912+ .maxlen = sizeof(int),
66913+ .mode = 0600,
66914+ .proc_handler = &proc_dointvec,
66915+ },
66916+#endif
66917+#ifdef CONFIG_GRKERNSEC_TPE
66918+ {
66919+ .procname = "tpe",
66920+ .data = &grsec_enable_tpe,
66921+ .maxlen = sizeof(int),
66922+ .mode = 0600,
66923+ .proc_handler = &proc_dointvec,
66924+ },
66925+ {
66926+ .procname = "tpe_gid",
66927+ .data = &grsec_tpe_gid,
66928+ .maxlen = sizeof(int),
66929+ .mode = 0600,
66930+ .proc_handler = &proc_dointvec,
66931+ },
66932+#endif
66933+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
66934+ {
66935+ .procname = "tpe_invert",
66936+ .data = &grsec_enable_tpe_invert,
66937+ .maxlen = sizeof(int),
66938+ .mode = 0600,
66939+ .proc_handler = &proc_dointvec,
66940+ },
66941+#endif
66942+#ifdef CONFIG_GRKERNSEC_TPE_ALL
66943+ {
66944+ .procname = "tpe_restrict_all",
66945+ .data = &grsec_enable_tpe_all,
66946+ .maxlen = sizeof(int),
66947+ .mode = 0600,
66948+ .proc_handler = &proc_dointvec,
66949+ },
66950+#endif
66951+#ifdef CONFIG_GRKERNSEC_SOCKET_ALL
66952+ {
66953+ .procname = "socket_all",
66954+ .data = &grsec_enable_socket_all,
66955+ .maxlen = sizeof(int),
66956+ .mode = 0600,
66957+ .proc_handler = &proc_dointvec,
66958+ },
66959+ {
66960+ .procname = "socket_all_gid",
66961+ .data = &grsec_socket_all_gid,
66962+ .maxlen = sizeof(int),
66963+ .mode = 0600,
66964+ .proc_handler = &proc_dointvec,
66965+ },
66966+#endif
66967+#ifdef CONFIG_GRKERNSEC_SOCKET_CLIENT
66968+ {
66969+ .procname = "socket_client",
66970+ .data = &grsec_enable_socket_client,
66971+ .maxlen = sizeof(int),
66972+ .mode = 0600,
66973+ .proc_handler = &proc_dointvec,
66974+ },
66975+ {
66976+ .procname = "socket_client_gid",
66977+ .data = &grsec_socket_client_gid,
66978+ .maxlen = sizeof(int),
66979+ .mode = 0600,
66980+ .proc_handler = &proc_dointvec,
66981+ },
66982+#endif
66983+#ifdef CONFIG_GRKERNSEC_SOCKET_SERVER
66984+ {
66985+ .procname = "socket_server",
66986+ .data = &grsec_enable_socket_server,
66987+ .maxlen = sizeof(int),
66988+ .mode = 0600,
66989+ .proc_handler = &proc_dointvec,
66990+ },
66991+ {
66992+ .procname = "socket_server_gid",
66993+ .data = &grsec_socket_server_gid,
66994+ .maxlen = sizeof(int),
66995+ .mode = 0600,
66996+ .proc_handler = &proc_dointvec,
66997+ },
66998+#endif
66999+#ifdef CONFIG_GRKERNSEC_AUDIT_GROUP
67000+ {
67001+ .procname = "audit_group",
67002+ .data = &grsec_enable_group,
67003+ .maxlen = sizeof(int),
67004+ .mode = 0600,
67005+ .proc_handler = &proc_dointvec,
67006+ },
67007+ {
67008+ .procname = "audit_gid",
67009+ .data = &grsec_audit_gid,
67010+ .maxlen = sizeof(int),
67011+ .mode = 0600,
67012+ .proc_handler = &proc_dointvec,
67013+ },
67014+#endif
67015+#ifdef CONFIG_GRKERNSEC_AUDIT_CHDIR
67016+ {
67017+ .procname = "audit_chdir",
67018+ .data = &grsec_enable_chdir,
67019+ .maxlen = sizeof(int),
67020+ .mode = 0600,
67021+ .proc_handler = &proc_dointvec,
67022+ },
67023+#endif
67024+#ifdef CONFIG_GRKERNSEC_AUDIT_MOUNT
67025+ {
67026+ .procname = "audit_mount",
67027+ .data = &grsec_enable_mount,
67028+ .maxlen = sizeof(int),
67029+ .mode = 0600,
67030+ .proc_handler = &proc_dointvec,
67031+ },
67032+#endif
67033+#ifdef CONFIG_GRKERNSEC_AUDIT_TEXTREL
67034+ {
67035+ .procname = "audit_textrel",
67036+ .data = &grsec_enable_audit_textrel,
67037+ .maxlen = sizeof(int),
67038+ .mode = 0600,
67039+ .proc_handler = &proc_dointvec,
67040+ },
67041+#endif
67042+#ifdef CONFIG_GRKERNSEC_DMESG
67043+ {
67044+ .procname = "dmesg",
67045+ .data = &grsec_enable_dmesg,
67046+ .maxlen = sizeof(int),
67047+ .mode = 0600,
67048+ .proc_handler = &proc_dointvec,
67049+ },
67050+#endif
67051+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
67052+ {
67053+ .procname = "chroot_findtask",
67054+ .data = &grsec_enable_chroot_findtask,
67055+ .maxlen = sizeof(int),
67056+ .mode = 0600,
67057+ .proc_handler = &proc_dointvec,
67058+ },
67059+#endif
67060+#ifdef CONFIG_GRKERNSEC_RESLOG
67061+ {
67062+ .procname = "resource_logging",
67063+ .data = &grsec_resource_logging,
67064+ .maxlen = sizeof(int),
67065+ .mode = 0600,
67066+ .proc_handler = &proc_dointvec,
67067+ },
67068+#endif
67069+#ifdef CONFIG_GRKERNSEC_AUDIT_PTRACE
67070+ {
67071+ .procname = "audit_ptrace",
67072+ .data = &grsec_enable_audit_ptrace,
67073+ .maxlen = sizeof(int),
67074+ .mode = 0600,
67075+ .proc_handler = &proc_dointvec,
67076+ },
67077+#endif
67078+#ifdef CONFIG_GRKERNSEC_HARDEN_PTRACE
67079+ {
67080+ .procname = "harden_ptrace",
67081+ .data = &grsec_enable_harden_ptrace,
67082+ .maxlen = sizeof(int),
67083+ .mode = 0600,
67084+ .proc_handler = &proc_dointvec,
67085+ },
67086+#endif
67087+ {
67088+ .procname = "grsec_lock",
67089+ .data = &grsec_lock,
67090+ .maxlen = sizeof(int),
67091+ .mode = 0600,
67092+ .proc_handler = &proc_dointvec,
67093+ },
67094+#endif
67095+#ifdef CONFIG_GRKERNSEC_ROFS
67096+ {
67097+ .procname = "romount_protect",
67098+ .data = &grsec_enable_rofs,
67099+ .maxlen = sizeof(int),
67100+ .mode = 0600,
67101+ .proc_handler = &proc_dointvec_minmax,
67102+ .extra1 = &one,
67103+ .extra2 = &one,
67104+ },
67105+#endif
67106+ { }
67107+};
67108+#endif
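
Every entry in grsecurity_table above is a root-only (mode 0600) integer handled by proc_dointvec; once grsec_lock is written to 1, gr_handle_sysctl_mod() at the top of this file rejects any further writes under the grsecurity directory. A sketch of driving these knobs from userspace, assuming the table is registered under /proc/sys/kernel/grsecurity (as done elsewhere in this patch; the knob names below are examples taken from the table):

#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

static int set_knob(const char *name, int val)
{
	char path[256], buf[16];
	int fd, n, ok;

	/* directory assumed from the sysctl registration in this patch */
	snprintf(path, sizeof(path), "/proc/sys/kernel/grsecurity/%s", name);
	fd = open(path, O_WRONLY);
	if (fd < 0)
		return -1;
	n = snprintf(buf, sizeof(buf), "%d\n", val);
	ok = (int)write(fd, buf, n) == n ? 0 : -1;
	close(fd);
	return ok;
}

int main(void)
{
	set_knob("ptrace_readexec", 1);	/* boolean toggles take 0 or 1 */
	set_knob("tpe_gid", 1005);	/* GID-valued entries are plain ints */
	set_knob("grsec_lock", 1);	/* after this, writes here fail with EACCES */
	return 0;
}
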
67109diff --git a/grsecurity/grsec_time.c b/grsecurity/grsec_time.c
67110new file mode 100644
67111index 0000000..0dc13c3
67112--- /dev/null
67113+++ b/grsecurity/grsec_time.c
67114@@ -0,0 +1,16 @@
67115+#include <linux/kernel.h>
67116+#include <linux/sched.h>
67117+#include <linux/grinternal.h>
67118+#include <linux/module.h>
67119+
67120+void
67121+gr_log_timechange(void)
67122+{
67123+#ifdef CONFIG_GRKERNSEC_TIME
67124+ if (grsec_enable_time)
67125+ gr_log_noargs(GR_DONT_AUDIT_GOOD, GR_TIME_MSG);
67126+#endif
67127+ return;
67128+}
67129+
67130+EXPORT_SYMBOL(gr_log_timechange);
67131diff --git a/grsecurity/grsec_tpe.c b/grsecurity/grsec_tpe.c
67132new file mode 100644
67133index 0000000..ee57dcf
67134--- /dev/null
67135+++ b/grsecurity/grsec_tpe.c
67136@@ -0,0 +1,73 @@
67137+#include <linux/kernel.h>
67138+#include <linux/sched.h>
67139+#include <linux/file.h>
67140+#include <linux/fs.h>
67141+#include <linux/grinternal.h>
67142+
67143+extern int gr_acl_tpe_check(void);
67144+
67145+int
67146+gr_tpe_allow(const struct file *file)
67147+{
67148+#ifdef CONFIG_GRKERNSEC
67149+ struct inode *inode = file->f_path.dentry->d_parent->d_inode;
67150+ const struct cred *cred = current_cred();
67151+ char *msg = NULL;
67152+ char *msg2 = NULL;
67153+
67154+ // never restrict root
67155+ if (gr_is_global_root(cred->uid))
67156+ return 1;
67157+
67158+ if (grsec_enable_tpe) {
67159+#ifdef CONFIG_GRKERNSEC_TPE_INVERT
67160+ if (grsec_enable_tpe_invert && !in_group_p(grsec_tpe_gid))
67161+ msg = "not being in trusted group";
67162+ else if (!grsec_enable_tpe_invert && in_group_p(grsec_tpe_gid))
67163+ msg = "being in untrusted group";
67164+#else
67165+ if (in_group_p(grsec_tpe_gid))
67166+ msg = "being in untrusted group";
67167+#endif
67168+ }
67169+ if (!msg && gr_acl_tpe_check())
67170+ msg = "being in untrusted role";
67171+
67172+ // not in any affected group/role
67173+ if (!msg)
67174+ goto next_check;
67175+
67176+ if (gr_is_global_nonroot(inode->i_uid))
67177+ msg2 = "file in non-root-owned directory";
67178+ else if (inode->i_mode & S_IWOTH)
67179+ msg2 = "file in world-writable directory";
67180+ else if (inode->i_mode & S_IWGRP)
67181+ msg2 = "file in group-writable directory";
67182+
67183+ if (msg && msg2) {
67184+ char fullmsg[70] = {0};
67185+ snprintf(fullmsg, sizeof(fullmsg)-1, "%s and %s", msg, msg2);
67186+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, fullmsg, file->f_path.dentry, file->f_path.mnt);
67187+ return 0;
67188+ }
67189+ msg = NULL;
67190+next_check:
67191+#ifdef CONFIG_GRKERNSEC_TPE_ALL
67192+ if (!grsec_enable_tpe || !grsec_enable_tpe_all)
67193+ return 1;
67194+
67195+ if (gr_is_global_nonroot(inode->i_uid) && !uid_eq(inode->i_uid, cred->uid))
67196+ msg = "directory not owned by user";
67197+ else if (inode->i_mode & S_IWOTH)
67198+ msg = "file in world-writable directory";
67199+ else if (inode->i_mode & S_IWGRP)
67200+ msg = "file in group-writable directory";
67201+
67202+ if (msg) {
67203+ gr_log_str_fs(GR_DONT_AUDIT, GR_EXEC_TPE_MSG, msg, file->f_path.dentry, file->f_path.mnt);
67204+ return 0;
67205+ }
67206+#endif
67207+#endif
67208+ return 1;
67209+}
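
gr_tpe_allow() above gates in two stages: first whether the caller is in an untrusted group or role at all, then whether the binary's parent directory is trustworthy (root-owned and neither group- nor world-writable); with TPE_ALL the directory test additionally applies to the user's own files. A hypothetical userspace approximation of the directory test (the kernel checks the inode directly, not a path):

#include <libgen.h>
#include <stdio.h>
#include <string.h>
#include <sys/stat.h>

static const char *tpe_reason(const char *path)
{
	char buf[4096];
	struct stat dir;

	strncpy(buf, path, sizeof(buf) - 1);
	buf[sizeof(buf) - 1] = '\0';	/* dirname() may modify its arg */
	if (stat(dirname(buf), &dir))
		return NULL;

	if (dir.st_uid != 0)
		return "file in non-root-owned directory";
	if (dir.st_mode & S_IWOTH)
		return "file in world-writable directory";
	if (dir.st_mode & S_IWGRP)
		return "file in group-writable directory";
	return NULL;			/* directory passes the trust test */
}

int main(int argc, char **argv)
{
	const char *why;

	if (argc < 2)
		return 1;
	why = tpe_reason(argv[1]);
	printf("%s: %s\n", argv[1], why ? why : "trusted location");
	return 0;
}
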
67210diff --git a/grsecurity/grsum.c b/grsecurity/grsum.c
67211new file mode 100644
67212index 0000000..9f7b1ac
67213--- /dev/null
67214+++ b/grsecurity/grsum.c
67215@@ -0,0 +1,61 @@
67216+#include <linux/err.h>
67217+#include <linux/kernel.h>
67218+#include <linux/sched.h>
67219+#include <linux/mm.h>
67220+#include <linux/scatterlist.h>
67221+#include <linux/crypto.h>
67222+#include <linux/gracl.h>
67223+
67224+
67225+#if !defined(CONFIG_CRYPTO) || defined(CONFIG_CRYPTO_MODULE) || !defined(CONFIG_CRYPTO_SHA256) || defined(CONFIG_CRYPTO_SHA256_MODULE)
67226+#error "crypto and sha256 must be built into the kernel"
67227+#endif
67228+
67229+int
67230+chkpw(struct gr_arg *entry, unsigned char *salt, unsigned char *sum)
67231+{
67232+ char *p;
67233+ struct crypto_hash *tfm;
67234+ struct hash_desc desc;
67235+ struct scatterlist sg;
67236+ unsigned char temp_sum[GR_SHA_LEN];
67237+ volatile int retval = 0;
67238+ volatile int dummy = 0;
67239+ unsigned int i;
67240+
67241+ sg_init_table(&sg, 1);
67242+
67243+ tfm = crypto_alloc_hash("sha256", 0, CRYPTO_ALG_ASYNC);
67244+ if (IS_ERR(tfm)) {
67245+ /* should never happen, since sha256 should be built in */
67246+ return 1;
67247+ }
67248+
67249+ desc.tfm = tfm;
67250+ desc.flags = 0;
67251+
67252+ crypto_hash_init(&desc);
67253+
67254+ p = salt;
67255+ sg_set_buf(&sg, p, GR_SALT_LEN);
67256+ crypto_hash_update(&desc, &sg, sg.length);
67257+
67258+ p = entry->pw;
67259+ sg_set_buf(&sg, p, strlen(p));
67260+
67261+ crypto_hash_update(&desc, &sg, sg.length);
67262+
67263+ crypto_hash_final(&desc, temp_sum);
67264+
67265+ memset(entry->pw, 0, GR_PW_LEN);
67266+
67267+ for (i = 0; i < GR_SHA_LEN; i++)
67268+ if (sum[i] != temp_sum[i])
67269+ retval = 1;
67270+ else
67271+ dummy = 1; // waste a cycle
67272+
67273+ crypto_free_hash(tfm);
67274+
67275+ return retval;
67276+}
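
The byte loop at the end of chkpw() above deliberately avoids an early exit (the volatile dummy assignment keeps both branches doing comparable work), so checking a wrong password takes about the same time as checking a right one and leaks no timing signal about how many leading bytes matched. A common alternative formulation of the same constant-time comparison, shown as a standalone sketch rather than the patch's exact method:

#include <stddef.h>
#include <stdio.h>

static int ct_memcmp(const unsigned char *a, const unsigned char *b,
		     size_t len)
{
	unsigned char diff = 0;
	size_t i;

	for (i = 0; i < len; i++)
		diff |= a[i] ^ b[i];	/* accumulate, never branch early */

	return diff != 0;		/* 0 on match, 1 on any mismatch */
}

int main(void)
{
	unsigned char x[4] = { 1, 2, 3, 4 }, y[4] = { 1, 2, 3, 5 };

	printf("same: %d, different: %d\n",
	       ct_memcmp(x, x, sizeof(x)), ct_memcmp(x, y, sizeof(y)));
	return 0;
}
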
67277diff --git a/include/asm-generic/4level-fixup.h b/include/asm-generic/4level-fixup.h
67278index 77ff547..181834f 100644
67279--- a/include/asm-generic/4level-fixup.h
67280+++ b/include/asm-generic/4level-fixup.h
67281@@ -13,8 +13,10 @@
67282 #define pmd_alloc(mm, pud, address) \
67283 ((unlikely(pgd_none(*(pud))) && __pmd_alloc(mm, pud, address))? \
67284 NULL: pmd_offset(pud, address))
67285+#define pmd_alloc_kernel(mm, pud, address) pmd_alloc((mm), (pud), (address))
67286
67287 #define pud_alloc(mm, pgd, address) (pgd)
67288+#define pud_alloc_kernel(mm, pgd, address) pud_alloc((mm), (pgd), (address))
67289 #define pud_offset(pgd, start) (pgd)
67290 #define pud_none(pud) 0
67291 #define pud_bad(pud) 0
67292diff --git a/include/asm-generic/atomic-long.h b/include/asm-generic/atomic-long.h
67293index b7babf0..04ad282 100644
67294--- a/include/asm-generic/atomic-long.h
67295+++ b/include/asm-generic/atomic-long.h
67296@@ -22,6 +22,12 @@
67297
67298 typedef atomic64_t atomic_long_t;
67299
67300+#ifdef CONFIG_PAX_REFCOUNT
67301+typedef atomic64_unchecked_t atomic_long_unchecked_t;
67302+#else
67303+typedef atomic64_t atomic_long_unchecked_t;
67304+#endif
67305+
67306 #define ATOMIC_LONG_INIT(i) ATOMIC64_INIT(i)
67307
67308 static inline long atomic_long_read(atomic_long_t *l)
67309@@ -31,6 +37,15 @@ static inline long atomic_long_read(atomic_long_t *l)
67310 return (long)atomic64_read(v);
67311 }
67312
67313+#ifdef CONFIG_PAX_REFCOUNT
67314+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
67315+{
67316+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
67317+
67318+ return (long)atomic64_read_unchecked(v);
67319+}
67320+#endif
67321+
67322 static inline void atomic_long_set(atomic_long_t *l, long i)
67323 {
67324 atomic64_t *v = (atomic64_t *)l;
67325@@ -38,6 +53,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
67326 atomic64_set(v, i);
67327 }
67328
67329+#ifdef CONFIG_PAX_REFCOUNT
67330+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
67331+{
67332+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
67333+
67334+ atomic64_set_unchecked(v, i);
67335+}
67336+#endif
67337+
67338 static inline void atomic_long_inc(atomic_long_t *l)
67339 {
67340 atomic64_t *v = (atomic64_t *)l;
67341@@ -45,6 +69,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
67342 atomic64_inc(v);
67343 }
67344
67345+#ifdef CONFIG_PAX_REFCOUNT
67346+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
67347+{
67348+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
67349+
67350+ atomic64_inc_unchecked(v);
67351+}
67352+#endif
67353+
67354 static inline void atomic_long_dec(atomic_long_t *l)
67355 {
67356 atomic64_t *v = (atomic64_t *)l;
67357@@ -52,6 +85,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
67358 atomic64_dec(v);
67359 }
67360
67361+#ifdef CONFIG_PAX_REFCOUNT
67362+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
67363+{
67364+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
67365+
67366+ atomic64_dec_unchecked(v);
67367+}
67368+#endif
67369+
67370 static inline void atomic_long_add(long i, atomic_long_t *l)
67371 {
67372 atomic64_t *v = (atomic64_t *)l;
67373@@ -59,6 +101,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
67374 atomic64_add(i, v);
67375 }
67376
67377+#ifdef CONFIG_PAX_REFCOUNT
67378+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
67379+{
67380+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
67381+
67382+ atomic64_add_unchecked(i, v);
67383+}
67384+#endif
67385+
67386 static inline void atomic_long_sub(long i, atomic_long_t *l)
67387 {
67388 atomic64_t *v = (atomic64_t *)l;
67389@@ -66,6 +117,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
67390 atomic64_sub(i, v);
67391 }
67392
67393+#ifdef CONFIG_PAX_REFCOUNT
67394+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
67395+{
67396+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
67397+
67398+ atomic64_sub_unchecked(i, v);
67399+}
67400+#endif
67401+
67402 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
67403 {
67404 atomic64_t *v = (atomic64_t *)l;
67405@@ -101,6 +161,15 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
67406 return (long)atomic64_add_return(i, v);
67407 }
67408
67409+#ifdef CONFIG_PAX_REFCOUNT
67410+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
67411+{
67412+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
67413+
67414+ return (long)atomic64_add_return_unchecked(i, v);
67415+}
67416+#endif
67417+
67418 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
67419 {
67420 atomic64_t *v = (atomic64_t *)l;
67421@@ -115,6 +184,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
67422 return (long)atomic64_inc_return(v);
67423 }
67424
67425+#ifdef CONFIG_PAX_REFCOUNT
67426+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
67427+{
67428+ atomic64_unchecked_t *v = (atomic64_unchecked_t *)l;
67429+
67430+ return (long)atomic64_inc_return_unchecked(v);
67431+}
67432+#endif
67433+
67434 static inline long atomic_long_dec_return(atomic_long_t *l)
67435 {
67436 atomic64_t *v = (atomic64_t *)l;
67437@@ -140,6 +218,12 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
67438
67439 typedef atomic_t atomic_long_t;
67440
67441+#ifdef CONFIG_PAX_REFCOUNT
67442+typedef atomic_unchecked_t atomic_long_unchecked_t;
67443+#else
67444+typedef atomic_t atomic_long_unchecked_t;
67445+#endif
67446+
67447 #define ATOMIC_LONG_INIT(i) ATOMIC_INIT(i)
67448 static inline long atomic_long_read(atomic_long_t *l)
67449 {
67450@@ -148,6 +232,15 @@ static inline long atomic_long_read(atomic_long_t *l)
67451 return (long)atomic_read(v);
67452 }
67453
67454+#ifdef CONFIG_PAX_REFCOUNT
67455+static inline long atomic_long_read_unchecked(atomic_long_unchecked_t *l)
67456+{
67457+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
67458+
67459+ return (long)atomic_read_unchecked(v);
67460+}
67461+#endif
67462+
67463 static inline void atomic_long_set(atomic_long_t *l, long i)
67464 {
67465 atomic_t *v = (atomic_t *)l;
67466@@ -155,6 +248,15 @@ static inline void atomic_long_set(atomic_long_t *l, long i)
67467 atomic_set(v, i);
67468 }
67469
67470+#ifdef CONFIG_PAX_REFCOUNT
67471+static inline void atomic_long_set_unchecked(atomic_long_unchecked_t *l, long i)
67472+{
67473+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
67474+
67475+ atomic_set_unchecked(v, i);
67476+}
67477+#endif
67478+
67479 static inline void atomic_long_inc(atomic_long_t *l)
67480 {
67481 atomic_t *v = (atomic_t *)l;
67482@@ -162,6 +264,15 @@ static inline void atomic_long_inc(atomic_long_t *l)
67483 atomic_inc(v);
67484 }
67485
67486+#ifdef CONFIG_PAX_REFCOUNT
67487+static inline void atomic_long_inc_unchecked(atomic_long_unchecked_t *l)
67488+{
67489+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
67490+
67491+ atomic_inc_unchecked(v);
67492+}
67493+#endif
67494+
67495 static inline void atomic_long_dec(atomic_long_t *l)
67496 {
67497 atomic_t *v = (atomic_t *)l;
67498@@ -169,6 +280,15 @@ static inline void atomic_long_dec(atomic_long_t *l)
67499 atomic_dec(v);
67500 }
67501
67502+#ifdef CONFIG_PAX_REFCOUNT
67503+static inline void atomic_long_dec_unchecked(atomic_long_unchecked_t *l)
67504+{
67505+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
67506+
67507+ atomic_dec_unchecked(v);
67508+}
67509+#endif
67510+
67511 static inline void atomic_long_add(long i, atomic_long_t *l)
67512 {
67513 atomic_t *v = (atomic_t *)l;
67514@@ -176,6 +296,15 @@ static inline void atomic_long_add(long i, atomic_long_t *l)
67515 atomic_add(i, v);
67516 }
67517
67518+#ifdef CONFIG_PAX_REFCOUNT
67519+static inline void atomic_long_add_unchecked(long i, atomic_long_unchecked_t *l)
67520+{
67521+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
67522+
67523+ atomic_add_unchecked(i, v);
67524+}
67525+#endif
67526+
67527 static inline void atomic_long_sub(long i, atomic_long_t *l)
67528 {
67529 atomic_t *v = (atomic_t *)l;
67530@@ -183,6 +312,15 @@ static inline void atomic_long_sub(long i, atomic_long_t *l)
67531 atomic_sub(i, v);
67532 }
67533
67534+#ifdef CONFIG_PAX_REFCOUNT
67535+static inline void atomic_long_sub_unchecked(long i, atomic_long_unchecked_t *l)
67536+{
67537+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
67538+
67539+ atomic_sub_unchecked(i, v);
67540+}
67541+#endif
67542+
67543 static inline int atomic_long_sub_and_test(long i, atomic_long_t *l)
67544 {
67545 atomic_t *v = (atomic_t *)l;
67546@@ -218,6 +356,16 @@ static inline long atomic_long_add_return(long i, atomic_long_t *l)
67547 return (long)atomic_add_return(i, v);
67548 }
67549
67550+#ifdef CONFIG_PAX_REFCOUNT
67551+static inline long atomic_long_add_return_unchecked(long i, atomic_long_unchecked_t *l)
67552+{
67553+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
67554+
67555+ return (long)atomic_add_return_unchecked(i, v);
67556+}
67557+
67558+#endif
67559+
67560 static inline long atomic_long_sub_return(long i, atomic_long_t *l)
67561 {
67562 atomic_t *v = (atomic_t *)l;
67563@@ -232,6 +380,15 @@ static inline long atomic_long_inc_return(atomic_long_t *l)
67564 return (long)atomic_inc_return(v);
67565 }
67566
67567+#ifdef CONFIG_PAX_REFCOUNT
67568+static inline long atomic_long_inc_return_unchecked(atomic_long_unchecked_t *l)
67569+{
67570+ atomic_unchecked_t *v = (atomic_unchecked_t *)l;
67571+
67572+ return (long)atomic_inc_return_unchecked(v);
67573+}
67574+#endif
67575+
67576 static inline long atomic_long_dec_return(atomic_long_t *l)
67577 {
67578 atomic_t *v = (atomic_t *)l;
67579@@ -255,4 +412,57 @@ static inline long atomic_long_add_unless(atomic_long_t *l, long a, long u)
67580
67581 #endif /* BITS_PER_LONG == 64 */
67582
67583+#ifdef CONFIG_PAX_REFCOUNT
67584+static inline void pax_refcount_needs_these_functions(void)
67585+{
67586+ atomic_read_unchecked((atomic_unchecked_t *)NULL);
67587+ atomic_set_unchecked((atomic_unchecked_t *)NULL, 0);
67588+ atomic_add_unchecked(0, (atomic_unchecked_t *)NULL);
67589+ atomic_sub_unchecked(0, (atomic_unchecked_t *)NULL);
67590+ atomic_inc_unchecked((atomic_unchecked_t *)NULL);
67591+ (void)atomic_inc_and_test_unchecked((atomic_unchecked_t *)NULL);
67592+ atomic_inc_return_unchecked((atomic_unchecked_t *)NULL);
67593+ atomic_add_return_unchecked(0, (atomic_unchecked_t *)NULL);
67594+ atomic_dec_unchecked((atomic_unchecked_t *)NULL);
67595+ atomic_cmpxchg_unchecked((atomic_unchecked_t *)NULL, 0, 0);
67596+ (void)atomic_xchg_unchecked((atomic_unchecked_t *)NULL, 0);
67597+#ifdef CONFIG_X86
67598+ atomic_clear_mask_unchecked(0, NULL);
67599+ atomic_set_mask_unchecked(0, NULL);
67600+#endif
67601+
67602+ atomic_long_read_unchecked((atomic_long_unchecked_t *)NULL);
67603+ atomic_long_set_unchecked((atomic_long_unchecked_t *)NULL, 0);
67604+ atomic_long_add_unchecked(0, (atomic_long_unchecked_t *)NULL);
67605+ atomic_long_sub_unchecked(0, (atomic_long_unchecked_t *)NULL);
67606+ atomic_long_inc_unchecked((atomic_long_unchecked_t *)NULL);
67607+ atomic_long_add_return_unchecked(0, (atomic_long_unchecked_t *)NULL);
67608+ atomic_long_inc_return_unchecked((atomic_long_unchecked_t *)NULL);
67609+ atomic_long_dec_unchecked((atomic_long_unchecked_t *)NULL);
67610+}
67611+#else
67612+#define atomic_read_unchecked(v) atomic_read(v)
67613+#define atomic_set_unchecked(v, i) atomic_set((v), (i))
67614+#define atomic_add_unchecked(i, v) atomic_add((i), (v))
67615+#define atomic_sub_unchecked(i, v) atomic_sub((i), (v))
67616+#define atomic_inc_unchecked(v) atomic_inc(v)
67617+#define atomic_inc_and_test_unchecked(v) atomic_inc_and_test(v)
67618+#define atomic_inc_return_unchecked(v) atomic_inc_return(v)
67619+#define atomic_add_return_unchecked(i, v) atomic_add_return((i), (v))
67620+#define atomic_dec_unchecked(v) atomic_dec(v)
67621+#define atomic_cmpxchg_unchecked(v, o, n) atomic_cmpxchg((v), (o), (n))
67622+#define atomic_xchg_unchecked(v, i) atomic_xchg((v), (i))
67623+#define atomic_clear_mask_unchecked(mask, v) atomic_clear_mask((mask), (v))
67624+#define atomic_set_mask_unchecked(mask, v) atomic_set_mask((mask), (v))
67625+
67626+#define atomic_long_read_unchecked(v) atomic_long_read(v)
67627+#define atomic_long_set_unchecked(v, i) atomic_long_set((v), (i))
67628+#define atomic_long_add_unchecked(i, v) atomic_long_add((i), (v))
67629+#define atomic_long_sub_unchecked(i, v) atomic_long_sub((i), (v))
67630+#define atomic_long_inc_unchecked(v) atomic_long_inc(v)
67631+#define atomic_long_add_return_unchecked(i, v) atomic_long_add_return((i), (v))
67632+#define atomic_long_inc_return_unchecked(v) atomic_long_inc_return(v)
67633+#define atomic_long_dec_unchecked(v) atomic_long_dec(v)
67634+#endif
67635+
67636 #endif /* _ASM_GENERIC_ATOMIC_LONG_H */
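
Taken together, these hunks give every checked atomic_long operation an _unchecked twin: under CONFIG_PAX_REFCOUNT the _unchecked type opts a counter out of overflow detection, and without it the names simply alias the ordinary operations via the #define block above. A sketch of the intended use, assuming a counter that may legitimately wrap (stats_packets and the helpers are hypothetical names):

#include <linux/atomic.h>

/* A wrapping statistics counter: overflow here is harmless, so it
 * opts out of PAX_REFCOUNT's overflow handler. */
static atomic_long_unchecked_t stats_packets;

static void stats_account_packet(void)
{
	atomic_long_inc_unchecked(&stats_packets);	/* may wrap silently */
}

static long stats_packets_seen(void)
{
	return atomic_long_read_unchecked(&stats_packets);
}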
67637diff --git a/include/asm-generic/atomic.h b/include/asm-generic/atomic.h
67638index 33bd2de..f31bff97 100644
67639--- a/include/asm-generic/atomic.h
67640+++ b/include/asm-generic/atomic.h
67641@@ -153,7 +153,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
67642 * Atomically clears the bits set in @mask from @v
67643 */
67644 #ifndef atomic_clear_mask
67645-static inline void atomic_clear_mask(unsigned long mask, atomic_t *v)
67646+static inline void atomic_clear_mask(unsigned int mask, atomic_t *v)
67647 {
67648 unsigned long flags;
67649
67650diff --git a/include/asm-generic/atomic64.h b/include/asm-generic/atomic64.h
67651index b18ce4f..2ee2843 100644
67652--- a/include/asm-generic/atomic64.h
67653+++ b/include/asm-generic/atomic64.h
67654@@ -16,6 +16,8 @@ typedef struct {
67655 long long counter;
67656 } atomic64_t;
67657
67658+typedef atomic64_t atomic64_unchecked_t;
67659+
67660 #define ATOMIC64_INIT(i) { (i) }
67661
67662 extern long long atomic64_read(const atomic64_t *v);
67663@@ -39,4 +41,14 @@ extern int atomic64_add_unless(atomic64_t *v, long long a, long long u);
67664 #define atomic64_dec_and_test(v) (atomic64_dec_return((v)) == 0)
67665 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1LL, 0LL)
67666
67667+#define atomic64_read_unchecked(v) atomic64_read(v)
67668+#define atomic64_set_unchecked(v, i) atomic64_set((v), (i))
67669+#define atomic64_add_unchecked(a, v) atomic64_add((a), (v))
67670+#define atomic64_add_return_unchecked(a, v) atomic64_add_return((a), (v))
67671+#define atomic64_sub_unchecked(a, v) atomic64_sub((a), (v))
67672+#define atomic64_inc_unchecked(v) atomic64_inc(v)
67673+#define atomic64_inc_return_unchecked(v) atomic64_inc_return(v)
67674+#define atomic64_dec_unchecked(v) atomic64_dec(v)
67675+#define atomic64_cmpxchg_unchecked(v, o, n) atomic64_cmpxchg((v), (o), (n))
67676+
67677 #endif /* _ASM_GENERIC_ATOMIC64_H */
67678diff --git a/include/asm-generic/cache.h b/include/asm-generic/cache.h
67679index 1bfcfe5..e04c5c9 100644
67680--- a/include/asm-generic/cache.h
67681+++ b/include/asm-generic/cache.h
67682@@ -6,7 +6,7 @@
67683 * cache lines need to provide their own cache.h.
67684 */
67685
67686-#define L1_CACHE_SHIFT 5
67687-#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
67688+#define L1_CACHE_SHIFT 5UL
67689+#define L1_CACHE_BYTES (1UL << L1_CACHE_SHIFT)
67690
67691 #endif /* __ASM_GENERIC_CACHE_H */
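
The UL suffix promotes L1_CACHE_SHIFT and L1_CACHE_BYTES from int to unsigned long, so alignment arithmetic built from them stays in the full word width on 64-bit targets and type-checking plugins such as size_overflow see consistent operand types. A small userspace check of the type change, assuming an LP64 target for the printed sizes:

#include <stdio.h>

int main(void)
{
	/* old style: int expression; new style: unsigned long */
	printf("%zu\n", sizeof(1 << 5));	/* 4 on LP64 */
	printf("%zu\n", sizeof(1UL << 5UL));	/* 8 on LP64 */
	return 0;
}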
67692diff --git a/include/asm-generic/emergency-restart.h b/include/asm-generic/emergency-restart.h
67693index 0d68a1e..b74a761 100644
67694--- a/include/asm-generic/emergency-restart.h
67695+++ b/include/asm-generic/emergency-restart.h
67696@@ -1,7 +1,7 @@
67697 #ifndef _ASM_GENERIC_EMERGENCY_RESTART_H
67698 #define _ASM_GENERIC_EMERGENCY_RESTART_H
67699
67700-static inline void machine_emergency_restart(void)
67701+static inline __noreturn void machine_emergency_restart(void)
67702 {
67703 machine_restart(NULL);
67704 }
67705diff --git a/include/asm-generic/kmap_types.h b/include/asm-generic/kmap_types.h
67706index 90f99c7..00ce236 100644
67707--- a/include/asm-generic/kmap_types.h
67708+++ b/include/asm-generic/kmap_types.h
67709@@ -2,9 +2,9 @@
67710 #define _ASM_GENERIC_KMAP_TYPES_H
67711
67712 #ifdef __WITH_KM_FENCE
67713-# define KM_TYPE_NR 41
67714+# define KM_TYPE_NR 42
67715 #else
67716-# define KM_TYPE_NR 20
67717+# define KM_TYPE_NR 21
67718 #endif
67719
67720 #endif
67721diff --git a/include/asm-generic/local.h b/include/asm-generic/local.h
67722index 9ceb03b..62b0b8f 100644
67723--- a/include/asm-generic/local.h
67724+++ b/include/asm-generic/local.h
67725@@ -23,24 +23,37 @@ typedef struct
67726 atomic_long_t a;
67727 } local_t;
67728
67729+typedef struct {
67730+ atomic_long_unchecked_t a;
67731+} local_unchecked_t;
67732+
67733 #define LOCAL_INIT(i) { ATOMIC_LONG_INIT(i) }
67734
67735 #define local_read(l) atomic_long_read(&(l)->a)
67736+#define local_read_unchecked(l) atomic_long_read_unchecked(&(l)->a)
67737 #define local_set(l,i) atomic_long_set((&(l)->a),(i))
67738+#define local_set_unchecked(l,i) atomic_long_set_unchecked((&(l)->a),(i))
67739 #define local_inc(l) atomic_long_inc(&(l)->a)
67740+#define local_inc_unchecked(l) atomic_long_inc_unchecked(&(l)->a)
67741 #define local_dec(l) atomic_long_dec(&(l)->a)
67742+#define local_dec_unchecked(l) atomic_long_dec_unchecked(&(l)->a)
67743 #define local_add(i,l) atomic_long_add((i),(&(l)->a))
67744+#define local_add_unchecked(i,l) atomic_long_add_unchecked((i),(&(l)->a))
67745 #define local_sub(i,l) atomic_long_sub((i),(&(l)->a))
67746+#define local_sub_unchecked(i,l) atomic_long_sub_unchecked((i),(&(l)->a))
67747
67748 #define local_sub_and_test(i, l) atomic_long_sub_and_test((i), (&(l)->a))
67749 #define local_dec_and_test(l) atomic_long_dec_and_test(&(l)->a)
67750 #define local_inc_and_test(l) atomic_long_inc_and_test(&(l)->a)
67751 #define local_add_negative(i, l) atomic_long_add_negative((i), (&(l)->a))
67752 #define local_add_return(i, l) atomic_long_add_return((i), (&(l)->a))
67753+#define local_add_return_unchecked(i, l) atomic_long_add_return_unchecked((i), (&(l)->a))
67754 #define local_sub_return(i, l) atomic_long_sub_return((i), (&(l)->a))
67755 #define local_inc_return(l) atomic_long_inc_return(&(l)->a)
67756+#define local_dec_return(l) atomic_long_dec_return(&(l)->a)
67757
67758 #define local_cmpxchg(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
67759+#define local_cmpxchg_unchecked(l, o, n) atomic_long_cmpxchg((&(l)->a), (o), (n))
67760 #define local_xchg(l, n) atomic_long_xchg((&(l)->a), (n))
67761 #define local_add_unless(l, _a, u) atomic_long_add_unless((&(l)->a), (_a), (u))
67762 #define local_inc_not_zero(l) atomic_long_inc_not_zero(&(l)->a)
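
local_t wraps an atomic_long_t that only its owning CPU updates, so architectures can implement it without bus locking; the hunk mirrors the new _unchecked atomic_long API onto it and also supplies the previously missing local_dec_return. A sketch of the usual per-cpu pattern, assuming a 3.10-era kernel (my_events is a hypothetical variable):

#include <linux/percpu.h>
#include <asm/local.h>

static DEFINE_PER_CPU(local_t, my_events);

static void note_event(void)
{
	/* each CPU touches only its own copy, so no lock prefix is needed */
	local_inc(this_cpu_ptr(&my_events));
}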
67763diff --git a/include/asm-generic/pgtable-nopmd.h b/include/asm-generic/pgtable-nopmd.h
67764index 725612b..9cc513a 100644
67765--- a/include/asm-generic/pgtable-nopmd.h
67766+++ b/include/asm-generic/pgtable-nopmd.h
67767@@ -1,14 +1,19 @@
67768 #ifndef _PGTABLE_NOPMD_H
67769 #define _PGTABLE_NOPMD_H
67770
67771-#ifndef __ASSEMBLY__
67772-
67773 #include <asm-generic/pgtable-nopud.h>
67774
67775-struct mm_struct;
67776-
67777 #define __PAGETABLE_PMD_FOLDED
67778
67779+#define PMD_SHIFT PUD_SHIFT
67780+#define PTRS_PER_PMD 1
67781+#define PMD_SIZE (_AC(1,UL) << PMD_SHIFT)
67782+#define PMD_MASK (~(PMD_SIZE-1))
67783+
67784+#ifndef __ASSEMBLY__
67785+
67786+struct mm_struct;
67787+
67788 /*
67789 * Having the pmd type consist of a pud gets the size right, and allows
67790 * us to conceptually access the pud entry that this pmd is folded into
67791@@ -16,11 +21,6 @@ struct mm_struct;
67792 */
67793 typedef struct { pud_t pud; } pmd_t;
67794
67795-#define PMD_SHIFT PUD_SHIFT
67796-#define PTRS_PER_PMD 1
67797-#define PMD_SIZE (1UL << PMD_SHIFT)
67798-#define PMD_MASK (~(PMD_SIZE-1))
67799-
67800 /*
67801 * The "pud_xxx()" functions here are trivial for a folded two-level
67802 * setup: the pmd is never bad, and a pmd always exists (as it's folded
67803diff --git a/include/asm-generic/pgtable-nopud.h b/include/asm-generic/pgtable-nopud.h
67804index 810431d..0ec4804f 100644
67805--- a/include/asm-generic/pgtable-nopud.h
67806+++ b/include/asm-generic/pgtable-nopud.h
67807@@ -1,10 +1,15 @@
67808 #ifndef _PGTABLE_NOPUD_H
67809 #define _PGTABLE_NOPUD_H
67810
67811-#ifndef __ASSEMBLY__
67812-
67813 #define __PAGETABLE_PUD_FOLDED
67814
67815+#define PUD_SHIFT PGDIR_SHIFT
67816+#define PTRS_PER_PUD 1
67817+#define PUD_SIZE (_AC(1,UL) << PUD_SHIFT)
67818+#define PUD_MASK (~(PUD_SIZE-1))
67819+
67820+#ifndef __ASSEMBLY__
67821+
67822 /*
67823 * Having the pud type consist of a pgd gets the size right, and allows
67824 * us to conceptually access the pgd entry that this pud is folded into
67825@@ -12,11 +17,6 @@
67826 */
67827 typedef struct { pgd_t pgd; } pud_t;
67828
67829-#define PUD_SHIFT PGDIR_SHIFT
67830-#define PTRS_PER_PUD 1
67831-#define PUD_SIZE (1UL << PUD_SHIFT)
67832-#define PUD_MASK (~(PUD_SIZE-1))
67833-
67834 /*
67835 * The "pgd_xxx()" functions here are trivial for a folded two-level
67836 * setup: the pud is never bad, and a pud always exists (as it's folded
67837@@ -29,6 +29,7 @@ static inline void pgd_clear(pgd_t *pgd) { }
67838 #define pud_ERROR(pud) (pgd_ERROR((pud).pgd))
67839
67840 #define pgd_populate(mm, pgd, pud) do { } while (0)
67841+#define pgd_populate_kernel(mm, pgd, pud) do { } while (0)
67842 /*
67843 * (puds are folded into pgds so this doesn't get actually called,
67844 * but the define is needed for a generic inline function.)
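
Both the nopmd and nopud hunks move the folded-level size macros in front of the #ifndef __ASSEMBLY__ guard so assembly sources can use them, which is why the constants switch from 1UL, which gas would reject, to _AC(1,UL). For reference, _AC() in include/uapi/linux/const.h behaves essentially like the sketch below: the suffix is pasted on in C and dropped in assembly.

#ifdef __ASSEMBLY__
#define _AC(X,Y)	X		/* assembly: plain 1 */
#else
#define __AC(X,Y)	(X##Y)
#define _AC(X,Y)	__AC(X,Y)	/* C: token-pasted 1UL */
#endif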
67845diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
67846index a59ff51..2594a70 100644
67847--- a/include/asm-generic/pgtable.h
67848+++ b/include/asm-generic/pgtable.h
67849@@ -688,6 +688,14 @@ static inline pmd_t pmd_mknuma(pmd_t pmd)
67850 }
67851 #endif /* CONFIG_NUMA_BALANCING */
67852
67853+#ifndef __HAVE_ARCH_PAX_OPEN_KERNEL
67854+static inline unsigned long pax_open_kernel(void) { return 0; }
67855+#endif
67856+
67857+#ifndef __HAVE_ARCH_PAX_CLOSE_KERNEL
67858+static inline unsigned long pax_close_kernel(void) { return 0; }
67859+#endif
67860+
67861 #endif /* CONFIG_MMU */
67862
67863 #endif /* !__ASSEMBLY__ */
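
pax_open_kernel()/pax_close_kernel() give generic code a way to bracket writes to otherwise read-only kernel data; these stubs make them no-ops on architectures that do not override them. On x86 the underlying idea is to clear CR0.WP so supervisor stores ignore page-level write protection for the duration of the write. The sketch below illustrates only that idea; it is not PaX's actual implementation, and the function names are hypothetical.

#include <linux/preempt.h>
#include <asm/special_insns.h>
#include <asm/processor-flags.h>

static unsigned long my_open_kernel(void)
{
	preempt_disable();
	/* drop CR0.WP: ring-0 writes now bypass R/O page protection */
	write_cr0(read_cr0() & ~X86_CR0_WP);
	return 0;
}

static unsigned long my_close_kernel(void)
{
	/* restore CR0.WP before allowing rescheduling */
	write_cr0(read_cr0() | X86_CR0_WP);
	preempt_enable();
	return 0;
}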
67864diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
67865index eb58d2d..df131bf 100644
67866--- a/include/asm-generic/vmlinux.lds.h
67867+++ b/include/asm-generic/vmlinux.lds.h
67868@@ -239,6 +239,7 @@
67869 .rodata : AT(ADDR(.rodata) - LOAD_OFFSET) { \
67870 VMLINUX_SYMBOL(__start_rodata) = .; \
67871 *(.rodata) *(.rodata.*) \
67872+ *(.data..read_only) \
67873 *(__vermagic) /* Kernel version magic */ \
67874 . = ALIGN(8); \
67875 VMLINUX_SYMBOL(__start___tracepoints_ptrs) = .; \
67876@@ -749,17 +750,18 @@
67877 * section in the linker script will go there too. @phdr should have
67878 * a leading colon.
67879 *
67880- * Note that this macros defines __per_cpu_load as an absolute symbol.
67881+ * Note that this macro defines per_cpu_load as an absolute symbol.
67882 * If there is no need to put the percpu section at a predetermined
67883 * address, use PERCPU_SECTION.
67884 */
67885 #define PERCPU_VADDR(cacheline, vaddr, phdr) \
67886- VMLINUX_SYMBOL(__per_cpu_load) = .; \
67887- .data..percpu vaddr : AT(VMLINUX_SYMBOL(__per_cpu_load) \
67888+ per_cpu_load = .; \
67889+ .data..percpu vaddr : AT(VMLINUX_SYMBOL(per_cpu_load) \
67890 - LOAD_OFFSET) { \
67891+ VMLINUX_SYMBOL(__per_cpu_load) = . + per_cpu_load; \
67892 PERCPU_INPUT(cacheline) \
67893 } phdr \
67894- . = VMLINUX_SYMBOL(__per_cpu_load) + SIZEOF(.data..percpu);
67895+ . = VMLINUX_SYMBOL(per_cpu_load) + SIZEOF(.data..percpu);
67896
67897 /**
67898 * PERCPU_SECTION - define output section for percpu area, simple version
67899diff --git a/include/crypto/algapi.h b/include/crypto/algapi.h
67900index 418d270..bfd2794 100644
67901--- a/include/crypto/algapi.h
67902+++ b/include/crypto/algapi.h
67903@@ -34,7 +34,7 @@ struct crypto_type {
67904 unsigned int maskclear;
67905 unsigned int maskset;
67906 unsigned int tfmsize;
67907-};
67908+} __do_const;
67909
67910 struct crypto_instance {
67911 struct crypto_alg alg;
67912diff --git a/include/drm/drmP.h b/include/drm/drmP.h
67913index 63d17ee..716de2b 100644
67914--- a/include/drm/drmP.h
67915+++ b/include/drm/drmP.h
67916@@ -72,6 +72,7 @@
67917 #include <linux/workqueue.h>
67918 #include <linux/poll.h>
67919 #include <asm/pgalloc.h>
67920+#include <asm/local.h>
67921 #include <drm/drm.h>
67922 #include <drm/drm_sarea.h>
67923
67924@@ -296,10 +297,12 @@ do { \
67925 * \param cmd command.
67926 * \param arg argument.
67927 */
67928-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
67929+typedef int (* const drm_ioctl_t)(struct drm_device *dev, void *data,
67930+ struct drm_file *file_priv);
67931+typedef int (* drm_ioctl_no_const_t)(struct drm_device *dev, void *data,
67932 struct drm_file *file_priv);
67933
67934-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
67935+typedef int (* const drm_ioctl_compat_t)(struct file *filp, unsigned int cmd,
67936 unsigned long arg);
67937
67938 #define DRM_IOCTL_NR(n) _IOC_NR(n)
67939@@ -314,10 +317,10 @@ typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
67940 struct drm_ioctl_desc {
67941 unsigned int cmd;
67942 int flags;
67943- drm_ioctl_t *func;
67944+ drm_ioctl_t func;
67945 unsigned int cmd_drv;
67946 const char *name;
67947-};
67948+} __do_const;
67949
67950 /**
67951 * Creates a driver or general drm_ioctl_desc array entry for the given
67952@@ -1015,7 +1018,7 @@ struct drm_info_list {
67953 int (*show)(struct seq_file*, void*); /** show callback */
67954 u32 driver_features; /**< Required driver features for this entry */
67955 void *data;
67956-};
67957+} __do_const;
67958
67959 /**
67960 * debugfs node structure. This structure represents a debugfs file.
67961@@ -1088,7 +1091,7 @@ struct drm_device {
67962
67963 /** \name Usage Counters */
67964 /*@{ */
67965- int open_count; /**< Outstanding files open */
67966+ local_t open_count; /**< Outstanding files open */
67967 atomic_t ioctl_count; /**< Outstanding IOCTLs pending */
67968 atomic_t vma_count; /**< Outstanding vma areas open */
67969 int buf_use; /**< Buffers in use -- cannot alloc */
67970@@ -1099,7 +1102,7 @@ struct drm_device {
67971 /*@{ */
67972 unsigned long counters;
67973 enum drm_stat_type types[15];
67974- atomic_t counts[15];
67975+ atomic_unchecked_t counts[15];
67976 /*@} */
67977
67978 struct list_head filelist;
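
Making drm_ioctl_t a const function pointer type (with drm_ioctl_no_const_t as the escape hatch) and tagging drm_ioctl_desc with __do_const lets fully static ioctl tables live in read-only memory. The consequence in plain C, with hypothetical names (my_ioctl, handler):

/* With the new typedef the pointer itself is const: it can be set
 * by an initializer but never reassigned. */
static int my_ioctl(struct drm_device *dev, void *data,
		    struct drm_file *file_priv)
{
	return 0;	/* hypothetical handler body */
}

static drm_ioctl_t handler = my_ioctl;	/* OK: initialization */

static void retarget(void)
{
	/* handler = my_ioctl; -- no longer compiles: assignment of a
	 * read-only variable; tables built at runtime must use
	 * drm_ioctl_no_const_t instead */
}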
67979diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
67980index f43d556..94d9343 100644
67981--- a/include/drm/drm_crtc_helper.h
67982+++ b/include/drm/drm_crtc_helper.h
67983@@ -109,7 +109,7 @@ struct drm_encoder_helper_funcs {
67984 struct drm_connector *connector);
67985 /* disable encoder when not in use - more explicit than dpms off */
67986 void (*disable)(struct drm_encoder *encoder);
67987-};
67988+} __no_const;
67989
67990 /**
67991 * drm_connector_helper_funcs - helper operations for connectors
67992diff --git a/include/drm/ttm/ttm_memory.h b/include/drm/ttm/ttm_memory.h
67993index 72dcbe8..8db58d7 100644
67994--- a/include/drm/ttm/ttm_memory.h
67995+++ b/include/drm/ttm/ttm_memory.h
67996@@ -48,7 +48,7 @@
67997
67998 struct ttm_mem_shrink {
67999 int (*do_shrink) (struct ttm_mem_shrink *);
68000-};
68001+} __no_const;
68002
68003 /**
68004 * struct ttm_mem_global - Global memory accounting structure.
68005diff --git a/include/keys/asymmetric-subtype.h b/include/keys/asymmetric-subtype.h
68006index 4b840e8..155d235 100644
68007--- a/include/keys/asymmetric-subtype.h
68008+++ b/include/keys/asymmetric-subtype.h
68009@@ -37,7 +37,7 @@ struct asymmetric_key_subtype {
68010 /* Verify the signature on a key of this subtype (optional) */
68011 int (*verify_signature)(const struct key *key,
68012 const struct public_key_signature *sig);
68013-};
68014+} __do_const;
68015
68016 /**
68017 * asymmetric_key_subtype - Get the subtype from an asymmetric key
68018diff --git a/include/linux/atmdev.h b/include/linux/atmdev.h
68019index c1da539..1dcec55 100644
68020--- a/include/linux/atmdev.h
68021+++ b/include/linux/atmdev.h
68022@@ -28,7 +28,7 @@ struct compat_atm_iobuf {
68023 #endif
68024
68025 struct k_atm_aal_stats {
68026-#define __HANDLE_ITEM(i) atomic_t i
68027+#define __HANDLE_ITEM(i) atomic_unchecked_t i
68028 __AAL_STAT_ITEMS
68029 #undef __HANDLE_ITEM
68030 };
68031@@ -200,7 +200,7 @@ struct atmdev_ops { /* only send is required */
68032 int (*change_qos)(struct atm_vcc *vcc,struct atm_qos *qos,int flags);
68033 int (*proc_read)(struct atm_dev *dev,loff_t *pos,char *page);
68034 struct module *owner;
68035-};
68036+} __do_const;
68037
68038 struct atmphy_ops {
68039 int (*start)(struct atm_dev *dev);
68040diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
68041index 70cf138..cabb82e 100644
68042--- a/include/linux/binfmts.h
68043+++ b/include/linux/binfmts.h
68044@@ -73,8 +73,9 @@ struct linux_binfmt {
68045 int (*load_binary)(struct linux_binprm *);
68046 int (*load_shlib)(struct file *);
68047 int (*core_dump)(struct coredump_params *cprm);
68048+ void (*handle_mprotect)(struct vm_area_struct *vma, unsigned long newflags);
68049 unsigned long min_coredump; /* minimal dump size */
68050-};
68051+} __do_const;
68052
68053 extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
68054
68055diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
68056index 2fdb4a4..54aad7e 100644
68057--- a/include/linux/blkdev.h
68058+++ b/include/linux/blkdev.h
68059@@ -1526,7 +1526,7 @@ struct block_device_operations {
68060 /* this callback is with swap_lock and sometimes page table lock held */
68061 void (*swap_slot_free_notify) (struct block_device *, unsigned long);
68062 struct module *owner;
68063-};
68064+} __do_const;
68065
68066 extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
68067 unsigned long);
68068diff --git a/include/linux/blktrace_api.h b/include/linux/blktrace_api.h
68069index 7c2e030..b72475d 100644
68070--- a/include/linux/blktrace_api.h
68071+++ b/include/linux/blktrace_api.h
68072@@ -23,7 +23,7 @@ struct blk_trace {
68073 struct dentry *dir;
68074 struct dentry *dropped_file;
68075 struct dentry *msg_file;
68076- atomic_t dropped;
68077+ atomic_unchecked_t dropped;
68078 };
68079
68080 extern int blk_trace_ioctl(struct block_device *, unsigned, char __user *);
68081diff --git a/include/linux/cache.h b/include/linux/cache.h
68082index 4c57065..4307975 100644
68083--- a/include/linux/cache.h
68084+++ b/include/linux/cache.h
68085@@ -16,6 +16,10 @@
68086 #define __read_mostly
68087 #endif
68088
68089+#ifndef __read_only
68090+#define __read_only __read_mostly
68091+#endif
68092+
68093 #ifndef ____cacheline_aligned
68094 #define ____cacheline_aligned __attribute__((__aligned__(SMP_CACHE_BYTES)))
68095 #endif
68096diff --git a/include/linux/capability.h b/include/linux/capability.h
68097index d9a4f7f4..19f77d6 100644
68098--- a/include/linux/capability.h
68099+++ b/include/linux/capability.h
68100@@ -213,8 +213,13 @@ extern bool ns_capable(struct user_namespace *ns, int cap);
68101 extern bool nsown_capable(int cap);
68102 extern bool inode_capable(const struct inode *inode, int cap);
68103 extern bool file_ns_capable(const struct file *file, struct user_namespace *ns, int cap);
68104+extern bool capable_nolog(int cap);
68105+extern bool ns_capable_nolog(struct user_namespace *ns, int cap);
68106+extern bool inode_capable_nolog(const struct inode *inode, int cap);
68107
68108 /* audit system wants to get cap info from files as well */
68109 extern int get_vfs_caps_from_disk(const struct dentry *dentry, struct cpu_vfs_cap_data *cpu_caps);
68110
68111+extern int is_privileged_binary(const struct dentry *dentry);
68112+
68113 #endif /* !_LINUX_CAPABILITY_H */
68114diff --git a/include/linux/cdrom.h b/include/linux/cdrom.h
68115index 8609d57..86e4d79 100644
68116--- a/include/linux/cdrom.h
68117+++ b/include/linux/cdrom.h
68118@@ -87,7 +87,6 @@ struct cdrom_device_ops {
68119
68120 /* driver specifications */
68121 const int capability; /* capability flags */
68122- int n_minors; /* number of active minor devices */
68123 /* handle uniform packets for scsi type devices (scsi,atapi) */
68124 int (*generic_packet) (struct cdrom_device_info *,
68125 struct packet_command *);
68126diff --git a/include/linux/cleancache.h b/include/linux/cleancache.h
68127index 4ce9056..86caac6 100644
68128--- a/include/linux/cleancache.h
68129+++ b/include/linux/cleancache.h
68130@@ -31,7 +31,7 @@ struct cleancache_ops {
68131 void (*invalidate_page)(int, struct cleancache_filekey, pgoff_t);
68132 void (*invalidate_inode)(int, struct cleancache_filekey);
68133 void (*invalidate_fs)(int);
68134-};
68135+} __no_const;
68136
68137 extern struct cleancache_ops *
68138 cleancache_register_ops(struct cleancache_ops *ops);
68139diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
68140index 1186098..f87e53d 100644
68141--- a/include/linux/clk-provider.h
68142+++ b/include/linux/clk-provider.h
68143@@ -132,6 +132,7 @@ struct clk_ops {
68144 unsigned long);
68145 void (*init)(struct clk_hw *hw);
68146 };
68147+typedef struct clk_ops __no_const clk_ops_no_const;
68148
68149 /**
68150 * struct clk_init_data - holds init data that's common to all clocks and is
68151diff --git a/include/linux/compat.h b/include/linux/compat.h
68152index 7f0c1dd..b5729c6 100644
68153--- a/include/linux/compat.h
68154+++ b/include/linux/compat.h
68155@@ -312,7 +312,7 @@ compat_sys_get_robust_list(int pid, compat_uptr_t __user *head_ptr,
68156 compat_size_t __user *len_ptr);
68157
68158 asmlinkage long compat_sys_ipc(u32, int, int, u32, compat_uptr_t, u32);
68159-asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg);
68160+asmlinkage long compat_sys_shmat(int shmid, compat_uptr_t shmaddr, int shmflg) __intentional_overflow(0);
68161 asmlinkage long compat_sys_semctl(int semid, int semnum, int cmd, int arg);
68162 asmlinkage long compat_sys_msgsnd(int msqid, compat_uptr_t msgp,
68163 compat_ssize_t msgsz, int msgflg);
68164@@ -419,7 +419,7 @@ extern int compat_ptrace_request(struct task_struct *child,
68165 extern long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
68166 compat_ulong_t addr, compat_ulong_t data);
68167 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
68168- compat_long_t addr, compat_long_t data);
68169+ compat_ulong_t addr, compat_ulong_t data);
68170
68171 asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, size_t);
68172 /*
68173diff --git a/include/linux/compiler-gcc4.h b/include/linux/compiler-gcc4.h
68174index 842de22..7f3a41f 100644
68175--- a/include/linux/compiler-gcc4.h
68176+++ b/include/linux/compiler-gcc4.h
68177@@ -39,9 +39,29 @@
68178 # define __compiletime_warning(message) __attribute__((warning(message)))
68179 # define __compiletime_error(message) __attribute__((error(message)))
68180 #endif /* __CHECKER__ */
68181+
68182+#define __alloc_size(...) __attribute((alloc_size(__VA_ARGS__)))
68183+#define __bos(ptr, arg) __builtin_object_size((ptr), (arg))
68184+#define __bos0(ptr) __bos((ptr), 0)
68185+#define __bos1(ptr) __bos((ptr), 1)
68186 #endif /* GCC_VERSION >= 40300 */
68187
68188 #if GCC_VERSION >= 40500
68189+
68190+#ifdef CONSTIFY_PLUGIN
68191+#define __no_const __attribute__((no_const))
68192+#define __do_const __attribute__((do_const))
68193+#endif
68194+
68195+#ifdef SIZE_OVERFLOW_PLUGIN
68196+#define __size_overflow(...) __attribute__((size_overflow(__VA_ARGS__)))
68197+#define __intentional_overflow(...) __attribute__((intentional_overflow(__VA_ARGS__)))
68198+#endif
68199+
68200+#ifdef LATENT_ENTROPY_PLUGIN
68201+#define __latent_entropy __attribute__((latent_entropy))
68202+#endif
68203+
68204 /*
68205 * Mark a position in code as unreachable. This can be used to
68206 * suppress control flow warnings after asm blocks that transfer
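
__bos()/__bos0()/__bos1() wrap gcc's __builtin_object_size, which reports the compile-time size of the object a pointer refers to, and __alloc_size tells gcc which parameter of an allocator carries that size; PAX_USERCOPY-style checks use both to bound copies into heap and stack buffers. A userspace demonstration, assuming gcc at -O2 so the builtin can fold (my_alloc is a hypothetical allocator):

#include <stdio.h>
#include <stdlib.h>

void *my_alloc(size_t n) __attribute__((alloc_size(1)));
void *my_alloc(size_t n) { return malloc(n); }

int main(void)
{
	char buf[32];
	char *p = my_alloc(16);

	printf("%zu\n", __builtin_object_size(buf, 0));	/* 32 */
	printf("%zu\n", __builtin_object_size(p, 0));	/* 16 at -O2, (size_t)-1 if unknown */
	free(p);
	return 0;
}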
68207diff --git a/include/linux/compiler.h b/include/linux/compiler.h
68208index 92669cd..1771a15 100644
68209--- a/include/linux/compiler.h
68210+++ b/include/linux/compiler.h
68211@@ -5,11 +5,14 @@
68212
68213 #ifdef __CHECKER__
68214 # define __user __attribute__((noderef, address_space(1)))
68215+# define __force_user __force __user
68216 # define __kernel __attribute__((address_space(0)))
68217+# define __force_kernel __force __kernel
68218 # define __safe __attribute__((safe))
68219 # define __force __attribute__((force))
68220 # define __nocast __attribute__((nocast))
68221 # define __iomem __attribute__((noderef, address_space(2)))
68222+# define __force_iomem __force __iomem
68223 # define __must_hold(x) __attribute__((context(x,1,1)))
68224 # define __acquires(x) __attribute__((context(x,0,1)))
68225 # define __releases(x) __attribute__((context(x,1,0)))
68226@@ -17,20 +20,37 @@
68227 # define __release(x) __context__(x,-1)
68228 # define __cond_lock(x,c) ((c) ? ({ __acquire(x); 1; }) : 0)
68229 # define __percpu __attribute__((noderef, address_space(3)))
68230+# define __force_percpu __force __percpu
68231 #ifdef CONFIG_SPARSE_RCU_POINTER
68232 # define __rcu __attribute__((noderef, address_space(4)))
68233+# define __force_rcu __force __rcu
68234 #else
68235 # define __rcu
68236+# define __force_rcu
68237 #endif
68238 extern void __chk_user_ptr(const volatile void __user *);
68239 extern void __chk_io_ptr(const volatile void __iomem *);
68240 #else
68241-# define __user
68242-# define __kernel
68243+# ifdef CHECKER_PLUGIN
68244+//# define __user
68245+//# define __force_user
68246+//# define __kernel
68247+//# define __force_kernel
68248+# else
68249+# ifdef STRUCTLEAK_PLUGIN
68250+# define __user __attribute__((user))
68251+# else
68252+# define __user
68253+# endif
68254+# define __force_user
68255+# define __kernel
68256+# define __force_kernel
68257+# endif
68258 # define __safe
68259 # define __force
68260 # define __nocast
68261 # define __iomem
68262+# define __force_iomem
68263 # define __chk_user_ptr(x) (void)0
68264 # define __chk_io_ptr(x) (void)0
68265 # define __builtin_warning(x, y...) (1)
68266@@ -41,7 +61,9 @@ extern void __chk_io_ptr(const volatile void __iomem *);
68267 # define __release(x) (void)0
68268 # define __cond_lock(x,c) (c)
68269 # define __percpu
68270+# define __force_percpu
68271 # define __rcu
68272+# define __force_rcu
68273 #endif
68274
68275 /* Indirect macros required for expanded argument pasting, eg. __LINE__. */
68276@@ -275,6 +297,26 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
68277 # define __attribute_const__ /* unimplemented */
68278 #endif
68279
68280+#ifndef __no_const
68281+# define __no_const
68282+#endif
68283+
68284+#ifndef __do_const
68285+# define __do_const
68286+#endif
68287+
68288+#ifndef __size_overflow
68289+# define __size_overflow(...)
68290+#endif
68291+
68292+#ifndef __intentional_overflow
68293+# define __intentional_overflow(...)
68294+#endif
68295+
68296+#ifndef __latent_entropy
68297+# define __latent_entropy
68298+#endif
68299+
68300 /*
68301 * Tell gcc if a function is cold. The compiler will assume any path
68302 * directly leading to the call is unlikely.
68303@@ -284,6 +326,22 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
68304 #define __cold
68305 #endif
68306
68307+#ifndef __alloc_size
68308+#define __alloc_size(...)
68309+#endif
68310+
68311+#ifndef __bos
68312+#define __bos(ptr, arg)
68313+#endif
68314+
68315+#ifndef __bos0
68316+#define __bos0(ptr)
68317+#endif
68318+
68319+#ifndef __bos1
68320+#define __bos1(ptr)
68321+#endif
68322+
68323 /* Simple shorthand for a section definition */
68324 #ifndef __section
68325 # define __section(S) __attribute__ ((__section__(#S)))
68326@@ -349,7 +407,8 @@ void ftrace_likely_update(struct ftrace_branch_data *f, int val, int expect);
68327 * use is to mediate communication between process-level code and irq/NMI
68328 * handlers, all running on the same CPU.
68329 */
68330-#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x))
68331+#define ACCESS_ONCE(x) (*(volatile const typeof(x) *)&(x))
68332+#define ACCESS_ONCE_RW(x) (*(volatile typeof(x) *)&(x))
68333
68334 /* Ignore/forbid kprobes attach on very low level functions marked by this attribute: */
68335 #ifdef CONFIG_KPROBES
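
Routing ACCESS_ONCE through a const-qualified volatile lvalue splits the old macro into a read form and an explicit write form: loads keep working unchanged, while stores must now spell ACCESS_ONCE_RW, which makes every write to supposedly read-only data grep-able. A sketch with a hypothetical shared_flag:

static int shared_flag;

static void producer(void)
{
	ACCESS_ONCE_RW(shared_flag) = 1;	/* volatile store, non-const lvalue */
}

static int consumer(void)
{
	return ACCESS_ONCE(shared_flag);	/* volatile load via a const lvalue;
						 * a store here would not compile */
}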
68336diff --git a/include/linux/completion.h b/include/linux/completion.h
68337index 33f0280..35c6568 100644
68338--- a/include/linux/completion.h
68339+++ b/include/linux/completion.h
68340@@ -79,15 +79,15 @@ static inline void init_completion(struct completion *x)
68341 extern void wait_for_completion(struct completion *);
68342 extern void wait_for_completion_io(struct completion *);
68343 extern int wait_for_completion_interruptible(struct completion *x);
68344-extern int wait_for_completion_killable(struct completion *x);
68345+extern int wait_for_completion_killable(struct completion *x) __intentional_overflow(-1);
68346 extern unsigned long wait_for_completion_timeout(struct completion *x,
68347 unsigned long timeout);
68348 extern unsigned long wait_for_completion_io_timeout(struct completion *x,
68349 unsigned long timeout);
68350 extern long wait_for_completion_interruptible_timeout(
68351- struct completion *x, unsigned long timeout);
68352+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
68353 extern long wait_for_completion_killable_timeout(
68354- struct completion *x, unsigned long timeout);
68355+ struct completion *x, unsigned long timeout) __intentional_overflow(-1);
68356 extern bool try_wait_for_completion(struct completion *x);
68357 extern bool completion_done(struct completion *x);
68358
68359diff --git a/include/linux/configfs.h b/include/linux/configfs.h
68360index 34025df..d94bbbc 100644
68361--- a/include/linux/configfs.h
68362+++ b/include/linux/configfs.h
68363@@ -125,7 +125,7 @@ struct configfs_attribute {
68364 const char *ca_name;
68365 struct module *ca_owner;
68366 umode_t ca_mode;
68367-};
68368+} __do_const;
68369
68370 /*
68371 * Users often need to create attribute structures for their configurable
68372diff --git a/include/linux/cpu.h b/include/linux/cpu.h
68373index 9f3c7e8..a18c7b6 100644
68374--- a/include/linux/cpu.h
68375+++ b/include/linux/cpu.h
68376@@ -115,7 +115,7 @@ enum {
68377 /* Need to know about CPUs going up/down? */
68378 #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
68379 #define cpu_notifier(fn, pri) { \
68380- static struct notifier_block fn##_nb __cpuinitdata = \
68381+ static struct notifier_block fn##_nb = \
68382 { .notifier_call = fn, .priority = pri }; \
68383 register_cpu_notifier(&fn##_nb); \
68384 }
68385diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
68386index 037d36a..ca5fe6e 100644
68387--- a/include/linux/cpufreq.h
68388+++ b/include/linux/cpufreq.h
68389@@ -262,7 +262,7 @@ struct cpufreq_driver {
68390 int (*suspend) (struct cpufreq_policy *policy);
68391 int (*resume) (struct cpufreq_policy *policy);
68392 struct freq_attr **attr;
68393-};
68394+} __do_const;
68395
68396 /* flags */
68397
68398@@ -321,6 +321,7 @@ struct global_attr {
68399 ssize_t (*store)(struct kobject *a, struct attribute *b,
68400 const char *c, size_t count);
68401 };
68402+typedef struct global_attr __no_const global_attr_no_const;
68403
68404 #define define_one_global_ro(_name) \
68405 static struct global_attr _name = \
68406diff --git a/include/linux/cpuidle.h b/include/linux/cpuidle.h
68407index 8f04062..900239a 100644
68408--- a/include/linux/cpuidle.h
68409+++ b/include/linux/cpuidle.h
68410@@ -52,7 +52,8 @@ struct cpuidle_state {
68411 int index);
68412
68413 int (*enter_dead) (struct cpuidle_device *dev, int index);
68414-};
68415+} __do_const;
68416+typedef struct cpuidle_state __no_const cpuidle_state_no_const;
68417
68418 /* Idle State Flags */
68419 #define CPUIDLE_FLAG_TIME_VALID (0x01) /* is residency time measurable? */
68420@@ -191,7 +192,7 @@ struct cpuidle_governor {
68421 void (*reflect) (struct cpuidle_device *dev, int index);
68422
68423 struct module *owner;
68424-};
68425+} __do_const;
68426
68427 #ifdef CONFIG_CPU_IDLE
68428
68429diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
68430index d08e4d2..95fad61 100644
68431--- a/include/linux/cpumask.h
68432+++ b/include/linux/cpumask.h
68433@@ -118,17 +118,17 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
68434 }
68435
68436 /* Valid inputs for n are -1 and 0. */
68437-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
68438+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
68439 {
68440 return n+1;
68441 }
68442
68443-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
68444+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
68445 {
68446 return n+1;
68447 }
68448
68449-static inline unsigned int cpumask_next_and(int n,
68450+static inline unsigned int __intentional_overflow(-1) cpumask_next_and(int n,
68451 const struct cpumask *srcp,
68452 const struct cpumask *andp)
68453 {
68454@@ -167,7 +167,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
68455 *
68456 * Returns >= nr_cpu_ids if no further cpus set.
68457 */
68458-static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
68459+static inline unsigned int __intentional_overflow(-1) cpumask_next(int n, const struct cpumask *srcp)
68460 {
68461 /* -1 is a legal arg here. */
68462 if (n != -1)
68463@@ -182,7 +182,7 @@ static inline unsigned int cpumask_next(int n, const struct cpumask *srcp)
68464 *
68465 * Returns >= nr_cpu_ids if no further cpus unset.
68466 */
68467-static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
68468+static inline unsigned int __intentional_overflow(-1) cpumask_next_zero(int n, const struct cpumask *srcp)
68469 {
68470 /* -1 is a legal arg here. */
68471 if (n != -1)
68472@@ -190,7 +190,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
68473 return find_next_zero_bit(cpumask_bits(srcp), nr_cpumask_bits, n+1);
68474 }
68475
68476-int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
68477+int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *) __intentional_overflow(-1);
68478 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
68479
68480 /**
68481diff --git a/include/linux/cred.h b/include/linux/cred.h
68482index 04421e8..6bce4ef 100644
68483--- a/include/linux/cred.h
68484+++ b/include/linux/cred.h
68485@@ -194,6 +194,9 @@ static inline void validate_creds_for_do_exit(struct task_struct *tsk)
68486 static inline void validate_process_creds(void)
68487 {
68488 }
68489+static inline void validate_task_creds(struct task_struct *task)
68490+{
68491+}
68492 #endif
68493
68494 /**
68495diff --git a/include/linux/crypto.h b/include/linux/crypto.h
68496index b92eadf..b4ecdc1 100644
68497--- a/include/linux/crypto.h
68498+++ b/include/linux/crypto.h
68499@@ -373,7 +373,7 @@ struct cipher_tfm {
68500 const u8 *key, unsigned int keylen);
68501 void (*cit_encrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
68502 void (*cit_decrypt_one)(struct crypto_tfm *tfm, u8 *dst, const u8 *src);
68503-};
68504+} __no_const;
68505
68506 struct hash_tfm {
68507 int (*init)(struct hash_desc *desc);
68508@@ -394,13 +394,13 @@ struct compress_tfm {
68509 int (*cot_decompress)(struct crypto_tfm *tfm,
68510 const u8 *src, unsigned int slen,
68511 u8 *dst, unsigned int *dlen);
68512-};
68513+} __no_const;
68514
68515 struct rng_tfm {
68516 int (*rng_gen_random)(struct crypto_rng *tfm, u8 *rdata,
68517 unsigned int dlen);
68518 int (*rng_reset)(struct crypto_rng *tfm, u8 *seed, unsigned int slen);
68519-};
68520+} __no_const;
68521
68522 #define crt_ablkcipher crt_u.ablkcipher
68523 #define crt_aead crt_u.aead
68524diff --git a/include/linux/ctype.h b/include/linux/ctype.h
68525index 653589e..4ef254a 100644
68526--- a/include/linux/ctype.h
68527+++ b/include/linux/ctype.h
68528@@ -56,7 +56,7 @@ static inline unsigned char __toupper(unsigned char c)
68529 * Fast implementation of tolower() for internal usage. Do not use in your
68530 * code.
68531 */
68532-static inline char _tolower(const char c)
68533+static inline unsigned char _tolower(const unsigned char c)
68534 {
68535 return c | 0x20;
68536 }
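
With plain char, _tolower feeds a possibly negative value into the OR on platforms where char is signed, so bytes at or above 0x80 come back outside the 0..255 range; the unsigned char signature keeps the arithmetic well defined. A userspace demonstration, assuming a signed-char target for the first printed value:

#include <stdio.h>

static char old_tolower(const char c) { return c | 0x20; }
static unsigned char new_tolower(const unsigned char c) { return c | 0x20; }

int main(void)
{
	char byte = (char)0xC4;	/* e.g. the first byte of a UTF-8 sequence */

	printf("old: %d\n", old_tolower(byte));	/* -28 where char is signed */
	printf("new: %u\n", new_tolower(byte));	/* 228 */
	return 0;
}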
68537diff --git a/include/linux/decompress/mm.h b/include/linux/decompress/mm.h
68538index 7925bf0..d5143d2 100644
68539--- a/include/linux/decompress/mm.h
68540+++ b/include/linux/decompress/mm.h
68541@@ -77,7 +77,7 @@ static void free(void *where)
68542 * warnings when not needed (indeed large_malloc / large_free are not
68543 * needed by inflate */
68544
68545-#define malloc(a) kmalloc(a, GFP_KERNEL)
68546+#define malloc(a) kmalloc((a), GFP_KERNEL)
68547 #define free(a) kfree(a)
68548
68549 #define large_malloc(a) vmalloc(a)
68550diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
68551index fe8c447..bdc1f33 100644
68552--- a/include/linux/devfreq.h
68553+++ b/include/linux/devfreq.h
68554@@ -114,7 +114,7 @@ struct devfreq_governor {
68555 int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
68556 int (*event_handler)(struct devfreq *devfreq,
68557 unsigned int event, void *data);
68558-};
68559+} __do_const;
68560
68561 /**
68562 * struct devfreq - Device devfreq structure
68563diff --git a/include/linux/device.h b/include/linux/device.h
68564index c0a1261..dba7569 100644
68565--- a/include/linux/device.h
68566+++ b/include/linux/device.h
68567@@ -290,7 +290,7 @@ struct subsys_interface {
68568 struct list_head node;
68569 int (*add_dev)(struct device *dev, struct subsys_interface *sif);
68570 int (*remove_dev)(struct device *dev, struct subsys_interface *sif);
68571-};
68572+} __do_const;
68573
68574 int subsys_interface_register(struct subsys_interface *sif);
68575 void subsys_interface_unregister(struct subsys_interface *sif);
68576@@ -473,7 +473,7 @@ struct device_type {
68577 void (*release)(struct device *dev);
68578
68579 const struct dev_pm_ops *pm;
68580-};
68581+} __do_const;
68582
68583 /* interface for exporting device attributes */
68584 struct device_attribute {
68585@@ -483,11 +483,12 @@ struct device_attribute {
68586 ssize_t (*store)(struct device *dev, struct device_attribute *attr,
68587 const char *buf, size_t count);
68588 };
68589+typedef struct device_attribute __no_const device_attribute_no_const;
68590
68591 struct dev_ext_attribute {
68592 struct device_attribute attr;
68593 void *var;
68594-};
68595+} __do_const;
68596
68597 ssize_t device_show_ulong(struct device *dev, struct device_attribute *attr,
68598 char *buf);
68599diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
68600index 94af418..b1ca7a2 100644
68601--- a/include/linux/dma-mapping.h
68602+++ b/include/linux/dma-mapping.h
68603@@ -54,7 +54,7 @@ struct dma_map_ops {
68604 u64 (*get_required_mask)(struct device *dev);
68605 #endif
68606 int is_phys;
68607-};
68608+} __do_const;
68609
68610 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
68611
68612diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
68613index 96d3e4a..dc36433 100644
68614--- a/include/linux/dmaengine.h
68615+++ b/include/linux/dmaengine.h
68616@@ -1035,9 +1035,9 @@ struct dma_pinned_list {
68617 struct dma_pinned_list *dma_pin_iovec_pages(struct iovec *iov, size_t len);
68618 void dma_unpin_iovec_pages(struct dma_pinned_list* pinned_list);
68619
68620-dma_cookie_t dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
68621+dma_cookie_t __intentional_overflow(0) dma_memcpy_to_iovec(struct dma_chan *chan, struct iovec *iov,
68622 struct dma_pinned_list *pinned_list, unsigned char *kdata, size_t len);
68623-dma_cookie_t dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
68624+dma_cookie_t __intentional_overflow(0) dma_memcpy_pg_to_iovec(struct dma_chan *chan, struct iovec *iov,
68625 struct dma_pinned_list *pinned_list, struct page *page,
68626 unsigned int offset, size_t len);
68627
68628diff --git a/include/linux/efi.h b/include/linux/efi.h
68629index 2bc0ad7..3f7b006 100644
68630--- a/include/linux/efi.h
68631+++ b/include/linux/efi.h
68632@@ -745,6 +745,7 @@ struct efivar_operations {
68633 efi_set_variable_t *set_variable;
68634 efi_query_variable_store_t *query_variable_store;
68635 };
68636+typedef struct efivar_operations __no_const efivar_operations_no_const;
68637
68638 struct efivars {
68639 /*
68640diff --git a/include/linux/elf.h b/include/linux/elf.h
68641index 40a3c0e..4c45a38 100644
68642--- a/include/linux/elf.h
68643+++ b/include/linux/elf.h
68644@@ -24,6 +24,7 @@ extern Elf32_Dyn _DYNAMIC [];
68645 #define elf_note elf32_note
68646 #define elf_addr_t Elf32_Off
68647 #define Elf_Half Elf32_Half
68648+#define elf_dyn Elf32_Dyn
68649
68650 #else
68651
68652@@ -34,6 +35,7 @@ extern Elf64_Dyn _DYNAMIC [];
68653 #define elf_note elf64_note
68654 #define elf_addr_t Elf64_Off
68655 #define Elf_Half Elf64_Half
68656+#define elf_dyn Elf64_Dyn
68657
68658 #endif
68659
68660diff --git a/include/linux/err.h b/include/linux/err.h
68661index f2edce2..cc2082c 100644
68662--- a/include/linux/err.h
68663+++ b/include/linux/err.h
68664@@ -19,12 +19,12 @@
68665
68666 #define IS_ERR_VALUE(x) unlikely((x) >= (unsigned long)-MAX_ERRNO)
68667
68668-static inline void * __must_check ERR_PTR(long error)
68669+static inline void * __must_check __intentional_overflow(-1) ERR_PTR(long error)
68670 {
68671 return (void *) error;
68672 }
68673
68674-static inline long __must_check PTR_ERR(const void *ptr)
68675+static inline long __must_check __intentional_overflow(-1) PTR_ERR(const void *ptr)
68676 {
68677 return (long) ptr;
68678 }
68679diff --git a/include/linux/extcon.h b/include/linux/extcon.h
68680index fcb51c8..bdafcf6 100644
68681--- a/include/linux/extcon.h
68682+++ b/include/linux/extcon.h
68683@@ -134,7 +134,7 @@ struct extcon_dev {
68684 /* /sys/class/extcon/.../mutually_exclusive/... */
68685 struct attribute_group attr_g_muex;
68686 struct attribute **attrs_muex;
68687- struct device_attribute *d_attrs_muex;
68688+ device_attribute_no_const *d_attrs_muex;
68689 };
68690
68691 /**
68692diff --git a/include/linux/fb.h b/include/linux/fb.h
68693index d49c60f..2834fbe 100644
68694--- a/include/linux/fb.h
68695+++ b/include/linux/fb.h
68696@@ -304,7 +304,7 @@ struct fb_ops {
68697 /* called at KDB enter and leave time to prepare the console */
68698 int (*fb_debug_enter)(struct fb_info *info);
68699 int (*fb_debug_leave)(struct fb_info *info);
68700-};
68701+} __do_const;
68702
68703 #ifdef CONFIG_FB_TILEBLITTING
68704 #define FB_TILE_CURSOR_NONE 0
68705diff --git a/include/linux/filter.h b/include/linux/filter.h
68706index f65f5a6..2f4f93a 100644
68707--- a/include/linux/filter.h
68708+++ b/include/linux/filter.h
68709@@ -20,6 +20,7 @@ struct compat_sock_fprog {
68710
68711 struct sk_buff;
68712 struct sock;
68713+struct bpf_jit_work;
68714
68715 struct sk_filter
68716 {
68717@@ -27,6 +28,9 @@ struct sk_filter
68718 unsigned int len; /* Number of filter blocks */
68719 unsigned int (*bpf_func)(const struct sk_buff *skb,
68720 const struct sock_filter *filter);
68721+#ifdef CONFIG_BPF_JIT
68722+ struct bpf_jit_work *work;
68723+#endif
68724 struct rcu_head rcu;
68725 struct sock_filter insns[0];
68726 };
68727diff --git a/include/linux/frontswap.h b/include/linux/frontswap.h
68728index 8293262..2b3b8bd 100644
68729--- a/include/linux/frontswap.h
68730+++ b/include/linux/frontswap.h
68731@@ -11,7 +11,7 @@ struct frontswap_ops {
68732 int (*load)(unsigned, pgoff_t, struct page *);
68733 void (*invalidate_page)(unsigned, pgoff_t);
68734 void (*invalidate_area)(unsigned);
68735-};
68736+} __no_const;
68737
68738 extern bool frontswap_enabled;
68739 extern struct frontswap_ops *
68740diff --git a/include/linux/fs.h b/include/linux/fs.h
68741index 65c2be2..4c53f6e 100644
68742--- a/include/linux/fs.h
68743+++ b/include/linux/fs.h
68744@@ -1543,7 +1543,8 @@ struct file_operations {
68745 long (*fallocate)(struct file *file, int mode, loff_t offset,
68746 loff_t len);
68747 int (*show_fdinfo)(struct seq_file *m, struct file *f);
68748-};
68749+} __do_const;
68750+typedef struct file_operations __no_const file_operations_no_const;
68751
68752 struct inode_operations {
68753 struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
68754@@ -2688,4 +2689,14 @@ static inline void inode_has_no_xattr(struct inode *inode)
68755 inode->i_flags |= S_NOSEC;
68756 }
68757
68758+static inline bool is_sidechannel_device(const struct inode *inode)
68759+{
68760+#ifdef CONFIG_GRKERNSEC_DEVICE_SIDECHANNEL
68761+ umode_t mode = inode->i_mode;
68762+ return ((S_ISCHR(mode) || S_ISBLK(mode)) && (mode & (S_IROTH | S_IWOTH)));
68763+#else
68764+ return false;
68765+#endif
68766+}
68767+
68768 #endif /* _LINUX_FS_H */
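
is_sidechannel_device() identifies world-readable or world-writable character and block device nodes when GRKERNSEC_DEVICE_SIDECHANNEL is enabled; the fsnotify hunks below use it to suppress access and modify events on such nodes so an unprivileged watcher cannot infer another user's activity from them. The other change here, __do_const on file_operations plus the file_operations_no_const typedef, keeps static fops tables read-only while leaving a writable type for tables genuinely built at runtime. A sketch of that escape hatch, with hypothetical names (my_fops, my_fops_setup):

/* Most fops tables are fully static and become read-only via
 * __do_const; a table assembled at runtime uses the no_const
 * typedef instead. */
static file_operations_no_const my_fops;

static void my_fops_setup(const struct file_operations *base)
{
	my_fops = *base;		/* runtime copy is allowed */
	my_fops.llseek = noop_llseek;	/* then selectively patched */
}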
68769diff --git a/include/linux/fs_struct.h b/include/linux/fs_struct.h
68770index 2b93a9a..855d94a 100644
68771--- a/include/linux/fs_struct.h
68772+++ b/include/linux/fs_struct.h
68773@@ -6,7 +6,7 @@
68774 #include <linux/seqlock.h>
68775
68776 struct fs_struct {
68777- int users;
68778+ atomic_t users;
68779 spinlock_t lock;
68780 seqcount_t seq;
68781 int umask;
68782diff --git a/include/linux/fscache-cache.h b/include/linux/fscache-cache.h
68783index 5dfa0aa..6acf322 100644
68784--- a/include/linux/fscache-cache.h
68785+++ b/include/linux/fscache-cache.h
68786@@ -112,7 +112,7 @@ struct fscache_operation {
68787 fscache_operation_release_t release;
68788 };
68789
68790-extern atomic_t fscache_op_debug_id;
68791+extern atomic_unchecked_t fscache_op_debug_id;
68792 extern void fscache_op_work_func(struct work_struct *work);
68793
68794 extern void fscache_enqueue_operation(struct fscache_operation *);
68795@@ -134,7 +134,7 @@ static inline void fscache_operation_init(struct fscache_operation *op,
68796 INIT_WORK(&op->work, fscache_op_work_func);
68797 atomic_set(&op->usage, 1);
68798 op->state = FSCACHE_OP_ST_INITIALISED;
68799- op->debug_id = atomic_inc_return(&fscache_op_debug_id);
68800+ op->debug_id = atomic_inc_return_unchecked(&fscache_op_debug_id);
68801 op->processor = processor;
68802 op->release = release;
68803 INIT_LIST_HEAD(&op->pend_link);
68804diff --git a/include/linux/fscache.h b/include/linux/fscache.h
68805index 7a08623..4c07b0f 100644
68806--- a/include/linux/fscache.h
68807+++ b/include/linux/fscache.h
68808@@ -152,7 +152,7 @@ struct fscache_cookie_def {
68809 * - this is mandatory for any object that may have data
68810 */
68811 void (*now_uncached)(void *cookie_netfs_data);
68812-};
68813+} __do_const;
68814
68815 /*
68816 * fscache cached network filesystem type
68817diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
68818index a78680a..87bd73e 100644
68819--- a/include/linux/fsnotify.h
68820+++ b/include/linux/fsnotify.h
68821@@ -195,6 +195,9 @@ static inline void fsnotify_access(struct file *file)
68822 struct inode *inode = path->dentry->d_inode;
68823 __u32 mask = FS_ACCESS;
68824
68825+ if (is_sidechannel_device(inode))
68826+ return;
68827+
68828 if (S_ISDIR(inode->i_mode))
68829 mask |= FS_ISDIR;
68830
68831@@ -213,6 +216,9 @@ static inline void fsnotify_modify(struct file *file)
68832 struct inode *inode = path->dentry->d_inode;
68833 __u32 mask = FS_MODIFY;
68834
68835+ if (is_sidechannel_device(inode))
68836+ return;
68837+
68838 if (S_ISDIR(inode->i_mode))
68839 mask |= FS_ISDIR;
68840
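With GRKERNSEC_DEVICE_SIDECHANNEL, accesses to world-readable/writable character and block devices stop generating fsnotify events, so an unprivileged watcher cannot fingerprint other users' activity on shared device nodes. A hypothetical userspace program (not from the patch) showing what goes silent:

	#include <stdio.h>
	#include <sys/inotify.h>

	int main(void)
	{
		int fd = inotify_init1(IN_CLOEXEC);

		/* On a patched kernel this watch never fires: /dev/ptmx is a
		 * world-accessible character device, so fsnotify_access() and
		 * fsnotify_modify() return early for it. */
		if (inotify_add_watch(fd, "/dev/ptmx", IN_ACCESS | IN_MODIFY) < 0)
			perror("inotify_add_watch");
		return 0;
	}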
68841@@ -315,7 +321,7 @@ static inline void fsnotify_change(struct dentry *dentry, unsigned int ia_valid)
68842 */
68843 static inline const unsigned char *fsnotify_oldname_init(const unsigned char *name)
68844 {
68845- return kstrdup(name, GFP_KERNEL);
68846+ return (const unsigned char *)kstrdup((const char *)name, GFP_KERNEL);
68847 }
68848
68849 /*
68850diff --git a/include/linux/genhd.h b/include/linux/genhd.h
68851index 9f3c275..911b591 100644
68852--- a/include/linux/genhd.h
68853+++ b/include/linux/genhd.h
68854@@ -194,7 +194,7 @@ struct gendisk {
68855 struct kobject *slave_dir;
68856
68857 struct timer_rand_state *random;
68858- atomic_t sync_io; /* RAID */
68859+ atomic_unchecked_t sync_io; /* RAID */
68860 struct disk_events *ev;
68861 #ifdef CONFIG_BLK_DEV_INTEGRITY
68862 struct blk_integrity *integrity;
68863diff --git a/include/linux/genl_magic_func.h b/include/linux/genl_magic_func.h
68864index 023bc34..b02b46a 100644
68865--- a/include/linux/genl_magic_func.h
68866+++ b/include/linux/genl_magic_func.h
68867@@ -246,7 +246,7 @@ const char *CONCAT_(GENL_MAGIC_FAMILY, _genl_cmd_to_str)(__u8 cmd)
68868 },
68869
68870 #define ZZZ_genl_ops CONCAT_(GENL_MAGIC_FAMILY, _genl_ops)
68871-static struct genl_ops ZZZ_genl_ops[] __read_mostly = {
68872+static struct genl_ops ZZZ_genl_ops[] = {
68873 #include GENL_MAGIC_INCLUDE_FILE
68874 };
68875
68876diff --git a/include/linux/gfp.h b/include/linux/gfp.h
68877index 0f615eb..5c3832f 100644
68878--- a/include/linux/gfp.h
68879+++ b/include/linux/gfp.h
68880@@ -35,6 +35,13 @@ struct vm_area_struct;
68881 #define ___GFP_NO_KSWAPD 0x400000u
68882 #define ___GFP_OTHER_NODE 0x800000u
68883 #define ___GFP_WRITE 0x1000000u
68884+
68885+#ifdef CONFIG_PAX_USERCOPY_SLABS
68886+#define ___GFP_USERCOPY 0x2000000u
68887+#else
68888+#define ___GFP_USERCOPY 0
68889+#endif
68890+
68891 /* If the above are modified, __GFP_BITS_SHIFT may need updating */
68892
68893 /*
68894@@ -92,6 +99,7 @@ struct vm_area_struct;
68895 #define __GFP_OTHER_NODE ((__force gfp_t)___GFP_OTHER_NODE) /* On behalf of other node */
68896 #define __GFP_KMEMCG ((__force gfp_t)___GFP_KMEMCG) /* Allocation comes from a memcg-accounted resource */
68897 #define __GFP_WRITE ((__force gfp_t)___GFP_WRITE) /* Allocator intends to dirty page */
68898+#define __GFP_USERCOPY ((__force gfp_t)___GFP_USERCOPY)/* Allocator intends to copy page to/from userland */
68899
68900 /*
68901 * This may seem redundant, but it's a way of annotating false positives vs.
68902@@ -99,7 +107,7 @@ struct vm_area_struct;
68903 */
68904 #define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
68905
68906-#define __GFP_BITS_SHIFT 25 /* Room for N __GFP_FOO bits */
68907+#define __GFP_BITS_SHIFT 26 /* Room for N __GFP_FOO bits */
68908 #define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
68909
68910 /* This equals 0, but use constants in case they ever change */
68911@@ -153,6 +161,8 @@ struct vm_area_struct;
68912 /* 4GB DMA on some platforms */
68913 #define GFP_DMA32 __GFP_DMA32
68914
68915+#define GFP_USERCOPY __GFP_USERCOPY
68916+
68917 /* Convert GFP flags to their corresponding migrate type */
68918 static inline int allocflags_to_migratetype(gfp_t gfp_flags)
68919 {
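`___GFP_USERCOPY` consumes the next free GFP bit, which is why `__GFP_BITS_SHIFT` moves from 25 to 26. Under PAX_USERCOPY_SLABS the flag tags allocations (and the slab caches backing them) whose objects are legitimately copied to or from userland, letting the usercopy checker reject copies that touch any other cache. An assumed call site, as a sketch:

	static void *alloc_user_visible(size_t len)
	{
		/* whitelisted for copy_{to,from}_user under PAX_USERCOPY_SLABS */
		return kmalloc(len, GFP_KERNEL | GFP_USERCOPY);
	}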
68920diff --git a/include/linux/gracl.h b/include/linux/gracl.h
68921new file mode 100644
68922index 0000000..ebe6d72
68923--- /dev/null
68924+++ b/include/linux/gracl.h
68925@@ -0,0 +1,319 @@
68926+#ifndef GR_ACL_H
68927+#define GR_ACL_H
68928+
68929+#include <linux/grdefs.h>
68930+#include <linux/resource.h>
68931+#include <linux/capability.h>
68932+#include <linux/dcache.h>
68933+#include <asm/resource.h>
68934+
68935+/* Major status information */
68936+
68937+#define GR_VERSION "grsecurity 2.9.1"
68938+#define GRSECURITY_VERSION 0x2901
68939+
68940+enum {
68941+ GR_SHUTDOWN = 0,
68942+ GR_ENABLE = 1,
68943+ GR_SPROLE = 2,
68944+ GR_RELOAD = 3,
68945+ GR_SEGVMOD = 4,
68946+ GR_STATUS = 5,
68947+ GR_UNSPROLE = 6,
68948+ GR_PASSSET = 7,
68949+ GR_SPROLEPAM = 8,
68950+};
68951+
68952+/* Password setup definitions
68953+ * kernel/grhash.c */
68954+enum {
68955+ GR_PW_LEN = 128,
68956+ GR_SALT_LEN = 16,
68957+ GR_SHA_LEN = 32,
68958+};
68959+
68960+enum {
68961+ GR_SPROLE_LEN = 64,
68962+};
68963+
68964+enum {
68965+ GR_NO_GLOB = 0,
68966+ GR_REG_GLOB,
68967+ GR_CREATE_GLOB
68968+};
68969+
68970+#define GR_NLIMITS 32
68971+
68972+/* Begin Data Structures */
68973+
68974+struct sprole_pw {
68975+ unsigned char *rolename;
68976+ unsigned char salt[GR_SALT_LEN];
68977+ unsigned char sum[GR_SHA_LEN]; /* 256-bit SHA hash of the password */
68978+};
68979+
68980+struct name_entry {
68981+ __u32 key;
68982+ ino_t inode;
68983+ dev_t device;
68984+ char *name;
68985+ __u16 len;
68986+ __u8 deleted;
68987+ struct name_entry *prev;
68988+ struct name_entry *next;
68989+};
68990+
68991+struct inodev_entry {
68992+ struct name_entry *nentry;
68993+ struct inodev_entry *prev;
68994+ struct inodev_entry *next;
68995+};
68996+
68997+struct acl_role_db {
68998+ struct acl_role_label **r_hash;
68999+ __u32 r_size;
69000+};
69001+
69002+struct inodev_db {
69003+ struct inodev_entry **i_hash;
69004+ __u32 i_size;
69005+};
69006+
69007+struct name_db {
69008+ struct name_entry **n_hash;
69009+ __u32 n_size;
69010+};
69011+
69012+struct crash_uid {
69013+ uid_t uid;
69014+ unsigned long expires;
69015+};
69016+
69017+struct gr_hash_struct {
69018+ void **table;
69019+ void **nametable;
69020+ void *first;
69021+ __u32 table_size;
69022+ __u32 used_size;
69023+ int type;
69024+};
69025+
69026+/* Userspace Grsecurity ACL data structures */
69027+
69028+struct acl_subject_label {
69029+ char *filename;
69030+ ino_t inode;
69031+ dev_t device;
69032+ __u32 mode;
69033+ kernel_cap_t cap_mask;
69034+ kernel_cap_t cap_lower;
69035+ kernel_cap_t cap_invert_audit;
69036+
69037+ struct rlimit res[GR_NLIMITS];
69038+ __u32 resmask;
69039+
69040+ __u8 user_trans_type;
69041+ __u8 group_trans_type;
69042+ uid_t *user_transitions;
69043+ gid_t *group_transitions;
69044+ __u16 user_trans_num;
69045+ __u16 group_trans_num;
69046+
69047+ __u32 sock_families[2];
69048+ __u32 ip_proto[8];
69049+ __u32 ip_type;
69050+ struct acl_ip_label **ips;
69051+ __u32 ip_num;
69052+ __u32 inaddr_any_override;
69053+
69054+ __u32 crashes;
69055+ unsigned long expires;
69056+
69057+ struct acl_subject_label *parent_subject;
69058+ struct gr_hash_struct *hash;
69059+ struct acl_subject_label *prev;
69060+ struct acl_subject_label *next;
69061+
69062+ struct acl_object_label **obj_hash;
69063+ __u32 obj_hash_size;
69064+ __u16 pax_flags;
69065+};
69066+
69067+struct role_allowed_ip {
69068+ __u32 addr;
69069+ __u32 netmask;
69070+
69071+ struct role_allowed_ip *prev;
69072+ struct role_allowed_ip *next;
69073+};
69074+
69075+struct role_transition {
69076+ char *rolename;
69077+
69078+ struct role_transition *prev;
69079+ struct role_transition *next;
69080+};
69081+
69082+struct acl_role_label {
69083+ char *rolename;
69084+ uid_t uidgid;
69085+ __u16 roletype;
69086+
69087+ __u16 auth_attempts;
69088+ unsigned long expires;
69089+
69090+ struct acl_subject_label *root_label;
69091+ struct gr_hash_struct *hash;
69092+
69093+ struct acl_role_label *prev;
69094+ struct acl_role_label *next;
69095+
69096+ struct role_transition *transitions;
69097+ struct role_allowed_ip *allowed_ips;
69098+ uid_t *domain_children;
69099+ __u16 domain_child_num;
69100+
69101+ umode_t umask;
69102+
69103+ struct acl_subject_label **subj_hash;
69104+ __u32 subj_hash_size;
69105+};
69106+
69107+struct user_acl_role_db {
69108+ struct acl_role_label **r_table;
69109+ __u32 num_pointers; /* Number of allocations to track */
69110+ __u32 num_roles; /* Number of roles */
69111+ __u32 num_domain_children; /* Number of domain children */
69112+ __u32 num_subjects; /* Number of subjects */
69113+ __u32 num_objects; /* Number of objects */
69114+};
69115+
69116+struct acl_object_label {
69117+ char *filename;
69118+ ino_t inode;
69119+ dev_t device;
69120+ __u32 mode;
69121+
69122+ struct acl_subject_label *nested;
69123+ struct acl_object_label *globbed;
69124+
69125+ /* next two structures not used */
69126+
69127+ struct acl_object_label *prev;
69128+ struct acl_object_label *next;
69129+};
69130+
69131+struct acl_ip_label {
69132+ char *iface;
69133+ __u32 addr;
69134+ __u32 netmask;
69135+ __u16 low, high;
69136+ __u8 mode;
69137+ __u32 type;
69138+ __u32 proto[8];
69139+
69140+ /* next two structures not used */
69141+
69142+ struct acl_ip_label *prev;
69143+ struct acl_ip_label *next;
69144+};
69145+
69146+struct gr_arg {
69147+ struct user_acl_role_db role_db;
69148+ unsigned char pw[GR_PW_LEN];
69149+ unsigned char salt[GR_SALT_LEN];
69150+ unsigned char sum[GR_SHA_LEN];
69151+ unsigned char sp_role[GR_SPROLE_LEN];
69152+ struct sprole_pw *sprole_pws;
69153+ dev_t segv_device;
69154+ ino_t segv_inode;
69155+ uid_t segv_uid;
69156+ __u16 num_sprole_pws;
69157+ __u16 mode;
69158+};
69159+
69160+struct gr_arg_wrapper {
69161+ struct gr_arg *arg;
69162+ __u32 version;
69163+ __u32 size;
69164+};
69165+
69166+struct subject_map {
69167+ struct acl_subject_label *user;
69168+ struct acl_subject_label *kernel;
69169+ struct subject_map *prev;
69170+ struct subject_map *next;
69171+};
69172+
69173+struct acl_subj_map_db {
69174+ struct subject_map **s_hash;
69175+ __u32 s_size;
69176+};
69177+
69178+/* End Data Structures Section */
69179+
69180+/* Hash functions generated by empirical testing by Brad Spengler.
69181+   They make good use of the low bits of the inode: typically 0-1 loop
69182+   iterations for a successful match and 0-3 for an unsuccessful one.
69183+   Shift/add algorithm with a modulus of the table size and an XOR. */
69184+
69185+static __inline__ unsigned int
69186+gr_rhash(const uid_t uid, const __u16 type, const unsigned int sz)
69187+{
69188+ return ((((uid + type) << (16 + type)) ^ uid) % sz);
69189+}
69190+
69191+static __inline__ unsigned int
69192+gr_shash(const struct acl_subject_label *userp, const unsigned int sz)
69193+{
69194+ return ((unsigned long)userp % sz);
69195+}
69196+
69197+static __inline__ unsigned int
69198+gr_fhash(const ino_t ino, const dev_t dev, const unsigned int sz)
69199+{
69200+ return (((ino + dev) ^ ((ino << 13) + (ino << 23) + (dev << 9))) % sz);
69201+}
69202+
69203+static __inline__ unsigned int
69204+gr_nhash(const char *name, const __u16 len, const unsigned int sz)
69205+{
69206+ return full_name_hash((const unsigned char *)name, len) % sz;
69207+}
69208+
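A quick userspace check of the role hash above; this is a standalone re-typing for demonstration only, with kernel types replaced by plain integers and type 0 picked arbitrarily:

	#include <stdio.h>

	static unsigned int rhash(unsigned int uid, unsigned int type,
				  unsigned int sz)
	{
		return ((((uid + type) << (16 + type)) ^ uid) % sz);
	}

	int main(void)
	{
		/* uid 1000, type 0, 256-slot table: (1000 << 16) has zero low
		 * bits, the XOR restores them, and 65537000 % 256 == 232, so
		 * the role lands in slot 232. */
		printf("%u\n", rhash(1000, 0, 256));	/* prints 232 */
		return 0;
	}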
69209+#define FOR_EACH_ROLE_START(role) \
69210+ role = role_list; \
69211+ while (role) {
69212+
69213+#define FOR_EACH_ROLE_END(role) \
69214+ role = role->prev; \
69215+ }
69216+
69217+#define FOR_EACH_SUBJECT_START(role,subj,iter) \
69218+ subj = NULL; \
69219+ iter = 0; \
69220+ while (iter < role->subj_hash_size) { \
69221+ if (subj == NULL) \
69222+ subj = role->subj_hash[iter]; \
69223+ if (subj == NULL) { \
69224+ iter++; \
69225+ continue; \
69226+ }
69227+
69228+#define FOR_EACH_SUBJECT_END(subj,iter) \
69229+ subj = subj->next; \
69230+ if (subj == NULL) \
69231+ iter++; \
69232+ }
69233+
69234+
69235+#define FOR_EACH_NESTED_SUBJECT_START(role,subj) \
69236+ subj = role->hash->first; \
69237+ while (subj != NULL) {
69238+
69239+#define FOR_EACH_NESTED_SUBJECT_END(subj) \
69240+ subj = subj->next; \
69241+ }
69242+
69243+#endif
69244+
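The FOR_EACH_* pairs above are open-coded iterators over the chained hash tables: START expands to the loop setup plus the opening of a while body, END advances the cursor and closes it. A hypothetical walk over every subject in a role, as a sketch:

	struct acl_subject_label *subj;
	unsigned int iter;

	FOR_EACH_SUBJECT_START(role, subj, iter)
		/* body runs once per subject in role->subj_hash */
		if (subj->inode == ino && subj->device == dev)
			matched++;
	FOR_EACH_SUBJECT_END(subj, iter)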
69245diff --git a/include/linux/gralloc.h b/include/linux/gralloc.h
69246new file mode 100644
69247index 0000000..323ecf2
69248--- /dev/null
69249+++ b/include/linux/gralloc.h
69250@@ -0,0 +1,9 @@
69251+#ifndef __GRALLOC_H
69252+#define __GRALLOC_H
69253+
69254+void acl_free_all(void);
69255+int acl_alloc_stack_init(unsigned long size);
69256+void *acl_alloc(unsigned long len);
69257+void *acl_alloc_num(unsigned long num, unsigned long len);
69258+
69259+#endif
69260diff --git a/include/linux/grdefs.h b/include/linux/grdefs.h
69261new file mode 100644
69262index 0000000..be66033
69263--- /dev/null
69264+++ b/include/linux/grdefs.h
69265@@ -0,0 +1,140 @@
69266+#ifndef GRDEFS_H
69267+#define GRDEFS_H
69268+
69269+/* Begin grsecurity status declarations */
69270+
69271+enum {
69272+ GR_READY = 0x01,
69273+ GR_STATUS_INIT = 0x00 // disabled state
69274+};
69275+
69276+/* Begin ACL declarations */
69277+
69278+/* Role flags */
69279+
69280+enum {
69281+ GR_ROLE_USER = 0x0001,
69282+ GR_ROLE_GROUP = 0x0002,
69283+ GR_ROLE_DEFAULT = 0x0004,
69284+ GR_ROLE_SPECIAL = 0x0008,
69285+ GR_ROLE_AUTH = 0x0010,
69286+ GR_ROLE_NOPW = 0x0020,
69287+ GR_ROLE_GOD = 0x0040,
69288+ GR_ROLE_LEARN = 0x0080,
69289+ GR_ROLE_TPE = 0x0100,
69290+ GR_ROLE_DOMAIN = 0x0200,
69291+ GR_ROLE_PAM = 0x0400,
69292+ GR_ROLE_PERSIST = 0x0800
69293+};
69294+
69295+/* ACL Subject and Object mode flags */
69296+enum {
69297+ GR_DELETED = 0x80000000
69298+};
69299+
69300+/* ACL Object-only mode flags */
69301+enum {
69302+ GR_READ = 0x00000001,
69303+ GR_APPEND = 0x00000002,
69304+ GR_WRITE = 0x00000004,
69305+ GR_EXEC = 0x00000008,
69306+ GR_FIND = 0x00000010,
69307+ GR_INHERIT = 0x00000020,
69308+ GR_SETID = 0x00000040,
69309+ GR_CREATE = 0x00000080,
69310+ GR_DELETE = 0x00000100,
69311+ GR_LINK = 0x00000200,
69312+ GR_AUDIT_READ = 0x00000400,
69313+ GR_AUDIT_APPEND = 0x00000800,
69314+ GR_AUDIT_WRITE = 0x00001000,
69315+ GR_AUDIT_EXEC = 0x00002000,
69316+ GR_AUDIT_FIND = 0x00004000,
69317+ GR_AUDIT_INHERIT= 0x00008000,
69318+ GR_AUDIT_SETID = 0x00010000,
69319+ GR_AUDIT_CREATE = 0x00020000,
69320+ GR_AUDIT_DELETE = 0x00040000,
69321+ GR_AUDIT_LINK = 0x00080000,
69322+ GR_PTRACERD = 0x00100000,
69323+ GR_NOPTRACE = 0x00200000,
69324+ GR_SUPPRESS = 0x00400000,
69325+ GR_NOLEARN = 0x00800000,
69326+ GR_INIT_TRANSFER= 0x01000000
69327+};
69328+
69329+#define GR_AUDITS (GR_AUDIT_READ | GR_AUDIT_WRITE | GR_AUDIT_APPEND | GR_AUDIT_EXEC | \
69330+ GR_AUDIT_FIND | GR_AUDIT_INHERIT | GR_AUDIT_SETID | \
69331+ GR_AUDIT_CREATE | GR_AUDIT_DELETE | GR_AUDIT_LINK)
69332+
69333+/* ACL subject-only mode flags */
69334+enum {
69335+ GR_KILL = 0x00000001,
69336+ GR_VIEW = 0x00000002,
69337+ GR_PROTECTED = 0x00000004,
69338+ GR_LEARN = 0x00000008,
69339+ GR_OVERRIDE = 0x00000010,
69340+ /* just a placeholder; this mode is only used in userspace */
69341+ GR_DUMMY = 0x00000020,
69342+ GR_PROTSHM = 0x00000040,
69343+ GR_KILLPROC = 0x00000080,
69344+ GR_KILLIPPROC = 0x00000100,
69345+ /* just a placeholder; this mode is only used in userspace */
69346+ GR_NOTROJAN = 0x00000200,
69347+ GR_PROTPROCFD = 0x00000400,
69348+ GR_PROCACCT = 0x00000800,
69349+ GR_RELAXPTRACE = 0x00001000,
69350+ //GR_NESTED = 0x00002000,
69351+ GR_INHERITLEARN = 0x00004000,
69352+ GR_PROCFIND = 0x00008000,
69353+ GR_POVERRIDE = 0x00010000,
69354+ GR_KERNELAUTH = 0x00020000,
69355+ GR_ATSECURE = 0x00040000,
69356+ GR_SHMEXEC = 0x00080000
69357+};
69358+
69359+enum {
69360+ GR_PAX_ENABLE_SEGMEXEC = 0x0001,
69361+ GR_PAX_ENABLE_PAGEEXEC = 0x0002,
69362+ GR_PAX_ENABLE_MPROTECT = 0x0004,
69363+ GR_PAX_ENABLE_RANDMMAP = 0x0008,
69364+ GR_PAX_ENABLE_EMUTRAMP = 0x0010,
69365+ GR_PAX_DISABLE_SEGMEXEC = 0x0100,
69366+ GR_PAX_DISABLE_PAGEEXEC = 0x0200,
69367+ GR_PAX_DISABLE_MPROTECT = 0x0400,
69368+ GR_PAX_DISABLE_RANDMMAP = 0x0800,
69369+ GR_PAX_DISABLE_EMUTRAMP = 0x1000,
69370+};
69371+
69372+enum {
69373+ GR_ID_USER = 0x01,
69374+ GR_ID_GROUP = 0x02,
69375+};
69376+
69377+enum {
69378+ GR_ID_ALLOW = 0x01,
69379+ GR_ID_DENY = 0x02,
69380+};
69381+
69382+#define GR_CRASH_RES 31
69383+#define GR_UIDTABLE_MAX 500
69384+
69385+/* begin resource learning section */
69386+enum {
69387+ GR_RLIM_CPU_BUMP = 60,
69388+ GR_RLIM_FSIZE_BUMP = 50000,
69389+ GR_RLIM_DATA_BUMP = 10000,
69390+ GR_RLIM_STACK_BUMP = 1000,
69391+ GR_RLIM_CORE_BUMP = 10000,
69392+ GR_RLIM_RSS_BUMP = 500000,
69393+ GR_RLIM_NPROC_BUMP = 1,
69394+ GR_RLIM_NOFILE_BUMP = 5,
69395+ GR_RLIM_MEMLOCK_BUMP = 50000,
69396+ GR_RLIM_AS_BUMP = 500000,
69397+ GR_RLIM_LOCKS_BUMP = 2,
69398+ GR_RLIM_SIGPENDING_BUMP = 5,
69399+ GR_RLIM_MSGQUEUE_BUMP = 10000,
69400+ GR_RLIM_NICE_BUMP = 1,
69401+ GR_RLIM_RTPRIO_BUMP = 1,
69402+ GR_RLIM_RTTIME_BUMP = 1000000
69403+};
69404+
69405+#endif
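The object and subject modes above are plain bitmasks, so an ACL entry's mode is just an OR of flags, and GR_AUDITS collects every audit bit for mask tests. A worked example with the values defined in this file:

	/* read + find, with auditing of reads:
	 * GR_READ (0x1) | GR_FIND (0x10) | GR_AUDIT_READ (0x400) == 0x411 */
	__u32 mode = GR_READ | GR_FIND | GR_AUDIT_READ;

	if (mode & GR_AUDITS)
		log_access();	/* taken: an audit bit is set (hypothetical logger) */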
69406diff --git a/include/linux/grinternal.h b/include/linux/grinternal.h
69407new file mode 100644
69408index 0000000..12994b5
69409--- /dev/null
69410+++ b/include/linux/grinternal.h
69411@@ -0,0 +1,227 @@
69412+#ifndef __GRINTERNAL_H
69413+#define __GRINTERNAL_H
69414+
69415+#ifdef CONFIG_GRKERNSEC
69416+
69417+#include <linux/fs.h>
69418+#include <linux/mnt_namespace.h>
69419+#include <linux/nsproxy.h>
69420+#include <linux/gracl.h>
69421+#include <linux/grdefs.h>
69422+#include <linux/grmsg.h>
69423+
69424+void gr_add_learn_entry(const char *fmt, ...)
69425+ __attribute__ ((format (printf, 1, 2)));
69426+__u32 gr_search_file(const struct dentry *dentry, const __u32 mode,
69427+ const struct vfsmount *mnt);
69428+__u32 gr_check_create(const struct dentry *new_dentry,
69429+ const struct dentry *parent,
69430+ const struct vfsmount *mnt, const __u32 mode);
69431+int gr_check_protected_task(const struct task_struct *task);
69432+__u32 to_gr_audit(const __u32 reqmode);
69433+int gr_set_acls(const int type);
69434+int gr_apply_subject_to_task(struct task_struct *task);
69435+int gr_acl_is_enabled(void);
69436+char gr_roletype_to_char(void);
69437+
69438+void gr_handle_alertkill(struct task_struct *task);
69439+char *gr_to_filename(const struct dentry *dentry,
69440+ const struct vfsmount *mnt);
69441+char *gr_to_filename1(const struct dentry *dentry,
69442+ const struct vfsmount *mnt);
69443+char *gr_to_filename2(const struct dentry *dentry,
69444+ const struct vfsmount *mnt);
69445+char *gr_to_filename3(const struct dentry *dentry,
69446+ const struct vfsmount *mnt);
69447+
69448+extern int grsec_enable_ptrace_readexec;
69449+extern int grsec_enable_harden_ptrace;
69450+extern int grsec_enable_link;
69451+extern int grsec_enable_fifo;
69452+extern int grsec_enable_execve;
69453+extern int grsec_enable_shm;
69454+extern int grsec_enable_execlog;
69455+extern int grsec_enable_signal;
69456+extern int grsec_enable_audit_ptrace;
69457+extern int grsec_enable_forkfail;
69458+extern int grsec_enable_time;
69459+extern int grsec_enable_rofs;
69460+extern int grsec_enable_chroot_shmat;
69461+extern int grsec_enable_chroot_mount;
69462+extern int grsec_enable_chroot_double;
69463+extern int grsec_enable_chroot_pivot;
69464+extern int grsec_enable_chroot_chdir;
69465+extern int grsec_enable_chroot_chmod;
69466+extern int grsec_enable_chroot_mknod;
69467+extern int grsec_enable_chroot_fchdir;
69468+extern int grsec_enable_chroot_nice;
69469+extern int grsec_enable_chroot_execlog;
69470+extern int grsec_enable_chroot_caps;
69471+extern int grsec_enable_chroot_sysctl;
69472+extern int grsec_enable_chroot_unix;
69473+extern int grsec_enable_symlinkown;
69474+extern kgid_t grsec_symlinkown_gid;
69475+extern int grsec_enable_tpe;
69476+extern kgid_t grsec_tpe_gid;
69477+extern int grsec_enable_tpe_all;
69478+extern int grsec_enable_tpe_invert;
69479+extern int grsec_enable_socket_all;
69480+extern kgid_t grsec_socket_all_gid;
69481+extern int grsec_enable_socket_client;
69482+extern kgid_t grsec_socket_client_gid;
69483+extern int grsec_enable_socket_server;
69484+extern kgid_t grsec_socket_server_gid;
69485+extern kgid_t grsec_audit_gid;
69486+extern int grsec_enable_group;
69487+extern int grsec_enable_audit_textrel;
69488+extern int grsec_enable_log_rwxmaps;
69489+extern int grsec_enable_mount;
69490+extern int grsec_enable_chdir;
69491+extern int grsec_resource_logging;
69492+extern int grsec_enable_blackhole;
69493+extern int grsec_lastack_retries;
69494+extern int grsec_enable_brute;
69495+extern int grsec_lock;
69496+
69497+extern spinlock_t grsec_alert_lock;
69498+extern unsigned long grsec_alert_wtime;
69499+extern unsigned long grsec_alert_fyet;
69500+
69501+extern spinlock_t grsec_audit_lock;
69502+
69503+extern rwlock_t grsec_exec_file_lock;
69504+
69505+#define gr_task_fullpath(tsk) ((tsk)->exec_file ? \
69506+ gr_to_filename2((tsk)->exec_file->f_path.dentry, \
69507+ (tsk)->exec_file->f_path.mnt) : "/")
69508+
69509+#define gr_parent_task_fullpath(tsk) ((tsk)->real_parent->exec_file ? \
69510+ gr_to_filename3((tsk)->real_parent->exec_file->f_path.dentry, \
69511+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
69512+
69513+#define gr_task_fullpath0(tsk) ((tsk)->exec_file ? \
69514+ gr_to_filename((tsk)->exec_file->f_path.dentry, \
69515+ (tsk)->exec_file->f_path.mnt) : "/")
69516+
69517+#define gr_parent_task_fullpath0(tsk) ((tsk)->real_parent->exec_file ? \
69518+ gr_to_filename1((tsk)->real_parent->exec_file->f_path.dentry, \
69519+ (tsk)->real_parent->exec_file->f_path.mnt) : "/")
69520+
69521+#define proc_is_chrooted(tsk_a) ((tsk_a)->gr_is_chrooted)
69522+
69523+#define have_same_root(tsk_a,tsk_b) ((tsk_a)->gr_chroot_dentry == (tsk_b)->gr_chroot_dentry)
69524+
69525+static inline bool gr_is_same_file(const struct file *file1, const struct file *file2)
69526+{
69527+ if (file1 && file2) {
69528+ const struct inode *inode1 = file1->f_path.dentry->d_inode;
69529+ const struct inode *inode2 = file2->f_path.dentry->d_inode;
69530+ if (inode1->i_ino == inode2->i_ino && inode1->i_sb->s_dev == inode2->i_sb->s_dev)
69531+ return true;
69532+ }
69533+
69534+ return false;
69535+}
69536+
69537+#define GR_CHROOT_CAPS {{ \
69538+ CAP_TO_MASK(CAP_LINUX_IMMUTABLE) | CAP_TO_MASK(CAP_NET_ADMIN) | \
69539+ CAP_TO_MASK(CAP_SYS_MODULE) | CAP_TO_MASK(CAP_SYS_RAWIO) | \
69540+ CAP_TO_MASK(CAP_SYS_PACCT) | CAP_TO_MASK(CAP_SYS_ADMIN) | \
69541+ CAP_TO_MASK(CAP_SYS_BOOT) | CAP_TO_MASK(CAP_SYS_TIME) | \
69542+ CAP_TO_MASK(CAP_NET_RAW) | CAP_TO_MASK(CAP_SYS_TTY_CONFIG) | \
69543+ CAP_TO_MASK(CAP_IPC_OWNER) | CAP_TO_MASK(CAP_SETFCAP), \
69544+ CAP_TO_MASK(CAP_SYSLOG) | CAP_TO_MASK(CAP_MAC_ADMIN) }}
69545+
69546+#define security_learn(normal_msg,args...) \
69547+({ \
69548+ read_lock(&grsec_exec_file_lock); \
69549+ gr_add_learn_entry(normal_msg "\n", ## args); \
69550+ read_unlock(&grsec_exec_file_lock); \
69551+})
69552+
69553+enum {
69554+ GR_DO_AUDIT,
69555+ GR_DONT_AUDIT,
69556+ /* used for non-audit messages for which we shouldn't kill the task */
69557+ GR_DONT_AUDIT_GOOD
69558+};
69559+
69560+enum {
69561+ GR_TTYSNIFF,
69562+ GR_RBAC,
69563+ GR_RBAC_STR,
69564+ GR_STR_RBAC,
69565+ GR_RBAC_MODE2,
69566+ GR_RBAC_MODE3,
69567+ GR_FILENAME,
69568+ GR_SYSCTL_HIDDEN,
69569+ GR_NOARGS,
69570+ GR_ONE_INT,
69571+ GR_ONE_INT_TWO_STR,
69572+ GR_ONE_STR,
69573+ GR_STR_INT,
69574+ GR_TWO_STR_INT,
69575+ GR_TWO_INT,
69576+ GR_TWO_U64,
69577+ GR_THREE_INT,
69578+ GR_FIVE_INT_TWO_STR,
69579+ GR_TWO_STR,
69580+ GR_THREE_STR,
69581+ GR_FOUR_STR,
69582+ GR_STR_FILENAME,
69583+ GR_FILENAME_STR,
69584+ GR_FILENAME_TWO_INT,
69585+ GR_FILENAME_TWO_INT_STR,
69586+ GR_TEXTREL,
69587+ GR_PTRACE,
69588+ GR_RESOURCE,
69589+ GR_CAP,
69590+ GR_SIG,
69591+ GR_SIG2,
69592+ GR_CRASH1,
69593+ GR_CRASH2,
69594+ GR_PSACCT,
69595+ GR_RWXMAP
69596+};
69597+
69598+#define gr_log_hidden_sysctl(audit, msg, str) gr_log_varargs(audit, msg, GR_SYSCTL_HIDDEN, str)
69599+#define gr_log_ttysniff(audit, msg, task) gr_log_varargs(audit, msg, GR_TTYSNIFF, task)
69600+#define gr_log_fs_rbac_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_RBAC, dentry, mnt)
69601+#define gr_log_fs_rbac_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_RBAC_STR, dentry, mnt, str)
69602+#define gr_log_fs_str_rbac(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_RBAC, str, dentry, mnt)
69603+#define gr_log_fs_rbac_mode2(audit, msg, dentry, mnt, str1, str2) gr_log_varargs(audit, msg, GR_RBAC_MODE2, dentry, mnt, str1, str2)
69604+#define gr_log_fs_rbac_mode3(audit, msg, dentry, mnt, str1, str2, str3) gr_log_varargs(audit, msg, GR_RBAC_MODE3, dentry, mnt, str1, str2, str3)
69605+#define gr_log_fs_generic(audit, msg, dentry, mnt) gr_log_varargs(audit, msg, GR_FILENAME, dentry, mnt)
69606+#define gr_log_noargs(audit, msg) gr_log_varargs(audit, msg, GR_NOARGS)
69607+#define gr_log_int(audit, msg, num) gr_log_varargs(audit, msg, GR_ONE_INT, num)
69608+#define gr_log_int_str2(audit, msg, num, str1, str2) gr_log_varargs(audit, msg, GR_ONE_INT_TWO_STR, num, str1, str2)
69609+#define gr_log_str(audit, msg, str) gr_log_varargs(audit, msg, GR_ONE_STR, str)
69610+#define gr_log_str_int(audit, msg, str, num) gr_log_varargs(audit, msg, GR_STR_INT, str, num)
69611+#define gr_log_int_int(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_INT, num1, num2)
69612+#define gr_log_two_u64(audit, msg, num1, num2) gr_log_varargs(audit, msg, GR_TWO_U64, num1, num2)
69613+#define gr_log_int3(audit, msg, num1, num2, num3) gr_log_varargs(audit, msg, GR_THREE_INT, num1, num2, num3)
69614+#define gr_log_int5_str2(audit, msg, num1, num2, str1, str2) gr_log_varargs(audit, msg, GR_FIVE_INT_TWO_STR, num1, num2, str1, str2)
69615+#define gr_log_str_str(audit, msg, str1, str2) gr_log_varargs(audit, msg, GR_TWO_STR, str1, str2)
69616+#define gr_log_str2_int(audit, msg, str1, str2, num) gr_log_varargs(audit, msg, GR_TWO_STR_INT, str1, str2, num)
69617+#define gr_log_str3(audit, msg, str1, str2, str3) gr_log_varargs(audit, msg, GR_THREE_STR, str1, str2, str3)
69618+#define gr_log_str4(audit, msg, str1, str2, str3, str4) gr_log_varargs(audit, msg, GR_FOUR_STR, str1, str2, str3, str4)
69619+#define gr_log_str_fs(audit, msg, str, dentry, mnt) gr_log_varargs(audit, msg, GR_STR_FILENAME, str, dentry, mnt)
69620+#define gr_log_fs_str(audit, msg, dentry, mnt, str) gr_log_varargs(audit, msg, GR_FILENAME_STR, dentry, mnt, str)
69621+#define gr_log_fs_int2(audit, msg, dentry, mnt, num1, num2) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT, dentry, mnt, num1, num2)
69622+#define gr_log_fs_int2_str(audit, msg, dentry, mnt, num1, num2, str) gr_log_varargs(audit, msg, GR_FILENAME_TWO_INT_STR, dentry, mnt, num1, num2, str)
69623+#define gr_log_textrel_ulong_ulong(audit, msg, file, ulong1, ulong2) gr_log_varargs(audit, msg, GR_TEXTREL, file, ulong1, ulong2)
69624+#define gr_log_ptrace(audit, msg, task) gr_log_varargs(audit, msg, GR_PTRACE, task)
69625+#define gr_log_res_ulong2_str(audit, msg, task, ulong1, str, ulong2) gr_log_varargs(audit, msg, GR_RESOURCE, task, ulong1, str, ulong2)
69626+#define gr_log_cap(audit, msg, task, str) gr_log_varargs(audit, msg, GR_CAP, task, str)
69627+#define gr_log_sig_addr(audit, msg, str, addr) gr_log_varargs(audit, msg, GR_SIG, str, addr)
69628+#define gr_log_sig_task(audit, msg, task, num) gr_log_varargs(audit, msg, GR_SIG2, task, num)
69629+#define gr_log_crash1(audit, msg, task, ulong) gr_log_varargs(audit, msg, GR_CRASH1, task, ulong)
69630+#define gr_log_crash2(audit, msg, task, ulong1) gr_log_varargs(audit, msg, GR_CRASH2, task, ulong1)
69631+#define gr_log_procacct(audit, msg, task, num1, num2, num3, num4, num5, num6, num7, num8, num9) gr_log_varargs(audit, msg, GR_PSACCT, task, num1, num2, num3, num4, num5, num6, num7, num8, num9)
69632+#define gr_log_rwxmap(audit, msg, str) gr_log_varargs(audit, msg, GR_RWXMAP, str)
69633+
69634+void gr_log_varargs(int audit, const char *msg, int argtypes, ...);
69635+
69636+#endif
69637+
69638+#endif
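Each gr_log_* wrapper pins down the argument shape at the call site by passing an argtypes enum, so gr_log_varargs() can re-read the va_list in the right order and append the caller's identity (the trailing "by " in the grmsg.h strings below is completed from the current task). A hypothetical call and its expansion, illustrating the dispatch only:

	/* hypothetical message string, not one of the GR_*_MSG constants */
	gr_log_str_int(GR_DONT_AUDIT, "denied frob of %.950s (slot %d) by ",
		       name, slot);
	/* preprocesses to:
	 * gr_log_varargs(GR_DONT_AUDIT,
	 *                "denied frob of %.950s (slot %d) by ",
	 *                GR_STR_INT, name, slot);
	 */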
69639diff --git a/include/linux/grmsg.h b/include/linux/grmsg.h
69640new file mode 100644
69641index 0000000..2f159b5
69642--- /dev/null
69643+++ b/include/linux/grmsg.h
69644@@ -0,0 +1,112 @@
69645+#define DEFAULTSECMSG "%.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u, parent %.256s[%.16s:%d] uid/euid:%u/%u gid/egid:%u/%u"
69646+#define GR_ACL_PROCACCT_MSG "%.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u run time:[%ud %uh %um %us] cpu time:[%ud %uh %um %us] %s with exit code %ld, parent %.256s[%.16s:%d] IP:%pI4 TTY:%.64s uid/euid:%u/%u gid/egid:%u/%u"
69647+#define GR_PTRACE_ACL_MSG "denied ptrace of %.950s(%.16s:%d) by "
69648+#define GR_STOPMOD_MSG "denied modification of module state by "
69649+#define GR_ROFS_BLOCKWRITE_MSG "denied write to block device %.950s by "
69650+#define GR_ROFS_MOUNT_MSG "denied writable mount of %.950s by "
69651+#define GR_IOPERM_MSG "denied use of ioperm() by "
69652+#define GR_IOPL_MSG "denied use of iopl() by "
69653+#define GR_SHMAT_ACL_MSG "denied attach of shared memory of UID %u, PID %d, ID %u by "
69654+#define GR_UNIX_CHROOT_MSG "denied connect() to abstract AF_UNIX socket outside of chroot by "
69655+#define GR_SHMAT_CHROOT_MSG "denied attach of shared memory outside of chroot by "
69656+#define GR_MEM_READWRITE_MSG "denied access of range %Lx -> %Lx in /dev/mem by "
69657+#define GR_SYMLINK_MSG "not following symlink %.950s owned by %d.%d by "
69658+#define GR_LEARN_AUDIT_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%lu\t%lu\t%.4095s\t%lu\t%pI4"
69659+#define GR_ID_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%c\t%d\t%d\t%d\t%pI4"
69660+#define GR_HIDDEN_ACL_MSG "%s access to hidden file %.950s by "
69661+#define GR_OPEN_ACL_MSG "%s open of %.950s for%s%s by "
69662+#define GR_CREATE_ACL_MSG "%s create of %.950s for%s%s by "
69663+#define GR_FIFO_MSG "denied writing FIFO %.950s of %d.%d by "
69664+#define GR_MKNOD_CHROOT_MSG "denied mknod of %.950s from chroot by "
69665+#define GR_MKNOD_ACL_MSG "%s mknod of %.950s by "
69666+#define GR_UNIXCONNECT_ACL_MSG "%s connect() to the unix domain socket %.950s by "
69667+#define GR_TTYSNIFF_ACL_MSG "terminal being sniffed by IP:%pI4 %.480s[%.16s:%d], parent %.480s[%.16s:%d] against "
69668+#define GR_MKDIR_ACL_MSG "%s mkdir of %.950s by "
69669+#define GR_RMDIR_ACL_MSG "%s rmdir of %.950s by "
69670+#define GR_UNLINK_ACL_MSG "%s unlink of %.950s by "
69671+#define GR_SYMLINK_ACL_MSG "%s symlink from %.480s to %.480s by "
69672+#define GR_HARDLINK_MSG "denied hardlink of %.930s (owned by %d.%d) to %.30s for "
69673+#define GR_LINK_ACL_MSG "%s link of %.480s to %.480s by "
69674+#define GR_INHERIT_ACL_MSG "successful inherit of %.480s's ACL for %.480s by "
69675+#define GR_RENAME_ACL_MSG "%s rename of %.480s to %.480s by "
69676+#define GR_UNSAFESHARE_EXEC_ACL_MSG "denied exec with cloned fs of %.950s by "
69677+#define GR_PTRACE_EXEC_ACL_MSG "denied ptrace of %.950s by "
69678+#define GR_EXEC_ACL_MSG "%s execution of %.950s by "
69679+#define GR_EXEC_TPE_MSG "denied untrusted exec (due to %.70s) of %.950s by "
69680+#define GR_SEGVSTART_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning uid %u from login for %lu seconds"
69681+#define GR_SEGVNOSUID_ACL_MSG "possible exploit bruteforcing on " DEFAULTSECMSG " banning execution for %lu seconds"
69682+#define GR_MOUNT_CHROOT_MSG "denied mount of %.256s as %.930s from chroot by "
69683+#define GR_PIVOT_CHROOT_MSG "denied pivot_root from chroot by "
69684+#define GR_TRUNCATE_ACL_MSG "%s truncate of %.950s by "
69685+#define GR_ATIME_ACL_MSG "%s access time change of %.950s by "
69686+#define GR_ACCESS_ACL_MSG "%s access of %.950s for%s%s%s by "
69687+#define GR_CHROOT_CHROOT_MSG "denied double chroot to %.950s by "
69688+#define GR_CHMOD_CHROOT_MSG "denied chmod +s of %.950s by "
69689+#define GR_CHMOD_ACL_MSG "%s chmod of %.950s by "
69690+#define GR_CHROOT_FCHDIR_MSG "denied fchdir outside of chroot to %.950s by "
69691+#define GR_CHOWN_ACL_MSG "%s chown of %.950s by "
69692+#define GR_SETXATTR_ACL_MSG "%s setting extended attributes of %.950s by "
69693+#define GR_WRITLIB_ACL_MSG "denied load of writable library %.950s by "
69694+#define GR_INITF_ACL_MSG "init_variables() failed %s by "
69695+#define GR_DISABLED_ACL_MSG "Error loading %s, trying to run kernel with acls disabled. To disable acls at startup use <kernel image name> gracl=off from your boot loader"
69696+#define GR_DEV_ACL_MSG "/dev/grsec: %d bytes sent %d required, being fed garbage by "
69697+#define GR_SHUTS_ACL_MSG "shutdown auth success for "
69698+#define GR_SHUTF_ACL_MSG "shutdown auth failure for "
69699+#define GR_SHUTI_ACL_MSG "ignoring shutdown for disabled RBAC system for "
69700+#define GR_SEGVMODS_ACL_MSG "segvmod auth success for "
69701+#define GR_SEGVMODF_ACL_MSG "segvmod auth failure for "
69702+#define GR_SEGVMODI_ACL_MSG "ignoring segvmod for disabled RBAC system for "
69703+#define GR_ENABLE_ACL_MSG "%s RBAC system loaded by "
69704+#define GR_ENABLEF_ACL_MSG "unable to load %s for "
69705+#define GR_RELOADI_ACL_MSG "ignoring reload request for disabled RBAC system"
69706+#define GR_RELOAD_ACL_MSG "%s RBAC system reloaded by "
69707+#define GR_RELOADF_ACL_MSG "failed reload of %s for "
69708+#define GR_SPROLEI_ACL_MSG "ignoring change to special role for disabled RBAC system for "
69709+#define GR_SPROLES_ACL_MSG "successful change to special role %s (id %d) by "
69710+#define GR_SPROLEL_ACL_MSG "special role %s (id %d) exited by "
69711+#define GR_SPROLEF_ACL_MSG "special role %s failure for "
69712+#define GR_UNSPROLEI_ACL_MSG "ignoring unauth of special role for disabled RBAC system for "
69713+#define GR_UNSPROLES_ACL_MSG "successful unauth of special role %s (id %d) by "
69714+#define GR_INVMODE_ACL_MSG "invalid mode %d by "
69715+#define GR_PRIORITY_CHROOT_MSG "denied priority change of process (%.16s:%d) by "
69716+#define GR_FAILFORK_MSG "failed fork with errno %s by "
69717+#define GR_NICE_CHROOT_MSG "denied priority change by "
69718+#define GR_UNISIGLOG_MSG "%.32s occurred at %p in "
69719+#define GR_DUALSIGLOG_MSG "signal %d sent to " DEFAULTSECMSG " by "
69720+#define GR_SIG_ACL_MSG "denied send of signal %d to protected task " DEFAULTSECMSG " by "
69721+#define GR_SYSCTL_MSG "denied modification of grsecurity sysctl value : %.32s by "
69722+#define GR_SYSCTL_ACL_MSG "%s sysctl of %.950s for%s%s by "
69723+#define GR_TIME_MSG "time set by "
69724+#define GR_DEFACL_MSG "fatal: unable to find subject for (%.16s:%d), loaded by "
69725+#define GR_MMAP_ACL_MSG "%s executable mmap of %.950s by "
69726+#define GR_MPROTECT_ACL_MSG "%s executable mprotect of %.950s by "
69727+#define GR_SOCK_MSG "denied socket(%.16s,%.16s,%.16s) by "
69728+#define GR_SOCK_NOINET_MSG "denied socket(%.16s,%.16s,%d) by "
69729+#define GR_BIND_MSG "denied bind() by "
69730+#define GR_CONNECT_MSG "denied connect() by "
69731+#define GR_BIND_ACL_MSG "denied bind() to %pI4 port %u sock type %.16s protocol %.16s by "
69732+#define GR_CONNECT_ACL_MSG "denied connect() to %pI4 port %u sock type %.16s protocol %.16s by "
69733+#define GR_IP_LEARN_MSG "%s\t%u\t%u\t%u\t%.4095s\t%.4095s\t%pI4\t%u\t%u\t%u\t%u\t%pI4"
69734+#define GR_EXEC_CHROOT_MSG "exec of %.980s within chroot by process "
69735+#define GR_CAP_ACL_MSG "use of %s denied for "
69736+#define GR_CAP_CHROOT_MSG "use of %s in chroot denied for "
69737+#define GR_CAP_ACL_MSG2 "use of %s permitted for "
69738+#define GR_USRCHANGE_ACL_MSG "change to uid %u denied for "
69739+#define GR_GRPCHANGE_ACL_MSG "change to gid %u denied for "
69740+#define GR_REMOUNT_AUDIT_MSG "remount of %.256s by "
69741+#define GR_UNMOUNT_AUDIT_MSG "unmount of %.256s by "
69742+#define GR_MOUNT_AUDIT_MSG "mount of %.256s to %.256s by "
69743+#define GR_CHDIR_AUDIT_MSG "chdir to %.980s by "
69744+#define GR_EXEC_AUDIT_MSG "exec of %.930s (%.128s) by "
69745+#define GR_RESOURCE_MSG "denied resource overstep by requesting %lu for %.16s against limit %lu for "
69746+#define GR_RWXMMAP_MSG "denied RWX mmap of %.950s by "
69747+#define GR_RWXMPROTECT_MSG "denied RWX mprotect of %.950s by "
69748+#define GR_TEXTREL_AUDIT_MSG "text relocation in %s, VMA:0x%08lx 0x%08lx by "
69749+#define GR_VM86_MSG "denied use of vm86 by "
69750+#define GR_PTRACE_AUDIT_MSG "process %.950s(%.16s:%d) attached to via ptrace by "
69751+#define GR_PTRACE_READEXEC_MSG "denied ptrace of unreadable binary %.950s by "
69752+#define GR_INIT_TRANSFER_MSG "persistent special role transferred privilege to init by "
69753+#define GR_BADPROCPID_MSG "denied read of sensitive /proc/pid/%s entry via fd passed across exec by "
69754+#define GR_SYMLINKOWNER_MSG "denied following symlink %.950s since symlink owner %u does not match target owner %u, by "
69755+#define GR_BRUTE_DAEMON_MSG "bruteforce prevention initiated for the next 30 minutes or until service restarted, stalling each fork 30 seconds. Please investigate the crash report for "
69756+#define GR_BRUTE_SUID_MSG "bruteforce prevention initiated due to crash of %.950s against uid %u, banning suid/sgid execs for %u minutes. Please investigate the crash report for "
69757diff --git a/include/linux/grsecurity.h b/include/linux/grsecurity.h
69758new file mode 100644
69759index 0000000..d957f6d
69760--- /dev/null
69761+++ b/include/linux/grsecurity.h
69762@@ -0,0 +1,241 @@
69763+#ifndef GR_SECURITY_H
69764+#define GR_SECURITY_H
69765+#include <linux/fs.h>
69766+#include <linux/fs_struct.h>
69767+#include <linux/binfmts.h>
69768+#include <linux/gracl.h>
69769+
69770+/* notify of brain-dead configs */
69771+#if defined(CONFIG_GRKERNSEC_PROC_USER) && defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
69772+#error "CONFIG_GRKERNSEC_PROC_USER and CONFIG_GRKERNSEC_PROC_USERGROUP cannot both be enabled."
69773+#endif
69774+#if defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_PAGEEXEC) && !defined(CONFIG_PAX_SEGMEXEC) && !defined(CONFIG_PAX_KERNEXEC)
69775+#error "CONFIG_PAX_NOEXEC enabled, but PAGEEXEC, SEGMEXEC, and KERNEXEC are disabled."
69776+#endif
69777+#if defined(CONFIG_PAX_ASLR) && !defined(CONFIG_PAX_RANDKSTACK) && !defined(CONFIG_PAX_RANDUSTACK) && !defined(CONFIG_PAX_RANDMMAP)
69778+#error "CONFIG_PAX_ASLR enabled, but RANDKSTACK, RANDUSTACK, and RANDMMAP are disabled."
69779+#endif
69780+#if defined(CONFIG_PAX) && !defined(CONFIG_PAX_NOEXEC) && !defined(CONFIG_PAX_ASLR)
69781+#error "CONFIG_PAX enabled, but no PaX options are enabled."
69782+#endif
69783+
69784+void gr_handle_brute_attach(unsigned long mm_flags);
69785+void gr_handle_brute_check(void);
69786+void gr_handle_kernel_exploit(void);
69787+
69788+char gr_roletype_to_char(void);
69789+
69790+int gr_acl_enable_at_secure(void);
69791+
69792+int gr_check_user_change(kuid_t real, kuid_t effective, kuid_t fs);
69793+int gr_check_group_change(kgid_t real, kgid_t effective, kgid_t fs);
69794+
69795+void gr_del_task_from_ip_table(struct task_struct *p);
69796+
69797+int gr_pid_is_chrooted(struct task_struct *p);
69798+int gr_handle_chroot_fowner(struct pid *pid, enum pid_type type);
69799+int gr_handle_chroot_nice(void);
69800+int gr_handle_chroot_sysctl(const int op);
69801+int gr_handle_chroot_setpriority(struct task_struct *p,
69802+ const int niceval);
69803+int gr_chroot_fchdir(struct dentry *u_dentry, struct vfsmount *u_mnt);
69804+int gr_handle_chroot_chroot(const struct dentry *dentry,
69805+ const struct vfsmount *mnt);
69806+void gr_handle_chroot_chdir(const struct path *path);
69807+int gr_handle_chroot_chmod(const struct dentry *dentry,
69808+ const struct vfsmount *mnt, const int mode);
69809+int gr_handle_chroot_mknod(const struct dentry *dentry,
69810+ const struct vfsmount *mnt, const int mode);
69811+int gr_handle_chroot_mount(const struct dentry *dentry,
69812+ const struct vfsmount *mnt,
69813+ const char *dev_name);
69814+int gr_handle_chroot_pivot(void);
69815+int gr_handle_chroot_unix(const pid_t pid);
69816+
69817+int gr_handle_rawio(const struct inode *inode);
69818+
69819+void gr_handle_ioperm(void);
69820+void gr_handle_iopl(void);
69821+
69822+umode_t gr_acl_umask(void);
69823+
69824+int gr_tpe_allow(const struct file *file);
69825+
69826+void gr_set_chroot_entries(struct task_struct *task, const struct path *path);
69827+void gr_clear_chroot_entries(struct task_struct *task);
69828+
69829+void gr_log_forkfail(const int retval);
69830+void gr_log_timechange(void);
69831+void gr_log_signal(const int sig, const void *addr, const struct task_struct *t);
69832+void gr_log_chdir(const struct dentry *dentry,
69833+ const struct vfsmount *mnt);
69834+void gr_log_chroot_exec(const struct dentry *dentry,
69835+ const struct vfsmount *mnt);
69836+void gr_log_remount(const char *devname, const int retval);
69837+void gr_log_unmount(const char *devname, const int retval);
69838+void gr_log_mount(const char *from, const char *to, const int retval);
69839+void gr_log_textrel(struct vm_area_struct *vma);
69840+void gr_log_rwxmmap(struct file *file);
69841+void gr_log_rwxmprotect(struct file *file);
69842+
69843+int gr_handle_follow_link(const struct inode *parent,
69844+ const struct inode *inode,
69845+ const struct dentry *dentry,
69846+ const struct vfsmount *mnt);
69847+int gr_handle_fifo(const struct dentry *dentry,
69848+ const struct vfsmount *mnt,
69849+ const struct dentry *dir, const int flag,
69850+ const int acc_mode);
69851+int gr_handle_hardlink(const struct dentry *dentry,
69852+ const struct vfsmount *mnt,
69853+ struct inode *inode,
69854+ const int mode, const struct filename *to);
69855+
69856+int gr_is_capable(const int cap);
69857+int gr_is_capable_nolog(const int cap);
69858+int gr_task_is_capable(const struct task_struct *task, const struct cred *cred, const int cap);
69859+int gr_task_is_capable_nolog(const struct task_struct *task, const int cap);
69860+
69861+void gr_copy_label(struct task_struct *tsk);
69862+void gr_handle_crash(struct task_struct *task, const int sig);
69863+int gr_handle_signal(const struct task_struct *p, const int sig);
69864+int gr_check_crash_uid(const kuid_t uid);
69865+int gr_check_protected_task(const struct task_struct *task);
69866+int gr_check_protected_task_fowner(struct pid *pid, enum pid_type type);
69867+int gr_acl_handle_mmap(const struct file *file,
69868+ const unsigned long prot);
69869+int gr_acl_handle_mprotect(const struct file *file,
69870+ const unsigned long prot);
69871+int gr_check_hidden_task(const struct task_struct *tsk);
69872+__u32 gr_acl_handle_truncate(const struct dentry *dentry,
69873+ const struct vfsmount *mnt);
69874+__u32 gr_acl_handle_utime(const struct dentry *dentry,
69875+ const struct vfsmount *mnt);
69876+__u32 gr_acl_handle_access(const struct dentry *dentry,
69877+ const struct vfsmount *mnt, const int fmode);
69878+__u32 gr_acl_handle_chmod(const struct dentry *dentry,
69879+ const struct vfsmount *mnt, umode_t *mode);
69880+__u32 gr_acl_handle_chown(const struct dentry *dentry,
69881+ const struct vfsmount *mnt);
69882+__u32 gr_acl_handle_setxattr(const struct dentry *dentry,
69883+ const struct vfsmount *mnt);
69884+int gr_handle_ptrace(struct task_struct *task, const long request);
69885+int gr_handle_proc_ptrace(struct task_struct *task);
69886+__u32 gr_acl_handle_execve(const struct dentry *dentry,
69887+ const struct vfsmount *mnt);
69888+int gr_check_crash_exec(const struct file *filp);
69889+int gr_acl_is_enabled(void);
69890+void gr_set_kernel_label(struct task_struct *task);
69891+void gr_set_role_label(struct task_struct *task, const kuid_t uid,
69892+ const kgid_t gid);
69893+int gr_set_proc_label(const struct dentry *dentry,
69894+ const struct vfsmount *mnt,
69895+ const int unsafe_flags);
69896+__u32 gr_acl_handle_hidden_file(const struct dentry *dentry,
69897+ const struct vfsmount *mnt);
69898+__u32 gr_acl_handle_open(const struct dentry *dentry,
69899+ const struct vfsmount *mnt, int acc_mode);
69900+__u32 gr_acl_handle_creat(const struct dentry *dentry,
69901+ const struct dentry *p_dentry,
69902+ const struct vfsmount *p_mnt,
69903+ int open_flags, int acc_mode, const int imode);
69904+void gr_handle_create(const struct dentry *dentry,
69905+ const struct vfsmount *mnt);
69906+void gr_handle_proc_create(const struct dentry *dentry,
69907+ const struct inode *inode);
69908+__u32 gr_acl_handle_mknod(const struct dentry *new_dentry,
69909+ const struct dentry *parent_dentry,
69910+ const struct vfsmount *parent_mnt,
69911+ const int mode);
69912+__u32 gr_acl_handle_mkdir(const struct dentry *new_dentry,
69913+ const struct dentry *parent_dentry,
69914+ const struct vfsmount *parent_mnt);
69915+__u32 gr_acl_handle_rmdir(const struct dentry *dentry,
69916+ const struct vfsmount *mnt);
69917+void gr_handle_delete(const ino_t ino, const dev_t dev);
69918+__u32 gr_acl_handle_unlink(const struct dentry *dentry,
69919+ const struct vfsmount *mnt);
69920+__u32 gr_acl_handle_symlink(const struct dentry *new_dentry,
69921+ const struct dentry *parent_dentry,
69922+ const struct vfsmount *parent_mnt,
69923+ const struct filename *from);
69924+__u32 gr_acl_handle_link(const struct dentry *new_dentry,
69925+ const struct dentry *parent_dentry,
69926+ const struct vfsmount *parent_mnt,
69927+ const struct dentry *old_dentry,
69928+ const struct vfsmount *old_mnt, const struct filename *to);
69929+int gr_handle_symlink_owner(const struct path *link, const struct inode *target);
69930+int gr_acl_handle_rename(struct dentry *new_dentry,
69931+ struct dentry *parent_dentry,
69932+ const struct vfsmount *parent_mnt,
69933+ struct dentry *old_dentry,
69934+ struct inode *old_parent_inode,
69935+ struct vfsmount *old_mnt, const struct filename *newname);
69936+void gr_handle_rename(struct inode *old_dir, struct inode *new_dir,
69937+ struct dentry *old_dentry,
69938+ struct dentry *new_dentry,
69939+ struct vfsmount *mnt, const __u8 replace);
69940+__u32 gr_check_link(const struct dentry *new_dentry,
69941+ const struct dentry *parent_dentry,
69942+ const struct vfsmount *parent_mnt,
69943+ const struct dentry *old_dentry,
69944+ const struct vfsmount *old_mnt);
69945+int gr_acl_handle_filldir(const struct file *file, const char *name,
69946+ const unsigned int namelen, const ino_t ino);
69947+
69948+__u32 gr_acl_handle_unix(const struct dentry *dentry,
69949+ const struct vfsmount *mnt);
69950+void gr_acl_handle_exit(void);
69951+void gr_acl_handle_psacct(struct task_struct *task, const long code);
69952+int gr_acl_handle_procpidmem(const struct task_struct *task);
69953+int gr_handle_rofs_mount(struct dentry *dentry, struct vfsmount *mnt, int mnt_flags);
69954+int gr_handle_rofs_blockwrite(struct dentry *dentry, struct vfsmount *mnt, int acc_mode);
69955+void gr_audit_ptrace(struct task_struct *task);
69956+dev_t gr_get_dev_from_dentry(struct dentry *dentry);
69957+void gr_put_exec_file(struct task_struct *task);
69958+
69959+int gr_ptrace_readexec(struct file *file, int unsafe_flags);
69960+
69961+#if defined(CONFIG_GRKERNSEC) && (defined(CONFIG_GRKERNSEC_RESLOG) || !defined(CONFIG_GRKERNSEC_NO_RBAC))
69962+extern void gr_learn_resource(const struct task_struct *task, const int res,
69963+ const unsigned long wanted, const int gt);
69964+#else
69965+static inline void gr_learn_resource(const struct task_struct *task, const int res,
69966+ const unsigned long wanted, const int gt)
69967+{
69968+}
69969+#endif
69970+
69971+#ifdef CONFIG_GRKERNSEC_RESLOG
69972+extern void gr_log_resource(const struct task_struct *task, const int res,
69973+ const unsigned long wanted, const int gt);
69974+#else
69975+static inline void gr_log_resource(const struct task_struct *task, const int res,
69976+ const unsigned long wanted, const int gt)
69977+{
69978+}
69979+#endif
69980+
69981+#ifdef CONFIG_GRKERNSEC
69982+void task_grsec_rbac(struct seq_file *m, struct task_struct *p);
69983+void gr_handle_vm86(void);
69984+void gr_handle_mem_readwrite(u64 from, u64 to);
69985+
69986+void gr_log_badprocpid(const char *entry);
69987+
69988+extern int grsec_enable_dmesg;
69989+extern int grsec_disable_privio;
69990+
69991+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
69992+extern kgid_t grsec_proc_gid;
69993+#endif
69994+
69995+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
69996+extern int grsec_enable_chroot_findtask;
69997+#endif
69998+#ifdef CONFIG_GRKERNSEC_SETXID
69999+extern int grsec_enable_setxid;
70000+#endif
70001+#endif
70002+
70003+#endif
70004diff --git a/include/linux/grsock.h b/include/linux/grsock.h
70005new file mode 100644
70006index 0000000..e7ffaaf
70007--- /dev/null
70008+++ b/include/linux/grsock.h
70009@@ -0,0 +1,19 @@
70010+#ifndef __GRSOCK_H
70011+#define __GRSOCK_H
70012+
70013+extern void gr_attach_curr_ip(const struct sock *sk);
70014+extern int gr_handle_sock_all(const int family, const int type,
70015+ const int protocol);
70016+extern int gr_handle_sock_server(const struct sockaddr *sck);
70017+extern int gr_handle_sock_server_other(const struct sock *sck);
70018+extern int gr_handle_sock_client(const struct sockaddr *sck);
70019+extern int gr_search_connect(struct socket * sock,
70020+ struct sockaddr_in * addr);
70021+extern int gr_search_bind(struct socket * sock,
70022+ struct sockaddr_in * addr);
70023+extern int gr_search_listen(struct socket * sock);
70024+extern int gr_search_accept(struct socket * sock);
70025+extern int gr_search_socket(const int domain, const int type,
70026+ const int protocol);
70027+
70028+#endif
70029diff --git a/include/linux/highmem.h b/include/linux/highmem.h
70030index 7fb31da..08b5114 100644
70031--- a/include/linux/highmem.h
70032+++ b/include/linux/highmem.h
70033@@ -189,6 +189,18 @@ static inline void clear_highpage(struct page *page)
70034 kunmap_atomic(kaddr);
70035 }
70036
70037+static inline void sanitize_highpage(struct page *page)
70038+{
70039+ void *kaddr;
70040+ unsigned long flags;
70041+
70042+ local_irq_save(flags);
70043+ kaddr = kmap_atomic(page);
70044+ clear_page(kaddr);
70045+ kunmap_atomic(kaddr);
70046+ local_irq_restore(flags);
70047+}
70048+
70049 static inline void zero_user_segments(struct page *page,
70050 unsigned start1, unsigned end1,
70051 unsigned start2, unsigned end2)
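sanitize_highpage() is the helper PAX_MEMORY_SANITIZE uses to scrub page contents on free; unlike clear_highpage() it brackets the atomic kmap with local_irq_save(), so it stays safe when pages are freed from interrupt context. An assumed call site in the page free path (the gate name is hypothetical):

	/* roughly, in free_pages_prepare() or equivalent: */
	if (sanitize_pages_enabled)		/* hypothetical runtime gate */
		sanitize_highpage(page);	/* zero before the page is reusable */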
70052diff --git a/include/linux/hwmon-sysfs.h b/include/linux/hwmon-sysfs.h
70053index 1c7b89a..7f52502 100644
70054--- a/include/linux/hwmon-sysfs.h
70055+++ b/include/linux/hwmon-sysfs.h
70056@@ -25,7 +25,8 @@
70057 struct sensor_device_attribute{
70058 struct device_attribute dev_attr;
70059 int index;
70060-};
70061+} __do_const;
70062+typedef struct sensor_device_attribute __no_const sensor_device_attribute_no_const;
70063 #define to_sensor_dev_attr(_dev_attr) \
70064 container_of(_dev_attr, struct sensor_device_attribute, dev_attr)
70065
70066@@ -41,7 +42,7 @@ struct sensor_device_attribute_2 {
70067 struct device_attribute dev_attr;
70068 u8 index;
70069 u8 nr;
70070-};
70071+} __do_const;
70072 #define to_sensor_dev_attr_2(_dev_attr) \
70073 container_of(_dev_attr, struct sensor_device_attribute_2, dev_attr)
70074
70075diff --git a/include/linux/i2c.h b/include/linux/i2c.h
70076index e988fa9..ff9f17e 100644
70077--- a/include/linux/i2c.h
70078+++ b/include/linux/i2c.h
70079@@ -366,6 +366,7 @@ struct i2c_algorithm {
70080 /* To determine what the adapter supports */
70081 u32 (*functionality) (struct i2c_adapter *);
70082 };
70083+typedef struct i2c_algorithm __no_const i2c_algorithm_no_const;
70084
70085 /**
70086 * struct i2c_bus_recovery_info - I2C bus recovery information
70087diff --git a/include/linux/i2o.h b/include/linux/i2o.h
70088index d23c3c2..eb63c81 100644
70089--- a/include/linux/i2o.h
70090+++ b/include/linux/i2o.h
70091@@ -565,7 +565,7 @@ struct i2o_controller {
70092 struct i2o_device *exec; /* Executive */
70093 #if BITS_PER_LONG == 64
70094 spinlock_t context_list_lock; /* lock for context_list */
70095- atomic_t context_list_counter; /* needed for unique contexts */
70096+ atomic_unchecked_t context_list_counter; /* needed for unique contexts */
70097 struct list_head context_list; /* list of context id's
70098 and pointers */
70099 #endif
70100diff --git a/include/linux/if_pppox.h b/include/linux/if_pppox.h
70101index aff7ad8..3942bbd 100644
70102--- a/include/linux/if_pppox.h
70103+++ b/include/linux/if_pppox.h
70104@@ -76,7 +76,7 @@ struct pppox_proto {
70105 int (*ioctl)(struct socket *sock, unsigned int cmd,
70106 unsigned long arg);
70107 struct module *owner;
70108-};
70109+} __do_const;
70110
70111 extern int register_pppox_proto(int proto_num, const struct pppox_proto *pp);
70112 extern void unregister_pppox_proto(int proto_num);
70113diff --git a/include/linux/init.h b/include/linux/init.h
70114index 8618147..0821126 100644
70115--- a/include/linux/init.h
70116+++ b/include/linux/init.h
70117@@ -39,9 +39,36 @@
70118 * Also note, that this data cannot be "const".
70119 */
70120
70121+#ifdef MODULE
70122+#define add_init_latent_entropy
70123+#define add_devinit_latent_entropy
70124+#define add_cpuinit_latent_entropy
70125+#define add_meminit_latent_entropy
70126+#else
70127+#define add_init_latent_entropy __latent_entropy
70128+
70129+#ifdef CONFIG_HOTPLUG
70130+#define add_devinit_latent_entropy
70131+#else
70132+#define add_devinit_latent_entropy __latent_entropy
70133+#endif
70134+
70135+#ifdef CONFIG_HOTPLUG_CPU
70136+#define add_cpuinit_latent_entropy
70137+#else
70138+#define add_cpuinit_latent_entropy __latent_entropy
70139+#endif
70140+
70141+#ifdef CONFIG_MEMORY_HOTPLUG
70142+#define add_meminit_latent_entropy
70143+#else
70144+#define add_meminit_latent_entropy __latent_entropy
70145+#endif
70146+#endif
70147+
70148 /* These are for everybody (although not all archs will actually
70149 discard it in modules) */
70150-#define __init __section(.init.text) __cold notrace
70151+#define __init __section(.init.text) __cold notrace add_init_latent_entropy
70152 #define __initdata __section(.init.data)
70153 #define __initconst __constsection(.init.rodata)
70154 #define __exitdata __section(.exit.data)
70155@@ -94,7 +121,7 @@
70156 #define __exit __section(.exit.text) __exitused __cold notrace
70157
70158 /* Used for HOTPLUG_CPU */
70159-#define __cpuinit __section(.cpuinit.text) __cold notrace
70160+#define __cpuinit __section(.cpuinit.text) __cold notrace add_cpuinit_latent_entropy
70161 #define __cpuinitdata __section(.cpuinit.data)
70162 #define __cpuinitconst __constsection(.cpuinit.rodata)
70163 #define __cpuexit __section(.cpuexit.text) __exitused __cold notrace
70164@@ -102,7 +129,7 @@
70165 #define __cpuexitconst __constsection(.cpuexit.rodata)
70166
70167 /* Used for MEMORY_HOTPLUG */
70168-#define __meminit __section(.meminit.text) __cold notrace
70169+#define __meminit __section(.meminit.text) __cold notrace add_meminit_latent_entropy
70170 #define __meminitdata __section(.meminit.data)
70171 #define __meminitconst __constsection(.meminit.rodata)
70172 #define __memexit __section(.memexit.text) __exitused __cold notrace
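The add_*_latent_entropy macros attach the latent_entropy plugin's attribute to boot-only code: in built-in (non-module) objects, `__init` functions are instrumented to stir a compile-time-randomized value into the entropy pool, while module code, and hotplug-capable cpuinit/meminit code that stays live after boot, is left alone. Effect sketch, assuming a non-module build:

	/* __init now expands to:
	 *   __section(.init.text) __cold notrace __latent_entropy */
	static int __init example_init(void)
	{
		return 0;	/* plugin-inserted code mixes into the latent pool */
	}
	device_initcall(example_init);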
70173diff --git a/include/linux/init_task.h b/include/linux/init_task.h
70174index 5cd0f09..c9f67cc 100644
70175--- a/include/linux/init_task.h
70176+++ b/include/linux/init_task.h
70177@@ -154,6 +154,12 @@ extern struct task_group root_task_group;
70178
70179 #define INIT_TASK_COMM "swapper"
70180
70181+#ifdef CONFIG_X86
70182+#define INIT_TASK_THREAD_INFO .tinfo = INIT_THREAD_INFO,
70183+#else
70184+#define INIT_TASK_THREAD_INFO
70185+#endif
70186+
70187 /*
70188 * INIT_TASK is used to set up the first task table, touch at
70189 * your own risk!. Base=0, limit=0x1fffff (=2MB)
70190@@ -193,6 +199,7 @@ extern struct task_group root_task_group;
70191 RCU_POINTER_INITIALIZER(cred, &init_cred), \
70192 .comm = INIT_TASK_COMM, \
70193 .thread = INIT_THREAD, \
70194+ INIT_TASK_THREAD_INFO \
70195 .fs = &init_fs, \
70196 .files = &init_files, \
70197 .signal = &init_signals, \
70198diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
70199index 5fa5afe..ac55b25 100644
70200--- a/include/linux/interrupt.h
70201+++ b/include/linux/interrupt.h
70202@@ -430,7 +430,7 @@ enum
70203 /* map softirq index to softirq name. update 'softirq_to_name' in
70204 * kernel/softirq.c when adding a new softirq.
70205 */
70206-extern char *softirq_to_name[NR_SOFTIRQS];
70207+extern const char * const softirq_to_name[NR_SOFTIRQS];
70208
70209 /* softirq mask and active fields moved to irq_cpustat_t in
70210 * asm/hardirq.h to get better cache usage. KAO
70211@@ -438,12 +438,12 @@ extern char *softirq_to_name[NR_SOFTIRQS];
70212
70213 struct softirq_action
70214 {
70215- void (*action)(struct softirq_action *);
70216-};
70217+ void (*action)(void);
70218+} __no_const;
70219
70220 asmlinkage void do_softirq(void);
70221 asmlinkage void __do_softirq(void);
70222-extern void open_softirq(int nr, void (*action)(struct softirq_action *));
70223+extern void open_softirq(int nr, void (*action)(void));
70224 extern void softirq_init(void);
70225 extern void __raise_softirq_irqoff(unsigned int nr);
70226
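Dropping the never-used `struct softirq_action *` parameter gives every softirq handler a fixed, argument-free prototype, which is what lets the vector live in a `__no_const` struct under the constify treatment. Handlers and registration change mechanically; a sketch of an assumed call site:

	static void demo_tx_action(void)	/* was: (struct softirq_action *h) */
	{
		/* body unchanged: the old argument was never used */
	}

	static void demo_softirq_setup(void)
	{
		open_softirq(NET_TX_SOFTIRQ, demo_tx_action);	/* new prototype */
	}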
70227diff --git a/include/linux/iommu.h b/include/linux/iommu.h
70228index 3aeb730..2177f39 100644
70229--- a/include/linux/iommu.h
70230+++ b/include/linux/iommu.h
70231@@ -113,7 +113,7 @@ struct iommu_ops {
70232 u32 (*domain_get_windows)(struct iommu_domain *domain);
70233
70234 unsigned long pgsize_bitmap;
70235-};
70236+} __do_const;
70237
70238 #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */
70239 #define IOMMU_GROUP_NOTIFY_DEL_DEVICE 2 /* Pre Device removed */
70240diff --git a/include/linux/ioport.h b/include/linux/ioport.h
70241index 89b7c24..382af74 100644
70242--- a/include/linux/ioport.h
70243+++ b/include/linux/ioport.h
70244@@ -161,7 +161,7 @@ struct resource *lookup_resource(struct resource *root, resource_size_t start);
70245 int adjust_resource(struct resource *res, resource_size_t start,
70246 resource_size_t size);
70247 resource_size_t resource_alignment(struct resource *res);
70248-static inline resource_size_t resource_size(const struct resource *res)
70249+static inline resource_size_t __intentional_overflow(-1) resource_size(const struct resource *res)
70250 {
70251 return res->end - res->start + 1;
70252 }
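The __intentional_overflow(-1) marking tells the size_overflow plugin not to instrument this helper, because the arithmetic is allowed to wrap by design. A worked case (sketch):

	/* a resource spanning the entire address space */
	struct resource whole = {
		.start	= 0,
		.end	= ~(resource_size_t)0,
	};

	/* resource_size(&whole) evaluates ~0 - 0 + 1, which wraps to 0;
	 * without the annotation the plugin would flag that as a bug */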
70253diff --git a/include/linux/irq.h b/include/linux/irq.h
70254index bc4e066..50468a9 100644
70255--- a/include/linux/irq.h
70256+++ b/include/linux/irq.h
70257@@ -328,7 +328,8 @@ struct irq_chip {
70258 void (*irq_print_chip)(struct irq_data *data, struct seq_file *p);
70259
70260 unsigned long flags;
70261-};
70262+} __do_const;
70263+typedef struct irq_chip __no_const irq_chip_no_const;
70264
70265 /*
70266 * irq_chip specific flags
70267diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h
70268index 3e203eb..3fe68d0 100644
70269--- a/include/linux/irqchip/arm-gic.h
70270+++ b/include/linux/irqchip/arm-gic.h
70271@@ -59,9 +59,11 @@
70272
70273 #ifndef __ASSEMBLY__
70274
70275+#include <linux/irq.h>
70276+
70277 struct device_node;
70278
70279-extern struct irq_chip gic_arch_extn;
70280+extern irq_chip_no_const gic_arch_extn;
70281
70282 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
70283 u32 offset, struct device_node *);
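This arm-gic hunk shows the other half of the constification pattern from irq.h above: ops structures become read-only by default via __do_const, and the rare objects that legitimately must be written at run time, such as gic_arch_extn whose callbacks are filled in during boot, opt out through the *_no_const typedef. A minimal sketch (the chip here is hypothetical):

	static irq_chip_no_const example_chip;	/* deliberately writable */

	static void __init example_chip_setup(void)
	{
		example_chip.name	= "EXAMPLE";
		example_chip.irq_mask	= NULL;	/* installed later at run time */
	}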
70284diff --git a/include/linux/kallsyms.h b/include/linux/kallsyms.h
70285index 6883e19..06992b1 100644
70286--- a/include/linux/kallsyms.h
70287+++ b/include/linux/kallsyms.h
70288@@ -15,7 +15,8 @@
70289
70290 struct module;
70291
70292-#ifdef CONFIG_KALLSYMS
70293+#if !defined(__INCLUDED_BY_HIDESYM) || !defined(CONFIG_KALLSYMS)
70294+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
70295 /* Lookup the address for a symbol. Returns 0 if not found. */
70296 unsigned long kallsyms_lookup_name(const char *name);
70297
70298@@ -106,6 +107,17 @@ static inline int lookup_symbol_attrs(unsigned long addr, unsigned long *size, u
70299 /* Stupid that this does nothing, but I didn't create this mess. */
70300 #define __print_symbol(fmt, addr)
70301 #endif /*CONFIG_KALLSYMS*/
70302+#else /* when included by kallsyms.c, vsnprintf.c, or
70303+ arch/x86/kernel/dumpstack.c, with HIDESYM enabled */
70304+extern void __print_symbol(const char *fmt, unsigned long address);
70305+extern int sprint_backtrace(char *buffer, unsigned long address);
70306+extern int sprint_symbol(char *buffer, unsigned long address);
70307+extern int sprint_symbol_no_offset(char *buffer, unsigned long address);
70308+const char *kallsyms_lookup(unsigned long addr,
70309+ unsigned long *symbolsize,
70310+ unsigned long *offset,
70311+ char **modname, char *namebuf);
70312+#endif
70313
70314 /* This macro allows us to keep printk typechecking */
70315 static __printf(1, 2)
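Per the comment in the hunk above, a handful of trusted consumers still need the real prototypes when GRKERNSEC_HIDESYM is enabled; they are expected to announce themselves before including the header. A sketch of that idiom (as used, per the comment, by kallsyms.c, vsnprintf.c, and dumpstack.c):

	#define __INCLUDED_BY_HIDESYM 1
	#include <linux/kallsyms.h>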
70316diff --git a/include/linux/key-type.h b/include/linux/key-type.h
70317index 518a53a..5e28358 100644
70318--- a/include/linux/key-type.h
70319+++ b/include/linux/key-type.h
70320@@ -125,7 +125,7 @@ struct key_type {
70321 /* internal fields */
70322 struct list_head link; /* link in types list */
70323 struct lock_class_key lock_class; /* key->sem lock class */
70324-};
70325+} __do_const;
70326
70327 extern struct key_type key_type_keyring;
70328
70329diff --git a/include/linux/kgdb.h b/include/linux/kgdb.h
70330index c6e091b..a940adf 100644
70331--- a/include/linux/kgdb.h
70332+++ b/include/linux/kgdb.h
70333@@ -52,7 +52,7 @@ extern int kgdb_connected;
70334 extern int kgdb_io_module_registered;
70335
70336 extern atomic_t kgdb_setting_breakpoint;
70337-extern atomic_t kgdb_cpu_doing_single_step;
70338+extern atomic_unchecked_t kgdb_cpu_doing_single_step;
70339
70340 extern struct task_struct *kgdb_usethread;
70341 extern struct task_struct *kgdb_contthread;
70342@@ -254,7 +254,7 @@ struct kgdb_arch {
70343 void (*correct_hw_break)(void);
70344
70345 void (*enable_nmi)(bool on);
70346-};
70347+} __do_const;
70348
70349 /**
70350 * struct kgdb_io - Describe the interface for an I/O driver to talk with KGDB.
70351@@ -279,7 +279,7 @@ struct kgdb_io {
70352 void (*pre_exception) (void);
70353 void (*post_exception) (void);
70354 int is_console;
70355-};
70356+} __do_const;
70357
70358 extern struct kgdb_arch arch_kgdb_ops;
70359
70360diff --git a/include/linux/kmod.h b/include/linux/kmod.h
70361index 0555cc6..b16a7a4 100644
70362--- a/include/linux/kmod.h
70363+++ b/include/linux/kmod.h
70364@@ -34,6 +34,8 @@ extern char modprobe_path[]; /* for sysctl */
70365 * usually useless though. */
70366 extern __printf(2, 3)
70367 int __request_module(bool wait, const char *name, ...);
70368+extern __printf(3, 4)
70369+int ___request_module(bool wait, char *param_name, const char *name, ...);
70370 #define request_module(mod...) __request_module(true, mod)
70371 #define request_module_nowait(mod...) __request_module(false, mod)
70372 #define try_then_request_module(x, mod...) \
70373diff --git a/include/linux/kobject.h b/include/linux/kobject.h
70374index 939b112..ed6ed51 100644
70375--- a/include/linux/kobject.h
70376+++ b/include/linux/kobject.h
70377@@ -111,7 +111,7 @@ struct kobj_type {
70378 struct attribute **default_attrs;
70379 const struct kobj_ns_type_operations *(*child_ns_type)(struct kobject *kobj);
70380 const void *(*namespace)(struct kobject *kobj);
70381-};
70382+} __do_const;
70383
70384 struct kobj_uevent_env {
70385 char *envp[UEVENT_NUM_ENVP];
70386@@ -134,6 +134,7 @@ struct kobj_attribute {
70387 ssize_t (*store)(struct kobject *kobj, struct kobj_attribute *attr,
70388 const char *buf, size_t count);
70389 };
70390+typedef struct kobj_attribute __no_const kobj_attribute_no_const;
70391
70392 extern const struct sysfs_ops kobj_sysfs_ops;
70393
70394diff --git a/include/linux/kobject_ns.h b/include/linux/kobject_ns.h
70395index f66b065..c2c29b4 100644
70396--- a/include/linux/kobject_ns.h
70397+++ b/include/linux/kobject_ns.h
70398@@ -43,7 +43,7 @@ struct kobj_ns_type_operations {
70399 const void *(*netlink_ns)(struct sock *sk);
70400 const void *(*initial_ns)(void);
70401 void (*drop_ns)(void *);
70402-};
70403+} __do_const;
70404
70405 int kobj_ns_type_register(const struct kobj_ns_type_operations *ops);
70406 int kobj_ns_type_registered(enum kobj_ns_type type);
70407diff --git a/include/linux/kref.h b/include/linux/kref.h
70408index 484604d..0f6c5b6 100644
70409--- a/include/linux/kref.h
70410+++ b/include/linux/kref.h
70411@@ -68,7 +68,7 @@ static inline void kref_get(struct kref *kref)
70412 static inline int kref_sub(struct kref *kref, unsigned int count,
70413 void (*release)(struct kref *kref))
70414 {
70415- WARN_ON(release == NULL);
70416+ BUG_ON(release == NULL);
70417
70418 if (atomic_sub_and_test((int) count, &kref->refcount)) {
70419 release(kref);
70420diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
70421index 8db53cf..c21121d 100644
70422--- a/include/linux/kvm_host.h
70423+++ b/include/linux/kvm_host.h
70424@@ -444,7 +444,7 @@ static inline void kvm_irqfd_exit(void)
70425 {
70426 }
70427 #endif
70428-int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
70429+int kvm_init(const void *opaque, unsigned vcpu_size, unsigned vcpu_align,
70430 struct module *module);
70431 void kvm_exit(void);
70432
70433@@ -616,7 +616,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
70434 struct kvm_guest_debug *dbg);
70435 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);
70436
70437-int kvm_arch_init(void *opaque);
70438+int kvm_arch_init(const void *opaque);
70439 void kvm_arch_exit(void);
70440
70441 int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
70442diff --git a/include/linux/libata.h b/include/linux/libata.h
70443index eae7a05..2cdd875 100644
70444--- a/include/linux/libata.h
70445+++ b/include/linux/libata.h
70446@@ -919,7 +919,7 @@ struct ata_port_operations {
70447 * fields must be pointers.
70448 */
70449 const struct ata_port_operations *inherits;
70450-};
70451+} __do_const;
70452
70453 struct ata_port_info {
70454 unsigned long flags;
70455diff --git a/include/linux/list.h b/include/linux/list.h
70456index b83e565..baa6c1d 100644
70457--- a/include/linux/list.h
70458+++ b/include/linux/list.h
70459@@ -112,6 +112,19 @@ extern void __list_del_entry(struct list_head *entry);
70460 extern void list_del(struct list_head *entry);
70461 #endif
70462
70463+extern void __pax_list_add(struct list_head *new,
70464+ struct list_head *prev,
70465+ struct list_head *next);
70466+static inline void pax_list_add(struct list_head *new, struct list_head *head)
70467+{
70468+ __pax_list_add(new, head, head->next);
70469+}
70470+static inline void pax_list_add_tail(struct list_head *new, struct list_head *head)
70471+{
70472+ __pax_list_add(new, head->prev, head);
70473+}
70474+extern void pax_list_del(struct list_head *entry);
70475+
70476 /**
70477 * list_replace - replace old entry by new one
70478 * @old : the element to be replaced
70479@@ -145,6 +158,8 @@ static inline void list_del_init(struct list_head *entry)
70480 INIT_LIST_HEAD(entry);
70481 }
70482
70483+extern void pax_list_del_init(struct list_head *entry);
70484+
70485 /**
70486 * list_move - delete from one list and add as another's head
70487 * @list: the entry to move
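A usage sketch for the pax_list_* helpers added above: they mirror list_add()/list_add_tail()/list_del() but are implemented out of line so the pointer writes can go through a KERNEXEC open/close window, which is what list nodes living in otherwise read-only (constified) data require. Names below are illustrative:

	static LIST_HEAD(example_head);
	static struct list_head example_node;	/* lives in read-only data */

	static void example_register(void)
	{
		pax_list_add_tail(&example_node, &example_head);
	}

	static void example_unregister(void)
	{
		pax_list_del(&example_node);
	}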
70488diff --git a/include/linux/math64.h b/include/linux/math64.h
70489index 2913b86..4209244 100644
70490--- a/include/linux/math64.h
70491+++ b/include/linux/math64.h
70492@@ -15,7 +15,7 @@
70493 * This is commonly provided by 32bit archs to provide an optimized 64bit
70494 * divide.
70495 */
70496-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
70497+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
70498 {
70499 *remainder = dividend % divisor;
70500 return dividend / divisor;
70501@@ -52,7 +52,7 @@ static inline s64 div64_s64(s64 dividend, s64 divisor)
70502 #define div64_ul(x, y) div_u64((x), (y))
70503
70504 #ifndef div_u64_rem
70505-static inline u64 div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
70506+static inline u64 __intentional_overflow(-1) div_u64_rem(u64 dividend, u32 divisor, u32 *remainder)
70507 {
70508 *remainder = do_div(dividend, divisor);
70509 return dividend;
70510@@ -81,7 +81,7 @@ extern s64 div64_s64(s64 dividend, s64 divisor);
70511 * divide.
70512 */
70513 #ifndef div_u64
70514-static inline u64 div_u64(u64 dividend, u32 divisor)
70515+static inline u64 __intentional_overflow(-1) div_u64(u64 dividend, u32 divisor)
70516 {
70517 u32 remainder;
70518 return div_u64_rem(dividend, divisor, &remainder);
70519diff --git a/include/linux/mm.h b/include/linux/mm.h
70520index e0c8528..bcf0c29 100644
70521--- a/include/linux/mm.h
70522+++ b/include/linux/mm.h
70523@@ -104,6 +104,11 @@ extern unsigned int kobjsize(const void *objp);
70524 #define VM_HUGETLB 0x00400000 /* Huge TLB Page VM */
70525 #define VM_NONLINEAR 0x00800000 /* Is non-linear (remap_file_pages) */
70526 #define VM_ARCH_1 0x01000000 /* Architecture-specific flag */
70527+
70528+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
70529+#define VM_PAGEEXEC 0x02000000 /* vma->vm_page_prot needs special handling */
70530+#endif
70531+
70532 #define VM_DONTDUMP 0x04000000 /* Do not include in the core dump */
70533
70534 #define VM_MIXEDMAP 0x10000000 /* Can contain "struct page" and pure PFN pages */
70535@@ -205,8 +210,8 @@ struct vm_operations_struct {
70536 /* called by access_process_vm when get_user_pages() fails, typically
70537 * for use by special VMAs that can switch between memory and hardware
70538 */
70539- int (*access)(struct vm_area_struct *vma, unsigned long addr,
70540- void *buf, int len, int write);
70541+ ssize_t (*access)(struct vm_area_struct *vma, unsigned long addr,
70542+ void *buf, size_t len, int write);
70543 #ifdef CONFIG_NUMA
70544 /*
70545 * set_policy() op must add a reference to any non-NULL @new mempolicy
70546@@ -236,6 +241,7 @@ struct vm_operations_struct {
70547 int (*remap_pages)(struct vm_area_struct *vma, unsigned long addr,
70548 unsigned long size, pgoff_t pgoff);
70549 };
70550+typedef struct vm_operations_struct __no_const vm_operations_struct_no_const;
70551
70552 struct mmu_gather;
70553 struct inode;
70554@@ -980,8 +986,8 @@ int follow_pfn(struct vm_area_struct *vma, unsigned long address,
70555 unsigned long *pfn);
70556 int follow_phys(struct vm_area_struct *vma, unsigned long address,
70557 unsigned int flags, unsigned long *prot, resource_size_t *phys);
70558-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
70559- void *buf, int len, int write);
70560+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
70561+ void *buf, size_t len, int write);
70562
70563 static inline void unmap_shared_mapping_range(struct address_space *mapping,
70564 loff_t const holebegin, loff_t const holelen)
70565@@ -1020,9 +1026,9 @@ static inline int fixup_user_fault(struct task_struct *tsk,
70566 }
70567 #endif
70568
70569-extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write);
70570-extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
70571- void *buf, int len, int write);
70572+extern ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write);
70573+extern ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
70574+ void *buf, size_t len, int write);
70575
70576 long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
70577 unsigned long start, unsigned long nr_pages,
70578@@ -1053,34 +1059,6 @@ int set_page_dirty(struct page *page);
70579 int set_page_dirty_lock(struct page *page);
70580 int clear_page_dirty_for_io(struct page *page);
70581
70582-/* Is the vma a continuation of the stack vma above it? */
70583-static inline int vma_growsdown(struct vm_area_struct *vma, unsigned long addr)
70584-{
70585- return vma && (vma->vm_end == addr) && (vma->vm_flags & VM_GROWSDOWN);
70586-}
70587-
70588-static inline int stack_guard_page_start(struct vm_area_struct *vma,
70589- unsigned long addr)
70590-{
70591- return (vma->vm_flags & VM_GROWSDOWN) &&
70592- (vma->vm_start == addr) &&
70593- !vma_growsdown(vma->vm_prev, addr);
70594-}
70595-
70596-/* Is the vma a continuation of the stack vma below it? */
70597-static inline int vma_growsup(struct vm_area_struct *vma, unsigned long addr)
70598-{
70599- return vma && (vma->vm_start == addr) && (vma->vm_flags & VM_GROWSUP);
70600-}
70601-
70602-static inline int stack_guard_page_end(struct vm_area_struct *vma,
70603- unsigned long addr)
70604-{
70605- return (vma->vm_flags & VM_GROWSUP) &&
70606- (vma->vm_end == addr) &&
70607- !vma_growsup(vma->vm_next, addr);
70608-}
70609-
70610 extern pid_t
70611 vm_is_stack(struct task_struct *task, struct vm_area_struct *vma, int in_group);
70612
70613@@ -1180,6 +1158,15 @@ static inline void sync_mm_rss(struct mm_struct *mm)
70614 }
70615 #endif
70616
70617+#ifdef CONFIG_MMU
70618+pgprot_t vm_get_page_prot(vm_flags_t vm_flags);
70619+#else
70620+static inline pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
70621+{
70622+ return __pgprot(0);
70623+}
70624+#endif
70625+
70626 int vma_wants_writenotify(struct vm_area_struct *vma);
70627
70628 extern pte_t *__get_locked_pte(struct mm_struct *mm, unsigned long addr,
70629@@ -1198,8 +1185,15 @@ static inline int __pud_alloc(struct mm_struct *mm, pgd_t *pgd,
70630 {
70631 return 0;
70632 }
70633+
70634+static inline int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd,
70635+ unsigned long address)
70636+{
70637+ return 0;
70638+}
70639 #else
70640 int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
70641+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address);
70642 #endif
70643
70644 #ifdef __PAGETABLE_PMD_FOLDED
70645@@ -1208,8 +1202,15 @@ static inline int __pmd_alloc(struct mm_struct *mm, pud_t *pud,
70646 {
70647 return 0;
70648 }
70649+
70650+static inline int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud,
70651+ unsigned long address)
70652+{
70653+ return 0;
70654+}
70655 #else
70656 int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address);
70657+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address);
70658 #endif
70659
70660 int __pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
70661@@ -1227,11 +1228,23 @@ static inline pud_t *pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long a
70662 NULL: pud_offset(pgd, address);
70663 }
70664
70665+static inline pud_t *pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
70666+{
70667+ return (unlikely(pgd_none(*pgd)) && __pud_alloc_kernel(mm, pgd, address))?
70668+ NULL: pud_offset(pgd, address);
70669+}
70670+
70671 static inline pmd_t *pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
70672 {
70673 return (unlikely(pud_none(*pud)) && __pmd_alloc(mm, pud, address))?
70674 NULL: pmd_offset(pud, address);
70675 }
70676+
70677+static inline pmd_t *pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
70678+{
70679+ return (unlikely(pud_none(*pud)) && __pmd_alloc_kernel(mm, pud, address))?
70680+ NULL: pmd_offset(pud, address);
70681+}
70682 #endif /* CONFIG_MMU && !__ARCH_HAS_4LEVEL_HACK */
70683
70684 #if USE_SPLIT_PTLOCKS
70685@@ -1517,6 +1530,7 @@ extern unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
70686 unsigned long len, unsigned long prot, unsigned long flags,
70687 unsigned long pgoff, unsigned long *populate);
70688 extern int do_munmap(struct mm_struct *, unsigned long, size_t);
70689+extern int __do_munmap(struct mm_struct *, unsigned long, size_t);
70690
70691 #ifdef CONFIG_MMU
70692 extern int __mm_populate(unsigned long addr, unsigned long len,
70693@@ -1545,10 +1559,11 @@ struct vm_unmapped_area_info {
70694 unsigned long high_limit;
70695 unsigned long align_mask;
70696 unsigned long align_offset;
70697+ unsigned long threadstack_offset;
70698 };
70699
70700-extern unsigned long unmapped_area(struct vm_unmapped_area_info *info);
70701-extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
70702+extern unsigned long unmapped_area(const struct vm_unmapped_area_info *info);
70703+extern unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info);
70704
70705 /*
70706 * Search for an unmapped address range.
70707@@ -1560,7 +1575,7 @@ extern unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info);
70708 * - satisfies (begin_addr & align_mask) == (align_offset & align_mask)
70709 */
70710 static inline unsigned long
70711-vm_unmapped_area(struct vm_unmapped_area_info *info)
70712+vm_unmapped_area(const struct vm_unmapped_area_info *info)
70713 {
70714 if (!(info->flags & VM_UNMAPPED_AREA_TOPDOWN))
70715 return unmapped_area(info);
70716@@ -1623,6 +1638,10 @@ extern struct vm_area_struct * find_vma(struct mm_struct * mm, unsigned long add
70717 extern struct vm_area_struct * find_vma_prev(struct mm_struct * mm, unsigned long addr,
70718 struct vm_area_struct **pprev);
70719
70720+extern struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma);
70721+extern __must_check long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma);
70722+extern void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl);
70723+
70724 /* Look up the first VMA which intersects the interval start_addr..end_addr-1,
70725 NULL if none. Assume start_addr < end_addr. */
70726 static inline struct vm_area_struct * find_vma_intersection(struct mm_struct * mm, unsigned long start_addr, unsigned long end_addr)
70727@@ -1651,15 +1670,6 @@ static inline struct vm_area_struct *find_exact_vma(struct mm_struct *mm,
70728 return vma;
70729 }
70730
70731-#ifdef CONFIG_MMU
70732-pgprot_t vm_get_page_prot(unsigned long vm_flags);
70733-#else
70734-static inline pgprot_t vm_get_page_prot(unsigned long vm_flags)
70735-{
70736- return __pgprot(0);
70737-}
70738-#endif
70739-
70740 #ifdef CONFIG_ARCH_USES_NUMA_PROT_NONE
70741 unsigned long change_prot_numa(struct vm_area_struct *vma,
70742 unsigned long start, unsigned long end);
70743@@ -1711,6 +1721,11 @@ void vm_stat_account(struct mm_struct *, unsigned long, struct file *, long);
70744 static inline void vm_stat_account(struct mm_struct *mm,
70745 unsigned long flags, struct file *file, long pages)
70746 {
70747+
70748+#ifdef CONFIG_PAX_RANDMMAP
70749+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
70750+#endif
70751+
70752 mm->total_vm += pages;
70753 }
70754 #endif /* CONFIG_PROC_FS */
70755@@ -1791,7 +1806,7 @@ extern int unpoison_memory(unsigned long pfn);
70756 extern int sysctl_memory_failure_early_kill;
70757 extern int sysctl_memory_failure_recovery;
70758 extern void shake_page(struct page *p, int access);
70759-extern atomic_long_t num_poisoned_pages;
70760+extern atomic_long_unchecked_t num_poisoned_pages;
70761 extern int soft_offline_page(struct page *page, int flags);
70762
70763 extern void dump_page(struct page *page);
70764@@ -1828,5 +1843,11 @@ void __init setup_nr_node_ids(void);
70765 static inline void setup_nr_node_ids(void) {}
70766 #endif
70767
70768+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
70769+extern void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot);
70770+#else
70771+static inline void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot) {}
70772+#endif
70773+
70774 #endif /* __KERNEL__ */
70775 #endif /* _LINUX_MM_H */
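Several mm.h changes above come together in kernel page-table construction: the new *_alloc_kernel() variants mirror pud_alloc()/pmd_alloc() but are intended for kernel mappings, where the writes must take the KERNEXEC-aware path. A sketch of the intended walk, with error handling reduced to NULL returns (the function itself is illustrative):

	static pte_t *example_kernel_walk(struct mm_struct *mm, unsigned long addr)
	{
		pgd_t *pgd = pgd_offset(mm, addr);
		pud_t *pud = pud_alloc_kernel(mm, pgd, addr);
		pmd_t *pmd;

		if (!pud)
			return NULL;
		pmd = pmd_alloc_kernel(mm, pud, addr);
		if (!pmd)
			return NULL;
		return pte_offset_kernel(pmd, addr);
	}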
70776diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
70777index ace9a5f..81bdb59 100644
70778--- a/include/linux/mm_types.h
70779+++ b/include/linux/mm_types.h
70780@@ -289,6 +289,8 @@ struct vm_area_struct {
70781 #ifdef CONFIG_NUMA
70782 struct mempolicy *vm_policy; /* NUMA policy for the VMA */
70783 #endif
70784+
70785+ struct vm_area_struct *vm_mirror;/* PaX: mirror vma or NULL */
70786 };
70787
70788 struct core_thread {
70789@@ -437,6 +439,24 @@ struct mm_struct {
70790 int first_nid;
70791 #endif
70792 struct uprobes_state uprobes_state;
70793+
70794+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
70795+ unsigned long pax_flags;
70796+#endif
70797+
70798+#ifdef CONFIG_PAX_DLRESOLVE
70799+ unsigned long call_dl_resolve;
70800+#endif
70801+
70802+#if defined(CONFIG_PPC32) && defined(CONFIG_PAX_EMUSIGRT)
70803+ unsigned long call_syscall;
70804+#endif
70805+
70806+#ifdef CONFIG_PAX_ASLR
70807+ unsigned long delta_mmap; /* randomized offset */
70808+ unsigned long delta_stack; /* randomized offset */
70809+#endif
70810+
70811 };
70812
70813 /* first nid will either be a valid NID or one of these values */
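The new mm_struct fields are written once at exec time and consumed by the arch mmap layout code; the sketch below shows the shape of a consumer of the ASLR deltas, not the actual arch implementation:

	static unsigned long example_mmap_base(struct mm_struct *mm)
	{
		unsigned long base = TASK_UNMAPPED_BASE;

	#ifdef CONFIG_PAX_ASLR
		if (mm->pax_flags & MF_PAX_RANDMMAP)
			base += mm->delta_mmap;	/* per-mm randomised offset */
	#endif

		return base;
	}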
70814diff --git a/include/linux/mmiotrace.h b/include/linux/mmiotrace.h
70815index c5d5278..f0b68c8 100644
70816--- a/include/linux/mmiotrace.h
70817+++ b/include/linux/mmiotrace.h
70818@@ -46,7 +46,7 @@ extern int kmmio_handler(struct pt_regs *regs, unsigned long addr);
70819 /* Called from ioremap.c */
70820 extern void mmiotrace_ioremap(resource_size_t offset, unsigned long size,
70821 void __iomem *addr);
70822-extern void mmiotrace_iounmap(volatile void __iomem *addr);
70823+extern void mmiotrace_iounmap(const volatile void __iomem *addr);
70824
70825 /* For anyone to insert markers. Remember trailing newline. */
70826 extern __printf(1, 2) int mmiotrace_printk(const char *fmt, ...);
70827@@ -66,7 +66,7 @@ static inline void mmiotrace_ioremap(resource_size_t offset,
70828 {
70829 }
70830
70831-static inline void mmiotrace_iounmap(volatile void __iomem *addr)
70832+static inline void mmiotrace_iounmap(const volatile void __iomem *addr)
70833 {
70834 }
70835
70836diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
70837index 5c76737..61f518e 100644
70838--- a/include/linux/mmzone.h
70839+++ b/include/linux/mmzone.h
70840@@ -396,7 +396,7 @@ struct zone {
70841 unsigned long flags; /* zone flags, see below */
70842
70843 /* Zone statistics */
70844- atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70845+ atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
70846
70847 /*
70848 * The target ratio of ACTIVE_ANON to INACTIVE_ANON pages on
70849diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
70850index b508016..237cfe5 100644
70851--- a/include/linux/mod_devicetable.h
70852+++ b/include/linux/mod_devicetable.h
70853@@ -13,7 +13,7 @@
70854 typedef unsigned long kernel_ulong_t;
70855 #endif
70856
70857-#define PCI_ANY_ID (~0)
70858+#define PCI_ANY_ID ((__u16)~0)
70859
70860 struct pci_device_id {
70861 __u32 vendor, device; /* Vendor and device ID or PCI_ANY_ID*/
70862@@ -139,7 +139,7 @@ struct usb_device_id {
70863 #define USB_DEVICE_ID_MATCH_INT_PROTOCOL 0x0200
70864 #define USB_DEVICE_ID_MATCH_INT_NUMBER 0x0400
70865
70866-#define HID_ANY_ID (~0)
70867+#define HID_ANY_ID (~0U)
70868 #define HID_BUS_ANY 0xffff
70869 #define HID_GROUP_ANY 0x0000
70870
70871@@ -465,7 +465,7 @@ struct dmi_system_id {
70872 const char *ident;
70873 struct dmi_strmatch matches[4];
70874 void *driver_data;
70875-};
70876+} __do_const;
70877 /*
70878 * struct dmi_device_id appears during expansion of
70879 * "MODULE_DEVICE_TABLE(dmi, x)". Compiler doesn't look inside it
70880diff --git a/include/linux/module.h b/include/linux/module.h
70881index 46f1ea0..a34ca37 100644
70882--- a/include/linux/module.h
70883+++ b/include/linux/module.h
70884@@ -17,9 +17,11 @@
70885 #include <linux/moduleparam.h>
70886 #include <linux/tracepoint.h>
70887 #include <linux/export.h>
70888+#include <linux/fs.h>
70889
70890 #include <linux/percpu.h>
70891 #include <asm/module.h>
70892+#include <asm/pgtable.h>
70893
70894 /* In stripped ARM and x86-64 modules, ~ is surprisingly rare. */
70895 #define MODULE_SIG_STRING "~Module signature appended~\n"
70896@@ -54,12 +56,13 @@ struct module_attribute {
70897 int (*test)(struct module *);
70898 void (*free)(struct module *);
70899 };
70900+typedef struct module_attribute __no_const module_attribute_no_const;
70901
70902 struct module_version_attribute {
70903 struct module_attribute mattr;
70904 const char *module_name;
70905 const char *version;
70906-} __attribute__ ((__aligned__(sizeof(void *))));
70907+} __do_const __attribute__ ((__aligned__(sizeof(void *))));
70908
70909 extern ssize_t __modver_version_show(struct module_attribute *,
70910 struct module_kobject *, char *);
70911@@ -232,7 +235,7 @@ struct module
70912
70913 /* Sysfs stuff. */
70914 struct module_kobject mkobj;
70915- struct module_attribute *modinfo_attrs;
70916+ module_attribute_no_const *modinfo_attrs;
70917 const char *version;
70918 const char *srcversion;
70919 struct kobject *holders_dir;
70920@@ -281,19 +284,16 @@ struct module
70921 int (*init)(void);
70922
70923 /* If this is non-NULL, vfree after init() returns */
70924- void *module_init;
70925+ void *module_init_rx, *module_init_rw;
70926
70927 /* Here is the actual code + data, vfree'd on unload. */
70928- void *module_core;
70929+ void *module_core_rx, *module_core_rw;
70930
70931 /* Here are the sizes of the init and core sections */
70932- unsigned int init_size, core_size;
70933+ unsigned int init_size_rw, core_size_rw;
70934
70935 /* The size of the executable code in each section. */
70936- unsigned int init_text_size, core_text_size;
70937-
70938- /* Size of RO sections of the module (text+rodata) */
70939- unsigned int init_ro_size, core_ro_size;
70940+ unsigned int init_size_rx, core_size_rx;
70941
70942 /* Arch-specific module values */
70943 struct mod_arch_specific arch;
70944@@ -349,6 +349,10 @@ struct module
70945 #ifdef CONFIG_EVENT_TRACING
70946 struct ftrace_event_call **trace_events;
70947 unsigned int num_trace_events;
70948+ struct file_operations trace_id;
70949+ struct file_operations trace_enable;
70950+ struct file_operations trace_format;
70951+ struct file_operations trace_filter;
70952 #endif
70953 #ifdef CONFIG_FTRACE_MCOUNT_RECORD
70954 unsigned int num_ftrace_callsites;
70955@@ -396,16 +400,46 @@ bool is_module_address(unsigned long addr);
70956 bool is_module_percpu_address(unsigned long addr);
70957 bool is_module_text_address(unsigned long addr);
70958
70959+static inline int within_module_range(unsigned long addr, void *start, unsigned long size)
70960+{
70961+
70962+#ifdef CONFIG_PAX_KERNEXEC
70963+ if (ktla_ktva(addr) >= (unsigned long)start &&
70964+ ktla_ktva(addr) < (unsigned long)start + size)
70965+ return 1;
70966+#endif
70967+
70968+ return ((void *)addr >= start && (void *)addr < start + size);
70969+}
70970+
70971+static inline int within_module_core_rx(unsigned long addr, const struct module *mod)
70972+{
70973+ return within_module_range(addr, mod->module_core_rx, mod->core_size_rx);
70974+}
70975+
70976+static inline int within_module_core_rw(unsigned long addr, const struct module *mod)
70977+{
70978+ return within_module_range(addr, mod->module_core_rw, mod->core_size_rw);
70979+}
70980+
70981+static inline int within_module_init_rx(unsigned long addr, const struct module *mod)
70982+{
70983+ return within_module_range(addr, mod->module_init_rx, mod->init_size_rx);
70984+}
70985+
70986+static inline int within_module_init_rw(unsigned long addr, const struct module *mod)
70987+{
70988+ return within_module_range(addr, mod->module_init_rw, mod->init_size_rw);
70989+}
70990+
70991 static inline int within_module_core(unsigned long addr, const struct module *mod)
70992 {
70993- return (unsigned long)mod->module_core <= addr &&
70994- addr < (unsigned long)mod->module_core + mod->core_size;
70995+ return within_module_core_rx(addr, mod) || within_module_core_rw(addr, mod);
70996 }
70997
70998 static inline int within_module_init(unsigned long addr, const struct module *mod)
70999 {
71000- return (unsigned long)mod->module_init <= addr &&
71001- addr < (unsigned long)mod->module_init + mod->init_size;
71002+ return within_module_init_rx(addr, mod) || within_module_init_rw(addr, mod);
71003 }
71004
71005 /* Search for module by name: must hold module_mutex. */
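With the module image split into rx and rw regions, callers that specifically care about executable addresses should use the rx variants rather than the catch-all helpers, which now accept either half. A sketch:

	static int example_is_module_text(unsigned long addr,
					  const struct module *mod)
	{
		/* only the executable halves count as text */
		return within_module_core_rx(addr, mod) ||
		       within_module_init_rx(addr, mod);
	}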
71006diff --git a/include/linux/moduleloader.h b/include/linux/moduleloader.h
71007index 560ca53..ef621ef 100644
71008--- a/include/linux/moduleloader.h
71009+++ b/include/linux/moduleloader.h
71010@@ -25,9 +25,21 @@ unsigned int arch_mod_section_prepend(struct module *mod, unsigned int section);
71011 sections. Returns NULL on failure. */
71012 void *module_alloc(unsigned long size);
71013
71014+#ifdef CONFIG_PAX_KERNEXEC
71015+void *module_alloc_exec(unsigned long size);
71016+#else
71017+#define module_alloc_exec(x) module_alloc(x)
71018+#endif
71019+
71020 /* Free memory returned from module_alloc. */
71021 void module_free(struct module *mod, void *module_region);
71022
71023+#ifdef CONFIG_PAX_KERNEXEC
71024+void module_free_exec(struct module *mod, void *module_region);
71025+#else
71026+#define module_free_exec(x, y) module_free((x), (y))
71027+#endif
71028+
71029 /*
71030 * Apply the given relocation to the (simplified) ELF. Return -error
71031 * or 0.
71032@@ -45,7 +57,9 @@ static inline int apply_relocate(Elf_Shdr *sechdrs,
71033 unsigned int relsec,
71034 struct module *me)
71035 {
71036+#ifdef CONFIG_MODULES
71037 printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
71038+#endif
71039 return -ENOEXEC;
71040 }
71041 #endif
71042@@ -67,7 +81,9 @@ static inline int apply_relocate_add(Elf_Shdr *sechdrs,
71043 unsigned int relsec,
71044 struct module *me)
71045 {
71046+#ifdef CONFIG_MODULES
71047 printk(KERN_ERR "module %s: RELA relocation unsupported\n", me->name);
71048+#endif
71049 return -ENOEXEC;
71050 }
71051 #endif
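The split allocator above pairs with the split module layout from module.h: text goes through the *_exec variants (mapped RX under KERNEXEC) and data through the plain ones (RW, non-executable); without KERNEXEC both collapse to module_alloc()/module_free(). A sketch of the pairing, with sizes and error handling purely illustrative:

	static int example_layout(struct module *mod, unsigned long text_size,
				  unsigned long data_size)
	{
		void *text = module_alloc_exec(text_size);	/* RX region */
		void *data = module_alloc(data_size);		/* RW, NX */

		if (!text || !data) {
			module_free_exec(mod, text);
			module_free(mod, data);
			return -ENOMEM;
		}
		/* ... copy sections, relocate, protect ... */
		return 0;
	}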
71052diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
71053index 137b419..fe663ec 100644
71054--- a/include/linux/moduleparam.h
71055+++ b/include/linux/moduleparam.h
71056@@ -284,7 +284,7 @@ static inline void __kernel_param_unlock(void)
71057 * @len is usually just sizeof(string).
71058 */
71059 #define module_param_string(name, string, len, perm) \
71060- static const struct kparam_string __param_string_##name \
71061+ static const struct kparam_string __param_string_##name __used \
71062 = { len, string }; \
71063 __module_param_call(MODULE_PARAM_PREFIX, name, \
71064 &param_ops_string, \
71065@@ -423,7 +423,7 @@ extern int param_set_bint(const char *val, const struct kernel_param *kp);
71066 */
71067 #define module_param_array_named(name, array, type, nump, perm) \
71068 param_check_##type(name, &(array)[0]); \
71069- static const struct kparam_array __param_arr_##name \
71070+ static const struct kparam_array __param_arr_##name __used \
71071 = { .max = ARRAY_SIZE(array), .num = nump, \
71072 .ops = &param_ops_##type, \
71073 .elemsize = sizeof(array[0]), .elem = array }; \
71074diff --git a/include/linux/namei.h b/include/linux/namei.h
71075index 5a5ff57..5ae5070 100644
71076--- a/include/linux/namei.h
71077+++ b/include/linux/namei.h
71078@@ -19,7 +19,7 @@ struct nameidata {
71079 unsigned seq;
71080 int last_type;
71081 unsigned depth;
71082- char *saved_names[MAX_NESTED_LINKS + 1];
71083+ const char *saved_names[MAX_NESTED_LINKS + 1];
71084 };
71085
71086 /*
71087@@ -84,12 +84,12 @@ extern void unlock_rename(struct dentry *, struct dentry *);
71088
71089 extern void nd_jump_link(struct nameidata *nd, struct path *path);
71090
71091-static inline void nd_set_link(struct nameidata *nd, char *path)
71092+static inline void nd_set_link(struct nameidata *nd, const char *path)
71093 {
71094 nd->saved_names[nd->depth] = path;
71095 }
71096
71097-static inline char *nd_get_link(struct nameidata *nd)
71098+static inline const char *nd_get_link(const struct nameidata *nd)
71099 {
71100 return nd->saved_names[nd->depth];
71101 }
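Constifying saved_names lets ->follow_link() implementations hand the VFS strings they have no business modifying; a minimal sketch (the filesystem hook is hypothetical):

	static void *example_follow_link(struct dentry *dentry,
					 struct nameidata *nd)
	{
		/* a string literal is fine now that the slot is const char * */
		nd_set_link(nd, "/illustrative/target");
		return NULL;
	}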
71102diff --git a/include/linux/net.h b/include/linux/net.h
71103index 99c9f0c..e1cf296 100644
71104--- a/include/linux/net.h
71105+++ b/include/linux/net.h
71106@@ -183,7 +183,7 @@ struct net_proto_family {
71107 int (*create)(struct net *net, struct socket *sock,
71108 int protocol, int kern);
71109 struct module *owner;
71110-};
71111+} __do_const;
71112
71113 struct iovec;
71114 struct kvec;
71115diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
71116index 96e4c21..9cc8278 100644
71117--- a/include/linux/netdevice.h
71118+++ b/include/linux/netdevice.h
71119@@ -1026,6 +1026,7 @@ struct net_device_ops {
71120 int (*ndo_change_carrier)(struct net_device *dev,
71121 bool new_carrier);
71122 };
71123+typedef struct net_device_ops __no_const net_device_ops_no_const;
71124
71125 /*
71126 * The DEVICE structure.
71127@@ -1094,7 +1095,7 @@ struct net_device {
71128 int iflink;
71129
71130 struct net_device_stats stats;
71131- atomic_long_t rx_dropped; /* dropped packets by core network
71132+ atomic_long_unchecked_t rx_dropped; /* dropped packets by core network
71133 * Do not use this in drivers.
71134 */
71135
71136diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
71137index 0060fde..481c6ae 100644
71138--- a/include/linux/netfilter.h
71139+++ b/include/linux/netfilter.h
71140@@ -82,7 +82,7 @@ struct nf_sockopt_ops {
71141 #endif
71142 /* Use the module struct to lock set/get code in place */
71143 struct module *owner;
71144-};
71145+} __do_const;
71146
71147 /* Function to register/unregister hook points. */
71148 int nf_register_hook(struct nf_hook_ops *reg);
71149diff --git a/include/linux/netfilter/ipset/ip_set.h b/include/linux/netfilter/ipset/ip_set.h
71150index d80e275..c3510b8 100644
71151--- a/include/linux/netfilter/ipset/ip_set.h
71152+++ b/include/linux/netfilter/ipset/ip_set.h
71153@@ -124,7 +124,7 @@ struct ip_set_type_variant {
71154 /* Return true if "b" set is the same as "a"
71155 * according to the create set parameters */
71156 bool (*same_set)(const struct ip_set *a, const struct ip_set *b);
71157-};
71158+} __do_const;
71159
71160 /* The core set type structure */
71161 struct ip_set_type {
71162diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
71163index cadb740..d7c37c0 100644
71164--- a/include/linux/netfilter/nfnetlink.h
71165+++ b/include/linux/netfilter/nfnetlink.h
71166@@ -16,7 +16,7 @@ struct nfnl_callback {
71167 const struct nlattr * const cda[]);
71168 const struct nla_policy *policy; /* netlink attribute policy */
71169 const u_int16_t attr_count; /* number of nlattr's */
71170-};
71171+} __do_const;
71172
71173 struct nfnetlink_subsystem {
71174 const char *name;
71175diff --git a/include/linux/netfilter/xt_gradm.h b/include/linux/netfilter/xt_gradm.h
71176new file mode 100644
71177index 0000000..33f4af8
71178--- /dev/null
71179+++ b/include/linux/netfilter/xt_gradm.h
71180@@ -0,0 +1,9 @@
71181+#ifndef _LINUX_NETFILTER_XT_GRADM_H
71182+#define _LINUX_NETFILTER_XT_GRADM_H 1
71183+
71184+struct xt_gradm_mtinfo {
71185+ __u16 flags;
71186+ __u16 invflags;
71187+};
71188+
71189+#endif
71190diff --git a/include/linux/nls.h b/include/linux/nls.h
71191index 5dc635f..35f5e11 100644
71192--- a/include/linux/nls.h
71193+++ b/include/linux/nls.h
71194@@ -31,7 +31,7 @@ struct nls_table {
71195 const unsigned char *charset2upper;
71196 struct module *owner;
71197 struct nls_table *next;
71198-};
71199+} __do_const;
71200
71201 /* this value hold the maximum octet of charset */
71202 #define NLS_MAX_CHARSET_SIZE 6 /* for UTF-8 */
71203diff --git a/include/linux/notifier.h b/include/linux/notifier.h
71204index d14a4c3..a078786 100644
71205--- a/include/linux/notifier.h
71206+++ b/include/linux/notifier.h
71207@@ -54,7 +54,8 @@ struct notifier_block {
71208 notifier_fn_t notifier_call;
71209 struct notifier_block __rcu *next;
71210 int priority;
71211-};
71212+} __do_const;
71213+typedef struct notifier_block __no_const notifier_block_no_const;
71214
71215 struct atomic_notifier_head {
71216 spinlock_t lock;
71217diff --git a/include/linux/oprofile.h b/include/linux/oprofile.h
71218index a4c5624..79d6d88 100644
71219--- a/include/linux/oprofile.h
71220+++ b/include/linux/oprofile.h
71221@@ -139,9 +139,9 @@ int oprofilefs_create_ulong(struct super_block * sb, struct dentry * root,
71222 int oprofilefs_create_ro_ulong(struct super_block * sb, struct dentry * root,
71223 char const * name, ulong * val);
71224
71225-/** Create a file for read-only access to an atomic_t. */
71226+/** Create a file for read-only access to an atomic_unchecked_t. */
71227 int oprofilefs_create_ro_atomic(struct super_block * sb, struct dentry * root,
71228- char const * name, atomic_t * val);
71229+ char const * name, atomic_unchecked_t * val);
71230
71231 /** create a directory */
71232 struct dentry * oprofilefs_mkdir(struct super_block * sb, struct dentry * root,
71233diff --git a/include/linux/pci_hotplug.h b/include/linux/pci_hotplug.h
71234index 8db71dc..a76bf2c 100644
71235--- a/include/linux/pci_hotplug.h
71236+++ b/include/linux/pci_hotplug.h
71237@@ -80,7 +80,8 @@ struct hotplug_slot_ops {
71238 int (*get_attention_status) (struct hotplug_slot *slot, u8 *value);
71239 int (*get_latch_status) (struct hotplug_slot *slot, u8 *value);
71240 int (*get_adapter_status) (struct hotplug_slot *slot, u8 *value);
71241-};
71242+} __do_const;
71243+typedef struct hotplug_slot_ops __no_const hotplug_slot_ops_no_const;
71244
71245 /**
71246 * struct hotplug_slot_info - used to notify the hotplug pci core of the state of the slot
71247diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
71248index c5b6dbf..b124155 100644
71249--- a/include/linux/perf_event.h
71250+++ b/include/linux/perf_event.h
71251@@ -318,8 +318,8 @@ struct perf_event {
71252
71253 enum perf_event_active_state state;
71254 unsigned int attach_state;
71255- local64_t count;
71256- atomic64_t child_count;
71257+ local64_t count; /* PaX: fix it one day */
71258+ atomic64_unchecked_t child_count;
71259
71260 /*
71261 * These are the total time in nanoseconds that the event
71262@@ -370,8 +370,8 @@ struct perf_event {
71263 * These accumulate total time (in nanoseconds) that children
71264 * events have been enabled and running, respectively.
71265 */
71266- atomic64_t child_total_time_enabled;
71267- atomic64_t child_total_time_running;
71268+ atomic64_unchecked_t child_total_time_enabled;
71269+ atomic64_unchecked_t child_total_time_running;
71270
71271 /*
71272 * Protect attach/detach and child_list:
71273@@ -692,7 +692,7 @@ static inline void perf_callchain_store(struct perf_callchain_entry *entry, u64
71274 entry->ip[entry->nr++] = ip;
71275 }
71276
71277-extern int sysctl_perf_event_paranoid;
71278+extern int sysctl_perf_event_legitimately_concerned;
71279 extern int sysctl_perf_event_mlock;
71280 extern int sysctl_perf_event_sample_rate;
71281
71282@@ -700,19 +700,24 @@ extern int perf_proc_update_handler(struct ctl_table *table, int write,
71283 void __user *buffer, size_t *lenp,
71284 loff_t *ppos);
71285
71286+static inline bool perf_paranoid_any(void)
71287+{
71288+ return sysctl_perf_event_legitimately_concerned > 2;
71289+}
71290+
71291 static inline bool perf_paranoid_tracepoint_raw(void)
71292 {
71293- return sysctl_perf_event_paranoid > -1;
71294+ return sysctl_perf_event_legitimately_concerned > -1;
71295 }
71296
71297 static inline bool perf_paranoid_cpu(void)
71298 {
71299- return sysctl_perf_event_paranoid > 0;
71300+ return sysctl_perf_event_legitimately_concerned > 0;
71301 }
71302
71303 static inline bool perf_paranoid_kernel(void)
71304 {
71305- return sysctl_perf_event_paranoid > 1;
71306+ return sysctl_perf_event_legitimately_concerned > 1;
71307 }
71308
71309 extern void perf_event_init(void);
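The renamed sysctl also gains a new level: a value above 2 makes perf_paranoid_any() true, which callers can use to shut unprivileged perf off entirely. A sketch of the gate as it might sit early in the perf_event_open() path (placement illustrative):

	static int example_perf_gate(void)
	{
		if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
			return -EACCES;
		return 0;
	}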
71310@@ -806,7 +811,7 @@ static inline void perf_restore_debug_store(void) { }
71311 */
71312 #define perf_cpu_notifier(fn) \
71313 do { \
71314- static struct notifier_block fn##_nb __cpuinitdata = \
71315+ static struct notifier_block fn##_nb = \
71316 { .notifier_call = fn, .priority = CPU_PRI_PERF }; \
71317 unsigned long cpu = smp_processor_id(); \
71318 unsigned long flags; \
71319@@ -826,7 +831,7 @@ struct perf_pmu_events_attr {
71320 struct device_attribute attr;
71321 u64 id;
71322 const char *event_str;
71323-};
71324+} __do_const;
71325
71326 #define PMU_EVENT_ATTR(_name, _var, _id, _show) \
71327 static struct perf_pmu_events_attr _var = { \
71328diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
71329index b8809fe..ae4ccd0 100644
71330--- a/include/linux/pipe_fs_i.h
71331+++ b/include/linux/pipe_fs_i.h
71332@@ -47,10 +47,10 @@ struct pipe_inode_info {
71333 struct mutex mutex;
71334 wait_queue_head_t wait;
71335 unsigned int nrbufs, curbuf, buffers;
71336- unsigned int readers;
71337- unsigned int writers;
71338- unsigned int files;
71339- unsigned int waiting_writers;
71340+ atomic_t readers;
71341+ atomic_t writers;
71342+ atomic_t files;
71343+ atomic_t waiting_writers;
71344 unsigned int r_counter;
71345 unsigned int w_counter;
71346 struct page *tmp_page;
71347diff --git a/include/linux/platform_data/usb-ehci-s5p.h b/include/linux/platform_data/usb-ehci-s5p.h
71348index 5f28cae..3d23723 100644
71349--- a/include/linux/platform_data/usb-ehci-s5p.h
71350+++ b/include/linux/platform_data/usb-ehci-s5p.h
71351@@ -14,7 +14,7 @@
71352 struct s5p_ehci_platdata {
71353 int (*phy_init)(struct platform_device *pdev, int type);
71354 int (*phy_exit)(struct platform_device *pdev, int type);
71355-};
71356+} __no_const;
71357
71358 extern void s5p_ehci_set_platdata(struct s5p_ehci_platdata *pd);
71359
71360diff --git a/include/linux/platform_data/usb-ohci-exynos.h b/include/linux/platform_data/usb-ohci-exynos.h
71361index c256c59..8ea94c7 100644
71362--- a/include/linux/platform_data/usb-ohci-exynos.h
71363+++ b/include/linux/platform_data/usb-ohci-exynos.h
71364@@ -14,7 +14,7 @@
71365 struct exynos4_ohci_platdata {
71366 int (*phy_init)(struct platform_device *pdev, int type);
71367 int (*phy_exit)(struct platform_device *pdev, int type);
71368-};
71369+} __no_const;
71370
71371 extern void exynos4_ohci_set_platdata(struct exynos4_ohci_platdata *pd);
71372
71373diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
71374index 7c1d252..c5c773e 100644
71375--- a/include/linux/pm_domain.h
71376+++ b/include/linux/pm_domain.h
71377@@ -48,7 +48,7 @@ struct gpd_dev_ops {
71378
71379 struct gpd_cpu_data {
71380 unsigned int saved_exit_latency;
71381- struct cpuidle_state *idle_state;
71382+ cpuidle_state_no_const *idle_state;
71383 };
71384
71385 struct generic_pm_domain {
71386diff --git a/include/linux/pm_runtime.h b/include/linux/pm_runtime.h
71387index 7d7e09e..8671ef8 100644
71388--- a/include/linux/pm_runtime.h
71389+++ b/include/linux/pm_runtime.h
71390@@ -104,7 +104,7 @@ static inline bool pm_runtime_callbacks_present(struct device *dev)
71391
71392 static inline void pm_runtime_mark_last_busy(struct device *dev)
71393 {
71394- ACCESS_ONCE(dev->power.last_busy) = jiffies;
71395+ ACCESS_ONCE_RW(dev->power.last_busy) = jiffies;
71396 }
71397
71398 #else /* !CONFIG_PM_RUNTIME */
71399diff --git a/include/linux/pnp.h b/include/linux/pnp.h
71400index 195aafc..49a7bc2 100644
71401--- a/include/linux/pnp.h
71402+++ b/include/linux/pnp.h
71403@@ -297,7 +297,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
71404 struct pnp_fixup {
71405 char id[7];
71406 void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
71407-};
71408+} __do_const;
71409
71410 /* config parameters */
71411 #define PNP_CONFIG_NORMAL 0x0001
71412diff --git a/include/linux/poison.h b/include/linux/poison.h
71413index 2110a81..13a11bb 100644
71414--- a/include/linux/poison.h
71415+++ b/include/linux/poison.h
71416@@ -19,8 +19,8 @@
71417 * under normal circumstances, used to verify that nobody uses
71418 * non-initialized list entries.
71419 */
71420-#define LIST_POISON1 ((void *) 0x00100100 + POISON_POINTER_DELTA)
71421-#define LIST_POISON2 ((void *) 0x00200200 + POISON_POINTER_DELTA)
71422+#define LIST_POISON1 ((void *) (long)0xFFFFFF01)
71423+#define LIST_POISON2 ((void *) (long)0xFFFFFF02)
71424
71425 /********** include/linux/timer.h **********/
71426 /*
71427diff --git a/include/linux/power/smartreflex.h b/include/linux/power/smartreflex.h
71428index c0f44c2..1572583 100644
71429--- a/include/linux/power/smartreflex.h
71430+++ b/include/linux/power/smartreflex.h
71431@@ -238,7 +238,7 @@ struct omap_sr_class_data {
71432 int (*notify)(struct omap_sr *sr, u32 status);
71433 u8 notify_flags;
71434 u8 class_type;
71435-};
71436+} __do_const;
71437
71438 /**
71439 * struct omap_sr_nvalue_table - Smartreflex n-target value info
71440diff --git a/include/linux/ppp-comp.h b/include/linux/ppp-comp.h
71441index 4ea1d37..80f4b33 100644
71442--- a/include/linux/ppp-comp.h
71443+++ b/include/linux/ppp-comp.h
71444@@ -84,7 +84,7 @@ struct compressor {
71445 struct module *owner;
71446 /* Extra skb space needed by the compressor algorithm */
71447 unsigned int comp_extra;
71448-};
71449+} __do_const;
71450
71451 /*
71452 * The return value from decompress routine is the length of the
71453diff --git a/include/linux/printk.h b/include/linux/printk.h
71454index 22c7052..ad3fa0a 100644
71455--- a/include/linux/printk.h
71456+++ b/include/linux/printk.h
71457@@ -106,6 +106,8 @@ static inline __printf(1, 2) __cold
71458 void early_printk(const char *s, ...) { }
71459 #endif
71460
71461+extern int kptr_restrict;
71462+
71463 #ifdef CONFIG_PRINTK
71464 asmlinkage __printf(5, 0)
71465 int vprintk_emit(int facility, int level,
71466@@ -140,7 +142,6 @@ extern bool printk_timed_ratelimit(unsigned long *caller_jiffies,
71467
71468 extern int printk_delay_msec;
71469 extern int dmesg_restrict;
71470-extern int kptr_restrict;
71471
71472 extern void wake_up_klogd(void);
71473
71474diff --git a/include/linux/proc_fs.h b/include/linux/proc_fs.h
71475index 608e60a..c26f864 100644
71476--- a/include/linux/proc_fs.h
71477+++ b/include/linux/proc_fs.h
71478@@ -34,6 +34,19 @@ static inline struct proc_dir_entry *proc_create(
71479 return proc_create_data(name, mode, parent, proc_fops, NULL);
71480 }
71481
71482+static inline struct proc_dir_entry *proc_create_grsec(const char *name, umode_t mode,
71483+ struct proc_dir_entry *parent, const struct file_operations *proc_fops)
71484+{
71485+#ifdef CONFIG_GRKERNSEC_PROC_USER
71486+ return proc_create_data(name, S_IRUSR, parent, proc_fops, NULL);
71487+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
71488+ return proc_create_data(name, S_IRUSR | S_IRGRP, parent, proc_fops, NULL);
71489+#else
71490+ return proc_create_data(name, mode, parent, proc_fops, NULL);
71491+#endif
71492+}
71493+
71494+
71495 extern void proc_set_size(struct proc_dir_entry *, loff_t);
71496 extern void proc_set_user(struct proc_dir_entry *, kuid_t, kgid_t);
71497 extern void *PDE_DATA(const struct inode *);
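proc_create_grsec() lets a caller request a nominal mode while deferring to the configured /proc visibility policy: under GRKERNSEC_PROC_USER the entry becomes 0400, under GRKERNSEC_PROC_USERGROUP 0440, and otherwise the requested mode is used as-is. A usage sketch (the name and fops are hypothetical):

	static const struct file_operations example_fops;	/* hypothetical */

	static int __init example_proc_init(void)
	{
		/* asks for 0444, gets whatever the grsec policy allows */
		if (!proc_create_grsec("example", 0444, NULL, &example_fops))
			return -ENOMEM;
		return 0;
	}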
71498diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
71499index 34a1e10..03a6d03 100644
71500--- a/include/linux/proc_ns.h
71501+++ b/include/linux/proc_ns.h
71502@@ -14,7 +14,7 @@ struct proc_ns_operations {
71503 void (*put)(void *ns);
71504 int (*install)(struct nsproxy *nsproxy, void *ns);
71505 unsigned int (*inum)(void *ns);
71506-};
71507+} __do_const;
71508
71509 struct proc_ns {
71510 void *ns;
71511diff --git a/include/linux/random.h b/include/linux/random.h
71512index 3b9377d..61b506a 100644
71513--- a/include/linux/random.h
71514+++ b/include/linux/random.h
71515@@ -32,6 +32,11 @@ void prandom_seed(u32 seed);
71516 u32 prandom_u32_state(struct rnd_state *);
71517 void prandom_bytes_state(struct rnd_state *state, void *buf, int nbytes);
71518
71519+static inline unsigned long pax_get_random_long(void)
71520+{
71521+ return prandom_u32() + (sizeof(long) > 4 ? (unsigned long)prandom_u32() << 32 : 0);
71522+}
71523+
71524 /*
71525 * Handle minimum values for seeds
71526 */
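pax_get_random_long() widens the 32-bit PRNG output to a full long: prandom_u32() supplies the low half and, on 64-bit kernels, a second draw is shifted into the high half (on 32-bit the shift term constant-folds away). A sketch of a typical consumer, randomising a base address within a range (names illustrative):

	static unsigned long example_randomize(unsigned long base,
					       unsigned long range)
	{
		return base + (pax_get_random_long() % range);
	}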
71527diff --git a/include/linux/rculist.h b/include/linux/rculist.h
71528index f4b1001..8ddb2b6 100644
71529--- a/include/linux/rculist.h
71530+++ b/include/linux/rculist.h
71531@@ -44,6 +44,9 @@ extern void __list_add_rcu(struct list_head *new,
71532 struct list_head *prev, struct list_head *next);
71533 #endif
71534
71535+extern void __pax_list_add_rcu(struct list_head *new,
71536+ struct list_head *prev, struct list_head *next);
71537+
71538 /**
71539 * list_add_rcu - add a new entry to rcu-protected list
71540 * @new: new entry to be added
71541@@ -65,6 +68,11 @@ static inline void list_add_rcu(struct list_head *new, struct list_head *head)
71542 __list_add_rcu(new, head, head->next);
71543 }
71544
71545+static inline void pax_list_add_rcu(struct list_head *new, struct list_head *head)
71546+{
71547+ __pax_list_add_rcu(new, head, head->next);
71548+}
71549+
71550 /**
71551 * list_add_tail_rcu - add a new entry to rcu-protected list
71552 * @new: new entry to be added
71553@@ -87,6 +95,12 @@ static inline void list_add_tail_rcu(struct list_head *new,
71554 __list_add_rcu(new, head->prev, head);
71555 }
71556
71557+static inline void pax_list_add_tail_rcu(struct list_head *new,
71558+ struct list_head *head)
71559+{
71560+ __pax_list_add_rcu(new, head->prev, head);
71561+}
71562+
71563 /**
71564 * list_del_rcu - deletes entry from list without re-initialization
71565 * @entry: the element to delete from the list.
71566@@ -117,6 +131,8 @@ static inline void list_del_rcu(struct list_head *entry)
71567 entry->prev = LIST_POISON2;
71568 }
71569
71570+extern void pax_list_del_rcu(struct list_head *entry);
71571+
71572 /**
71573 * hlist_del_init_rcu - deletes entry from hash list with re-initialization
71574 * @n: the element to delete from the hash list.
71575diff --git a/include/linux/reboot.h b/include/linux/reboot.h
71576index 23b3630..e1bc12b 100644
71577--- a/include/linux/reboot.h
71578+++ b/include/linux/reboot.h
71579@@ -18,9 +18,9 @@ extern int unregister_reboot_notifier(struct notifier_block *);
71580 * Architecture-specific implementations of sys_reboot commands.
71581 */
71582
71583-extern void machine_restart(char *cmd);
71584-extern void machine_halt(void);
71585-extern void machine_power_off(void);
71586+extern void machine_restart(char *cmd) __noreturn;
71587+extern void machine_halt(void) __noreturn;
71588+extern void machine_power_off(void) __noreturn;
71589
71590 extern void machine_shutdown(void);
71591 struct pt_regs;
71592@@ -31,9 +31,9 @@ extern void machine_crash_shutdown(struct pt_regs *);
71593 */
71594
71595 extern void kernel_restart_prepare(char *cmd);
71596-extern void kernel_restart(char *cmd);
71597-extern void kernel_halt(void);
71598-extern void kernel_power_off(void);
71599+extern void kernel_restart(char *cmd) __noreturn;
71600+extern void kernel_halt(void) __noreturn;
71601+extern void kernel_power_off(void) __noreturn;
71602
71603 extern int C_A_D; /* for sysctl */
71604 void ctrl_alt_del(void);
71605@@ -47,7 +47,7 @@ extern int orderly_poweroff(bool force);
71606 * Emergency restart, callable from an interrupt handler.
71607 */
71608
71609-extern void emergency_restart(void);
71610+extern void emergency_restart(void) __noreturn;
71611 #include <asm/emergency-restart.h>
71612
71613 #endif /* _LINUX_REBOOT_H */
71614diff --git a/include/linux/regset.h b/include/linux/regset.h
71615index 8e0c9fe..ac4d221 100644
71616--- a/include/linux/regset.h
71617+++ b/include/linux/regset.h
71618@@ -161,7 +161,8 @@ struct user_regset {
71619 unsigned int align;
71620 unsigned int bias;
71621 unsigned int core_note_type;
71622-};
71623+} __do_const;
71624+typedef struct user_regset __no_const user_regset_no_const;
71625
71626 /**
71627 * struct user_regset_view - available regsets
71628diff --git a/include/linux/relay.h b/include/linux/relay.h
71629index d7c8359..818daf5 100644
71630--- a/include/linux/relay.h
71631+++ b/include/linux/relay.h
71632@@ -157,7 +157,7 @@ struct rchan_callbacks
71633 * The callback should return 0 if successful, negative if not.
71634 */
71635 int (*remove_buf_file)(struct dentry *dentry);
71636-};
71637+} __no_const;
71638
71639 /*
71640 * CONFIG_RELAY kernel API, kernel/relay.c
71641diff --git a/include/linux/rio.h b/include/linux/rio.h
71642index 18e0993..8ab5b21 100644
71643--- a/include/linux/rio.h
71644+++ b/include/linux/rio.h
71645@@ -345,7 +345,7 @@ struct rio_ops {
71646 int (*map_inb)(struct rio_mport *mport, dma_addr_t lstart,
71647 u64 rstart, u32 size, u32 flags);
71648 void (*unmap_inb)(struct rio_mport *mport, dma_addr_t lstart);
71649-};
71650+} __no_const;
71651
71652 #define RIO_RESOURCE_MEM 0x00000100
71653 #define RIO_RESOURCE_DOORBELL 0x00000200
71654diff --git a/include/linux/rmap.h b/include/linux/rmap.h
71655index 6dacb93..6174423 100644
71656--- a/include/linux/rmap.h
71657+++ b/include/linux/rmap.h
71658@@ -145,8 +145,8 @@ static inline void anon_vma_unlock_read(struct anon_vma *anon_vma)
71659 void anon_vma_init(void); /* create anon_vma_cachep */
71660 int anon_vma_prepare(struct vm_area_struct *);
71661 void unlink_anon_vmas(struct vm_area_struct *);
71662-int anon_vma_clone(struct vm_area_struct *, struct vm_area_struct *);
71663-int anon_vma_fork(struct vm_area_struct *, struct vm_area_struct *);
71664+int anon_vma_clone(struct vm_area_struct *, const struct vm_area_struct *);
71665+int anon_vma_fork(struct vm_area_struct *, const struct vm_area_struct *);
71666
71667 static inline void anon_vma_merge(struct vm_area_struct *vma,
71668 struct vm_area_struct *next)
71669diff --git a/include/linux/sched.h b/include/linux/sched.h
71670index 178a8d9..52e71a3 100644
71671--- a/include/linux/sched.h
71672+++ b/include/linux/sched.h
71673@@ -62,6 +62,7 @@ struct bio_list;
71674 struct fs_struct;
71675 struct perf_event_context;
71676 struct blk_plug;
71677+struct linux_binprm;
71678
71679 /*
71680 * List of flags we want to share for kernel threads,
71681@@ -303,7 +304,7 @@ extern char __sched_text_start[], __sched_text_end[];
71682 extern int in_sched_functions(unsigned long addr);
71683
71684 #define MAX_SCHEDULE_TIMEOUT LONG_MAX
71685-extern signed long schedule_timeout(signed long timeout);
71686+extern signed long schedule_timeout(signed long timeout) __intentional_overflow(-1);
71687 extern signed long schedule_timeout_interruptible(signed long timeout);
71688 extern signed long schedule_timeout_killable(signed long timeout);
71689 extern signed long schedule_timeout_uninterruptible(signed long timeout);
71690@@ -314,6 +315,19 @@ struct nsproxy;
71691 struct user_namespace;
71692
71693 #ifdef CONFIG_MMU
71694+
71695+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
71696+extern unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags);
71697+#else
71698+static inline unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
71699+{
71700+ return 0;
71701+}
71702+#endif
71703+
71704+extern bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset);
71705+extern unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset);
71706+
71707 extern void arch_pick_mmap_layout(struct mm_struct *mm);
71708 extern unsigned long
71709 arch_get_unmapped_area(struct file *, unsigned long, unsigned long,
71710@@ -591,6 +605,17 @@ struct signal_struct {
71711 #ifdef CONFIG_TASKSTATS
71712 struct taskstats *stats;
71713 #endif
71714+
71715+#ifdef CONFIG_GRKERNSEC
71716+ u32 curr_ip;
71717+ u32 saved_ip;
71718+ u32 gr_saddr;
71719+ u32 gr_daddr;
71720+ u16 gr_sport;
71721+ u16 gr_dport;
71722+ u8 used_accept:1;
71723+#endif
71724+
71725 #ifdef CONFIG_AUDIT
71726 unsigned audit_tty;
71727 unsigned audit_tty_log_passwd;
71728@@ -671,6 +696,14 @@ struct user_struct {
71729 struct key *session_keyring; /* UID's default session keyring */
71730 #endif
71731
71732+#ifdef CONFIG_GRKERNSEC_KERN_LOCKOUT
71733+ unsigned char kernel_banned;
71734+#endif
71735+#ifdef CONFIG_GRKERNSEC_BRUTE
71736+ unsigned char suid_banned;
71737+ unsigned long suid_ban_expires;
71738+#endif
71739+
71740 /* Hash table maintenance information */
71741 struct hlist_node uidhash_node;
71742 kuid_t uid;
71743@@ -1158,8 +1191,8 @@ struct task_struct {
71744 struct list_head thread_group;
71745
71746 struct completion *vfork_done; /* for vfork() */
71747- int __user *set_child_tid; /* CLONE_CHILD_SETTID */
71748- int __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
71749+ pid_t __user *set_child_tid; /* CLONE_CHILD_SETTID */
71750+ pid_t __user *clear_child_tid; /* CLONE_CHILD_CLEARTID */
71751
71752 cputime_t utime, stime, utimescaled, stimescaled;
71753 cputime_t gtime;
71754@@ -1184,11 +1217,6 @@ struct task_struct {
71755 struct task_cputime cputime_expires;
71756 struct list_head cpu_timers[3];
71757
71758-/* process credentials */
71759- const struct cred __rcu *real_cred; /* objective and real subjective task
71760- * credentials (COW) */
71761- const struct cred __rcu *cred; /* effective (overridable) subjective task
71762- * credentials (COW) */
71763 char comm[TASK_COMM_LEN]; /* executable name excluding path
71764 - access with [gs]et_task_comm (which lock
71765 it with task_lock())
71766@@ -1205,6 +1233,10 @@ struct task_struct {
71767 #endif
71768 /* CPU-specific state of this task */
71769 struct thread_struct thread;
71770+/* thread_info moved to task_struct */
71771+#ifdef CONFIG_X86
71772+ struct thread_info tinfo;
71773+#endif
71774 /* filesystem information */
71775 struct fs_struct *fs;
71776 /* open file information */
71777@@ -1278,6 +1310,10 @@ struct task_struct {
71778 gfp_t lockdep_reclaim_gfp;
71779 #endif
71780
71781+/* process credentials */
71782+ const struct cred __rcu *real_cred; /* objective and real subjective task
71783+ * credentials (COW) */
71784+
71785 /* journalling filesystem info */
71786 void *journal_info;
71787
71788@@ -1316,6 +1352,10 @@ struct task_struct {
71789 /* cg_list protected by css_set_lock and tsk->alloc_lock */
71790 struct list_head cg_list;
71791 #endif
71792+
71793+ const struct cred __rcu *cred; /* effective (overridable) subjective task
71794+ * credentials (COW) */
71795+
71796 #ifdef CONFIG_FUTEX
71797 struct robust_list_head __user *robust_list;
71798 #ifdef CONFIG_COMPAT
71799@@ -1416,8 +1456,74 @@ struct task_struct {
71800 unsigned int sequential_io;
71801 unsigned int sequential_io_avg;
71802 #endif
71803+
71804+#ifdef CONFIG_GRKERNSEC
71805+ /* grsecurity */
71806+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71807+ u64 exec_id;
71808+#endif
71809+#ifdef CONFIG_GRKERNSEC_SETXID
71810+ const struct cred *delayed_cred;
71811+#endif
71812+ struct dentry *gr_chroot_dentry;
71813+ struct acl_subject_label *acl;
71814+ struct acl_role_label *role;
71815+ struct file *exec_file;
71816+ unsigned long brute_expires;
71817+ u16 acl_role_id;
71818+ /* is this the task that authenticated to the special role */
71819+ u8 acl_sp_role;
71820+ u8 is_writable;
71821+ u8 brute;
71822+ u8 gr_is_chrooted;
71823+#endif
71824+
71825 };
71826
71827+#define MF_PAX_PAGEEXEC 0x01000000 /* Paging based non-executable pages */
71828+#define MF_PAX_EMUTRAMP 0x02000000 /* Emulate trampolines */
71829+#define MF_PAX_MPROTECT 0x04000000 /* Restrict mprotect() */
71830+#define MF_PAX_RANDMMAP 0x08000000 /* Randomize mmap() base */
71831+/*#define MF_PAX_RANDEXEC 0x10000000*/ /* Randomize ET_EXEC base */
71832+#define MF_PAX_SEGMEXEC 0x20000000 /* Segmentation based non-executable pages */
71833+
71834+#ifdef CONFIG_PAX_SOFTMODE
71835+extern int pax_softmode;
71836+#endif
71837+
71838+extern int pax_check_flags(unsigned long *);
71839+
71840+/* if tsk != current then task_lock must be held on it */
71841+#if defined(CONFIG_PAX_NOEXEC) || defined(CONFIG_PAX_ASLR)
71842+static inline unsigned long pax_get_flags(struct task_struct *tsk)
71843+{
71844+ if (likely(tsk->mm))
71845+ return tsk->mm->pax_flags;
71846+ else
71847+ return 0UL;
71848+}
71849+
71850+/* if tsk != current then task_lock must be held on it */
71851+static inline long pax_set_flags(struct task_struct *tsk, unsigned long flags)
71852+{
71853+ if (likely(tsk->mm)) {
71854+ tsk->mm->pax_flags = flags;
71855+ return 0;
71856+ }
71857+ return -EINVAL;
71858+}
71859+#endif
71860+
71861+#ifdef CONFIG_PAX_HAVE_ACL_FLAGS
71862+extern void pax_set_initial_flags(struct linux_binprm *bprm);
71863+#elif defined(CONFIG_PAX_HOOK_ACL_FLAGS)
71864+extern void (*pax_set_initial_flags_func)(struct linux_binprm *bprm);
71865+#endif
71866+
71867+extern void pax_report_fault(struct pt_regs *regs, void *pc, void *sp);
71868+extern void pax_report_insns(struct pt_regs *regs, void *pc, void *sp);
71869+extern void pax_report_refcount_overflow(struct pt_regs *regs);
71870+
71871 /* Future-safe accessor for struct task_struct's cpus_allowed. */
71872 #define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
71873
71874@@ -1476,7 +1582,7 @@ struct pid_namespace;
71875 pid_t __task_pid_nr_ns(struct task_struct *task, enum pid_type type,
71876 struct pid_namespace *ns);
71877
71878-static inline pid_t task_pid_nr(struct task_struct *tsk)
71879+static inline pid_t task_pid_nr(const struct task_struct *tsk)
71880 {
71881 return tsk->pid;
71882 }
71883@@ -1919,7 +2025,9 @@ void yield(void);
71884 extern struct exec_domain default_exec_domain;
71885
71886 union thread_union {
71887+#ifndef CONFIG_X86
71888 struct thread_info thread_info;
71889+#endif
71890 unsigned long stack[THREAD_SIZE/sizeof(long)];
71891 };
71892
71893@@ -1952,6 +2060,7 @@ extern struct pid_namespace init_pid_ns;
71894 */
71895
71896 extern struct task_struct *find_task_by_vpid(pid_t nr);
71897+extern struct task_struct *find_task_by_vpid_unrestricted(pid_t nr);
71898 extern struct task_struct *find_task_by_pid_ns(pid_t nr,
71899 struct pid_namespace *ns);
71900
71901@@ -2118,7 +2227,7 @@ extern void __cleanup_sighand(struct sighand_struct *);
71902 extern void exit_itimers(struct signal_struct *);
71903 extern void flush_itimer_signals(void);
71904
71905-extern void do_group_exit(int);
71906+extern __noreturn void do_group_exit(int);
71907
71908 extern int allow_signal(int);
71909 extern int disallow_signal(int);
71910@@ -2309,9 +2418,9 @@ static inline unsigned long *end_of_stack(struct task_struct *p)
71911
71912 #endif
71913
71914-static inline int object_is_on_stack(void *obj)
71915+static inline int object_starts_on_stack(void *obj)
71916 {
71917- void *stack = task_stack_page(current);
71918+ const void *stack = task_stack_page(current);
71919
71920 return (obj >= stack) && (obj < (stack + THREAD_SIZE));
71921 }
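
Two structural moves in the sched.h hunks deserve a note. First, real_cred and cred are split apart and relocated within task_struct, separating the credential pointers that exploits commonly target as a pair. Second, on x86 thread_info is embedded in task_struct (the tinfo field) and removed from union thread_union, so a kernel-stack overflow can no longer clobber it. A simplified sketch of the accessor this enables, assuming the arch code changed elsewhere in the patch takes this shape:

    /* Simplified sketch; the real x86 accessor lives in the arch
     * headers, not here. */
    static inline struct thread_info *current_thread_info(void)
    {
        return &current->tinfo;   /* no longer derived from the
                                     stack pointer */
    }
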
71922diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
71923index bf8086b..962b035 100644
71924--- a/include/linux/sched/sysctl.h
71925+++ b/include/linux/sched/sysctl.h
71926@@ -30,6 +30,7 @@ enum { sysctl_hung_task_timeout_secs = 0 };
71927 #define DEFAULT_MAX_MAP_COUNT (USHRT_MAX - MAPCOUNT_ELF_CORE_MARGIN)
71928
71929 extern int sysctl_max_map_count;
71930+extern unsigned long sysctl_heap_stack_gap;
71931
71932 extern unsigned int sysctl_sched_latency;
71933 extern unsigned int sysctl_sched_min_granularity;
71934diff --git a/include/linux/security.h b/include/linux/security.h
71935index 4686491..2bd210e 100644
71936--- a/include/linux/security.h
71937+++ b/include/linux/security.h
71938@@ -26,6 +26,7 @@
71939 #include <linux/capability.h>
71940 #include <linux/slab.h>
71941 #include <linux/err.h>
71942+#include <linux/grsecurity.h>
71943
71944 struct linux_binprm;
71945 struct cred;
71946diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
71947index 2da29ac..aac448ec 100644
71948--- a/include/linux/seq_file.h
71949+++ b/include/linux/seq_file.h
71950@@ -26,6 +26,9 @@ struct seq_file {
71951 struct mutex lock;
71952 const struct seq_operations *op;
71953 int poll_event;
71954+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
71955+ u64 exec_id;
71956+#endif
71957 #ifdef CONFIG_USER_NS
71958 struct user_namespace *user_ns;
71959 #endif
71960@@ -38,6 +41,7 @@ struct seq_operations {
71961 void * (*next) (struct seq_file *m, void *v, loff_t *pos);
71962 int (*show) (struct seq_file *m, void *v);
71963 };
71964+typedef struct seq_operations __no_const seq_operations_no_const;
71965
71966 #define SEQ_SKIP 1
71967
71968diff --git a/include/linux/shm.h b/include/linux/shm.h
71969index 429c199..4d42e38 100644
71970--- a/include/linux/shm.h
71971+++ b/include/linux/shm.h
71972@@ -21,6 +21,10 @@ struct shmid_kernel /* private to the kernel */
71973
71974 /* The task created the shm object. NULL if the task is dead. */
71975 struct task_struct *shm_creator;
71976+#ifdef CONFIG_GRKERNSEC
71977+ time_t shm_createtime;
71978+ pid_t shm_lapid;
71979+#endif
71980 };
71981
71982 /* shm_mode upper byte flags */
71983diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
71984index dec1748..112c1f9 100644
71985--- a/include/linux/skbuff.h
71986+++ b/include/linux/skbuff.h
71987@@ -640,7 +640,7 @@ extern bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
71988 extern struct sk_buff *__alloc_skb(unsigned int size,
71989 gfp_t priority, int flags, int node);
71990 extern struct sk_buff *build_skb(void *data, unsigned int frag_size);
71991-static inline struct sk_buff *alloc_skb(unsigned int size,
71992+static inline struct sk_buff * __intentional_overflow(0) alloc_skb(unsigned int size,
71993 gfp_t priority)
71994 {
71995 return __alloc_skb(size, priority, 0, NUMA_NO_NODE);
71996@@ -756,7 +756,7 @@ static inline struct skb_shared_hwtstamps *skb_hwtstamps(struct sk_buff *skb)
71997 */
71998 static inline int skb_queue_empty(const struct sk_buff_head *list)
71999 {
72000- return list->next == (struct sk_buff *)list;
72001+ return list->next == (const struct sk_buff *)list;
72002 }
72003
72004 /**
72005@@ -769,7 +769,7 @@ static inline int skb_queue_empty(const struct sk_buff_head *list)
72006 static inline bool skb_queue_is_last(const struct sk_buff_head *list,
72007 const struct sk_buff *skb)
72008 {
72009- return skb->next == (struct sk_buff *)list;
72010+ return skb->next == (const struct sk_buff *)list;
72011 }
72012
72013 /**
72014@@ -782,7 +782,7 @@ static inline bool skb_queue_is_last(const struct sk_buff_head *list,
72015 static inline bool skb_queue_is_first(const struct sk_buff_head *list,
72016 const struct sk_buff *skb)
72017 {
72018- return skb->prev == (struct sk_buff *)list;
72019+ return skb->prev == (const struct sk_buff *)list;
72020 }
72021
72022 /**
72023@@ -1848,7 +1848,7 @@ static inline int pskb_network_may_pull(struct sk_buff *skb, unsigned int len)
72024 * NET_IP_ALIGN(2) + ethernet_header(14) + IP_header(20/40) + ports(8)
72025 */
72026 #ifndef NET_SKB_PAD
72027-#define NET_SKB_PAD max(32, L1_CACHE_BYTES)
72028+#define NET_SKB_PAD max(_AC(32,UL), L1_CACHE_BYTES)
72029 #endif
72030
72031 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
72032@@ -2443,7 +2443,7 @@ extern struct sk_buff *skb_recv_datagram(struct sock *sk, unsigned flags,
72033 int noblock, int *err);
72034 extern unsigned int datagram_poll(struct file *file, struct socket *sock,
72035 struct poll_table_struct *wait);
72036-extern int skb_copy_datagram_iovec(const struct sk_buff *from,
72037+extern int __intentional_overflow(0) skb_copy_datagram_iovec(const struct sk_buff *from,
72038 int offset, struct iovec *to,
72039 int size);
72040 extern int skb_copy_and_csum_datagram_iovec(struct sk_buff *skb,
72041@@ -2733,6 +2733,9 @@ static inline void nf_reset(struct sk_buff *skb)
72042 nf_bridge_put(skb->nf_bridge);
72043 skb->nf_bridge = NULL;
72044 #endif
72045+#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
72046+ skb->nf_trace = 0;
72047+#endif
72048 }
72049
72050 static inline void nf_reset_trace(struct sk_buff *skb)
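
The __intentional_overflow() markings above feed the size_overflow gcc plugin, which instruments integer expressions that flow into allocation sizes and copy lengths. A sketch of the fallback stubs, assuming the usual plugin wiring (the attribute spellings are an assumption here):

    #ifdef SIZE_OVERFLOW_PLUGIN
    #define __size_overflow(...) \
        __attribute__((size_overflow(__VA_ARGS__)))
    #define __intentional_overflow(...) \
        __attribute__((intentional_overflow(__VA_ARGS__)))
    #else
    #define __size_overflow(...)
    #define __intentional_overflow(...)
    #endif

__size_overflow(1) marks parameter 1 as a size that must be overflow-checked; __intentional_overflow() exempts the listed values (with -1 covering the whole function) where wrapping arithmetic is expected and benign.
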
72051diff --git a/include/linux/slab.h b/include/linux/slab.h
72052index 0c62175..f016ac1 100644
72053--- a/include/linux/slab.h
72054+++ b/include/linux/slab.h
72055@@ -12,15 +12,29 @@
72056 #include <linux/gfp.h>
72057 #include <linux/types.h>
72058 #include <linux/workqueue.h>
72059-
72060+#include <linux/err.h>
72061
72062 /*
72063 * Flags to pass to kmem_cache_create().
72064 * The ones marked DEBUG are only valid if CONFIG_SLAB_DEBUG is set.
72065 */
72066 #define SLAB_DEBUG_FREE 0x00000100UL /* DEBUG: Perform (expensive) checks on free */
72067+
72068+#ifdef CONFIG_PAX_USERCOPY_SLABS
72069+#define SLAB_USERCOPY 0x00000200UL /* PaX: Allow copying objs to/from userland */
72070+#else
72071+#define SLAB_USERCOPY 0x00000000UL
72072+#endif
72073+
72074 #define SLAB_RED_ZONE 0x00000400UL /* DEBUG: Red zone objs in a cache */
72075 #define SLAB_POISON 0x00000800UL /* DEBUG: Poison objects */
72076+
72077+#ifdef CONFIG_PAX_MEMORY_SANITIZE
72078+#define SLAB_NO_SANITIZE 0x00001000UL /* PaX: Do not sanitize objs on free */
72079+#else
72080+#define SLAB_NO_SANITIZE 0x00000000UL
72081+#endif
72082+
72083 #define SLAB_HWCACHE_ALIGN 0x00002000UL /* Align objs on cache lines */
72084 #define SLAB_CACHE_DMA 0x00004000UL /* Use GFP_DMA memory */
72085 #define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
72086@@ -89,10 +103,13 @@
72087 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
72088 * Both make kfree a no-op.
72089 */
72090-#define ZERO_SIZE_PTR ((void *)16)
72091+#define ZERO_SIZE_PTR \
72092+({ \
72093+ BUILD_BUG_ON(!(MAX_ERRNO & ~PAGE_MASK));\
72094+ (void *)(-MAX_ERRNO-1L); \
72095+})
72096
72097-#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
72098- (unsigned long)ZERO_SIZE_PTR)
72099+#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)
72100
72101
72102 struct mem_cgroup;
72103@@ -132,6 +149,8 @@ void * __must_check krealloc(const void *, size_t, gfp_t);
72104 void kfree(const void *);
72105 void kzfree(const void *);
72106 size_t ksize(const void *);
72107+const char *check_heap_object(const void *ptr, unsigned long n);
72108+bool is_usercopy_object(const void *ptr);
72109
72110 /*
72111 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
72112@@ -164,7 +183,7 @@ struct kmem_cache {
72113 unsigned int align; /* Alignment as calculated */
72114 unsigned long flags; /* Active flags on the slab */
72115 const char *name; /* Slab name for sysfs */
72116- int refcount; /* Use counter */
72117+ atomic_t refcount; /* Use counter */
72118 void (*ctor)(void *); /* Called on object slot creation */
72119 struct list_head list; /* List of all slab caches on the system */
72120 };
72121@@ -226,6 +245,10 @@ extern struct kmem_cache *kmalloc_caches[KMALLOC_SHIFT_HIGH + 1];
72122 extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
72123 #endif
72124
72125+#ifdef CONFIG_PAX_USERCOPY_SLABS
72126+extern struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
72127+#endif
72128+
72129 /*
72130 * Figure out which kmalloc slab an allocation of a certain size
72131 * belongs to.
72132@@ -234,7 +257,7 @@ extern struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
72133 * 2 = 120 .. 192 bytes
72134 * n = 2^(n-1) .. 2^n -1
72135 */
72136-static __always_inline int kmalloc_index(size_t size)
72137+static __always_inline __size_overflow(1) int kmalloc_index(size_t size)
72138 {
72139 if (!size)
72140 return 0;
72141@@ -406,6 +429,7 @@ void print_slabinfo_header(struct seq_file *m);
72142 * for general use, and so are not documented here. For a full list of
72143 * potential flags, always refer to linux/gfp.h.
72144 */
72145+
72146 static inline void *kmalloc_array(size_t n, size_t size, gfp_t flags)
72147 {
72148 if (size != 0 && n > SIZE_MAX / size)
72149@@ -465,7 +489,7 @@ static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
72150 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
72151 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
72152 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
72153-extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
72154+extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long) __size_overflow(1);
72155 #define kmalloc_track_caller(size, flags) \
72156 __kmalloc_track_caller(size, flags, _RET_IP_)
72157 #else
72158@@ -485,7 +509,7 @@ extern void *__kmalloc_track_caller(size_t, gfp_t, unsigned long);
72159 #if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB) || \
72160 (defined(CONFIG_SLAB) && defined(CONFIG_TRACING)) || \
72161 (defined(CONFIG_SLOB) && defined(CONFIG_TRACING))
72162-extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long);
72163+extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, unsigned long) __size_overflow(1);
72164 #define kmalloc_node_track_caller(size, flags, node) \
72165 __kmalloc_node_track_caller(size, flags, node, \
72166 _RET_IP_)
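
The ZERO_SIZE_PTR rework above moves the sentinel from 16 down to just below the errno range, so a single unsigned comparison in ZERO_OR_NULL_PTR() now catches NULL, the zero-size sentinel, and every ERR_PTR value. A stand-alone userspace demonstration of the arithmetic (MAX_ERRNO 4095 is an assumption mirroring the kernel's value):

    #include <stdio.h>

    #define MAX_ERRNO 4095L
    #define ZERO_SIZE_PTR ((void *)(-MAX_ERRNO - 1L))
    #define ZERO_OR_NULL_PTR(x) \
        ((unsigned long)(x) - 1 >= (unsigned long)ZERO_SIZE_PTR - 1)

    int main(void)
    {
        int obj;
        printf("NULL:          %d\n", ZERO_OR_NULL_PTR((void *)0));     /* 1 */
        printf("ZERO_SIZE_PTR: %d\n", ZERO_OR_NULL_PTR(ZERO_SIZE_PTR)); /* 1 */
        printf("ERR_PTR(-22):  %d\n", ZERO_OR_NULL_PTR((void *)-22L));  /* 1 */
        printf("real object:   %d\n", ZERO_OR_NULL_PTR(&obj));          /* 0 */
        return 0;
    }

The unsigned subtraction wraps NULL to ULONG_MAX, so everything at or above ZERO_SIZE_PTR - 1 (including the IS_ERR range) tests true, while ordinary pointers test false.
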
72167diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
72168index cd40158..4e2f7af 100644
72169--- a/include/linux/slab_def.h
72170+++ b/include/linux/slab_def.h
72171@@ -50,7 +50,7 @@ struct kmem_cache {
72172 /* 4) cache creation/removal */
72173 const char *name;
72174 struct list_head list;
72175- int refcount;
72176+ atomic_t refcount;
72177 int object_size;
72178 int align;
72179
72180@@ -66,10 +66,14 @@ struct kmem_cache {
72181 unsigned long node_allocs;
72182 unsigned long node_frees;
72183 unsigned long node_overflow;
72184- atomic_t allochit;
72185- atomic_t allocmiss;
72186- atomic_t freehit;
72187- atomic_t freemiss;
72188+ atomic_unchecked_t allochit;
72189+ atomic_unchecked_t allocmiss;
72190+ atomic_unchecked_t freehit;
72191+ atomic_unchecked_t freemiss;
72192+#ifdef CONFIG_PAX_MEMORY_SANITIZE
72193+ atomic_unchecked_t sanitized;
72194+ atomic_unchecked_t not_sanitized;
72195+#endif
72196
72197 /*
72198 * If debugging is enabled, then the allocator can add additional
72199@@ -103,7 +107,7 @@ struct kmem_cache {
72200 };
72201
72202 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
72203-void *__kmalloc(size_t size, gfp_t flags);
72204+void *__kmalloc(size_t size, gfp_t flags) __size_overflow(1);
72205
72206 #ifdef CONFIG_TRACING
72207 extern void *kmem_cache_alloc_trace(struct kmem_cache *, gfp_t, size_t);
72208@@ -136,6 +140,13 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
72209 cachep = kmalloc_dma_caches[i];
72210 else
72211 #endif
72212+
72213+#ifdef CONFIG_PAX_USERCOPY_SLABS
72214+ if (flags & GFP_USERCOPY)
72215+ cachep = kmalloc_usercopy_caches[i];
72216+ else
72217+#endif
72218+
72219 cachep = kmalloc_caches[i];
72220
72221 ret = kmem_cache_alloc_trace(cachep, flags, size);
72222@@ -146,7 +157,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
72223 }
72224
72225 #ifdef CONFIG_NUMA
72226-extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
72227+extern void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
72228 extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
72229
72230 #ifdef CONFIG_TRACING
72231@@ -185,6 +196,13 @@ static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
72232 cachep = kmalloc_dma_caches[i];
72233 else
72234 #endif
72235+
72236+#ifdef CONFIG_PAX_USERCOPY_SLABS
72237+ if (flags & GFP_USERCOPY)
72238+ cachep = kmalloc_usercopy_caches[i];
72239+ else
72240+#endif
72241+
72242 cachep = kmalloc_caches[i];
72243
72244 return kmem_cache_alloc_node_trace(cachep, flags, node, size);
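
With CONFIG_PAX_USERCOPY_SLABS, the kmalloc() fast paths above route allocations flagged GFP_USERCOPY into the dedicated kmalloc_usercopy_caches, keeping buffers that legitimately cross the user/kernel boundary segregated from other kernel objects. An illustrative caller (a sketch; GFP_USERCOPY is the PaX-added gfp flag these hunks test):

    static long fill_from_user(const void __user *ubuf, size_t len)
    {
        void *buf = kmalloc(len, GFP_KERNEL | GFP_USERCOPY);

        if (!buf)
            return -ENOMEM;
        if (copy_from_user(buf, ubuf, len)) {
            kfree(buf);
            return -EFAULT;
        }
        /* ... consume buf ... */
        kfree(buf);
        return 0;
    }
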
72245diff --git a/include/linux/slob_def.h b/include/linux/slob_def.h
72246index f28e14a..7831211 100644
72247--- a/include/linux/slob_def.h
72248+++ b/include/linux/slob_def.h
72249@@ -11,7 +11,7 @@ static __always_inline void *kmem_cache_alloc(struct kmem_cache *cachep,
72250 return kmem_cache_alloc_node(cachep, flags, NUMA_NO_NODE);
72251 }
72252
72253-void *__kmalloc_node(size_t size, gfp_t flags, int node);
72254+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
72255
72256 static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
72257 {
72258@@ -31,7 +31,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
72259 return __kmalloc_node(size, flags, NUMA_NO_NODE);
72260 }
72261
72262-static __always_inline void *__kmalloc(size_t size, gfp_t flags)
72263+static __always_inline __size_overflow(1) void *__kmalloc(size_t size, gfp_t flags)
72264 {
72265 return kmalloc(size, flags);
72266 }
72267diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
72268index 027276f..092bfe8 100644
72269--- a/include/linux/slub_def.h
72270+++ b/include/linux/slub_def.h
72271@@ -80,7 +80,7 @@ struct kmem_cache {
72272 struct kmem_cache_order_objects max;
72273 struct kmem_cache_order_objects min;
72274 gfp_t allocflags; /* gfp flags to use on each alloc */
72275- int refcount; /* Refcount for slab cache destroy */
72276+ atomic_t refcount; /* Refcount for slab cache destroy */
72277 void (*ctor)(void *);
72278 int inuse; /* Offset to metadata */
72279 int align; /* Alignment */
72280@@ -105,7 +105,7 @@ struct kmem_cache {
72281 };
72282
72283 void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
72284-void *__kmalloc(size_t size, gfp_t flags);
72285+void *__kmalloc(size_t size, gfp_t flags) __alloc_size(1) __size_overflow(1);
72286
72287 static __always_inline void *
72288 kmalloc_order(size_t size, gfp_t flags, unsigned int order)
72289@@ -149,7 +149,7 @@ kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
72290 }
72291 #endif
72292
72293-static __always_inline void *kmalloc_large(size_t size, gfp_t flags)
72294+static __always_inline __size_overflow(1) void *kmalloc_large(size_t size, gfp_t flags)
72295 {
72296 unsigned int order = get_order(size);
72297 return kmalloc_order_trace(size, flags, order);
72298@@ -175,7 +175,7 @@ static __always_inline void *kmalloc(size_t size, gfp_t flags)
72299 }
72300
72301 #ifdef CONFIG_NUMA
72302-void *__kmalloc_node(size_t size, gfp_t flags, int node);
72303+void *__kmalloc_node(size_t size, gfp_t flags, int node) __size_overflow(1);
72304 void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);
72305
72306 #ifdef CONFIG_TRACING
72307diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
72308index 54f91d3..be2c379 100644
72309--- a/include/linux/sock_diag.h
72310+++ b/include/linux/sock_diag.h
72311@@ -11,7 +11,7 @@ struct sock;
72312 struct sock_diag_handler {
72313 __u8 family;
72314 int (*dump)(struct sk_buff *skb, struct nlmsghdr *nlh);
72315-};
72316+} __do_const;
72317
72318 int sock_diag_register(const struct sock_diag_handler *h);
72319 void sock_diag_unregister(const struct sock_diag_handler *h);
72320diff --git a/include/linux/sonet.h b/include/linux/sonet.h
72321index 680f9a3..f13aeb0 100644
72322--- a/include/linux/sonet.h
72323+++ b/include/linux/sonet.h
72324@@ -7,7 +7,7 @@
72325 #include <uapi/linux/sonet.h>
72326
72327 struct k_sonet_stats {
72328-#define __HANDLE_ITEM(i) atomic_t i
72329+#define __HANDLE_ITEM(i) atomic_unchecked_t i
72330 __SONET_ITEMS
72331 #undef __HANDLE_ITEM
72332 };
72333diff --git a/include/linux/sunrpc/addr.h b/include/linux/sunrpc/addr.h
72334index 07d8e53..dc934c9 100644
72335--- a/include/linux/sunrpc/addr.h
72336+++ b/include/linux/sunrpc/addr.h
72337@@ -23,9 +23,9 @@ static inline unsigned short rpc_get_port(const struct sockaddr *sap)
72338 {
72339 switch (sap->sa_family) {
72340 case AF_INET:
72341- return ntohs(((struct sockaddr_in *)sap)->sin_port);
72342+ return ntohs(((const struct sockaddr_in *)sap)->sin_port);
72343 case AF_INET6:
72344- return ntohs(((struct sockaddr_in6 *)sap)->sin6_port);
72345+ return ntohs(((const struct sockaddr_in6 *)sap)->sin6_port);
72346 }
72347 return 0;
72348 }
72349@@ -58,7 +58,7 @@ static inline bool __rpc_cmp_addr4(const struct sockaddr *sap1,
72350 static inline bool __rpc_copy_addr4(struct sockaddr *dst,
72351 const struct sockaddr *src)
72352 {
72353- const struct sockaddr_in *ssin = (struct sockaddr_in *) src;
72354+ const struct sockaddr_in *ssin = (const struct sockaddr_in *) src;
72355 struct sockaddr_in *dsin = (struct sockaddr_in *) dst;
72356
72357 dsin->sin_family = ssin->sin_family;
72358@@ -164,7 +164,7 @@ static inline u32 rpc_get_scope_id(const struct sockaddr *sa)
72359 if (sa->sa_family != AF_INET6)
72360 return 0;
72361
72362- return ((struct sockaddr_in6 *) sa)->sin6_scope_id;
72363+ return ((const struct sockaddr_in6 *) sa)->sin6_scope_id;
72364 }
72365
72366 #endif /* _LINUX_SUNRPC_ADDR_H */
72367diff --git a/include/linux/sunrpc/clnt.h b/include/linux/sunrpc/clnt.h
72368index bfe11be..12bc8c4 100644
72369--- a/include/linux/sunrpc/clnt.h
72370+++ b/include/linux/sunrpc/clnt.h
72371@@ -96,7 +96,7 @@ struct rpc_procinfo {
72372 unsigned int p_timer; /* Which RTT timer to use */
72373 u32 p_statidx; /* Which procedure to account */
72374 const char * p_name; /* name of procedure */
72375-};
72376+} __do_const;
72377
72378 #ifdef __KERNEL__
72379
72380diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
72381index 1f0216b..6a4fa50 100644
72382--- a/include/linux/sunrpc/svc.h
72383+++ b/include/linux/sunrpc/svc.h
72384@@ -411,7 +411,7 @@ struct svc_procedure {
72385 unsigned int pc_count; /* call count */
72386 unsigned int pc_cachetype; /* cache info (NFS) */
72387 unsigned int pc_xdrressize; /* maximum size of XDR reply */
72388-};
72389+} __do_const;
72390
72391 /*
72392 * Function prototypes.
72393diff --git a/include/linux/sunrpc/svc_rdma.h b/include/linux/sunrpc/svc_rdma.h
72394index 0b8e3e6..33e0a01 100644
72395--- a/include/linux/sunrpc/svc_rdma.h
72396+++ b/include/linux/sunrpc/svc_rdma.h
72397@@ -53,15 +53,15 @@ extern unsigned int svcrdma_ord;
72398 extern unsigned int svcrdma_max_requests;
72399 extern unsigned int svcrdma_max_req_size;
72400
72401-extern atomic_t rdma_stat_recv;
72402-extern atomic_t rdma_stat_read;
72403-extern atomic_t rdma_stat_write;
72404-extern atomic_t rdma_stat_sq_starve;
72405-extern atomic_t rdma_stat_rq_starve;
72406-extern atomic_t rdma_stat_rq_poll;
72407-extern atomic_t rdma_stat_rq_prod;
72408-extern atomic_t rdma_stat_sq_poll;
72409-extern atomic_t rdma_stat_sq_prod;
72410+extern atomic_unchecked_t rdma_stat_recv;
72411+extern atomic_unchecked_t rdma_stat_read;
72412+extern atomic_unchecked_t rdma_stat_write;
72413+extern atomic_unchecked_t rdma_stat_sq_starve;
72414+extern atomic_unchecked_t rdma_stat_rq_starve;
72415+extern atomic_unchecked_t rdma_stat_rq_poll;
72416+extern atomic_unchecked_t rdma_stat_rq_prod;
72417+extern atomic_unchecked_t rdma_stat_sq_poll;
72418+extern atomic_unchecked_t rdma_stat_sq_prod;
72419
72420 #define RPCRDMA_VERSION 1
72421
72422diff --git a/include/linux/sunrpc/svcauth.h b/include/linux/sunrpc/svcauth.h
72423index ff374ab..7fd2ecb 100644
72424--- a/include/linux/sunrpc/svcauth.h
72425+++ b/include/linux/sunrpc/svcauth.h
72426@@ -109,7 +109,7 @@ struct auth_ops {
72427 int (*release)(struct svc_rqst *rq);
72428 void (*domain_release)(struct auth_domain *);
72429 int (*set_client)(struct svc_rqst *rq);
72430-};
72431+} __do_const;
72432
72433 #define SVC_GARBAGE 1
72434 #define SVC_SYSERR 2
72435diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
72436index a5ffd32..0935dea 100644
72437--- a/include/linux/swiotlb.h
72438+++ b/include/linux/swiotlb.h
72439@@ -60,7 +60,8 @@ extern void
72440
72441 extern void
72442 swiotlb_free_coherent(struct device *hwdev, size_t size,
72443- void *vaddr, dma_addr_t dma_handle);
72444+ void *vaddr, dma_addr_t dma_handle,
72445+ struct dma_attrs *attrs);
72446
72447 extern dma_addr_t swiotlb_map_page(struct device *dev, struct page *page,
72448 unsigned long offset, size_t size,
72449diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
72450index 4147d70..31a1791 100644
72451--- a/include/linux/syscalls.h
72452+++ b/include/linux/syscalls.h
72453@@ -362,11 +362,11 @@ asmlinkage long sys_sync(void);
72454 asmlinkage long sys_fsync(unsigned int fd);
72455 asmlinkage long sys_fdatasync(unsigned int fd);
72456 asmlinkage long sys_bdflush(int func, long data);
72457-asmlinkage long sys_mount(char __user *dev_name, char __user *dir_name,
72458- char __user *type, unsigned long flags,
72459+asmlinkage long sys_mount(const char __user *dev_name, const char __user *dir_name,
72460+ const char __user *type, unsigned long flags,
72461 void __user *data);
72462-asmlinkage long sys_umount(char __user *name, int flags);
72463-asmlinkage long sys_oldumount(char __user *name);
72464+asmlinkage long sys_umount(const char __user *name, int flags);
72465+asmlinkage long sys_oldumount(const char __user *name);
72466 asmlinkage long sys_truncate(const char __user *path, long length);
72467 asmlinkage long sys_ftruncate(unsigned int fd, unsigned long length);
72468 asmlinkage long sys_stat(const char __user *filename,
72469@@ -578,7 +578,7 @@ asmlinkage long sys_getsockname(int, struct sockaddr __user *, int __user *);
72470 asmlinkage long sys_getpeername(int, struct sockaddr __user *, int __user *);
72471 asmlinkage long sys_send(int, void __user *, size_t, unsigned);
72472 asmlinkage long sys_sendto(int, void __user *, size_t, unsigned,
72473- struct sockaddr __user *, int);
72474+ struct sockaddr __user *, int) __intentional_overflow(0);
72475 asmlinkage long sys_sendmsg(int fd, struct msghdr __user *msg, unsigned flags);
72476 asmlinkage long sys_sendmmsg(int fd, struct mmsghdr __user *msg,
72477 unsigned int vlen, unsigned flags);
72478diff --git a/include/linux/syscore_ops.h b/include/linux/syscore_ops.h
72479index 27b3b0b..e093dd9 100644
72480--- a/include/linux/syscore_ops.h
72481+++ b/include/linux/syscore_ops.h
72482@@ -16,7 +16,7 @@ struct syscore_ops {
72483 int (*suspend)(void);
72484 void (*resume)(void);
72485 void (*shutdown)(void);
72486-};
72487+} __do_const;
72488
72489 extern void register_syscore_ops(struct syscore_ops *ops);
72490 extern void unregister_syscore_ops(struct syscore_ops *ops);
72491diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
72492index 14a8ff2..af52bad 100644
72493--- a/include/linux/sysctl.h
72494+++ b/include/linux/sysctl.h
72495@@ -34,13 +34,13 @@ struct ctl_table_root;
72496 struct ctl_table_header;
72497 struct ctl_dir;
72498
72499-typedef struct ctl_table ctl_table;
72500-
72501 typedef int proc_handler (struct ctl_table *ctl, int write,
72502 void __user *buffer, size_t *lenp, loff_t *ppos);
72503
72504 extern int proc_dostring(struct ctl_table *, int,
72505 void __user *, size_t *, loff_t *);
72506+extern int proc_dostring_modpriv(struct ctl_table *, int,
72507+ void __user *, size_t *, loff_t *);
72508 extern int proc_dointvec(struct ctl_table *, int,
72509 void __user *, size_t *, loff_t *);
72510 extern int proc_dointvec_minmax(struct ctl_table *, int,
72511@@ -115,7 +115,9 @@ struct ctl_table
72512 struct ctl_table_poll *poll;
72513 void *extra1;
72514 void *extra2;
72515-};
72516+} __do_const;
72517+typedef struct ctl_table __no_const ctl_table_no_const;
72518+typedef struct ctl_table ctl_table;
72519
72520 struct ctl_node {
72521 struct rb_node node;
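
The sysctl hunk constifies struct ctl_table and adds ctl_table_no_const for the minority of tables assembled at runtime; the ip_vs hunk further down uses exactly that typedef for its per-netns lblc/lblcr tables. A sketch of the intended split, under the constify-plugin semantics noted earlier (the per-namespace value is a hypothetical placeholder):

    /* Static tables become read-only data via __do_const ... */
    static struct ctl_table example_table[] = {
        { .procname = "example", .mode = 0644,
          .proc_handler = proc_dointvec },
        { }
    };

    /* ... while per-namespace clones patched at runtime use the
     * writable typedef, mirroring the ip_vs usage below. */
    static int clone_table(void *per_ns_value)
    {
        ctl_table_no_const *tbl;

        tbl = kmemdup(example_table, sizeof(example_table), GFP_KERNEL);
        if (!tbl)
            return -ENOMEM;
        tbl[0].data = per_ns_value;   /* legal: tbl is not const */
        return 0;
    }
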
72522diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
72523index e2cee22..3ddb921 100644
72524--- a/include/linux/sysfs.h
72525+++ b/include/linux/sysfs.h
72526@@ -31,7 +31,8 @@ struct attribute {
72527 struct lock_class_key *key;
72528 struct lock_class_key skey;
72529 #endif
72530-};
72531+} __do_const;
72532+typedef struct attribute __no_const attribute_no_const;
72533
72534 /**
72535 * sysfs_attr_init - initialize a dynamically allocated sysfs attribute
72536@@ -59,8 +60,8 @@ struct attribute_group {
72537 umode_t (*is_visible)(struct kobject *,
72538 struct attribute *, int);
72539 struct attribute **attrs;
72540-};
72541-
72542+} __do_const;
72543+typedef struct attribute_group __no_const attribute_group_no_const;
72544
72545
72546 /**
72547@@ -107,7 +108,8 @@ struct bin_attribute {
72548 char *, loff_t, size_t);
72549 int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
72550 struct vm_area_struct *vma);
72551-};
72552+} __do_const;
72553+typedef struct bin_attribute __no_const bin_attribute_no_const;
72554
72555 /**
72556 * sysfs_bin_attr_init - initialize a dynamically allocated bin_attribute
72557diff --git a/include/linux/sysrq.h b/include/linux/sysrq.h
72558index 7faf933..9b85a0c 100644
72559--- a/include/linux/sysrq.h
72560+++ b/include/linux/sysrq.h
72561@@ -16,6 +16,7 @@
72562
72563 #include <linux/errno.h>
72564 #include <linux/types.h>
72565+#include <linux/compiler.h>
72566
72567 /* Enable/disable SYSRQ support by default (0==no, 1==yes). */
72568 #define SYSRQ_DEFAULT_ENABLE 1
72569@@ -36,7 +37,7 @@ struct sysrq_key_op {
72570 char *help_msg;
72571 char *action_msg;
72572 int enable_mask;
72573-};
72574+} __do_const;
72575
72576 #ifdef CONFIG_MAGIC_SYSRQ
72577
72578diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
72579index e7e0473..7989295 100644
72580--- a/include/linux/thread_info.h
72581+++ b/include/linux/thread_info.h
72582@@ -148,6 +148,15 @@ static inline bool test_and_clear_restore_sigmask(void)
72583 #error "no set_restore_sigmask() provided and default one won't work"
72584 #endif
72585
72586+extern void __check_object_size(const void *ptr, unsigned long n, bool to_user);
72587+static inline void check_object_size(const void *ptr, unsigned long n, bool to_user)
72588+{
72589+#ifndef CONFIG_PAX_USERCOPY_DEBUG
72590+ if (!__builtin_constant_p(n))
72591+#endif
72592+ __check_object_size(ptr, n, to_user);
72593+}
72594+
72595 #endif /* __KERNEL__ */
72596
72597 #endif /* _LINUX_THREAD_INFO_H */
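
check_object_size() above is the PAX_USERCOPY entry point. Copies with a compile-time-constant length skip the runtime check (unless CONFIG_PAX_USERCOPY_DEBUG forces it), since constant sizes are vetted statically; only variable-length copies pay for __check_object_size(), which verifies the copy stays within a single heap or stack object. A sketch of a call site, loosely modeled on the arch uaccess wrappers this patch touches elsewhere:

    static inline unsigned long
    copy_to_user_hardened(void __user *to, const void *from, unsigned long n)
    {
        if (access_ok(VERIFY_WRITE, to, n))
            check_object_size(from, n, true); /* true: kernel -> user */
        return copy_to_user(to, from, n);
    }
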
72598diff --git a/include/linux/tty.h b/include/linux/tty.h
72599index 8780bd2..d1ae08b 100644
72600--- a/include/linux/tty.h
72601+++ b/include/linux/tty.h
72602@@ -194,7 +194,7 @@ struct tty_port {
72603 const struct tty_port_operations *ops; /* Port operations */
72604 spinlock_t lock; /* Lock protecting tty field */
72605 int blocked_open; /* Waiting to open */
72606- int count; /* Usage count */
72607+ atomic_t count; /* Usage count */
72608 wait_queue_head_t open_wait; /* Open waiters */
72609 wait_queue_head_t close_wait; /* Close waiters */
72610 wait_queue_head_t delta_msr_wait; /* Modem status change */
72611@@ -550,7 +550,7 @@ extern int tty_port_open(struct tty_port *port,
72612 struct tty_struct *tty, struct file *filp);
72613 static inline int tty_port_users(struct tty_port *port)
72614 {
72615- return port->count + port->blocked_open;
72616+ return atomic_read(&port->count) + port->blocked_open;
72617 }
72618
72619 extern int tty_register_ldisc(int disc, struct tty_ldisc_ops *new_ldisc);
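
Converting tty_port.count to atomic_t lets the open/close refcount be adjusted without holding the port lock, at the cost of converting every reader and writer; tty_port_users() above shows the read side. A sketch of the corresponding write side, assuming the drivers converted elsewhere in this patch follow this pattern:

    /* Open path (illustrative): was port->count++ under port->lock. */
    static void example_port_open(struct tty_port *port)
    {
        atomic_inc(&port->count);
    }

    /* Close path: drop a reference, report whether it was the last. */
    static int example_port_close(struct tty_port *port)
    {
        return atomic_dec_return(&port->count) == 0;
    }
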
72620diff --git a/include/linux/tty_driver.h b/include/linux/tty_driver.h
72621index 756a609..b302dd6 100644
72622--- a/include/linux/tty_driver.h
72623+++ b/include/linux/tty_driver.h
72624@@ -285,7 +285,7 @@ struct tty_operations {
72625 void (*poll_put_char)(struct tty_driver *driver, int line, char ch);
72626 #endif
72627 const struct file_operations *proc_fops;
72628-};
72629+} __do_const;
72630
72631 struct tty_driver {
72632 int magic; /* magic number for this structure */
72633diff --git a/include/linux/tty_ldisc.h b/include/linux/tty_ldisc.h
72634index 58390c7..95e214c 100644
72635--- a/include/linux/tty_ldisc.h
72636+++ b/include/linux/tty_ldisc.h
72637@@ -146,7 +146,7 @@ struct tty_ldisc_ops {
72638
72639 struct module *owner;
72640
72641- int refcount;
72642+ atomic_t refcount;
72643 };
72644
72645 struct tty_ldisc {
72646diff --git a/include/linux/types.h b/include/linux/types.h
72647index 4d118ba..c3ee9bf 100644
72648--- a/include/linux/types.h
72649+++ b/include/linux/types.h
72650@@ -176,10 +176,26 @@ typedef struct {
72651 int counter;
72652 } atomic_t;
72653
72654+#ifdef CONFIG_PAX_REFCOUNT
72655+typedef struct {
72656+ int counter;
72657+} atomic_unchecked_t;
72658+#else
72659+typedef atomic_t atomic_unchecked_t;
72660+#endif
72661+
72662 #ifdef CONFIG_64BIT
72663 typedef struct {
72664 long counter;
72665 } atomic64_t;
72666+
72667+#ifdef CONFIG_PAX_REFCOUNT
72668+typedef struct {
72669+ long counter;
72670+} atomic64_unchecked_t;
72671+#else
72672+typedef atomic64_t atomic64_unchecked_t;
72673+#endif
72674 #endif
72675
72676 struct list_head {
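
atomic_unchecked_t is the other half of PAX_REFCOUNT: with the option enabled, ordinary atomic_t operations trap on signed overflow, while the _unchecked variants retain plain wrapping semantics for counters where wraparound is harmless. A conceptual x86 sketch of the difference (an approximation of the PaX instrumentation, not the literal patch code):

    static inline void atomic_inc_checked(atomic_t *v)
    {
        asm volatile("lock incl %0\n\t"
                     "jno 0f\n\t"        /* no signed overflow: done */
                     "lock decl %0\n\t"  /* undo the increment */
                     "int $4\n\t"        /* raise #OF for PaX to catch */
                     "0:"
                     : "+m" (v->counter));
    }

    static inline void atomic_inc_unchecked(atomic_unchecked_t *v)
    {
        asm volatile("lock incl %0" : "+m" (v->counter)); /* may wrap */
    }
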
72677diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
72678index 5ca0951..ab496a5 100644
72679--- a/include/linux/uaccess.h
72680+++ b/include/linux/uaccess.h
72681@@ -76,11 +76,11 @@ static inline unsigned long __copy_from_user_nocache(void *to,
72682 long ret; \
72683 mm_segment_t old_fs = get_fs(); \
72684 \
72685- set_fs(KERNEL_DS); \
72686 pagefault_disable(); \
72687- ret = __copy_from_user_inatomic(&(retval), (__force typeof(retval) __user *)(addr), sizeof(retval)); \
72688- pagefault_enable(); \
72689+ set_fs(KERNEL_DS); \
72690+ ret = __copy_from_user_inatomic(&(retval), (typeof(retval) __force_user *)(addr), sizeof(retval)); \
72691 set_fs(old_fs); \
72692+ pagefault_enable(); \
72693 ret; \
72694 })
72695
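
The reordering above narrows the window in which the task runs with KERNEL_DS limits: the address limit is now raised only inside the pagefault-disabled (hence non-preemptible) region and restored before faults are re-enabled. The same sequence rewritten as a function for readability (a sketch, not the macro's literal expansion):

    static long probe_read_sketch(void *dst, const void *src, size_t size)
    {
        long ret;
        mm_segment_t old_fs = get_fs();

        pagefault_disable();          /* non-preemptible from here on */
        set_fs(KERNEL_DS);            /* widen limits only while atomic */
        ret = __copy_from_user_inatomic(dst,
                        (__force const void __user *)src, size);
        set_fs(old_fs);               /* restore before faulting again */
        pagefault_enable();
        return ret;
    }
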
72696diff --git a/include/linux/uidgid.h b/include/linux/uidgid.h
72697index 8e522cbc..aa8572d 100644
72698--- a/include/linux/uidgid.h
72699+++ b/include/linux/uidgid.h
72700@@ -197,4 +197,9 @@ static inline bool kgid_has_mapping(struct user_namespace *ns, kgid_t gid)
72701
72702 #endif /* CONFIG_USER_NS */
72703
72704+#define GR_GLOBAL_UID(x) from_kuid_munged(&init_user_ns, (x))
72705+#define GR_GLOBAL_GID(x) from_kgid_munged(&init_user_ns, (x))
72706+#define gr_is_global_root(x) uid_eq((x), GLOBAL_ROOT_UID)
72707+#define gr_is_global_nonroot(x) (!uid_eq((x), GLOBAL_ROOT_UID))
72708+
72709 #endif /* _LINUX_UIDGID_H */
72710diff --git a/include/linux/unaligned/access_ok.h b/include/linux/unaligned/access_ok.h
72711index 99c1b4d..562e6f3 100644
72712--- a/include/linux/unaligned/access_ok.h
72713+++ b/include/linux/unaligned/access_ok.h
72714@@ -4,34 +4,34 @@
72715 #include <linux/kernel.h>
72716 #include <asm/byteorder.h>
72717
72718-static inline u16 get_unaligned_le16(const void *p)
72719+static inline u16 __intentional_overflow(-1) get_unaligned_le16(const void *p)
72720 {
72721- return le16_to_cpup((__le16 *)p);
72722+ return le16_to_cpup((const __le16 *)p);
72723 }
72724
72725-static inline u32 get_unaligned_le32(const void *p)
72726+static inline u32 __intentional_overflow(-1) get_unaligned_le32(const void *p)
72727 {
72728- return le32_to_cpup((__le32 *)p);
72729+ return le32_to_cpup((const __le32 *)p);
72730 }
72731
72732-static inline u64 get_unaligned_le64(const void *p)
72733+static inline u64 __intentional_overflow(-1) get_unaligned_le64(const void *p)
72734 {
72735- return le64_to_cpup((__le64 *)p);
72736+ return le64_to_cpup((const __le64 *)p);
72737 }
72738
72739-static inline u16 get_unaligned_be16(const void *p)
72740+static inline u16 __intentional_overflow(-1) get_unaligned_be16(const void *p)
72741 {
72742- return be16_to_cpup((__be16 *)p);
72743+ return be16_to_cpup((const __be16 *)p);
72744 }
72745
72746-static inline u32 get_unaligned_be32(const void *p)
72747+static inline u32 __intentional_overflow(-1) get_unaligned_be32(const void *p)
72748 {
72749- return be32_to_cpup((__be32 *)p);
72750+ return be32_to_cpup((const __be32 *)p);
72751 }
72752
72753-static inline u64 get_unaligned_be64(const void *p)
72754+static inline u64 __intentional_overflow(-1) get_unaligned_be64(const void *p)
72755 {
72756- return be64_to_cpup((__be64 *)p);
72757+ return be64_to_cpup((const __be64 *)p);
72758 }
72759
72760 static inline void put_unaligned_le16(u16 val, void *p)
72761diff --git a/include/linux/usb.h b/include/linux/usb.h
72762index a0bee5a..5533a52 100644
72763--- a/include/linux/usb.h
72764+++ b/include/linux/usb.h
72765@@ -552,7 +552,7 @@ struct usb_device {
72766 int maxchild;
72767
72768 u32 quirks;
72769- atomic_t urbnum;
72770+ atomic_unchecked_t urbnum;
72771
72772 unsigned long active_duration;
72773
72774@@ -1607,7 +1607,7 @@ void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
72775
72776 extern int usb_control_msg(struct usb_device *dev, unsigned int pipe,
72777 __u8 request, __u8 requesttype, __u16 value, __u16 index,
72778- void *data, __u16 size, int timeout);
72779+ void *data, __u16 size, int timeout) __intentional_overflow(-1);
72780 extern int usb_interrupt_msg(struct usb_device *usb_dev, unsigned int pipe,
72781 void *data, int len, int *actual_length, int timeout);
72782 extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe,
72783diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
72784index e452ba6..78f8e80 100644
72785--- a/include/linux/usb/renesas_usbhs.h
72786+++ b/include/linux/usb/renesas_usbhs.h
72787@@ -39,7 +39,7 @@ enum {
72788 */
72789 struct renesas_usbhs_driver_callback {
72790 int (*notify_hotplug)(struct platform_device *pdev);
72791-};
72792+} __no_const;
72793
72794 /*
72795 * callback functions for platform
72796diff --git a/include/linux/vermagic.h b/include/linux/vermagic.h
72797index 6f8fbcf..8259001 100644
72798--- a/include/linux/vermagic.h
72799+++ b/include/linux/vermagic.h
72800@@ -25,9 +25,35 @@
72801 #define MODULE_ARCH_VERMAGIC ""
72802 #endif
72803
72804+#ifdef CONFIG_PAX_REFCOUNT
72805+#define MODULE_PAX_REFCOUNT "REFCOUNT "
72806+#else
72807+#define MODULE_PAX_REFCOUNT ""
72808+#endif
72809+
72810+#ifdef CONSTIFY_PLUGIN
72811+#define MODULE_CONSTIFY_PLUGIN "CONSTIFY_PLUGIN "
72812+#else
72813+#define MODULE_CONSTIFY_PLUGIN ""
72814+#endif
72815+
72816+#ifdef STACKLEAK_PLUGIN
72817+#define MODULE_STACKLEAK_PLUGIN "STACKLEAK_PLUGIN "
72818+#else
72819+#define MODULE_STACKLEAK_PLUGIN ""
72820+#endif
72821+
72822+#ifdef CONFIG_GRKERNSEC
72823+#define MODULE_GRSEC "GRSEC "
72824+#else
72825+#define MODULE_GRSEC ""
72826+#endif
72827+
72828 #define VERMAGIC_STRING \
72829 UTS_RELEASE " " \
72830 MODULE_VERMAGIC_SMP MODULE_VERMAGIC_PREEMPT \
72831 MODULE_VERMAGIC_MODULE_UNLOAD MODULE_VERMAGIC_MODVERSIONS \
72832- MODULE_ARCH_VERMAGIC
72833+ MODULE_ARCH_VERMAGIC \
72834+ MODULE_PAX_REFCOUNT MODULE_CONSTIFY_PLUGIN MODULE_STACKLEAK_PLUGIN \
72835+ MODULE_GRSEC
72836
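
The extra vermagic tokens fold the hardening configuration into the module ABI check, so a module built without REFCOUNT or the constify/stackleak plugins refuses to load into a kernel built with them, and vice versa. An illustrative expansion on a hypothetical configuration (an example, not taken from the patch):

    /* e.g. VERMAGIC_STRING might expand to: */
    #define VERMAGIC_EXAMPLE \
        "3.10.3-grsec SMP mod_unload REFCOUNT CONSTIFY_PLUGIN STACKLEAK_PLUGIN GRSEC "
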
72837diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
72838index 7d5773a..541c01c 100644
72839--- a/include/linux/vmalloc.h
72840+++ b/include/linux/vmalloc.h
72841@@ -16,6 +16,11 @@ struct vm_area_struct; /* vma defining user mapping in mm_types.h */
72842 #define VM_USERMAP 0x00000008 /* suitable for remap_vmalloc_range */
72843 #define VM_VPAGES 0x00000010 /* buffer for pages was vmalloc'ed */
72844 #define VM_UNLIST 0x00000020 /* vm_struct is not listed in vmlist */
72845+
72846+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
72847+#define VM_KERNEXEC 0x00000040 /* allocate from executable kernel memory range */
72848+#endif
72849+
72850 /* bits [20..32] reserved for arch specific ioremap internals */
72851
72852 /*
72853@@ -75,7 +80,7 @@ extern void *vmalloc_32_user(unsigned long size);
72854 extern void *__vmalloc(unsigned long size, gfp_t gfp_mask, pgprot_t prot);
72855 extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
72856 unsigned long start, unsigned long end, gfp_t gfp_mask,
72857- pgprot_t prot, int node, const void *caller);
72858+ pgprot_t prot, int node, const void *caller) __size_overflow(1);
72859 extern void vfree(const void *addr);
72860
72861 extern void *vmap(struct page **pages, unsigned int count,
72862@@ -137,8 +142,8 @@ extern struct vm_struct *alloc_vm_area(size_t size, pte_t **ptes);
72863 extern void free_vm_area(struct vm_struct *area);
72864
72865 /* for /dev/kmem */
72866-extern long vread(char *buf, char *addr, unsigned long count);
72867-extern long vwrite(char *buf, char *addr, unsigned long count);
72868+extern long vread(char *buf, char *addr, unsigned long count) __size_overflow(3);
72869+extern long vwrite(char *buf, char *addr, unsigned long count) __size_overflow(3);
72870
72871 /*
72872 * Internals. Don't use..
72873diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h
72874index c586679..f06b389 100644
72875--- a/include/linux/vmstat.h
72876+++ b/include/linux/vmstat.h
72877@@ -90,18 +90,18 @@ static inline void vm_events_fold_cpu(int cpu)
72878 /*
72879 * Zone based page accounting with per cpu differentials.
72880 */
72881-extern atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
72882+extern atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS];
72883
72884 static inline void zone_page_state_add(long x, struct zone *zone,
72885 enum zone_stat_item item)
72886 {
72887- atomic_long_add(x, &zone->vm_stat[item]);
72888- atomic_long_add(x, &vm_stat[item]);
72889+ atomic_long_add_unchecked(x, &zone->vm_stat[item]);
72890+ atomic_long_add_unchecked(x, &vm_stat[item]);
72891 }
72892
72893 static inline unsigned long global_page_state(enum zone_stat_item item)
72894 {
72895- long x = atomic_long_read(&vm_stat[item]);
72896+ long x = atomic_long_read_unchecked(&vm_stat[item]);
72897 #ifdef CONFIG_SMP
72898 if (x < 0)
72899 x = 0;
72900@@ -112,7 +112,7 @@ static inline unsigned long global_page_state(enum zone_stat_item item)
72901 static inline unsigned long zone_page_state(struct zone *zone,
72902 enum zone_stat_item item)
72903 {
72904- long x = atomic_long_read(&zone->vm_stat[item]);
72905+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
72906 #ifdef CONFIG_SMP
72907 if (x < 0)
72908 x = 0;
72909@@ -129,7 +129,7 @@ static inline unsigned long zone_page_state(struct zone *zone,
72910 static inline unsigned long zone_page_state_snapshot(struct zone *zone,
72911 enum zone_stat_item item)
72912 {
72913- long x = atomic_long_read(&zone->vm_stat[item]);
72914+ long x = atomic_long_read_unchecked(&zone->vm_stat[item]);
72915
72916 #ifdef CONFIG_SMP
72917 int cpu;
72918@@ -221,8 +221,8 @@ static inline void __mod_zone_page_state(struct zone *zone,
72919
72920 static inline void __inc_zone_state(struct zone *zone, enum zone_stat_item item)
72921 {
72922- atomic_long_inc(&zone->vm_stat[item]);
72923- atomic_long_inc(&vm_stat[item]);
72924+ atomic_long_inc_unchecked(&zone->vm_stat[item]);
72925+ atomic_long_inc_unchecked(&vm_stat[item]);
72926 }
72927
72928 static inline void __inc_zone_page_state(struct page *page,
72929@@ -233,8 +233,8 @@ static inline void __inc_zone_page_state(struct page *page,
72930
72931 static inline void __dec_zone_state(struct zone *zone, enum zone_stat_item item)
72932 {
72933- atomic_long_dec(&zone->vm_stat[item]);
72934- atomic_long_dec(&vm_stat[item]);
72935+ atomic_long_dec_unchecked(&zone->vm_stat[item]);
72936+ atomic_long_dec_unchecked(&vm_stat[item]);
72937 }
72938
72939 static inline void __dec_zone_page_state(struct page *page,
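
The vmstat counters are exactly what the _unchecked variants exist for: they are statistics rather than object lifetimes, so trapping on wraparound would be a false positive under PAX_REFCOUNT. The rule of thumb, as an illustrative pairing (not from the patch):

    /* Statistics: wraparound is harmless, keep it unchecked. */
    static atomic_long_unchecked_t ev_pages_scanned;

    /* Lifetime refcount: an overflow here would be an exploitable
     * use-after-free primitive, so leave it checked and let
     * PAX_REFCOUNT trap instead of wrapping. */
    static atomic_t obj_refcount;

    static void scan_page(void)
    {
        atomic_long_inc_unchecked(&ev_pages_scanned);
    }

    static void get_object(void)
    {
        atomic_inc(&obj_refcount);
    }
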
72940diff --git a/include/linux/xattr.h b/include/linux/xattr.h
72941index fdbafc6..49dfe4f 100644
72942--- a/include/linux/xattr.h
72943+++ b/include/linux/xattr.h
72944@@ -28,7 +28,7 @@ struct xattr_handler {
72945 size_t size, int handler_flags);
72946 int (*set)(struct dentry *dentry, const char *name, const void *buffer,
72947 size_t size, int flags, int handler_flags);
72948-};
72949+} __do_const;
72950
72951 struct xattr {
72952 char *name;
72953@@ -37,6 +37,9 @@ struct xattr {
72954 };
72955
72956 ssize_t xattr_getsecurity(struct inode *, const char *, void *, size_t);
72957+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
72958+ssize_t pax_getxattr(struct dentry *, void *, size_t);
72959+#endif
72960 ssize_t vfs_getxattr(struct dentry *, const char *, void *, size_t);
72961 ssize_t vfs_listxattr(struct dentry *d, char *list, size_t size);
72962 int __vfs_setxattr_noperm(struct dentry *, const char *, const void *, size_t, int);
72963diff --git a/include/linux/zlib.h b/include/linux/zlib.h
72964index 9c5a6b4..09c9438 100644
72965--- a/include/linux/zlib.h
72966+++ b/include/linux/zlib.h
72967@@ -31,6 +31,7 @@
72968 #define _ZLIB_H
72969
72970 #include <linux/zconf.h>
72971+#include <linux/compiler.h>
72972
72973 /* zlib deflate based on ZLIB_VERSION "1.1.3" */
72974 /* zlib inflate based on ZLIB_VERSION "1.2.3" */
72975@@ -179,7 +180,7 @@ typedef z_stream *z_streamp;
72976
72977 /* basic functions */
72978
72979-extern int zlib_deflate_workspacesize (int windowBits, int memLevel);
72980+extern int zlib_deflate_workspacesize (int windowBits, int memLevel) __intentional_overflow(0);
72981 /*
72982 Returns the number of bytes that needs to be allocated for a per-
72983 stream workspace with the specified parameters. A pointer to this
72984diff --git a/include/media/v4l2-dev.h b/include/media/v4l2-dev.h
72985index 95d1c91..6798cca 100644
72986--- a/include/media/v4l2-dev.h
72987+++ b/include/media/v4l2-dev.h
72988@@ -76,7 +76,7 @@ struct v4l2_file_operations {
72989 int (*mmap) (struct file *, struct vm_area_struct *);
72990 int (*open) (struct file *);
72991 int (*release) (struct file *);
72992-};
72993+} __do_const;
72994
72995 /*
72996 * Newer version of video_device, handled by videodev2.c
72997diff --git a/include/net/9p/transport.h b/include/net/9p/transport.h
72998index adcbb20..62c2559 100644
72999--- a/include/net/9p/transport.h
73000+++ b/include/net/9p/transport.h
73001@@ -57,7 +57,7 @@ struct p9_trans_module {
73002 int (*cancel) (struct p9_client *, struct p9_req_t *req);
73003 int (*zc_request)(struct p9_client *, struct p9_req_t *,
73004 char *, char *, int , int, int, int);
73005-};
73006+} __do_const;
73007
73008 void v9fs_register_trans(struct p9_trans_module *m);
73009 void v9fs_unregister_trans(struct p9_trans_module *m);
73010diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
73011index fb94cf1..7c0c987 100644
73012--- a/include/net/bluetooth/l2cap.h
73013+++ b/include/net/bluetooth/l2cap.h
73014@@ -551,7 +551,7 @@ struct l2cap_ops {
73015 void (*defer) (struct l2cap_chan *chan);
73016 struct sk_buff *(*alloc_skb) (struct l2cap_chan *chan,
73017 unsigned long len, int nb);
73018-};
73019+} __do_const;
73020
73021 struct l2cap_conn {
73022 struct hci_conn *hcon;
73023diff --git a/include/net/caif/cfctrl.h b/include/net/caif/cfctrl.h
73024index f2ae33d..c457cf0 100644
73025--- a/include/net/caif/cfctrl.h
73026+++ b/include/net/caif/cfctrl.h
73027@@ -52,7 +52,7 @@ struct cfctrl_rsp {
73028 void (*radioset_rsp)(void);
73029 void (*reject_rsp)(struct cflayer *layer, u8 linkid,
73030 struct cflayer *client_layer);
73031-};
73032+} __no_const;
73033
73034 /* Link Setup Parameters for CAIF-Links. */
73035 struct cfctrl_link_param {
73036@@ -101,8 +101,8 @@ struct cfctrl_request_info {
73037 struct cfctrl {
73038 struct cfsrvl serv;
73039 struct cfctrl_rsp res;
73040- atomic_t req_seq_no;
73041- atomic_t rsp_seq_no;
73042+ atomic_unchecked_t req_seq_no;
73043+ atomic_unchecked_t rsp_seq_no;
73044 struct list_head list;
73045 /* Protects from simultaneous access to first_req list */
73046 spinlock_t info_list_lock;
73047diff --git a/include/net/flow.h b/include/net/flow.h
73048index 628e11b..4c475df 100644
73049--- a/include/net/flow.h
73050+++ b/include/net/flow.h
73051@@ -221,6 +221,6 @@ extern struct flow_cache_object *flow_cache_lookup(
73052
73053 extern void flow_cache_flush(void);
73054 extern void flow_cache_flush_deferred(void);
73055-extern atomic_t flow_cache_genid;
73056+extern atomic_unchecked_t flow_cache_genid;
73057
73058 #endif
73059diff --git a/include/net/genetlink.h b/include/net/genetlink.h
73060index 93024a4..eeb6b6e 100644
73061--- a/include/net/genetlink.h
73062+++ b/include/net/genetlink.h
73063@@ -119,7 +119,7 @@ struct genl_ops {
73064 struct netlink_callback *cb);
73065 int (*done)(struct netlink_callback *cb);
73066 struct list_head ops_list;
73067-};
73068+} __do_const;
73069
73070 extern int genl_register_family(struct genl_family *family);
73071 extern int genl_register_family_with_ops(struct genl_family *family,
73072diff --git a/include/net/gro_cells.h b/include/net/gro_cells.h
73073index 734d9b5..48a9a4b 100644
73074--- a/include/net/gro_cells.h
73075+++ b/include/net/gro_cells.h
73076@@ -29,7 +29,7 @@ static inline void gro_cells_receive(struct gro_cells *gcells, struct sk_buff *s
73077 cell += skb_get_rx_queue(skb) & gcells->gro_cells_mask;
73078
73079 if (skb_queue_len(&cell->napi_skbs) > netdev_max_backlog) {
73080- atomic_long_inc(&dev->rx_dropped);
73081+ atomic_long_inc_unchecked(&dev->rx_dropped);
73082 kfree_skb(skb);
73083 return;
73084 }
73085diff --git a/include/net/inet_connection_sock.h b/include/net/inet_connection_sock.h
73086index de2c785..0588a6b 100644
73087--- a/include/net/inet_connection_sock.h
73088+++ b/include/net/inet_connection_sock.h
73089@@ -62,7 +62,7 @@ struct inet_connection_sock_af_ops {
73090 void (*addr2sockaddr)(struct sock *sk, struct sockaddr *);
73091 int (*bind_conflict)(const struct sock *sk,
73092 const struct inet_bind_bucket *tb, bool relax);
73093-};
73094+} __do_const;
73095
73096 /** inet_connection_sock - INET connection oriented sock
73097 *
73098diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
73099index 53f464d..ba76aaa 100644
73100--- a/include/net/inetpeer.h
73101+++ b/include/net/inetpeer.h
73102@@ -47,8 +47,8 @@ struct inet_peer {
73103 */
73104 union {
73105 struct {
73106- atomic_t rid; /* Frag reception counter */
73107- atomic_t ip_id_count; /* IP ID for the next packet */
73108+ atomic_unchecked_t rid; /* Frag reception counter */
73109+ atomic_unchecked_t ip_id_count; /* IP ID for the next packet */
73110 };
73111 struct rcu_head rcu;
73112 struct inet_peer *gc_next;
73113@@ -182,11 +182,11 @@ static inline int inet_getid(struct inet_peer *p, int more)
73114 more++;
73115 inet_peer_refcheck(p);
73116 do {
73117- old = atomic_read(&p->ip_id_count);
73118+ old = atomic_read_unchecked(&p->ip_id_count);
73119 new = old + more;
73120 if (!new)
73121 new = 1;
73122- } while (atomic_cmpxchg(&p->ip_id_count, old, new) != old);
73123+ } while (atomic_cmpxchg_unchecked(&p->ip_id_count, old, new) != old);
73124 return new;
73125 }
73126
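
inet_getid() is a textbook deliberate wrap: IP IDs are 16-bit sequence material, so the counter and its cmpxchg loop switch to the unchecked variants rather than letting PAX_REFCOUNT trap on the inevitable overflow. The loop's semantics in plain single-threaded C (a model only; the kernel version above needs cmpxchg for concurrency):

    /* Userspace model of the id generator: wraps freely and skips 0,
     * mirroring the "if (!new) new = 1" step above. */
    static int ip_id_next(int *counter, int more)
    {
        int new = *counter + more + 1;

        if (!new)
            new = 1;
        *counter = new;
        return new;
    }
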
73127diff --git a/include/net/ip.h b/include/net/ip.h
73128index a68f838..74518ab 100644
73129--- a/include/net/ip.h
73130+++ b/include/net/ip.h
73131@@ -202,7 +202,7 @@ extern struct local_ports {
73132 } sysctl_local_ports;
73133 extern void inet_get_local_port_range(int *low, int *high);
73134
73135-extern unsigned long *sysctl_local_reserved_ports;
73136+extern unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
73137 static inline int inet_is_reserved_local_port(int port)
73138 {
73139 return test_bit(port, sysctl_local_reserved_ports);
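
Turning sysctl_local_reserved_ports from a pointer into a fixed-size array removes a writable pointer indirection and makes the object's size visible to the static checkers. The sizing arithmetic, as a quick stand-alone check (userspace demo, not kernel code):

    #include <stdio.h>

    int main(void)
    {
        size_t longs = 65536 / 8 / sizeof(unsigned long);

        printf("%zu unsigned longs = %zu bytes\n",
               longs, longs * sizeof(unsigned long)); /* 8 KiB bitmap */
        return 0;
    }
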
73140diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
73141index e49db91..76a81de 100644
73142--- a/include/net/ip_fib.h
73143+++ b/include/net/ip_fib.h
73144@@ -167,7 +167,7 @@ extern __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh);
73145
73146 #define FIB_RES_SADDR(net, res) \
73147 ((FIB_RES_NH(res).nh_saddr_genid == \
73148- atomic_read(&(net)->ipv4.dev_addr_genid)) ? \
73149+ atomic_read_unchecked(&(net)->ipv4.dev_addr_genid)) ? \
73150 FIB_RES_NH(res).nh_saddr : \
73151 fib_info_update_nh_saddr((net), &FIB_RES_NH(res)))
73152 #define FIB_RES_GW(res) (FIB_RES_NH(res).nh_gw)
73153diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
73154index 4c062cc..3562c31 100644
73155--- a/include/net/ip_vs.h
73156+++ b/include/net/ip_vs.h
73157@@ -612,7 +612,7 @@ struct ip_vs_conn {
73158 struct ip_vs_conn *control; /* Master control connection */
73159 atomic_t n_control; /* Number of controlled ones */
73160 struct ip_vs_dest *dest; /* real server */
73161- atomic_t in_pkts; /* incoming packet counter */
73162+ atomic_unchecked_t in_pkts; /* incoming packet counter */
73163
73164 /* packet transmitter for different forwarding methods. If it
73165 mangles the packet, it must return NF_DROP or better NF_STOLEN,
73166@@ -761,7 +761,7 @@ struct ip_vs_dest {
73167 __be16 port; /* port number of the server */
73168 union nf_inet_addr addr; /* IP address of the server */
73169 volatile unsigned int flags; /* dest status flags */
73170- atomic_t conn_flags; /* flags to copy to conn */
73171+ atomic_unchecked_t conn_flags; /* flags to copy to conn */
73172 atomic_t weight; /* server weight */
73173
73174 atomic_t refcnt; /* reference counter */
73175@@ -1013,11 +1013,11 @@ struct netns_ipvs {
73176 /* ip_vs_lblc */
73177 int sysctl_lblc_expiration;
73178 struct ctl_table_header *lblc_ctl_header;
73179- struct ctl_table *lblc_ctl_table;
73180+ ctl_table_no_const *lblc_ctl_table;
73181 /* ip_vs_lblcr */
73182 int sysctl_lblcr_expiration;
73183 struct ctl_table_header *lblcr_ctl_header;
73184- struct ctl_table *lblcr_ctl_table;
73185+ ctl_table_no_const *lblcr_ctl_table;
73186 /* ip_vs_est */
73187 struct list_head est_list; /* estimator list */
73188 spinlock_t est_lock;
73189diff --git a/include/net/irda/ircomm_tty.h b/include/net/irda/ircomm_tty.h
73190index 80ffde3..968b0f4 100644
73191--- a/include/net/irda/ircomm_tty.h
73192+++ b/include/net/irda/ircomm_tty.h
73193@@ -35,6 +35,7 @@
73194 #include <linux/termios.h>
73195 #include <linux/timer.h>
73196 #include <linux/tty.h> /* struct tty_struct */
73197+#include <asm/local.h>
73198
73199 #include <net/irda/irias_object.h>
73200 #include <net/irda/ircomm_core.h>
73201diff --git a/include/net/iucv/af_iucv.h b/include/net/iucv/af_iucv.h
73202index 714cc9a..ea05f3e 100644
73203--- a/include/net/iucv/af_iucv.h
73204+++ b/include/net/iucv/af_iucv.h
73205@@ -149,7 +149,7 @@ struct iucv_skb_cb {
73206 struct iucv_sock_list {
73207 struct hlist_head head;
73208 rwlock_t lock;
73209- atomic_t autobind_name;
73210+ atomic_unchecked_t autobind_name;
73211 };
73212
73213 unsigned int iucv_sock_poll(struct file *file, struct socket *sock,
73214diff --git a/include/net/llc_c_ac.h b/include/net/llc_c_ac.h
73215index df83f69..9b640b8 100644
73216--- a/include/net/llc_c_ac.h
73217+++ b/include/net/llc_c_ac.h
73218@@ -87,7 +87,7 @@
73219 #define LLC_CONN_AC_STOP_SENDACK_TMR 70
73220 #define LLC_CONN_AC_START_SENDACK_TMR_IF_NOT_RUNNING 71
73221
73222-typedef int (*llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
73223+typedef int (* const llc_conn_action_t)(struct sock *sk, struct sk_buff *skb);
73224
73225 extern int llc_conn_ac_clear_remote_busy(struct sock *sk, struct sk_buff *skb);
73226 extern int llc_conn_ac_conn_ind(struct sock *sk, struct sk_buff *skb);
73227diff --git a/include/net/llc_c_ev.h b/include/net/llc_c_ev.h
73228index 6ca3113..f8026dd 100644
73229--- a/include/net/llc_c_ev.h
73230+++ b/include/net/llc_c_ev.h
73231@@ -125,8 +125,8 @@ static __inline__ struct llc_conn_state_ev *llc_conn_ev(struct sk_buff *skb)
73232 return (struct llc_conn_state_ev *)skb->cb;
73233 }
73234
73235-typedef int (*llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
73236-typedef int (*llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
73237+typedef int (* const llc_conn_ev_t)(struct sock *sk, struct sk_buff *skb);
73238+typedef int (* const llc_conn_ev_qfyr_t)(struct sock *sk, struct sk_buff *skb);
73239
73240 extern int llc_conn_ev_conn_req(struct sock *sk, struct sk_buff *skb);
73241 extern int llc_conn_ev_data_req(struct sock *sk, struct sk_buff *skb);
73242diff --git a/include/net/llc_c_st.h b/include/net/llc_c_st.h
73243index 0e79cfb..f46db31 100644
73244--- a/include/net/llc_c_st.h
73245+++ b/include/net/llc_c_st.h
73246@@ -37,7 +37,7 @@ struct llc_conn_state_trans {
73247 u8 next_state;
73248 llc_conn_ev_qfyr_t *ev_qualifiers;
73249 llc_conn_action_t *ev_actions;
73250-};
73251+} __do_const;
73252
73253 struct llc_conn_state {
73254 u8 current_state;
73255diff --git a/include/net/llc_s_ac.h b/include/net/llc_s_ac.h
73256index 37a3bbd..55a4241 100644
73257--- a/include/net/llc_s_ac.h
73258+++ b/include/net/llc_s_ac.h
73259@@ -23,7 +23,7 @@
73260 #define SAP_ACT_TEST_IND 9
73261
73262 /* All action functions must look like this */
73263-typedef int (*llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
73264+typedef int (* const llc_sap_action_t)(struct llc_sap *sap, struct sk_buff *skb);
73265
73266 extern int llc_sap_action_unitdata_ind(struct llc_sap *sap,
73267 struct sk_buff *skb);
73268diff --git a/include/net/llc_s_st.h b/include/net/llc_s_st.h
73269index 567c681..cd73ac0 100644
73270--- a/include/net/llc_s_st.h
73271+++ b/include/net/llc_s_st.h
73272@@ -20,7 +20,7 @@ struct llc_sap_state_trans {
73273 llc_sap_ev_t ev;
73274 u8 next_state;
73275 llc_sap_action_t *ev_actions;
73276-};
73277+} __do_const;
73278
73279 struct llc_sap_state {
73280 u8 curr_state;
73281diff --git a/include/net/mac80211.h b/include/net/mac80211.h
73282index 885898a..cdace34 100644
73283--- a/include/net/mac80211.h
73284+++ b/include/net/mac80211.h
73285@@ -4205,7 +4205,7 @@ struct rate_control_ops {
73286 void (*add_sta_debugfs)(void *priv, void *priv_sta,
73287 struct dentry *dir);
73288 void (*remove_sta_debugfs)(void *priv, void *priv_sta);
73289-};
73290+} __do_const;
73291
73292 static inline int rate_supported(struct ieee80211_sta *sta,
73293 enum ieee80211_band band,
73294diff --git a/include/net/neighbour.h b/include/net/neighbour.h
73295index 7e748ad..5c6229b 100644
73296--- a/include/net/neighbour.h
73297+++ b/include/net/neighbour.h
73298@@ -123,7 +123,7 @@ struct neigh_ops {
73299 void (*error_report)(struct neighbour *, struct sk_buff *);
73300 int (*output)(struct neighbour *, struct sk_buff *);
73301 int (*connected_output)(struct neighbour *, struct sk_buff *);
73302-};
73303+} __do_const;
73304
73305 struct pneigh_entry {
73306 struct pneigh_entry *next;
73307diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
73308index b176978..ea169f4 100644
73309--- a/include/net/net_namespace.h
73310+++ b/include/net/net_namespace.h
73311@@ -117,7 +117,7 @@ struct net {
73312 #endif
73313 struct netns_ipvs *ipvs;
73314 struct sock *diag_nlsk;
73315- atomic_t rt_genid;
73316+ atomic_unchecked_t rt_genid;
73317 };
73318
73319 /*
73320@@ -274,7 +274,11 @@ static inline struct net *read_pnet(struct net * const *pnet)
73321 #define __net_init __init
73322 #define __net_exit __exit_refok
73323 #define __net_initdata __initdata
73324+#ifdef CONSTIFY_PLUGIN
73325 #define __net_initconst __initconst
73326+#else
73327+#define __net_initconst __initdata
73328+#endif
73329 #endif
73330
73331 struct pernet_operations {
73332@@ -284,7 +288,7 @@ struct pernet_operations {
73333 void (*exit_batch)(struct list_head *net_exit_list);
73334 int *id;
73335 size_t size;
73336-};
73337+} __do_const;
73338
73339 /*
73340 * Use these carefully. If you implement a network device and it
73341@@ -332,12 +336,12 @@ static inline void unregister_net_sysctl_table(struct ctl_table_header *header)
73342
73343 static inline int rt_genid(struct net *net)
73344 {
73345- return atomic_read(&net->rt_genid);
73346+ return atomic_read_unchecked(&net->rt_genid);
73347 }
73348
73349 static inline void rt_genid_bump(struct net *net)
73350 {
73351- atomic_inc(&net->rt_genid);
73352+ atomic_inc_unchecked(&net->rt_genid);
73353 }
73354
73355 #endif /* __NET_NET_NAMESPACE_H */
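
rt_genid is a generation counter rather than a reference count: bumping it implicitly invalidates every cached route, because each cache entry records the generation it was created under and is discarded on mismatch, so wrap-around is harmless and the unchecked atomic fits. The pattern in miniature (C11, hypothetical names):

    #include <stdatomic.h>
    #include <stdbool.h>

    static atomic_int genid;

    struct cached_route {
            int genid_snapshot;     /* generation at creation time */
    };

    static bool cache_entry_valid(const struct cached_route *rt)
    {
            return rt->genid_snapshot == atomic_load(&genid);
    }

    /* one increment retires every outstanding cache entry at once */
    static void flush_all_routes(void)
    {
            atomic_fetch_add(&genid, 1);
    }
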
73356diff --git a/include/net/netdma.h b/include/net/netdma.h
73357index 8ba8ce2..99b7fff 100644
73358--- a/include/net/netdma.h
73359+++ b/include/net/netdma.h
73360@@ -24,7 +24,7 @@
73361 #include <linux/dmaengine.h>
73362 #include <linux/skbuff.h>
73363
73364-int dma_skb_copy_datagram_iovec(struct dma_chan* chan,
73365+int __intentional_overflow(3,5) dma_skb_copy_datagram_iovec(struct dma_chan* chan,
73366 struct sk_buff *skb, int offset, struct iovec *to,
73367 size_t len, struct dma_pinned_list *pinned_list);
73368
73369diff --git a/include/net/netlink.h b/include/net/netlink.h
73370index 9690b0f..87aded7 100644
73371--- a/include/net/netlink.h
73372+++ b/include/net/netlink.h
73373@@ -534,7 +534,7 @@ static inline void *nlmsg_get_pos(struct sk_buff *skb)
73374 static inline void nlmsg_trim(struct sk_buff *skb, const void *mark)
73375 {
73376 if (mark)
73377- skb_trim(skb, (unsigned char *) mark - skb->data);
73378+ skb_trim(skb, (const unsigned char *) mark - skb->data);
73379 }
73380
73381 /**
73382diff --git a/include/net/netns/conntrack.h b/include/net/netns/conntrack.h
73383index c9c0c53..53f24c3 100644
73384--- a/include/net/netns/conntrack.h
73385+++ b/include/net/netns/conntrack.h
73386@@ -12,10 +12,10 @@ struct nf_conntrack_ecache;
73387 struct nf_proto_net {
73388 #ifdef CONFIG_SYSCTL
73389 struct ctl_table_header *ctl_table_header;
73390- struct ctl_table *ctl_table;
73391+ ctl_table_no_const *ctl_table;
73392 #ifdef CONFIG_NF_CONNTRACK_PROC_COMPAT
73393 struct ctl_table_header *ctl_compat_header;
73394- struct ctl_table *ctl_compat_table;
73395+ ctl_table_no_const *ctl_compat_table;
73396 #endif
73397 #endif
73398 unsigned int users;
73399@@ -58,7 +58,7 @@ struct nf_ip_net {
73400 struct nf_icmp_net icmpv6;
73401 #if defined(CONFIG_SYSCTL) && defined(CONFIG_NF_CONNTRACK_PROC_COMPAT)
73402 struct ctl_table_header *ctl_table_header;
73403- struct ctl_table *ctl_table;
73404+ ctl_table_no_const *ctl_table;
73405 #endif
73406 };
73407
73408diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
73409index 2ba9de8..47bd6c7 100644
73410--- a/include/net/netns/ipv4.h
73411+++ b/include/net/netns/ipv4.h
73412@@ -67,7 +67,7 @@ struct netns_ipv4 {
73413 kgid_t sysctl_ping_group_range[2];
73414 long sysctl_tcp_mem[3];
73415
73416- atomic_t dev_addr_genid;
73417+ atomic_unchecked_t dev_addr_genid;
73418
73419 #ifdef CONFIG_IP_MROUTE
73420 #ifndef CONFIG_IP_MROUTE_MULTIPLE_TABLES
73421diff --git a/include/net/netns/ipv6.h b/include/net/netns/ipv6.h
73422index 005e2c2..023d340 100644
73423--- a/include/net/netns/ipv6.h
73424+++ b/include/net/netns/ipv6.h
73425@@ -71,7 +71,7 @@ struct netns_ipv6 {
73426 struct fib_rules_ops *mr6_rules_ops;
73427 #endif
73428 #endif
73429- atomic_t dev_addr_genid;
73430+ atomic_unchecked_t dev_addr_genid;
73431 };
73432
73433 #if IS_ENABLED(CONFIG_NF_DEFRAG_IPV6)
73434diff --git a/include/net/protocol.h b/include/net/protocol.h
73435index 047c047..b9dad15 100644
73436--- a/include/net/protocol.h
73437+++ b/include/net/protocol.h
73438@@ -44,7 +44,7 @@ struct net_protocol {
73439 void (*err_handler)(struct sk_buff *skb, u32 info);
73440 unsigned int no_policy:1,
73441 netns_ok:1;
73442-};
73443+} __do_const;
73444
73445 #if IS_ENABLED(CONFIG_IPV6)
73446 struct inet6_protocol {
73447@@ -57,7 +57,7 @@ struct inet6_protocol {
73448 u8 type, u8 code, int offset,
73449 __be32 info);
73450 unsigned int flags; /* INET6_PROTO_xxx */
73451-};
73452+} __do_const;
73453
73454 #define INET6_PROTO_NOPOLICY 0x1
73455 #define INET6_PROTO_FINAL 0x2
73456diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
73457index 7026648..584cc8c 100644
73458--- a/include/net/rtnetlink.h
73459+++ b/include/net/rtnetlink.h
73460@@ -81,7 +81,7 @@ struct rtnl_link_ops {
73461 const struct net_device *dev);
73462 unsigned int (*get_num_tx_queues)(void);
73463 unsigned int (*get_num_rx_queues)(void);
73464-};
73465+} __do_const;
73466
73467 extern int __rtnl_link_register(struct rtnl_link_ops *ops);
73468 extern void __rtnl_link_unregister(struct rtnl_link_ops *ops);
73469diff --git a/include/net/sctp/sctp.h b/include/net/sctp/sctp.h
73470index cd89510..d67810f 100644
73471--- a/include/net/sctp/sctp.h
73472+++ b/include/net/sctp/sctp.h
73473@@ -330,9 +330,9 @@ do { \
73474
73475 #else /* SCTP_DEBUG */
73476
73477-#define SCTP_DEBUG_PRINTK(whatever...)
73478-#define SCTP_DEBUG_PRINTK_CONT(fmt, args...)
73479-#define SCTP_DEBUG_PRINTK_IPADDR(whatever...)
73480+#define SCTP_DEBUG_PRINTK(whatever...) do {} while (0)
73481+#define SCTP_DEBUG_PRINTK_CONT(fmt, args...) do {} while (0)
73482+#define SCTP_DEBUG_PRINTK_IPADDR(whatever...) do {} while (0)
73483 #define SCTP_ENABLE_DEBUG
73484 #define SCTP_DISABLE_DEBUG
73485 #define SCTP_ASSERT(expr, str, func)
73486diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
73487index 2a82d13..62a31c2 100644
73488--- a/include/net/sctp/sm.h
73489+++ b/include/net/sctp/sm.h
73490@@ -87,7 +87,7 @@ typedef void (sctp_timer_event_t) (unsigned long);
73491 typedef struct {
73492 sctp_state_fn_t *fn;
73493 const char *name;
73494-} sctp_sm_table_entry_t;
73495+} __do_const sctp_sm_table_entry_t;
73496
73497 /* A naming convention of "sctp_sf_xxx" applies to all the state functions
73498 * currently in use.
73499@@ -299,7 +299,7 @@ __u32 sctp_generate_tag(const struct sctp_endpoint *);
73500 __u32 sctp_generate_tsn(const struct sctp_endpoint *);
73501
73502 /* Extern declarations for major data structures. */
73503-extern sctp_timer_event_t *sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
73504+extern sctp_timer_event_t * const sctp_timer_events[SCTP_NUM_TIMEOUT_TYPES];
73505
73506
73507 /* Get the size of a DATA chunk payload. */
73508diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
73509index 1bd4c41..9250b5b 100644
73510--- a/include/net/sctp/structs.h
73511+++ b/include/net/sctp/structs.h
73512@@ -516,7 +516,7 @@ struct sctp_pf {
73513 struct sctp_association *asoc);
73514 void (*addr_v4map) (struct sctp_sock *, union sctp_addr *);
73515 struct sctp_af *af;
73516-};
73517+} __do_const;
73518
73519
73520 /* Structure to track chunk fragments that have been acked, but peer
73521diff --git a/include/net/sock.h b/include/net/sock.h
73522index 66772cf..25bc45b 100644
73523--- a/include/net/sock.h
73524+++ b/include/net/sock.h
73525@@ -325,7 +325,7 @@ struct sock {
73526 #ifdef CONFIG_RPS
73527 __u32 sk_rxhash;
73528 #endif
73529- atomic_t sk_drops;
73530+ atomic_unchecked_t sk_drops;
73531 int sk_rcvbuf;
73532
73533 struct sk_filter __rcu *sk_filter;
73534@@ -1797,7 +1797,7 @@ static inline void sk_nocaps_add(struct sock *sk, netdev_features_t flags)
73535 }
73536
73537 static inline int skb_do_copy_data_nocache(struct sock *sk, struct sk_buff *skb,
73538- char __user *from, char *to,
73539+ char __user *from, unsigned char *to,
73540 int copy, int offset)
73541 {
73542 if (skb->ip_summed == CHECKSUM_NONE) {
73543@@ -2056,7 +2056,7 @@ static inline void sk_stream_moderate_sndbuf(struct sock *sk)
73544 }
73545 }
73546
73547-struct sk_buff *sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
73548+struct sk_buff * __intentional_overflow(0) sk_stream_alloc_skb(struct sock *sk, int size, gfp_t gfp);
73549
73550 /**
73551 * sk_page_frag - return an appropriate page_frag
73552diff --git a/include/net/tcp.h b/include/net/tcp.h
73553index 5bba80f..8520a82 100644
73554--- a/include/net/tcp.h
73555+++ b/include/net/tcp.h
73556@@ -524,7 +524,7 @@ extern void tcp_retransmit_timer(struct sock *sk);
73557 extern void tcp_xmit_retransmit_queue(struct sock *);
73558 extern void tcp_simple_retransmit(struct sock *);
73559 extern int tcp_trim_head(struct sock *, struct sk_buff *, u32);
73560-extern int tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
73561+extern int __intentional_overflow(3) tcp_fragment(struct sock *, struct sk_buff *, u32, unsigned int);
73562
73563 extern void tcp_send_probe0(struct sock *);
73564 extern void tcp_send_partial(struct sock *);
73565@@ -697,8 +697,8 @@ struct tcp_skb_cb {
73566 struct inet6_skb_parm h6;
73567 #endif
73568 } header; /* For incoming frames */
73569- __u32 seq; /* Starting sequence number */
73570- __u32 end_seq; /* SEQ + FIN + SYN + datalen */
73571+ __u32 seq __intentional_overflow(0); /* Starting sequence number */
73572+ __u32 end_seq __intentional_overflow(0); /* SEQ + FIN + SYN + datalen */
73573 __u32 when; /* used to compute rtt's */
73574 __u8 tcp_flags; /* TCP header flags. (tcp[13]) */
73575
73576@@ -712,7 +712,7 @@ struct tcp_skb_cb {
73577
73578 __u8 ip_dsfield; /* IPv4 tos or IPv6 dsfield */
73579 /* 1 byte hole */
73580- __u32 ack_seq; /* Sequence number ACK'd */
73581+ __u32 ack_seq __intentional_overflow(0); /* Sequence number ACK'd */
73582 };
73583
73584 #define TCP_SKB_CB(__skb) ((struct tcp_skb_cb *)&((__skb)->cb[0]))
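
TCP sequence numbers are arithmetic modulo 2^32 by design, which is exactly what the __intentional_overflow(0) annotations on seq, end_seq and ack_seq tell the size-overflow plugin. The wrap-safe comparison these fields feed into is the classic idiom (a sketch in the style of the kernel's before()/after() helpers):

    #include <stdbool.h>
    #include <stdint.h>

    /* true if a precedes b in modulo-2^32 sequence space */
    static inline bool seq_before(uint32_t a, uint32_t b)
    {
            return (int32_t)(a - b) < 0;
    }
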
73585diff --git a/include/net/udp.h b/include/net/udp.h
73586index 065f379..ad99eed 100644
73587--- a/include/net/udp.h
73588+++ b/include/net/udp.h
73589@@ -181,6 +181,7 @@ extern int udp_get_port(struct sock *sk, unsigned short snum,
73590 extern void udp_err(struct sk_buff *, u32);
73591 extern int udp_sendmsg(struct kiocb *iocb, struct sock *sk,
73592 struct msghdr *msg, size_t len);
73593+extern int udp_push_pending_frames(struct sock *sk);
73594 extern void udp_flush_pending_frames(struct sock *sk);
73595 extern int udp_rcv(struct sk_buff *skb);
73596 extern int udp_ioctl(struct sock *sk, int cmd, unsigned long arg);
73597diff --git a/include/net/xfrm.h b/include/net/xfrm.h
73598index 94ce082..62b278d 100644
73599--- a/include/net/xfrm.h
73600+++ b/include/net/xfrm.h
73601@@ -305,7 +305,7 @@ struct xfrm_policy_afinfo {
73602 struct net_device *dev,
73603 const struct flowi *fl);
73604 struct dst_entry *(*blackhole_route)(struct net *net, struct dst_entry *orig);
73605-};
73606+} __do_const;
73607
73608 extern int xfrm_policy_register_afinfo(struct xfrm_policy_afinfo *afinfo);
73609 extern int xfrm_policy_unregister_afinfo(struct xfrm_policy_afinfo *afinfo);
73610@@ -341,7 +341,7 @@ struct xfrm_state_afinfo {
73611 struct sk_buff *skb);
73612 int (*transport_finish)(struct sk_buff *skb,
73613 int async);
73614-};
73615+} __do_const;
73616
73617 extern int xfrm_state_register_afinfo(struct xfrm_state_afinfo *afinfo);
73618 extern int xfrm_state_unregister_afinfo(struct xfrm_state_afinfo *afinfo);
73619@@ -424,7 +424,7 @@ struct xfrm_mode {
73620 struct module *owner;
73621 unsigned int encap;
73622 int flags;
73623-};
73624+} __do_const;
73625
73626 /* Flags for xfrm_mode. */
73627 enum {
73628@@ -521,7 +521,7 @@ struct xfrm_policy {
73629 struct timer_list timer;
73630
73631 struct flow_cache_object flo;
73632- atomic_t genid;
73633+ atomic_unchecked_t genid;
73634 u32 priority;
73635 u32 index;
73636 struct xfrm_mark mark;
73637diff --git a/include/rdma/iw_cm.h b/include/rdma/iw_cm.h
73638index 1a046b1..ee0bef0 100644
73639--- a/include/rdma/iw_cm.h
73640+++ b/include/rdma/iw_cm.h
73641@@ -122,7 +122,7 @@ struct iw_cm_verbs {
73642 int backlog);
73643
73644 int (*destroy_listen)(struct iw_cm_id *cm_id);
73645-};
73646+} __no_const;
73647
73648 /**
73649 * iw_create_cm_id - Create an IW CM identifier.
73650diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
73651index e1379b4..67eafbe 100644
73652--- a/include/scsi/libfc.h
73653+++ b/include/scsi/libfc.h
73654@@ -762,6 +762,7 @@ struct libfc_function_template {
73655 */
73656 void (*disc_stop_final) (struct fc_lport *);
73657 };
73658+typedef struct libfc_function_template __no_const libfc_function_template_no_const;
73659
73660 /**
73661 * struct fc_disc - Discovery context
73662@@ -866,7 +867,7 @@ struct fc_lport {
73663 struct fc_vport *vport;
73664
73665 /* Operational Information */
73666- struct libfc_function_template tt;
73667+ libfc_function_template_no_const tt;
73668 u8 link_up;
73669 u8 qfull;
73670 enum fc_lport_state state;
73671diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
73672index cc64587..608f523 100644
73673--- a/include/scsi/scsi_device.h
73674+++ b/include/scsi/scsi_device.h
73675@@ -171,9 +171,9 @@ struct scsi_device {
73676 unsigned int max_device_blocked; /* what device_blocked counts down from */
73677 #define SCSI_DEFAULT_DEVICE_BLOCKED 3
73678
73679- atomic_t iorequest_cnt;
73680- atomic_t iodone_cnt;
73681- atomic_t ioerr_cnt;
73682+ atomic_unchecked_t iorequest_cnt;
73683+ atomic_unchecked_t iodone_cnt;
73684+ atomic_unchecked_t ioerr_cnt;
73685
73686 struct device sdev_gendev,
73687 sdev_dev;
73688diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
73689index b797e8f..8e2c3aa 100644
73690--- a/include/scsi/scsi_transport_fc.h
73691+++ b/include/scsi/scsi_transport_fc.h
73692@@ -751,7 +751,8 @@ struct fc_function_template {
73693 unsigned long show_host_system_hostname:1;
73694
73695 unsigned long disable_target_scan:1;
73696-};
73697+} __do_const;
73698+typedef struct fc_function_template __no_const fc_function_template_no_const;
73699
73700
73701 /**
73702diff --git a/include/sound/compress_driver.h b/include/sound/compress_driver.h
73703index 9031a26..750d592 100644
73704--- a/include/sound/compress_driver.h
73705+++ b/include/sound/compress_driver.h
73706@@ -128,7 +128,7 @@ struct snd_compr_ops {
73707 struct snd_compr_caps *caps);
73708 int (*get_codec_caps) (struct snd_compr_stream *stream,
73709 struct snd_compr_codec_caps *codec);
73710-};
73711+} __no_const;
73712
73713 /**
73714 * struct snd_compr: Compressed device
73715diff --git a/include/sound/soc.h b/include/sound/soc.h
73716index 85c1522..f44bad1 100644
73717--- a/include/sound/soc.h
73718+++ b/include/sound/soc.h
73719@@ -781,7 +781,7 @@ struct snd_soc_codec_driver {
73720 /* probe ordering - for components with runtime dependencies */
73721 int probe_order;
73722 int remove_order;
73723-};
73724+} __do_const;
73725
73726 /* SoC platform interface */
73727 struct snd_soc_platform_driver {
73728@@ -827,7 +827,7 @@ struct snd_soc_platform_driver {
73729 unsigned int (*read)(struct snd_soc_platform *, unsigned int);
73730 int (*write)(struct snd_soc_platform *, unsigned int, unsigned int);
73731 int (*bespoke_trigger)(struct snd_pcm_substream *, int);
73732-};
73733+} __do_const;
73734
73735 struct snd_soc_platform {
73736 const char *name;
73737diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
73738index 4ea4f98..a63629b 100644
73739--- a/include/target/target_core_base.h
73740+++ b/include/target/target_core_base.h
73741@@ -653,7 +653,7 @@ struct se_device {
73742 spinlock_t stats_lock;
73743 /* Active commands on this virtual SE device */
73744 atomic_t simple_cmds;
73745- atomic_t dev_ordered_id;
73746+ atomic_unchecked_t dev_ordered_id;
73747 atomic_t dev_ordered_sync;
73748 atomic_t dev_qf_count;
73749 int export_count;
73750diff --git a/include/trace/events/fs.h b/include/trace/events/fs.h
73751new file mode 100644
73752index 0000000..fb634b7
73753--- /dev/null
73754+++ b/include/trace/events/fs.h
73755@@ -0,0 +1,53 @@
73756+#undef TRACE_SYSTEM
73757+#define TRACE_SYSTEM fs
73758+
73759+#if !defined(_TRACE_FS_H) || defined(TRACE_HEADER_MULTI_READ)
73760+#define _TRACE_FS_H
73761+
73762+#include <linux/fs.h>
73763+#include <linux/tracepoint.h>
73764+
73765+TRACE_EVENT(do_sys_open,
73766+
73767+ TP_PROTO(const char *filename, int flags, int mode),
73768+
73769+ TP_ARGS(filename, flags, mode),
73770+
73771+ TP_STRUCT__entry(
73772+ __string( filename, filename )
73773+ __field( int, flags )
73774+ __field( int, mode )
73775+ ),
73776+
73777+ TP_fast_assign(
73778+ __assign_str(filename, filename);
73779+ __entry->flags = flags;
73780+ __entry->mode = mode;
73781+ ),
73782+
73783+ TP_printk("\"%s\" %x %o",
73784+ __get_str(filename), __entry->flags, __entry->mode)
73785+);
73786+
73787+TRACE_EVENT(open_exec,
73788+
73789+ TP_PROTO(const char *filename),
73790+
73791+ TP_ARGS(filename),
73792+
73793+ TP_STRUCT__entry(
73794+ __string( filename, filename )
73795+ ),
73796+
73797+ TP_fast_assign(
73798+ __assign_str(filename, filename);
73799+ ),
73800+
73801+ TP_printk("\"%s\"",
73802+ __get_str(filename))
73803+);
73804+
73805+#endif /* _TRACE_FS_H */
73806+
73807+/* This part must be outside protection */
73808+#include <trace/define_trace.h>
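
Each TRACE_EVENT() above expands into a trace_<name>() inline with exactly the TP_PROTO signature; one translation unit defines the event bodies by setting CREATE_TRACE_POINTS before the include. A usage sketch (the call site is illustrative, not lifted from this patch):

    #define CREATE_TRACE_POINTS
    #include <trace/events/fs.h>

    long do_sys_open_demo(const char *filename, int flags, int mode)
    {
            trace_do_sys_open(filename, flags, mode);
            /* ... the actual open(2) work would follow ... */
            return 0;
    }
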
73809diff --git a/include/trace/events/irq.h b/include/trace/events/irq.h
73810index 1c09820..7f5ec79 100644
73811--- a/include/trace/events/irq.h
73812+++ b/include/trace/events/irq.h
73813@@ -36,7 +36,7 @@ struct softirq_action;
73814 */
73815 TRACE_EVENT(irq_handler_entry,
73816
73817- TP_PROTO(int irq, struct irqaction *action),
73818+ TP_PROTO(int irq, const struct irqaction *action),
73819
73820 TP_ARGS(irq, action),
73821
73822@@ -66,7 +66,7 @@ TRACE_EVENT(irq_handler_entry,
73823 */
73824 TRACE_EVENT(irq_handler_exit,
73825
73826- TP_PROTO(int irq, struct irqaction *action, int ret),
73827+ TP_PROTO(int irq, const struct irqaction *action, int ret),
73828
73829 TP_ARGS(irq, action, ret),
73830
73831diff --git a/include/uapi/linux/a.out.h b/include/uapi/linux/a.out.h
73832index 7caf44c..23c6f27 100644
73833--- a/include/uapi/linux/a.out.h
73834+++ b/include/uapi/linux/a.out.h
73835@@ -39,6 +39,14 @@ enum machine_type {
73836 M_MIPS2 = 152 /* MIPS R6000/R4000 binary */
73837 };
73838
73839+/* Constants for the N_FLAGS field */
73840+#define F_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
73841+#define F_PAX_EMUTRAMP 2 /* Emulate trampolines */
73842+#define F_PAX_MPROTECT 4 /* Restrict mprotect() */
73843+#define F_PAX_RANDMMAP 8 /* Randomize mmap() base */
73844+/*#define F_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
73845+#define F_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
73846+
73847 #if !defined (N_MAGIC)
73848 #define N_MAGIC(exec) ((exec).a_info & 0xffff)
73849 #endif
73850diff --git a/include/uapi/linux/byteorder/little_endian.h b/include/uapi/linux/byteorder/little_endian.h
73851index d876736..ccce5c0 100644
73852--- a/include/uapi/linux/byteorder/little_endian.h
73853+++ b/include/uapi/linux/byteorder/little_endian.h
73854@@ -42,51 +42,51 @@
73855
73856 static inline __le64 __cpu_to_le64p(const __u64 *p)
73857 {
73858- return (__force __le64)*p;
73859+ return (__force const __le64)*p;
73860 }
73861-static inline __u64 __le64_to_cpup(const __le64 *p)
73862+static inline __u64 __intentional_overflow(-1) __le64_to_cpup(const __le64 *p)
73863 {
73864- return (__force __u64)*p;
73865+ return (__force const __u64)*p;
73866 }
73867 static inline __le32 __cpu_to_le32p(const __u32 *p)
73868 {
73869- return (__force __le32)*p;
73870+ return (__force const __le32)*p;
73871 }
73872 static inline __u32 __le32_to_cpup(const __le32 *p)
73873 {
73874- return (__force __u32)*p;
73875+ return (__force const __u32)*p;
73876 }
73877 static inline __le16 __cpu_to_le16p(const __u16 *p)
73878 {
73879- return (__force __le16)*p;
73880+ return (__force const __le16)*p;
73881 }
73882 static inline __u16 __le16_to_cpup(const __le16 *p)
73883 {
73884- return (__force __u16)*p;
73885+ return (__force const __u16)*p;
73886 }
73887 static inline __be64 __cpu_to_be64p(const __u64 *p)
73888 {
73889- return (__force __be64)__swab64p(p);
73890+ return (__force const __be64)__swab64p(p);
73891 }
73892 static inline __u64 __be64_to_cpup(const __be64 *p)
73893 {
73894- return __swab64p((__u64 *)p);
73895+ return __swab64p((const __u64 *)p);
73896 }
73897 static inline __be32 __cpu_to_be32p(const __u32 *p)
73898 {
73899- return (__force __be32)__swab32p(p);
73900+ return (__force const __be32)__swab32p(p);
73901 }
73902-static inline __u32 __be32_to_cpup(const __be32 *p)
73903+static inline __u32 __intentional_overflow(-1) __be32_to_cpup(const __be32 *p)
73904 {
73905- return __swab32p((__u32 *)p);
73906+ return __swab32p((const __u32 *)p);
73907 }
73908 static inline __be16 __cpu_to_be16p(const __u16 *p)
73909 {
73910- return (__force __be16)__swab16p(p);
73911+ return (__force const __be16)__swab16p(p);
73912 }
73913 static inline __u16 __be16_to_cpup(const __be16 *p)
73914 {
73915- return __swab16p((__u16 *)p);
73916+ return __swab16p((const __u16 *)p);
73917 }
73918 #define __cpu_to_le64s(x) do { (void)(x); } while (0)
73919 #define __le64_to_cpus(x) do { (void)(x); } while (0)
73920diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
73921index ef6103b..d4e65dd 100644
73922--- a/include/uapi/linux/elf.h
73923+++ b/include/uapi/linux/elf.h
73924@@ -37,6 +37,17 @@ typedef __s64 Elf64_Sxword;
73925 #define PT_GNU_EH_FRAME 0x6474e550
73926
73927 #define PT_GNU_STACK (PT_LOOS + 0x474e551)
73928+#define PT_GNU_RELRO (PT_LOOS + 0x474e552)
73929+
73930+#define PT_PAX_FLAGS (PT_LOOS + 0x5041580)
73931+
73932+/* Constants for the e_flags field */
73933+#define EF_PAX_PAGEEXEC 1 /* Paging based non-executable pages */
73934+#define EF_PAX_EMUTRAMP 2 /* Emulate trampolines */
73935+#define EF_PAX_MPROTECT 4 /* Restrict mprotect() */
73936+#define EF_PAX_RANDMMAP 8 /* Randomize mmap() base */
73937+/*#define EF_PAX_RANDEXEC 16*/ /* Randomize ET_EXEC base */
73938+#define EF_PAX_SEGMEXEC 32 /* Segmentation based non-executable pages */
73939
73940 /*
73941 * Extended Numbering
73942@@ -94,6 +105,8 @@ typedef __s64 Elf64_Sxword;
73943 #define DT_DEBUG 21
73944 #define DT_TEXTREL 22
73945 #define DT_JMPREL 23
73946+#define DT_FLAGS 30
73947+ #define DF_TEXTREL 0x00000004
73948 #define DT_ENCODING 32
73949 #define OLD_DT_LOOS 0x60000000
73950 #define DT_LOOS 0x6000000d
73951@@ -240,6 +253,19 @@ typedef struct elf64_hdr {
73952 #define PF_W 0x2
73953 #define PF_X 0x1
73954
73955+#define PF_PAGEEXEC (1U << 4) /* Enable PAGEEXEC */
73956+#define PF_NOPAGEEXEC (1U << 5) /* Disable PAGEEXEC */
73957+#define PF_SEGMEXEC (1U << 6) /* Enable SEGMEXEC */
73958+#define PF_NOSEGMEXEC (1U << 7) /* Disable SEGMEXEC */
73959+#define PF_MPROTECT (1U << 8) /* Enable MPROTECT */
73960+#define PF_NOMPROTECT (1U << 9) /* Disable MPROTECT */
73961+/*#define PF_RANDEXEC (1U << 10)*/ /* Enable RANDEXEC */
73962+/*#define PF_NORANDEXEC (1U << 11)*/ /* Disable RANDEXEC */
73963+#define PF_EMUTRAMP (1U << 12) /* Enable EMUTRAMP */
73964+#define PF_NOEMUTRAMP (1U << 13) /* Disable EMUTRAMP */
73965+#define PF_RANDMMAP (1U << 14) /* Enable RANDMMAP */
73966+#define PF_NORANDMMAP (1U << 15) /* Disable RANDMMAP */
73967+
73968 typedef struct elf32_phdr{
73969 Elf32_Word p_type;
73970 Elf32_Off p_offset;
73971@@ -332,6 +358,8 @@ typedef struct elf64_shdr {
73972 #define EI_OSABI 7
73973 #define EI_PAD 8
73974
73975+#define EI_PAX 14
73976+
73977 #define ELFMAG0 0x7f /* EI_MAG */
73978 #define ELFMAG1 'E'
73979 #define ELFMAG2 'L'
73980diff --git a/include/uapi/linux/personality.h b/include/uapi/linux/personality.h
73981index aa169c4..6a2771d 100644
73982--- a/include/uapi/linux/personality.h
73983+++ b/include/uapi/linux/personality.h
73984@@ -30,6 +30,7 @@ enum {
73985 #define PER_CLEAR_ON_SETID (READ_IMPLIES_EXEC | \
73986 ADDR_NO_RANDOMIZE | \
73987 ADDR_COMPAT_LAYOUT | \
73988+ ADDR_LIMIT_3GB | \
73989 MMAP_PAGE_ZERO)
73990
73991 /*
73992diff --git a/include/uapi/linux/screen_info.h b/include/uapi/linux/screen_info.h
73993index 7530e74..e714828 100644
73994--- a/include/uapi/linux/screen_info.h
73995+++ b/include/uapi/linux/screen_info.h
73996@@ -43,7 +43,8 @@ struct screen_info {
73997 __u16 pages; /* 0x32 */
73998 __u16 vesa_attributes; /* 0x34 */
73999 __u32 capabilities; /* 0x36 */
74000- __u8 _reserved[6]; /* 0x3a */
74001+ __u16 vesapm_size; /* 0x3a */
74002+ __u8 _reserved[4]; /* 0x3c */
74003 } __attribute__((packed));
74004
74005 #define VIDEO_TYPE_MDA 0x10 /* Monochrome Text Display */
74006diff --git a/include/uapi/linux/swab.h b/include/uapi/linux/swab.h
74007index 0e011eb..82681b1 100644
74008--- a/include/uapi/linux/swab.h
74009+++ b/include/uapi/linux/swab.h
74010@@ -43,7 +43,7 @@
74011 * ___swab16, ___swab32, ___swab64, ___swahw32, ___swahb32
74012 */
74013
74014-static inline __attribute_const__ __u16 __fswab16(__u16 val)
74015+static inline __intentional_overflow(-1) __attribute_const__ __u16 __fswab16(__u16 val)
74016 {
74017 #ifdef __HAVE_BUILTIN_BSWAP16__
74018 return __builtin_bswap16(val);
74019@@ -54,7 +54,7 @@ static inline __attribute_const__ __u16 __fswab16(__u16 val)
74020 #endif
74021 }
74022
74023-static inline __attribute_const__ __u32 __fswab32(__u32 val)
74024+static inline __intentional_overflow(-1) __attribute_const__ __u32 __fswab32(__u32 val)
74025 {
74026 #ifdef __HAVE_BUILTIN_BSWAP32__
74027 return __builtin_bswap32(val);
74028@@ -65,7 +65,7 @@ static inline __attribute_const__ __u32 __fswab32(__u32 val)
74029 #endif
74030 }
74031
74032-static inline __attribute_const__ __u64 __fswab64(__u64 val)
74033+static inline __intentional_overflow(-1) __attribute_const__ __u64 __fswab64(__u64 val)
74034 {
74035 #ifdef __HAVE_BUILTIN_BSWAP64__
74036 return __builtin_bswap64(val);
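
The __intentional_overflow(-1) markers exempt the byte-swap helpers from the size-overflow plugin, since their shift-and-mask fallbacks deliberately juggle full-width bit patterns. For reference, the fallback that __builtin_bswap32 stands in for is the classic:

    #include <stdint.h>

    static inline uint32_t swab32_fallback(uint32_t val)
    {
            return ((val & 0x000000ffU) << 24) |
                   ((val & 0x0000ff00U) <<  8) |
                   ((val & 0x00ff0000U) >>  8) |
                   ((val & 0xff000000U) >> 24);
    }
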
74037diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
74038index 6d67213..8dab561 100644
74039--- a/include/uapi/linux/sysctl.h
74040+++ b/include/uapi/linux/sysctl.h
74041@@ -155,7 +155,11 @@ enum
74042 KERN_PANIC_ON_NMI=76, /* int: whether we will panic on an unrecovered */
74043 };
74044
74045-
74046+#ifdef CONFIG_PAX_SOFTMODE
74047+enum {
74048+ PAX_SOFTMODE=1 /* PaX: disable/enable soft mode */
74049+};
74050+#endif
74051
74052 /* CTL_VM names: */
74053 enum
74054diff --git a/include/uapi/linux/xattr.h b/include/uapi/linux/xattr.h
74055index e4629b9..6958086 100644
74056--- a/include/uapi/linux/xattr.h
74057+++ b/include/uapi/linux/xattr.h
74058@@ -63,5 +63,9 @@
74059 #define XATTR_POSIX_ACL_DEFAULT "posix_acl_default"
74060 #define XATTR_NAME_POSIX_ACL_DEFAULT XATTR_SYSTEM_PREFIX XATTR_POSIX_ACL_DEFAULT
74061
74062+/* User namespace */
74063+#define XATTR_PAX_PREFIX XATTR_USER_PREFIX "pax."
74064+#define XATTR_PAX_FLAGS_SUFFIX "flags"
74065+#define XATTR_NAME_PAX_FLAGS XATTR_PAX_PREFIX XATTR_PAX_FLAGS_SUFFIX
74066
74067 #endif /* _UAPI_LINUX_XATTR_H */
74068diff --git a/include/video/udlfb.h b/include/video/udlfb.h
74069index f9466fa..f4e2b81 100644
74070--- a/include/video/udlfb.h
74071+++ b/include/video/udlfb.h
74072@@ -53,10 +53,10 @@ struct dlfb_data {
74073 u32 pseudo_palette[256];
74074 int blank_mode; /*one of FB_BLANK_ */
74075 /* blit-only rendering path metrics, exposed through sysfs */
74076- atomic_t bytes_rendered; /* raw pixel-bytes driver asked to render */
74077- atomic_t bytes_identical; /* saved effort with backbuffer comparison */
74078- atomic_t bytes_sent; /* to usb, after compression including overhead */
74079- atomic_t cpu_kcycles_used; /* transpired during pixel processing */
74080+ atomic_unchecked_t bytes_rendered; /* raw pixel-bytes driver asked to render */
74081+ atomic_unchecked_t bytes_identical; /* saved effort with backbuffer comparison */
74082+ atomic_unchecked_t bytes_sent; /* to usb, after compression including overhead */
74083+ atomic_unchecked_t cpu_kcycles_used; /* transpired during pixel processing */
74084 };
74085
74086 #define NR_USB_REQUEST_I2C_SUB_IO 0x02
74087diff --git a/include/video/uvesafb.h b/include/video/uvesafb.h
74088index 1a91850..28573f8 100644
74089--- a/include/video/uvesafb.h
74090+++ b/include/video/uvesafb.h
74091@@ -122,6 +122,7 @@ struct uvesafb_par {
74092 u8 ypan; /* 0 - nothing, 1 - ypan, 2 - ywrap */
74093 u8 pmi_setpal; /* PMI for palette changes */
74094 u16 *pmi_base; /* protected mode interface location */
74095+ u8 *pmi_code; /* protected mode code location */
74096 void *pmi_start;
74097 void *pmi_pal;
74098 u8 *vbe_state_orig; /*
74099diff --git a/init/Kconfig b/init/Kconfig
74100index 2d9b831..ae4c8ac 100644
74101--- a/init/Kconfig
74102+++ b/init/Kconfig
74103@@ -1029,6 +1029,7 @@ endif # CGROUPS
74104
74105 config CHECKPOINT_RESTORE
74106 bool "Checkpoint/restore support" if EXPERT
74107+ depends on !GRKERNSEC
74108 default n
74109 help
74110 Enables additional kernel features in a sake of checkpoint/restore.
74111@@ -1516,7 +1517,7 @@ config SLUB_DEBUG
74112
74113 config COMPAT_BRK
74114 bool "Disable heap randomization"
74115- default y
74116+ default n
74117 help
74118 Randomizing heap placement makes heap exploits harder, but it
74119 also breaks ancient binaries (including anything libc5 based).
74120@@ -1779,7 +1780,7 @@ config INIT_ALL_POSSIBLE
74121 config STOP_MACHINE
74122 bool
74123 default y
74124- depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU
74125+ depends on (SMP && MODULE_UNLOAD) || HOTPLUG_CPU || GRKERNSEC
74126 help
74127 Need stop_machine() primitive.
74128
74129diff --git a/init/Makefile b/init/Makefile
74130index 7bc47ee..6da2dc7 100644
74131--- a/init/Makefile
74132+++ b/init/Makefile
74133@@ -2,6 +2,9 @@
74134 # Makefile for the linux kernel.
74135 #
74136
74137+ccflags-y := $(GCC_PLUGINS_CFLAGS)
74138+asflags-y := $(GCC_PLUGINS_AFLAGS)
74139+
74140 obj-y := main.o version.o mounts.o
74141 ifneq ($(CONFIG_BLK_DEV_INITRD),y)
74142 obj-y += noinitramfs.o
74143diff --git a/init/do_mounts.c b/init/do_mounts.c
74144index a2b49f2..03a0e17c 100644
74145--- a/init/do_mounts.c
74146+++ b/init/do_mounts.c
74147@@ -355,11 +355,11 @@ static void __init get_fs_names(char *page)
74148 static int __init do_mount_root(char *name, char *fs, int flags, void *data)
74149 {
74150 struct super_block *s;
74151- int err = sys_mount(name, "/root", fs, flags, data);
74152+ int err = sys_mount((char __force_user *)name, (char __force_user *)"/root", (char __force_user *)fs, flags, (void __force_user *)data);
74153 if (err)
74154 return err;
74155
74156- sys_chdir("/root");
74157+ sys_chdir((const char __force_user *)"/root");
74158 s = current->fs->pwd.dentry->d_sb;
74159 ROOT_DEV = s->s_dev;
74160 printk(KERN_INFO
74161@@ -480,18 +480,18 @@ void __init change_floppy(char *fmt, ...)
74162 va_start(args, fmt);
74163 vsprintf(buf, fmt, args);
74164 va_end(args);
74165- fd = sys_open("/dev/root", O_RDWR | O_NDELAY, 0);
74166+ fd = sys_open((char __user *)"/dev/root", O_RDWR | O_NDELAY, 0);
74167 if (fd >= 0) {
74168 sys_ioctl(fd, FDEJECT, 0);
74169 sys_close(fd);
74170 }
74171 printk(KERN_NOTICE "VFS: Insert %s and press ENTER\n", buf);
74172- fd = sys_open("/dev/console", O_RDWR, 0);
74173+ fd = sys_open((__force const char __user *)"/dev/console", O_RDWR, 0);
74174 if (fd >= 0) {
74175 sys_ioctl(fd, TCGETS, (long)&termios);
74176 termios.c_lflag &= ~ICANON;
74177 sys_ioctl(fd, TCSETSF, (long)&termios);
74178- sys_read(fd, &c, 1);
74179+ sys_read(fd, (char __user *)&c, 1);
74180 termios.c_lflag |= ICANON;
74181 sys_ioctl(fd, TCSETSF, (long)&termios);
74182 sys_close(fd);
74183@@ -585,6 +585,6 @@ void __init prepare_namespace(void)
74184 mount_root();
74185 out:
74186 devtmpfs_mount("dev");
74187- sys_mount(".", "/", NULL, MS_MOVE, NULL);
74188- sys_chroot(".");
74189+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
74190+ sys_chroot((const char __force_user *)".");
74191 }
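
These early-init sys_* call sites pass kernel pointers where the syscall prototypes expect __user ones, hence the blanket __force_user casts (grsecurity shorthand, presumably __force __user combined). The machinery being pacified is sparse's address-space checking, which the kernel wires up along these lines:

    #ifdef __CHECKER__      /* defined while sparse is analysing */
    # define __user  __attribute__((noderef, address_space(1)))
    # define __force __attribute__((force)) /* suppress the cast warning */
    #else
    # define __user
    # define __force
    #endif
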
74192diff --git a/init/do_mounts.h b/init/do_mounts.h
74193index f5b978a..69dbfe8 100644
74194--- a/init/do_mounts.h
74195+++ b/init/do_mounts.h
74196@@ -15,15 +15,15 @@ extern int root_mountflags;
74197
74198 static inline int create_dev(char *name, dev_t dev)
74199 {
74200- sys_unlink(name);
74201- return sys_mknod(name, S_IFBLK|0600, new_encode_dev(dev));
74202+ sys_unlink((char __force_user *)name);
74203+ return sys_mknod((char __force_user *)name, S_IFBLK|0600, new_encode_dev(dev));
74204 }
74205
74206 #if BITS_PER_LONG == 32
74207 static inline u32 bstat(char *name)
74208 {
74209 struct stat64 stat;
74210- if (sys_stat64(name, &stat) != 0)
74211+ if (sys_stat64((char __force_user *)name, (struct stat64 __force_user *)&stat) != 0)
74212 return 0;
74213 if (!S_ISBLK(stat.st_mode))
74214 return 0;
74215@@ -35,7 +35,7 @@ static inline u32 bstat(char *name)
74216 static inline u32 bstat(char *name)
74217 {
74218 struct stat stat;
74219- if (sys_newstat(name, &stat) != 0)
74220+ if (sys_newstat((const char __force_user *)name, (struct stat __force_user *)&stat) != 0)
74221 return 0;
74222 if (!S_ISBLK(stat.st_mode))
74223 return 0;
74224diff --git a/init/do_mounts_initrd.c b/init/do_mounts_initrd.c
74225index 3e0878e..8a9d7a0 100644
74226--- a/init/do_mounts_initrd.c
74227+++ b/init/do_mounts_initrd.c
74228@@ -37,13 +37,13 @@ static int init_linuxrc(struct subprocess_info *info, struct cred *new)
74229 {
74230 sys_unshare(CLONE_FS | CLONE_FILES);
74231 /* stdin/stdout/stderr for /linuxrc */
74232- sys_open("/dev/console", O_RDWR, 0);
74233+ sys_open((const char __force_user *)"/dev/console", O_RDWR, 0);
74234 sys_dup(0);
74235 sys_dup(0);
74236 /* move initrd over / and chdir/chroot in initrd root */
74237- sys_chdir("/root");
74238- sys_mount(".", "/", NULL, MS_MOVE, NULL);
74239- sys_chroot(".");
74240+ sys_chdir((const char __force_user *)"/root");
74241+ sys_mount((char __force_user *)".", (char __force_user *)"/", NULL, MS_MOVE, NULL);
74242+ sys_chroot((const char __force_user *)".");
74243 sys_setsid();
74244 return 0;
74245 }
74246@@ -59,8 +59,8 @@ static void __init handle_initrd(void)
74247 create_dev("/dev/root.old", Root_RAM0);
74248 /* mount initrd on rootfs' /root */
74249 mount_block_root("/dev/root.old", root_mountflags & ~MS_RDONLY);
74250- sys_mkdir("/old", 0700);
74251- sys_chdir("/old");
74252+ sys_mkdir((const char __force_user *)"/old", 0700);
74253+ sys_chdir((const char __force_user *)"/old");
74254
74255 /* try loading default modules from initrd */
74256 load_default_modules();
74257@@ -80,31 +80,31 @@ static void __init handle_initrd(void)
74258 current->flags &= ~PF_FREEZER_SKIP;
74259
74260 /* move initrd to rootfs' /old */
74261- sys_mount("..", ".", NULL, MS_MOVE, NULL);
74262+ sys_mount((char __force_user *)"..", (char __force_user *)".", NULL, MS_MOVE, NULL);
74263 /* switch root and cwd back to / of rootfs */
74264- sys_chroot("..");
74265+ sys_chroot((const char __force_user *)"..");
74266
74267 if (new_decode_dev(real_root_dev) == Root_RAM0) {
74268- sys_chdir("/old");
74269+ sys_chdir((const char __force_user *)"/old");
74270 return;
74271 }
74272
74273- sys_chdir("/");
74274+ sys_chdir((const char __force_user *)"/");
74275 ROOT_DEV = new_decode_dev(real_root_dev);
74276 mount_root();
74277
74278 printk(KERN_NOTICE "Trying to move old root to /initrd ... ");
74279- error = sys_mount("/old", "/root/initrd", NULL, MS_MOVE, NULL);
74280+ error = sys_mount((char __force_user *)"/old", (char __force_user *)"/root/initrd", NULL, MS_MOVE, NULL);
74281 if (!error)
74282 printk("okay\n");
74283 else {
74284- int fd = sys_open("/dev/root.old", O_RDWR, 0);
74285+ int fd = sys_open((const char __force_user *)"/dev/root.old", O_RDWR, 0);
74286 if (error == -ENOENT)
74287 printk("/initrd does not exist. Ignored.\n");
74288 else
74289 printk("failed\n");
74290 printk(KERN_NOTICE "Unmounting old root\n");
74291- sys_umount("/old", MNT_DETACH);
74292+ sys_umount((char __force_user *)"/old", MNT_DETACH);
74293 printk(KERN_NOTICE "Trying to free ramdisk memory ... ");
74294 if (fd < 0) {
74295 error = fd;
74296@@ -127,11 +127,11 @@ int __init initrd_load(void)
74297 * mounted in the normal path.
74298 */
74299 if (rd_load_image("/initrd.image") && ROOT_DEV != Root_RAM0) {
74300- sys_unlink("/initrd.image");
74301+ sys_unlink((const char __force_user *)"/initrd.image");
74302 handle_initrd();
74303 return 1;
74304 }
74305 }
74306- sys_unlink("/initrd.image");
74307+ sys_unlink((const char __force_user *)"/initrd.image");
74308 return 0;
74309 }
74310diff --git a/init/do_mounts_md.c b/init/do_mounts_md.c
74311index 8cb6db5..d729f50 100644
74312--- a/init/do_mounts_md.c
74313+++ b/init/do_mounts_md.c
74314@@ -180,7 +180,7 @@ static void __init md_setup_drive(void)
74315 partitioned ? "_d" : "", minor,
74316 md_setup_args[ent].device_names);
74317
74318- fd = sys_open(name, 0, 0);
74319+ fd = sys_open((char __force_user *)name, 0, 0);
74320 if (fd < 0) {
74321 printk(KERN_ERR "md: open failed - cannot start "
74322 "array %s\n", name);
74323@@ -243,7 +243,7 @@ static void __init md_setup_drive(void)
74324 * array without it
74325 */
74326 sys_close(fd);
74327- fd = sys_open(name, 0, 0);
74328+ fd = sys_open((char __force_user *)name, 0, 0);
74329 sys_ioctl(fd, BLKRRPART, 0);
74330 }
74331 sys_close(fd);
74332@@ -293,7 +293,7 @@ static void __init autodetect_raid(void)
74333
74334 wait_for_device_probe();
74335
74336- fd = sys_open("/dev/md0", 0, 0);
74337+ fd = sys_open((const char __force_user *) "/dev/md0", 0, 0);
74338 if (fd >= 0) {
74339 sys_ioctl(fd, RAID_AUTORUN, raid_autopart);
74340 sys_close(fd);
74341diff --git a/init/init_task.c b/init/init_task.c
74342index ba0a7f36..2bcf1d5 100644
74343--- a/init/init_task.c
74344+++ b/init/init_task.c
74345@@ -22,5 +22,9 @@ EXPORT_SYMBOL(init_task);
74346 * Initial thread structure. Alignment of this is handled by a special
74347 * linker map entry.
74348 */
74349+#ifdef CONFIG_X86
74350+union thread_union init_thread_union __init_task_data;
74351+#else
74352 union thread_union init_thread_union __init_task_data =
74353 { INIT_THREAD_INFO(init_task) };
74354+#endif
74355diff --git a/init/initramfs.c b/init/initramfs.c
74356index a67ef9d..2d17ed9 100644
74357--- a/init/initramfs.c
74358+++ b/init/initramfs.c
74359@@ -84,7 +84,7 @@ static void __init free_hash(void)
74360 }
74361 }
74362
74363-static long __init do_utime(char *filename, time_t mtime)
74364+static long __init do_utime(char __force_user *filename, time_t mtime)
74365 {
74366 struct timespec t[2];
74367
74368@@ -119,7 +119,7 @@ static void __init dir_utime(void)
74369 struct dir_entry *de, *tmp;
74370 list_for_each_entry_safe(de, tmp, &dir_list, list) {
74371 list_del(&de->list);
74372- do_utime(de->name, de->mtime);
74373+ do_utime((char __force_user *)de->name, de->mtime);
74374 kfree(de->name);
74375 kfree(de);
74376 }
74377@@ -281,7 +281,7 @@ static int __init maybe_link(void)
74378 if (nlink >= 2) {
74379 char *old = find_link(major, minor, ino, mode, collected);
74380 if (old)
74381- return (sys_link(old, collected) < 0) ? -1 : 1;
74382+ return (sys_link((char __force_user *)old, (char __force_user *)collected) < 0) ? -1 : 1;
74383 }
74384 return 0;
74385 }
74386@@ -290,11 +290,11 @@ static void __init clean_path(char *path, umode_t mode)
74387 {
74388 struct stat st;
74389
74390- if (!sys_newlstat(path, &st) && (st.st_mode^mode) & S_IFMT) {
74391+ if (!sys_newlstat((char __force_user *)path, (struct stat __force_user *)&st) && (st.st_mode^mode) & S_IFMT) {
74392 if (S_ISDIR(st.st_mode))
74393- sys_rmdir(path);
74394+ sys_rmdir((char __force_user *)path);
74395 else
74396- sys_unlink(path);
74397+ sys_unlink((char __force_user *)path);
74398 }
74399 }
74400
74401@@ -315,7 +315,7 @@ static int __init do_name(void)
74402 int openflags = O_WRONLY|O_CREAT;
74403 if (ml != 1)
74404 openflags |= O_TRUNC;
74405- wfd = sys_open(collected, openflags, mode);
74406+ wfd = sys_open((char __force_user *)collected, openflags, mode);
74407
74408 if (wfd >= 0) {
74409 sys_fchown(wfd, uid, gid);
74410@@ -327,17 +327,17 @@ static int __init do_name(void)
74411 }
74412 }
74413 } else if (S_ISDIR(mode)) {
74414- sys_mkdir(collected, mode);
74415- sys_chown(collected, uid, gid);
74416- sys_chmod(collected, mode);
74417+ sys_mkdir((char __force_user *)collected, mode);
74418+ sys_chown((char __force_user *)collected, uid, gid);
74419+ sys_chmod((char __force_user *)collected, mode);
74420 dir_add(collected, mtime);
74421 } else if (S_ISBLK(mode) || S_ISCHR(mode) ||
74422 S_ISFIFO(mode) || S_ISSOCK(mode)) {
74423 if (maybe_link() == 0) {
74424- sys_mknod(collected, mode, rdev);
74425- sys_chown(collected, uid, gid);
74426- sys_chmod(collected, mode);
74427- do_utime(collected, mtime);
74428+ sys_mknod((char __force_user *)collected, mode, rdev);
74429+ sys_chown((char __force_user *)collected, uid, gid);
74430+ sys_chmod((char __force_user *)collected, mode);
74431+ do_utime((char __force_user *)collected, mtime);
74432 }
74433 }
74434 return 0;
74435@@ -346,15 +346,15 @@ static int __init do_name(void)
74436 static int __init do_copy(void)
74437 {
74438 if (count >= body_len) {
74439- sys_write(wfd, victim, body_len);
74440+ sys_write(wfd, (char __force_user *)victim, body_len);
74441 sys_close(wfd);
74442- do_utime(vcollected, mtime);
74443+ do_utime((char __force_user *)vcollected, mtime);
74444 kfree(vcollected);
74445 eat(body_len);
74446 state = SkipIt;
74447 return 0;
74448 } else {
74449- sys_write(wfd, victim, count);
74450+ sys_write(wfd, (char __force_user *)victim, count);
74451 body_len -= count;
74452 eat(count);
74453 return 1;
74454@@ -365,9 +365,9 @@ static int __init do_symlink(void)
74455 {
74456 collected[N_ALIGN(name_len) + body_len] = '\0';
74457 clean_path(collected, 0);
74458- sys_symlink(collected + N_ALIGN(name_len), collected);
74459- sys_lchown(collected, uid, gid);
74460- do_utime(collected, mtime);
74461+ sys_symlink((char __force_user *)collected + N_ALIGN(name_len), (char __force_user *)collected);
74462+ sys_lchown((char __force_user *)collected, uid, gid);
74463+ do_utime((char __force_user *)collected, mtime);
74464 state = SkipIt;
74465 next_state = Reset;
74466 return 0;
74467@@ -583,7 +583,7 @@ static int __init populate_rootfs(void)
74468 {
74469 char *err = unpack_to_rootfs(__initramfs_start, __initramfs_size);
74470 if (err)
74471- panic(err); /* Failed to decompress INTERNAL initramfs */
74472+ panic("%s", err); /* Failed to decompress INTERNAL initramfs */
74473 if (initrd_start) {
74474 #ifdef CONFIG_BLK_DEV_RAM
74475 int fd;
74476diff --git a/init/main.c b/init/main.c
74477index 9484f4b..4c01430 100644
74478--- a/init/main.c
74479+++ b/init/main.c
74480@@ -100,6 +100,8 @@ static inline void mark_rodata_ro(void) { }
74481 extern void tc_init(void);
74482 #endif
74483
74484+extern void grsecurity_init(void);
74485+
74486 /*
74487 * Debug helper: via this flag we know that we are in 'early bootup code'
74488 * where only the boot processor is running with IRQ disabled. This means
74489@@ -153,6 +155,64 @@ static int __init set_reset_devices(char *str)
74490
74491 __setup("reset_devices", set_reset_devices);
74492
74493+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
74494+kgid_t grsec_proc_gid = KGIDT_INIT(CONFIG_GRKERNSEC_PROC_GID);
74495+static int __init setup_grsec_proc_gid(char *str)
74496+{
74497+ grsec_proc_gid = KGIDT_INIT(simple_strtol(str, NULL, 0));
74498+ return 1;
74499+}
74500+__setup("grsec_proc_gid=", setup_grsec_proc_gid);
74501+#endif
74502+
74503+#if defined(CONFIG_X86_64) && defined(CONFIG_PAX_MEMORY_UDEREF)
74504+unsigned long pax_user_shadow_base __read_only = 1UL << TASK_SIZE_MAX_SHIFT;
74505+EXPORT_SYMBOL(pax_user_shadow_base);
74506+extern char pax_enter_kernel_user[];
74507+extern char pax_exit_kernel_user[];
74508+extern pgdval_t clone_pgd_mask;
74509+#endif
74510+
74511+#if defined(CONFIG_X86) && defined(CONFIG_PAX_MEMORY_UDEREF)
74512+static int __init setup_pax_nouderef(char *str)
74513+{
74514+#ifdef CONFIG_X86_32
74515+ unsigned int cpu;
74516+ struct desc_struct *gdt;
74517+
74518+ for (cpu = 0; cpu < nr_cpu_ids; cpu++) {
74519+ gdt = get_cpu_gdt_table(cpu);
74520+ gdt[GDT_ENTRY_KERNEL_DS].type = 3;
74521+ gdt[GDT_ENTRY_KERNEL_DS].limit = 0xf;
74522+ gdt[GDT_ENTRY_DEFAULT_USER_CS].limit = 0xf;
74523+ gdt[GDT_ENTRY_DEFAULT_USER_DS].limit = 0xf;
74524+ }
74525+ loadsegment(ds, __KERNEL_DS);
74526+ loadsegment(es, __KERNEL_DS);
74527+ loadsegment(ss, __KERNEL_DS);
74528+#else
74529+ memcpy(pax_enter_kernel_user, (unsigned char []){0xc3}, 1);
74530+ memcpy(pax_exit_kernel_user, (unsigned char []){0xc3}, 1);
74531+ clone_pgd_mask = ~(pgdval_t)0UL;
74532+ pax_user_shadow_base = 0UL;
74533+#endif
74534+
74535+ return 0;
74536+}
74537+early_param("pax_nouderef", setup_pax_nouderef);
74538+#endif
74539+
74540+#ifdef CONFIG_PAX_SOFTMODE
74541+int pax_softmode;
74542+
74543+static int __init setup_pax_softmode(char *str)
74544+{
74545+ get_option(&str, &pax_softmode);
74546+ return 1;
74547+}
74548+__setup("pax_softmode=", setup_pax_softmode);
74549+#endif
74550+
74551 static const char * argv_init[MAX_INIT_ARGS+2] = { "init", NULL, };
74552 const char * envp_init[MAX_INIT_ENVS+2] = { "HOME=/", "TERM=linux", NULL, };
74553 static const char *panic_later, *panic_param;
74554@@ -655,8 +715,6 @@ static void __init do_ctors(void)
74555 bool initcall_debug;
74556 core_param(initcall_debug, initcall_debug, bool, 0644);
74557
74558-static char msgbuf[64];
74559-
74560 static int __init_or_module do_one_initcall_debug(initcall_t fn)
74561 {
74562 ktime_t calltime, delta, rettime;
74563@@ -679,23 +737,22 @@ int __init_or_module do_one_initcall(initcall_t fn)
74564 {
74565 int count = preempt_count();
74566 int ret;
74567+ const char *msg1 = "", *msg2 = "";
74568
74569 if (initcall_debug)
74570 ret = do_one_initcall_debug(fn);
74571 else
74572 ret = fn();
74573
74574- msgbuf[0] = 0;
74575-
74576 if (preempt_count() != count) {
74577- sprintf(msgbuf, "preemption imbalance ");
74578+ msg1 = " preemption imbalance";
74579 preempt_count() = count;
74580 }
74581 if (irqs_disabled()) {
74582- strlcat(msgbuf, "disabled interrupts ", sizeof(msgbuf));
74583+ msg2 = " disabled interrupts";
74584 local_irq_enable();
74585 }
74586- WARN(msgbuf[0], "initcall %pF returned with %s\n", fn, msgbuf);
74587+ WARN(*msg1 || *msg2, "initcall %pF returned with%s%s\n", fn, msg1, msg2);
74588
74589 return ret;
74590 }
74591@@ -748,8 +805,14 @@ static void __init do_initcall_level(int level)
74592 level, level,
74593 &repair_env_string);
74594
74595- for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++)
74596+ for (fn = initcall_levels[level]; fn < initcall_levels[level+1]; fn++) {
74597 do_one_initcall(*fn);
74598+
74599+#ifdef LATENT_ENTROPY_PLUGIN
74600+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
74601+#endif
74602+
74603+ }
74604 }
74605
74606 static void __init do_initcalls(void)
74607@@ -783,8 +846,14 @@ static void __init do_pre_smp_initcalls(void)
74608 {
74609 initcall_t *fn;
74610
74611- for (fn = __initcall_start; fn < __initcall0_start; fn++)
74612+ for (fn = __initcall_start; fn < __initcall0_start; fn++) {
74613 do_one_initcall(*fn);
74614+
74615+#ifdef LATENT_ENTROPY_PLUGIN
74616+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
74617+#endif
74618+
74619+ }
74620 }
74621
74622 /*
74623@@ -802,8 +871,8 @@ static int run_init_process(const char *init_filename)
74624 {
74625 argv_init[0] = init_filename;
74626 return do_execve(init_filename,
74627- (const char __user *const __user *)argv_init,
74628- (const char __user *const __user *)envp_init);
74629+ (const char __user *const __force_user *)argv_init,
74630+ (const char __user *const __force_user *)envp_init);
74631 }
74632
74633 static noinline void __init kernel_init_freeable(void);
74634@@ -880,7 +949,7 @@ static noinline void __init kernel_init_freeable(void)
74635 do_basic_setup();
74636
74637 /* Open the /dev/console on the rootfs, this should never fail */
74638- if (sys_open((const char __user *) "/dev/console", O_RDWR, 0) < 0)
74639+ if (sys_open((const char __force_user *) "/dev/console", O_RDWR, 0) < 0)
74640 pr_err("Warning: unable to open an initial console.\n");
74641
74642 (void) sys_dup(0);
74643@@ -893,11 +962,13 @@ static noinline void __init kernel_init_freeable(void)
74644 if (!ramdisk_execute_command)
74645 ramdisk_execute_command = "/init";
74646
74647- if (sys_access((const char __user *) ramdisk_execute_command, 0) != 0) {
74648+ if (sys_access((const char __force_user *) ramdisk_execute_command, 0) != 0) {
74649 ramdisk_execute_command = NULL;
74650 prepare_namespace();
74651 }
74652
74653+ grsecurity_init();
74654+
74655 /*
74656 * Ok, we have completed the initial bootup, and
74657 * we're essentially up and running. Get rid of the
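
The new grsec_proc_gid=, pax_nouderef and pax_softmode= options hook into the standard command-line machinery: __setup() registers a parser invoked during kernel parameter parsing, while early_param() registers one that runs much earlier in boot, before most subsystems exist. The registration skeleton, reduced to essentials (the demo name is invented):

    #include <linux/init.h>
    #include <linux/kernel.h>

    static int demo_value;

    /* parses "demo=<n>" from the kernel command line */
    static int __init setup_demo(char *str)
    {
            get_option(&str, &demo_value);
            return 1;       /* non-zero: option recognised and consumed */
    }
    __setup("demo=", setup_demo);
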
74658diff --git a/ipc/ipc_sysctl.c b/ipc/ipc_sysctl.c
74659index 130dfec..cc88451 100644
74660--- a/ipc/ipc_sysctl.c
74661+++ b/ipc/ipc_sysctl.c
74662@@ -30,7 +30,7 @@ static void *get_ipc(ctl_table *table)
74663 static int proc_ipc_dointvec(ctl_table *table, int write,
74664 void __user *buffer, size_t *lenp, loff_t *ppos)
74665 {
74666- struct ctl_table ipc_table;
74667+ ctl_table_no_const ipc_table;
74668
74669 memcpy(&ipc_table, table, sizeof(ipc_table));
74670 ipc_table.data = get_ipc(table);
74671@@ -41,7 +41,7 @@ static int proc_ipc_dointvec(ctl_table *table, int write,
74672 static int proc_ipc_dointvec_minmax(ctl_table *table, int write,
74673 void __user *buffer, size_t *lenp, loff_t *ppos)
74674 {
74675- struct ctl_table ipc_table;
74676+ ctl_table_no_const ipc_table;
74677
74678 memcpy(&ipc_table, table, sizeof(ipc_table));
74679 ipc_table.data = get_ipc(table);
74680@@ -65,7 +65,7 @@ static int proc_ipc_dointvec_minmax_orphans(ctl_table *table, int write,
74681 static int proc_ipc_callback_dointvec(ctl_table *table, int write,
74682 void __user *buffer, size_t *lenp, loff_t *ppos)
74683 {
74684- struct ctl_table ipc_table;
74685+ ctl_table_no_const ipc_table;
74686 size_t lenp_bef = *lenp;
74687 int rc;
74688
74689@@ -88,7 +88,7 @@ static int proc_ipc_callback_dointvec(ctl_table *table, int write,
74690 static int proc_ipc_doulongvec_minmax(ctl_table *table, int write,
74691 void __user *buffer, size_t *lenp, loff_t *ppos)
74692 {
74693- struct ctl_table ipc_table;
74694+ ctl_table_no_const ipc_table;
74695 memcpy(&ipc_table, table, sizeof(ipc_table));
74696 ipc_table.data = get_ipc(table);
74697
74698@@ -122,7 +122,7 @@ static void ipc_auto_callback(int val)
74699 static int proc_ipcauto_dointvec_minmax(ctl_table *table, int write,
74700 void __user *buffer, size_t *lenp, loff_t *ppos)
74701 {
74702- struct ctl_table ipc_table;
74703+ ctl_table_no_const ipc_table;
74704 size_t lenp_bef = *lenp;
74705 int oldval;
74706 int rc;
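Throughout ipc/ipc_sysctl.c the patch swaps the on-stack struct ctl_table copy for ctl_table_no_const: under the PaX constify plugin, ops-style structures such as ctl_table become read-only, so a local scratch copy needs an explicitly non-const variant. The underlying pattern is ordinary copy-then-mutate C; a minimal standalone sketch (names are illustrative, not from the kernel):

    #include <stdio.h>
    #include <string.h>

    struct table { const char *name; int value; };

    /* The shared template stays immutable... */
    static const struct table template = { "msgmax", 8192 };

    int main(void)
    {
        struct table scratch; /* ...and per-call work happens on a copy */
        memcpy(&scratch, &template, sizeof(scratch));
        scratch.value = 65536; /* safe: only the copy is modified */
        printf("%s = %d (template still %d)\n",
               scratch.name, scratch.value, template.value);
        return 0;
    }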
74707diff --git a/ipc/mq_sysctl.c b/ipc/mq_sysctl.c
74708index 383d638..943fdbb 100644
74709--- a/ipc/mq_sysctl.c
74710+++ b/ipc/mq_sysctl.c
74711@@ -25,7 +25,7 @@ static void *get_mq(ctl_table *table)
74712 static int proc_mq_dointvec_minmax(ctl_table *table, int write,
74713 void __user *buffer, size_t *lenp, loff_t *ppos)
74714 {
74715- struct ctl_table mq_table;
74716+ ctl_table_no_const mq_table;
74717 memcpy(&mq_table, table, sizeof(mq_table));
74718 mq_table.data = get_mq(table);
74719
74720diff --git a/ipc/mqueue.c b/ipc/mqueue.c
74721index e4e47f6..a85e0ad 100644
74722--- a/ipc/mqueue.c
74723+++ b/ipc/mqueue.c
74724@@ -278,6 +278,7 @@ static struct inode *mqueue_get_inode(struct super_block *sb,
74725 mq_bytes = mq_treesize + (info->attr.mq_maxmsg *
74726 info->attr.mq_msgsize);
74727
74728+ gr_learn_resource(current, RLIMIT_MSGQUEUE, u->mq_bytes + mq_bytes, 1);
74729 spin_lock(&mq_lock);
74730 if (u->mq_bytes + mq_bytes < u->mq_bytes ||
74731 u->mq_bytes + mq_bytes > rlimit(RLIMIT_MSGQUEUE)) {
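The mqueue hunk reports the queue's projected byte usage to grsecurity's resource learning just before the pre-existing RLIMIT_MSGQUEUE check. The limit being consulted is visible from userspace; a small probe (standard getrlimit(2), nothing grsec-specific):

    #include <stdio.h>
    #include <sys/resource.h>

    int main(void)
    {
        struct rlimit rl;
        /* RLIMIT_MSGQUEUE caps bytes allocatable for POSIX message queues. */
        if (getrlimit(RLIMIT_MSGQUEUE, &rl) != 0) { perror("getrlimit"); return 1; }
        printf("RLIMIT_MSGQUEUE: soft=%llu hard=%llu\n",
               (unsigned long long)rl.rlim_cur, (unsigned long long)rl.rlim_max);
        return 0;
    }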
74732diff --git a/ipc/msg.c b/ipc/msg.c
74733index d0c6d96..69a893c 100644
74734--- a/ipc/msg.c
74735+++ b/ipc/msg.c
74736@@ -296,18 +296,19 @@ static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
74737 return security_msg_queue_associate(msq, msgflg);
74738 }
74739
74740+static struct ipc_ops msg_ops = {
74741+ .getnew = newque,
74742+ .associate = msg_security,
74743+ .more_checks = NULL
74744+};
74745+
74746 SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
74747 {
74748 struct ipc_namespace *ns;
74749- struct ipc_ops msg_ops;
74750 struct ipc_params msg_params;
74751
74752 ns = current->nsproxy->ipc_ns;
74753
74754- msg_ops.getnew = newque;
74755- msg_ops.associate = msg_security;
74756- msg_ops.more_checks = NULL;
74757-
74758 msg_params.key = key;
74759 msg_params.flg = msgflg;
74760
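The msg.c change (and the matching ones in sem.c and shm.c below) replaces an ipc_ops struct filled in on the stack at every syscall with a single static, initializer-populated table; with the constify plugin such function-pointer tables can then be placed in read-only memory. A minimal sketch of the refactor, using hypothetical names:

    #include <stdio.h>

    struct ops { int (*getnew)(int); int (*associate)(int); };

    static int my_getnew(int key)    { return key + 1; }
    static int my_associate(int key) { return key; }

    /* After: one static table with designated initializers... */
    static const struct ops msg_ops = {
        .getnew    = my_getnew,
        .associate = my_associate,
    };

    int main(void)
    {
        /* ...instead of the "before" pattern of filling a stack copy
         * (ops.getnew = my_getnew; ...) on every call. */
        printf("%d\n", msg_ops.getnew(41));
        return 0;
    }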
74761diff --git a/ipc/sem.c b/ipc/sem.c
74762index 70480a3..f4e8262 100644
74763--- a/ipc/sem.c
74764+++ b/ipc/sem.c
74765@@ -460,10 +460,15 @@ static inline int sem_more_checks(struct kern_ipc_perm *ipcp,
74766 return 0;
74767 }
74768
74769+static struct ipc_ops sem_ops = {
74770+ .getnew = newary,
74771+ .associate = sem_security,
74772+ .more_checks = sem_more_checks
74773+};
74774+
74775 SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
74776 {
74777 struct ipc_namespace *ns;
74778- struct ipc_ops sem_ops;
74779 struct ipc_params sem_params;
74780
74781 ns = current->nsproxy->ipc_ns;
74782@@ -471,10 +476,6 @@ SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
74783 if (nsems < 0 || nsems > ns->sc_semmsl)
74784 return -EINVAL;
74785
74786- sem_ops.getnew = newary;
74787- sem_ops.associate = sem_security;
74788- sem_ops.more_checks = sem_more_checks;
74789-
74790 sem_params.key = key;
74791 sem_params.flg = semflg;
74792 sem_params.u.nsems = nsems;
74793diff --git a/ipc/shm.c b/ipc/shm.c
74794index 7e199fa..180a1ca 100644
74795--- a/ipc/shm.c
74796+++ b/ipc/shm.c
74797@@ -69,6 +69,14 @@ static void shm_destroy (struct ipc_namespace *ns, struct shmid_kernel *shp);
74798 static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
74799 #endif
74800
74801+#ifdef CONFIG_GRKERNSEC
74802+extern int gr_handle_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
74803+ const time_t shm_createtime, const kuid_t cuid,
74804+ const int shmid);
74805+extern int gr_chroot_shmat(const pid_t shm_cprid, const pid_t shm_lapid,
74806+ const time_t shm_createtime);
74807+#endif
74808+
74809 void shm_init_ns(struct ipc_namespace *ns)
74810 {
74811 ns->shm_ctlmax = SHMMAX;
74812@@ -531,6 +539,14 @@ static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
74813 shp->shm_lprid = 0;
74814 shp->shm_atim = shp->shm_dtim = 0;
74815 shp->shm_ctim = get_seconds();
74816+#ifdef CONFIG_GRKERNSEC
74817+ {
74818+ struct timespec timeval;
74819+ do_posix_clock_monotonic_gettime(&timeval);
74820+
74821+ shp->shm_createtime = timeval.tv_sec;
74822+ }
74823+#endif
74824 shp->shm_segsz = size;
74825 shp->shm_nattch = 0;
74826 shp->shm_file = file;
74827@@ -582,18 +598,19 @@ static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
74828 return 0;
74829 }
74830
74831+static struct ipc_ops shm_ops = {
74832+ .getnew = newseg,
74833+ .associate = shm_security,
74834+ .more_checks = shm_more_checks
74835+};
74836+
74837 SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
74838 {
74839 struct ipc_namespace *ns;
74840- struct ipc_ops shm_ops;
74841 struct ipc_params shm_params;
74842
74843 ns = current->nsproxy->ipc_ns;
74844
74845- shm_ops.getnew = newseg;
74846- shm_ops.associate = shm_security;
74847- shm_ops.more_checks = shm_more_checks;
74848-
74849 shm_params.key = key;
74850 shm_params.flg = shmflg;
74851 shm_params.u.size = size;
74852@@ -1014,6 +1031,12 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
74853 f_mode = FMODE_READ | FMODE_WRITE;
74854 }
74855 if (shmflg & SHM_EXEC) {
74856+
74857+#ifdef CONFIG_PAX_MPROTECT
74858+ if (current->mm->pax_flags & MF_PAX_MPROTECT)
74859+ goto out;
74860+#endif
74861+
74862 prot |= PROT_EXEC;
74863 acc_mode |= S_IXUGO;
74864 }
74865@@ -1037,9 +1060,21 @@ long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr,
74866 if (err)
74867 goto out_unlock;
74868
74869+#ifdef CONFIG_GRKERNSEC
74870+ if (!gr_handle_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime,
74871+ shp->shm_perm.cuid, shmid) ||
74872+ !gr_chroot_shmat(shp->shm_cprid, shp->shm_lapid, shp->shm_createtime)) {
74873+ err = -EACCES;
74874+ goto out_unlock;
74875+ }
74876+#endif
74877+
74878 path = shp->shm_file->f_path;
74879 path_get(&path);
74880 shp->shm_nattch++;
74881+#ifdef CONFIG_GRKERNSEC
74882+ shp->shm_lapid = current->pid;
74883+#endif
74884 size = i_size_read(path.dentry->d_inode);
74885 shm_unlock(shp);
74886
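The shm.c hunks refuse SHM_EXEC attaches when the task runs under PaX MPROTECT and gate every attach through gr_handle_shmat()/gr_chroot_shmat(). On such a kernel the following probe — an illustration, not from the patch — would see shmat(..., SHM_EXEC) fail with EACCES where a vanilla kernel succeeds:

    #include <stdio.h>
    #include <sys/ipc.h>
    #include <sys/shm.h>

    #ifndef SHM_EXEC
    #define SHM_EXEC 0100000 /* value from linux/shm.h, for older libc headers */
    #endif

    int main(void)
    {
        int id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);
        if (id < 0) { perror("shmget"); return 1; }
        /* SHM_EXEC requests PROT_EXEC on the mapping; MPROTECT tasks get EACCES. */
        void *p = shmat(id, NULL, SHM_EXEC);
        if (p == (void *)-1)
            perror("shmat(SHM_EXEC)");
        else
            printf("executable attach at %p\n", p);
        shmctl(id, IPC_RMID, NULL);
        return 0;
    }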
74887diff --git a/kernel/acct.c b/kernel/acct.c
74888index 8d6e145..33e0b1e 100644
74889--- a/kernel/acct.c
74890+++ b/kernel/acct.c
74891@@ -556,7 +556,7 @@ static void do_acct_process(struct bsd_acct_struct *acct,
74892 */
74893 flim = current->signal->rlim[RLIMIT_FSIZE].rlim_cur;
74894 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = RLIM_INFINITY;
74895- file->f_op->write(file, (char *)&ac,
74896+ file->f_op->write(file, (char __force_user *)&ac,
74897 sizeof(acct_t), &file->f_pos);
74898 current->signal->rlim[RLIMIT_FSIZE].rlim_cur = flim;
74899 set_fs(fs);
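do_acct_process() above temporarily lifts the caller's RLIMIT_FSIZE to RLIM_INFINITY around the accounting write and then restores it; the grsec change only re-annotates the userspace pointer cast. The same save/restore shape exists in userspace, where an unprivileged process may move its soft limit anywhere up to the hard limit — a sketch:

    #include <stdio.h>
    #include <sys/resource.h>

    int main(void)
    {
        struct rlimit saved, tmp;
        if (getrlimit(RLIMIT_FSIZE, &saved) != 0) { perror("getrlimit"); return 1; }
        tmp = saved;
        tmp.rlim_cur = saved.rlim_max; /* raise the soft limit to the hard cap */
        if (setrlimit(RLIMIT_FSIZE, &tmp) != 0) { perror("setrlimit"); return 1; }
        /* ... perform the size-exempt write here ... */
        setrlimit(RLIMIT_FSIZE, &saved); /* restore the original soft limit */
        printf("soft limit restored to %llu\n", (unsigned long long)saved.rlim_cur);
        return 0;
    }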
74900diff --git a/kernel/audit.c b/kernel/audit.c
74901index 91e53d0..d9e3ec4 100644
74902--- a/kernel/audit.c
74903+++ b/kernel/audit.c
74904@@ -118,7 +118,7 @@ u32 audit_sig_sid = 0;
74905 3) suppressed due to audit_rate_limit
74906 4) suppressed due to audit_backlog_limit
74907 */
74908-static atomic_t audit_lost = ATOMIC_INIT(0);
74909+static atomic_unchecked_t audit_lost = ATOMIC_INIT(0);
74910
74911 /* The netlink socket. */
74912 static struct sock *audit_sock;
74913@@ -240,7 +240,7 @@ void audit_log_lost(const char *message)
74914 unsigned long now;
74915 int print;
74916
74917- atomic_inc(&audit_lost);
74918+ atomic_inc_unchecked(&audit_lost);
74919
74920 print = (audit_failure == AUDIT_FAIL_PANIC || !audit_rate_limit);
74921
74922@@ -259,7 +259,7 @@ void audit_log_lost(const char *message)
74923 printk(KERN_WARNING
74924 "audit: audit_lost=%d audit_rate_limit=%d "
74925 "audit_backlog_limit=%d\n",
74926- atomic_read(&audit_lost),
74927+ atomic_read_unchecked(&audit_lost),
74928 audit_rate_limit,
74929 audit_backlog_limit);
74930 audit_panic(message);
74931@@ -664,7 +664,7 @@ static int audit_receive_msg(struct sk_buff *skb, struct nlmsghdr *nlh)
74932 status_set.pid = audit_pid;
74933 status_set.rate_limit = audit_rate_limit;
74934 status_set.backlog_limit = audit_backlog_limit;
74935- status_set.lost = atomic_read(&audit_lost);
74936+ status_set.lost = atomic_read_unchecked(&audit_lost);
74937 status_set.backlog = skb_queue_len(&audit_skb_queue);
74938 audit_send_reply(NETLINK_CB(skb).portid, seq, AUDIT_GET, 0, 0,
74939 &status_set, sizeof(status_set));
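The audit.c conversion of audit_lost to atomic_unchecked_t belongs to the PaX REFCOUNT scheme: ordinary atomic_t operations trap on overflow to stop reference-count wraps, so pure statistics counters whose wraparound is harmless are annotated with the _unchecked variants instead. In C11 terms such a counter is just a relaxed atomic — a sketch of the idea, not kernel code:

    #include <stdatomic.h>
    #include <stdio.h>

    /* A statistics counter: unsigned overflow simply wraps, and nothing
     * dereferences it, so trap-on-overflow protection is unnecessary. */
    static atomic_uint lost = 0;

    static void log_lost(void)
    {
        atomic_fetch_add_explicit(&lost, 1, memory_order_relaxed);
    }

    int main(void)
    {
        for (int i = 0; i < 5; i++)
            log_lost();
        printf("lost=%u\n", atomic_load_explicit(&lost, memory_order_relaxed));
        return 0;
    }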
74940diff --git a/kernel/auditfilter.c b/kernel/auditfilter.c
74941index 6bd4a90..0ee9eff 100644
74942--- a/kernel/auditfilter.c
74943+++ b/kernel/auditfilter.c
74944@@ -423,7 +423,7 @@ static struct audit_entry *audit_data_to_entry(struct audit_rule_data *data,
74945 f->lsm_rule = NULL;
74946
74947 /* Support legacy tests for a valid loginuid */
74948- if ((f->type == AUDIT_LOGINUID) && (f->val == 4294967295)) {
74949+ if ((f->type == AUDIT_LOGINUID) && (f->val == 4294967295U)) {
74950 f->type = AUDIT_LOGINUID_SET;
74951 f->val = 0;
74952 }
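The auditfilter.c change just adds a U suffix, but it matters: in C an unsuffixed 4294967295 does not fit in int, so on LP64 targets it takes type long, and comparing it against a u32 field promotes both sides to long; 4294967295U stays unsigned int and compares without widening or sign-conversion warnings. Demonstrable in two lines:

    #include <stdio.h>

    int main(void)
    {
        /* On an LP64 system this prints "8 4" — the unsuffixed
         * literal was given type long, the suffixed one unsigned int. */
        printf("%zu %zu\n", sizeof(4294967295), sizeof(4294967295U));
        return 0;
    }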
74953diff --git a/kernel/auditsc.c b/kernel/auditsc.c
74954index 3c8a601..3a416f6 100644
74955--- a/kernel/auditsc.c
74956+++ b/kernel/auditsc.c
74957@@ -1956,7 +1956,7 @@ int auditsc_get_stamp(struct audit_context *ctx,
74958 }
74959
74960 /* global counter which is incremented every time something logs in */
74961-static atomic_t session_id = ATOMIC_INIT(0);
74962+static atomic_unchecked_t session_id = ATOMIC_INIT(0);
74963
74964 /**
74965 * audit_set_loginuid - set current task's audit_context loginuid
74966@@ -1980,7 +1980,7 @@ int audit_set_loginuid(kuid_t loginuid)
74967 return -EPERM;
74968 #endif /* CONFIG_AUDIT_LOGINUID_IMMUTABLE */
74969
74970- sessionid = atomic_inc_return(&session_id);
74971+ sessionid = atomic_inc_return_unchecked(&session_id);
74972 if (context && context->in_syscall) {
74973 struct audit_buffer *ab;
74974
74975diff --git a/kernel/capability.c b/kernel/capability.c
74976index f6c2ce5..982c0f9 100644
74977--- a/kernel/capability.c
74978+++ b/kernel/capability.c
74979@@ -202,6 +202,9 @@ SYSCALL_DEFINE2(capget, cap_user_header_t, header, cap_user_data_t, dataptr)
74980 * before modification is attempted and the application
74981 * fails.
74982 */
74983+ if (tocopy > ARRAY_SIZE(kdata))
74984+ return -EFAULT;
74985+
74986 if (copy_to_user(dataptr, kdata, tocopy
74987 * sizeof(struct __user_cap_data_struct))) {
74988 return -EFAULT;
74989@@ -303,10 +306,11 @@ bool has_ns_capability(struct task_struct *t,
74990 int ret;
74991
74992 rcu_read_lock();
74993- ret = security_capable(__task_cred(t), ns, cap);
74994+ ret = security_capable(__task_cred(t), ns, cap) == 0 &&
74995+ gr_task_is_capable(t, __task_cred(t), cap);
74996 rcu_read_unlock();
74997
74998- return (ret == 0);
74999+ return ret;
75000 }
75001
75002 /**
75003@@ -343,10 +347,10 @@ bool has_ns_capability_noaudit(struct task_struct *t,
75004 int ret;
75005
75006 rcu_read_lock();
75007- ret = security_capable_noaudit(__task_cred(t), ns, cap);
75008+ ret = security_capable_noaudit(__task_cred(t), ns, cap) == 0 && gr_task_is_capable_nolog(t, cap);
75009 rcu_read_unlock();
75010
75011- return (ret == 0);
75012+ return ret;
75013 }
75014
75015 /**
75016@@ -384,7 +388,7 @@ bool ns_capable(struct user_namespace *ns, int cap)
75017 BUG();
75018 }
75019
75020- if (security_capable(current_cred(), ns, cap) == 0) {
75021+ if (security_capable(current_cred(), ns, cap) == 0 && gr_is_capable(cap)) {
75022 current->flags |= PF_SUPERPRIV;
75023 return true;
75024 }
75025@@ -392,6 +396,21 @@ bool ns_capable(struct user_namespace *ns, int cap)
75026 }
75027 EXPORT_SYMBOL(ns_capable);
75028
75029+bool ns_capable_nolog(struct user_namespace *ns, int cap)
75030+{
75031+ if (unlikely(!cap_valid(cap))) {
75032+ printk(KERN_CRIT "capable_nolog() called with invalid cap=%u\n", cap);
75033+ BUG();
75034+ }
75035+
75036+ if (security_capable_noaudit(current_cred(), ns, cap) == 0 && gr_is_capable_nolog(cap)) {
75037+ current->flags |= PF_SUPERPRIV;
75038+ return true;
75039+ }
75040+ return false;
75041+}
75042+EXPORT_SYMBOL(ns_capable_nolog);
75043+
75044 /**
75045 * file_ns_capable - Determine if the file's opener had a capability in effect
75046 * @file: The file we want to check
75047@@ -432,6 +451,12 @@ bool capable(int cap)
75048 }
75049 EXPORT_SYMBOL(capable);
75050
75051+bool capable_nolog(int cap)
75052+{
75053+ return ns_capable_nolog(&init_user_ns, cap);
75054+}
75055+EXPORT_SYMBOL(capable_nolog);
75056+
75057 /**
75058 * nsown_capable - Check superior capability to one's own user_ns
75059 * @cap: The capability in question
75060@@ -464,3 +489,10 @@ bool inode_capable(const struct inode *inode, int cap)
75061
75062 return ns_capable(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
75063 }
75064+
75065+bool inode_capable_nolog(const struct inode *inode, int cap)
75066+{
75067+ struct user_namespace *ns = current_user_ns();
75068+
75069+ return ns_capable_nolog(ns, cap) && kuid_has_mapping(ns, inode->i_uid);
75070+}
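capability.c grows _nolog variants so grsecurity's RBAC checks can be consulted without flooding the audit log. Whether a given capability is even available to a process can be probed from userspace through the bounding set — a small check using prctl(2), shown for context rather than taken from the patch:

    #include <linux/capability.h>
    #include <stdio.h>
    #include <sys/prctl.h>

    int main(void)
    {
        /* PR_CAPBSET_READ returns 1 if the capability is in the bounding set. */
        int r = prctl(PR_CAPBSET_READ, CAP_SYS_ADMIN);
        if (r < 0) { perror("prctl"); return 1; }
        printf("CAP_SYS_ADMIN %s the bounding set\n", r ? "is in" : "is NOT in");
        return 0;
    }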
75071diff --git a/kernel/cgroup.c b/kernel/cgroup.c
75072index c6e77ef..af531a0 100644
75073--- a/kernel/cgroup.c
75074+++ b/kernel/cgroup.c
75075@@ -5391,7 +5391,7 @@ static int cgroup_css_links_read(struct cgroup *cont,
75076 struct css_set *cg = link->cg;
75077 struct task_struct *task;
75078 int count = 0;
75079- seq_printf(seq, "css_set %p\n", cg);
75080+ seq_printf(seq, "css_set %pK\n", cg);
75081 list_for_each_entry(task, &cg->tasks, cg_list) {
75082 if (count++ > MAX_TASKS_SHOWN_PER_CSS) {
75083 seq_puts(seq, " ...\n");
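The cgroup.c hunk switches a %p to %pK, the printk format that hides kernel pointers from unprivileged readers according to kernel.kptr_restrict. The knob itself is plain procfs; a quick look at the current setting (assumes the file exists, i.e. kernel >= 2.6.38):

    #include <stdio.h>

    int main(void)
    {
        char buf[16] = "";
        FILE *f = fopen("/proc/sys/kernel/kptr_restrict", "r");
        if (!f) { perror("fopen"); return 1; }
        if (fgets(buf, sizeof(buf), f))
            /* 0: %pK prints real pointers; 1: zeros without CAP_SYSLOG; 2: always zeros */
            printf("kptr_restrict = %s", buf);
        fclose(f);
        return 0;
    }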
75084diff --git a/kernel/compat.c b/kernel/compat.c
75085index 0a09e48..f44f3f0 100644
75086--- a/kernel/compat.c
75087+++ b/kernel/compat.c
75088@@ -13,6 +13,7 @@
75089
75090 #include <linux/linkage.h>
75091 #include <linux/compat.h>
75092+#include <linux/module.h>
75093 #include <linux/errno.h>
75094 #include <linux/time.h>
75095 #include <linux/signal.h>
75096@@ -220,7 +221,7 @@ static long compat_nanosleep_restart(struct restart_block *restart)
75097 mm_segment_t oldfs;
75098 long ret;
75099
75100- restart->nanosleep.rmtp = (struct timespec __user *) &rmt;
75101+ restart->nanosleep.rmtp = (struct timespec __force_user *) &rmt;
75102 oldfs = get_fs();
75103 set_fs(KERNEL_DS);
75104 ret = hrtimer_nanosleep_restart(restart);
75105@@ -252,7 +253,7 @@ asmlinkage long compat_sys_nanosleep(struct compat_timespec __user *rqtp,
75106 oldfs = get_fs();
75107 set_fs(KERNEL_DS);
75108 ret = hrtimer_nanosleep(&tu,
75109- rmtp ? (struct timespec __user *)&rmt : NULL,
75110+ rmtp ? (struct timespec __force_user *)&rmt : NULL,
75111 HRTIMER_MODE_REL, CLOCK_MONOTONIC);
75112 set_fs(oldfs);
75113
75114@@ -361,7 +362,7 @@ asmlinkage long compat_sys_sigpending(compat_old_sigset_t __user *set)
75115 mm_segment_t old_fs = get_fs();
75116
75117 set_fs(KERNEL_DS);
75118- ret = sys_sigpending((old_sigset_t __user *) &s);
75119+ ret = sys_sigpending((old_sigset_t __force_user *) &s);
75120 set_fs(old_fs);
75121 if (ret == 0)
75122 ret = put_user(s, set);
75123@@ -451,7 +452,7 @@ asmlinkage long compat_sys_old_getrlimit(unsigned int resource,
75124 mm_segment_t old_fs = get_fs();
75125
75126 set_fs(KERNEL_DS);
75127- ret = sys_old_getrlimit(resource, &r);
75128+ ret = sys_old_getrlimit(resource, (struct rlimit __force_user *)&r);
75129 set_fs(old_fs);
75130
75131 if (!ret) {
75132@@ -533,8 +534,8 @@ COMPAT_SYSCALL_DEFINE4(wait4,
75133 set_fs (KERNEL_DS);
75134 ret = sys_wait4(pid,
75135 (stat_addr ?
75136- (unsigned int __user *) &status : NULL),
75137- options, (struct rusage __user *) &r);
75138+ (unsigned int __force_user *) &status : NULL),
75139+ options, (struct rusage __force_user *) &r);
75140 set_fs (old_fs);
75141
75142 if (ret > 0) {
75143@@ -560,8 +561,8 @@ COMPAT_SYSCALL_DEFINE5(waitid,
75144 memset(&info, 0, sizeof(info));
75145
75146 set_fs(KERNEL_DS);
75147- ret = sys_waitid(which, pid, (siginfo_t __user *)&info, options,
75148- uru ? (struct rusage __user *)&ru : NULL);
75149+ ret = sys_waitid(which, pid, (siginfo_t __force_user *)&info, options,
75150+ uru ? (struct rusage __force_user *)&ru : NULL);
75151 set_fs(old_fs);
75152
75153 if ((ret < 0) || (info.si_signo == 0))
75154@@ -695,8 +696,8 @@ long compat_sys_timer_settime(timer_t timer_id, int flags,
75155 oldfs = get_fs();
75156 set_fs(KERNEL_DS);
75157 err = sys_timer_settime(timer_id, flags,
75158- (struct itimerspec __user *) &newts,
75159- (struct itimerspec __user *) &oldts);
75160+ (struct itimerspec __force_user *) &newts,
75161+ (struct itimerspec __force_user *) &oldts);
75162 set_fs(oldfs);
75163 if (!err && old && put_compat_itimerspec(old, &oldts))
75164 return -EFAULT;
75165@@ -713,7 +714,7 @@ long compat_sys_timer_gettime(timer_t timer_id,
75166 oldfs = get_fs();
75167 set_fs(KERNEL_DS);
75168 err = sys_timer_gettime(timer_id,
75169- (struct itimerspec __user *) &ts);
75170+ (struct itimerspec __force_user *) &ts);
75171 set_fs(oldfs);
75172 if (!err && put_compat_itimerspec(setting, &ts))
75173 return -EFAULT;
75174@@ -732,7 +733,7 @@ long compat_sys_clock_settime(clockid_t which_clock,
75175 oldfs = get_fs();
75176 set_fs(KERNEL_DS);
75177 err = sys_clock_settime(which_clock,
75178- (struct timespec __user *) &ts);
75179+ (struct timespec __force_user *) &ts);
75180 set_fs(oldfs);
75181 return err;
75182 }
75183@@ -747,7 +748,7 @@ long compat_sys_clock_gettime(clockid_t which_clock,
75184 oldfs = get_fs();
75185 set_fs(KERNEL_DS);
75186 err = sys_clock_gettime(which_clock,
75187- (struct timespec __user *) &ts);
75188+ (struct timespec __force_user *) &ts);
75189 set_fs(oldfs);
75190 if (!err && put_compat_timespec(&ts, tp))
75191 return -EFAULT;
75192@@ -767,7 +768,7 @@ long compat_sys_clock_adjtime(clockid_t which_clock,
75193
75194 oldfs = get_fs();
75195 set_fs(KERNEL_DS);
75196- ret = sys_clock_adjtime(which_clock, (struct timex __user *) &txc);
75197+ ret = sys_clock_adjtime(which_clock, (struct timex __force_user *) &txc);
75198 set_fs(oldfs);
75199
75200 err = compat_put_timex(utp, &txc);
75201@@ -787,7 +788,7 @@ long compat_sys_clock_getres(clockid_t which_clock,
75202 oldfs = get_fs();
75203 set_fs(KERNEL_DS);
75204 err = sys_clock_getres(which_clock,
75205- (struct timespec __user *) &ts);
75206+ (struct timespec __force_user *) &ts);
75207 set_fs(oldfs);
75208 if (!err && tp && put_compat_timespec(&ts, tp))
75209 return -EFAULT;
75210@@ -799,9 +800,9 @@ static long compat_clock_nanosleep_restart(struct restart_block *restart)
75211 long err;
75212 mm_segment_t oldfs;
75213 struct timespec tu;
75214- struct compat_timespec *rmtp = restart->nanosleep.compat_rmtp;
75215+ struct compat_timespec __user *rmtp = restart->nanosleep.compat_rmtp;
75216
75217- restart->nanosleep.rmtp = (struct timespec __user *) &tu;
75218+ restart->nanosleep.rmtp = (struct timespec __force_user *) &tu;
75219 oldfs = get_fs();
75220 set_fs(KERNEL_DS);
75221 err = clock_nanosleep_restart(restart);
75222@@ -833,8 +834,8 @@ long compat_sys_clock_nanosleep(clockid_t which_clock, int flags,
75223 oldfs = get_fs();
75224 set_fs(KERNEL_DS);
75225 err = sys_clock_nanosleep(which_clock, flags,
75226- (struct timespec __user *) &in,
75227- (struct timespec __user *) &out);
75228+ (struct timespec __force_user *) &in,
75229+ (struct timespec __force_user *) &out);
75230 set_fs(oldfs);
75231
75232 if ((err == -ERESTART_RESTARTBLOCK) && rmtp &&
75233diff --git a/kernel/configs.c b/kernel/configs.c
75234index c18b1f1..b9a0132 100644
75235--- a/kernel/configs.c
75236+++ b/kernel/configs.c
75237@@ -74,8 +74,19 @@ static int __init ikconfig_init(void)
75238 struct proc_dir_entry *entry;
75239
75240 /* create the current config file */
75241+#if defined(CONFIG_GRKERNSEC_PROC_ADD) || defined(CONFIG_GRKERNSEC_HIDESYM)
75242+#if defined(CONFIG_GRKERNSEC_PROC_USER) || defined(CONFIG_GRKERNSEC_HIDESYM)
75243+ entry = proc_create("config.gz", S_IFREG | S_IRUSR, NULL,
75244+ &ikconfig_file_ops);
75245+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
75246+ entry = proc_create("config.gz", S_IFREG | S_IRUSR | S_IRGRP, NULL,
75247+ &ikconfig_file_ops);
75248+#endif
75249+#else
75250 entry = proc_create("config.gz", S_IFREG | S_IRUGO, NULL,
75251 &ikconfig_file_ops);
75252+#endif
75253+
75254 if (!entry)
75255 return -ENOMEM;
75256
75257diff --git a/kernel/cred.c b/kernel/cred.c
75258index e0573a4..3874e41 100644
75259--- a/kernel/cred.c
75260+++ b/kernel/cred.c
75261@@ -164,6 +164,16 @@ void exit_creds(struct task_struct *tsk)
75262 validate_creds(cred);
75263 alter_cred_subscribers(cred, -1);
75264 put_cred(cred);
75265+
75266+#ifdef CONFIG_GRKERNSEC_SETXID
75267+ cred = (struct cred *) tsk->delayed_cred;
75268+ if (cred != NULL) {
75269+ tsk->delayed_cred = NULL;
75270+ validate_creds(cred);
75271+ alter_cred_subscribers(cred, -1);
75272+ put_cred(cred);
75273+ }
75274+#endif
75275 }
75276
75277 /**
75278@@ -411,7 +421,7 @@ static bool cred_cap_issubset(const struct cred *set, const struct cred *subset)
75279 * Always returns 0 thus allowing this function to be tail-called at the end
75280 * of, say, sys_setgid().
75281 */
75282-int commit_creds(struct cred *new)
75283+static int __commit_creds(struct cred *new)
75284 {
75285 struct task_struct *task = current;
75286 const struct cred *old = task->real_cred;
75287@@ -430,6 +440,8 @@ int commit_creds(struct cred *new)
75288
75289 get_cred(new); /* we will require a ref for the subj creds too */
75290
75291+ gr_set_role_label(task, new->uid, new->gid);
75292+
75293 /* dumpability changes */
75294 if (!uid_eq(old->euid, new->euid) ||
75295 !gid_eq(old->egid, new->egid) ||
75296@@ -479,6 +491,102 @@ int commit_creds(struct cred *new)
75297 put_cred(old);
75298 return 0;
75299 }
75300+#ifdef CONFIG_GRKERNSEC_SETXID
75301+extern int set_user(struct cred *new);
75302+
75303+void gr_delayed_cred_worker(void)
75304+{
75305+ const struct cred *new = current->delayed_cred;
75306+ struct cred *ncred;
75307+
75308+ current->delayed_cred = NULL;
75309+
75310+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID) && new != NULL) {
75311+		// drop the reference taken with get_cred() when this cred was queued
75312+ put_cred(new);
75313+ return;
75314+ } else if (new == NULL)
75315+ return;
75316+
75317+ ncred = prepare_creds();
75318+ if (!ncred)
75319+ goto die;
75320+ // uids
75321+ ncred->uid = new->uid;
75322+ ncred->euid = new->euid;
75323+ ncred->suid = new->suid;
75324+ ncred->fsuid = new->fsuid;
75325+ // gids
75326+ ncred->gid = new->gid;
75327+ ncred->egid = new->egid;
75328+ ncred->sgid = new->sgid;
75329+ ncred->fsgid = new->fsgid;
75330+ // groups
75331+ if (set_groups(ncred, new->group_info) < 0) {
75332+ abort_creds(ncred);
75333+ goto die;
75334+ }
75335+ // caps
75336+ ncred->securebits = new->securebits;
75337+ ncred->cap_inheritable = new->cap_inheritable;
75338+ ncred->cap_permitted = new->cap_permitted;
75339+ ncred->cap_effective = new->cap_effective;
75340+ ncred->cap_bset = new->cap_bset;
75341+
75342+ if (set_user(ncred)) {
75343+ abort_creds(ncred);
75344+ goto die;
75345+ }
75346+
75347+	// drop the reference taken with get_cred() when this cred was queued
75348+ put_cred(new);
75349+
75350+ __commit_creds(ncred);
75351+ return;
75352+die:
75353+	// drop the reference taken with get_cred() when this cred was queued
75354+ put_cred(new);
75355+ do_group_exit(SIGKILL);
75356+}
75357+#endif
75358+
75359+int commit_creds(struct cred *new)
75360+{
75361+#ifdef CONFIG_GRKERNSEC_SETXID
75362+ int ret;
75363+ int schedule_it = 0;
75364+ struct task_struct *t;
75365+
75366+ /* we won't get called with tasklist_lock held for writing
75367+ and interrupts disabled as the cred struct in that case is
75368+ init_cred
75369+ */
75370+ if (grsec_enable_setxid && !current_is_single_threaded() &&
75371+ uid_eq(current_uid(), GLOBAL_ROOT_UID) &&
75372+ !uid_eq(new->uid, GLOBAL_ROOT_UID)) {
75373+ schedule_it = 1;
75374+ }
75375+ ret = __commit_creds(new);
75376+ if (schedule_it) {
75377+ rcu_read_lock();
75378+ read_lock(&tasklist_lock);
75379+ for (t = next_thread(current); t != current;
75380+ t = next_thread(t)) {
75381+ if (t->delayed_cred == NULL) {
75382+ t->delayed_cred = get_cred(new);
75383+ set_tsk_thread_flag(t, TIF_GRSEC_SETXID);
75384+ set_tsk_need_resched(t);
75385+ }
75386+ }
75387+ read_unlock(&tasklist_lock);
75388+ rcu_read_unlock();
75389+ }
75390+ return ret;
75391+#else
75392+ return __commit_creds(new);
75393+#endif
75394+}
75395+
75396 EXPORT_SYMBOL(commit_creds);
75397
75398 /**
75399diff --git a/kernel/debug/debug_core.c b/kernel/debug/debug_core.c
75400index 0506d44..2c20034 100644
75401--- a/kernel/debug/debug_core.c
75402+++ b/kernel/debug/debug_core.c
75403@@ -123,7 +123,7 @@ static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
75404 */
75405 static atomic_t masters_in_kgdb;
75406 static atomic_t slaves_in_kgdb;
75407-static atomic_t kgdb_break_tasklet_var;
75408+static atomic_unchecked_t kgdb_break_tasklet_var;
75409 atomic_t kgdb_setting_breakpoint;
75410
75411 struct task_struct *kgdb_usethread;
75412@@ -133,7 +133,7 @@ int kgdb_single_step;
75413 static pid_t kgdb_sstep_pid;
75414
75415 /* to keep track of the CPU which is doing the single stepping*/
75416-atomic_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
75417+atomic_unchecked_t kgdb_cpu_doing_single_step = ATOMIC_INIT(-1);
75418
75419 /*
75420 * If you are debugging a problem where roundup (the collection of
75421@@ -541,7 +541,7 @@ return_normal:
75422 * kernel will only try for the value of sstep_tries before
75423 * giving up and continuing on.
75424 */
75425- if (atomic_read(&kgdb_cpu_doing_single_step) != -1 &&
75426+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1 &&
75427 (kgdb_info[cpu].task &&
75428 kgdb_info[cpu].task->pid != kgdb_sstep_pid) && --sstep_tries) {
75429 atomic_set(&kgdb_active, -1);
75430@@ -635,8 +635,8 @@ cpu_master_loop:
75431 }
75432
75433 kgdb_restore:
75434- if (atomic_read(&kgdb_cpu_doing_single_step) != -1) {
75435- int sstep_cpu = atomic_read(&kgdb_cpu_doing_single_step);
75436+ if (atomic_read_unchecked(&kgdb_cpu_doing_single_step) != -1) {
75437+ int sstep_cpu = atomic_read_unchecked(&kgdb_cpu_doing_single_step);
75438 if (kgdb_info[sstep_cpu].task)
75439 kgdb_sstep_pid = kgdb_info[sstep_cpu].task->pid;
75440 else
75441@@ -888,18 +888,18 @@ static void kgdb_unregister_callbacks(void)
75442 static void kgdb_tasklet_bpt(unsigned long ing)
75443 {
75444 kgdb_breakpoint();
75445- atomic_set(&kgdb_break_tasklet_var, 0);
75446+ atomic_set_unchecked(&kgdb_break_tasklet_var, 0);
75447 }
75448
75449 static DECLARE_TASKLET(kgdb_tasklet_breakpoint, kgdb_tasklet_bpt, 0);
75450
75451 void kgdb_schedule_breakpoint(void)
75452 {
75453- if (atomic_read(&kgdb_break_tasklet_var) ||
75454+ if (atomic_read_unchecked(&kgdb_break_tasklet_var) ||
75455 atomic_read(&kgdb_active) != -1 ||
75456 atomic_read(&kgdb_setting_breakpoint))
75457 return;
75458- atomic_inc(&kgdb_break_tasklet_var);
75459+ atomic_inc_unchecked(&kgdb_break_tasklet_var);
75460 tasklet_schedule(&kgdb_tasklet_breakpoint);
75461 }
75462 EXPORT_SYMBOL_GPL(kgdb_schedule_breakpoint);
75463diff --git a/kernel/debug/kdb/kdb_main.c b/kernel/debug/kdb/kdb_main.c
75464index 00eb8f7..d7e3244 100644
75465--- a/kernel/debug/kdb/kdb_main.c
75466+++ b/kernel/debug/kdb/kdb_main.c
75467@@ -1974,7 +1974,7 @@ static int kdb_lsmod(int argc, const char **argv)
75468 continue;
75469
75470 kdb_printf("%-20s%8u 0x%p ", mod->name,
75471- mod->core_size, (void *)mod);
75472+ mod->core_size_rx + mod->core_size_rw, (void *)mod);
75473 #ifdef CONFIG_MODULE_UNLOAD
75474 kdb_printf("%4ld ", module_refcount(mod));
75475 #endif
75476@@ -1984,7 +1984,7 @@ static int kdb_lsmod(int argc, const char **argv)
75477 kdb_printf(" (Loading)");
75478 else
75479 kdb_printf(" (Live)");
75480- kdb_printf(" 0x%p", mod->module_core);
75481+ kdb_printf(" 0x%p 0x%p", mod->module_core_rx, mod->module_core_rw);
75482
75483 #ifdef CONFIG_MODULE_UNLOAD
75484 {
75485diff --git a/kernel/events/core.c b/kernel/events/core.c
75486index e76e495..cbfe63a 100644
75487--- a/kernel/events/core.c
75488+++ b/kernel/events/core.c
75489@@ -156,8 +156,15 @@ static struct srcu_struct pmus_srcu;
75490 * 0 - disallow raw tracepoint access for unpriv
75491 * 1 - disallow cpu events for unpriv
75492 * 2 - disallow kernel profiling for unpriv
75493+ * 3 - disallow all unpriv perf event use
75494 */
75495-int sysctl_perf_event_paranoid __read_mostly = 1;
75496+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
75497+int sysctl_perf_event_legitimately_concerned __read_mostly = 3;
75498+#elif defined(CONFIG_GRKERNSEC_HIDESYM)
75499+int sysctl_perf_event_legitimately_concerned __read_mostly = 2;
75500+#else
75501+int sysctl_perf_event_legitimately_concerned __read_mostly = 1;
75502+#endif
75503
75504 /* Minimum for 512 kiB + 1 user control page */
75505 int sysctl_perf_event_mlock __read_mostly = 512 + (PAGE_SIZE / 1024); /* 'free' kiB per user */
75506@@ -184,7 +191,7 @@ int perf_proc_update_handler(struct ctl_table *table, int write,
75507 return 0;
75508 }
75509
75510-static atomic64_t perf_event_id;
75511+static atomic64_unchecked_t perf_event_id;
75512
75513 static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
75514 enum event_type_t event_type);
75515@@ -2747,7 +2754,7 @@ static void __perf_event_read(void *info)
75516
75517 static inline u64 perf_event_count(struct perf_event *event)
75518 {
75519- return local64_read(&event->count) + atomic64_read(&event->child_count);
75520+ return local64_read(&event->count) + atomic64_read_unchecked(&event->child_count);
75521 }
75522
75523 static u64 perf_event_read(struct perf_event *event)
75524@@ -3093,9 +3100,9 @@ u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
75525 mutex_lock(&event->child_mutex);
75526 total += perf_event_read(event);
75527 *enabled += event->total_time_enabled +
75528- atomic64_read(&event->child_total_time_enabled);
75529+ atomic64_read_unchecked(&event->child_total_time_enabled);
75530 *running += event->total_time_running +
75531- atomic64_read(&event->child_total_time_running);
75532+ atomic64_read_unchecked(&event->child_total_time_running);
75533
75534 list_for_each_entry(child, &event->child_list, child_list) {
75535 total += perf_event_read(child);
75536@@ -3481,10 +3488,10 @@ void perf_event_update_userpage(struct perf_event *event)
75537 userpg->offset -= local64_read(&event->hw.prev_count);
75538
75539 userpg->time_enabled = enabled +
75540- atomic64_read(&event->child_total_time_enabled);
75541+ atomic64_read_unchecked(&event->child_total_time_enabled);
75542
75543 userpg->time_running = running +
75544- atomic64_read(&event->child_total_time_running);
75545+ atomic64_read_unchecked(&event->child_total_time_running);
75546
75547 arch_perf_update_userpage(userpg, now);
75548
75549@@ -4034,7 +4041,7 @@ perf_output_sample_ustack(struct perf_output_handle *handle, u64 dump_size,
75550
75551 /* Data. */
75552 sp = perf_user_stack_pointer(regs);
75553- rem = __output_copy_user(handle, (void *) sp, dump_size);
75554+ rem = __output_copy_user(handle, (void __user *) sp, dump_size);
75555 dyn_size = dump_size - rem;
75556
75557 perf_output_skip(handle, rem);
75558@@ -4122,11 +4129,11 @@ static void perf_output_read_one(struct perf_output_handle *handle,
75559 values[n++] = perf_event_count(event);
75560 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
75561 values[n++] = enabled +
75562- atomic64_read(&event->child_total_time_enabled);
75563+ atomic64_read_unchecked(&event->child_total_time_enabled);
75564 }
75565 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
75566 values[n++] = running +
75567- atomic64_read(&event->child_total_time_running);
75568+ atomic64_read_unchecked(&event->child_total_time_running);
75569 }
75570 if (read_format & PERF_FORMAT_ID)
75571 values[n++] = primary_event_id(event);
75572@@ -4835,12 +4842,12 @@ static void perf_event_mmap_event(struct perf_mmap_event *mmap_event)
75573 * need to add enough zero bytes after the string to handle
75574 * the 64bit alignment we do later.
75575 */
75576- buf = kzalloc(PATH_MAX + sizeof(u64), GFP_KERNEL);
75577+ buf = kzalloc(PATH_MAX, GFP_KERNEL);
75578 if (!buf) {
75579 name = strncpy(tmp, "//enomem", sizeof(tmp));
75580 goto got_name;
75581 }
75582- name = d_path(&file->f_path, buf, PATH_MAX);
75583+ name = d_path(&file->f_path, buf, PATH_MAX - sizeof(u64));
75584 if (IS_ERR(name)) {
75585 name = strncpy(tmp, "//toolong", sizeof(tmp));
75586 goto got_name;
75587@@ -6262,7 +6269,7 @@ perf_event_alloc(struct perf_event_attr *attr, int cpu,
75588 event->parent = parent_event;
75589
75590 event->ns = get_pid_ns(task_active_pid_ns(current));
75591- event->id = atomic64_inc_return(&perf_event_id);
75592+ event->id = atomic64_inc_return_unchecked(&perf_event_id);
75593
75594 event->state = PERF_EVENT_STATE_INACTIVE;
75595
75596@@ -6572,6 +6579,11 @@ SYSCALL_DEFINE5(perf_event_open,
75597 if (flags & ~PERF_FLAG_ALL)
75598 return -EINVAL;
75599
75600+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
75601+ if (perf_paranoid_any() && !capable(CAP_SYS_ADMIN))
75602+ return -EACCES;
75603+#endif
75604+
75605 err = perf_copy_attr(attr_uptr, &attr);
75606 if (err)
75607 return err;
75608@@ -6904,10 +6916,10 @@ static void sync_child_event(struct perf_event *child_event,
75609 /*
75610 * Add back the child's count to the parent's count:
75611 */
75612- atomic64_add(child_val, &parent_event->child_count);
75613- atomic64_add(child_event->total_time_enabled,
75614+ atomic64_add_unchecked(child_val, &parent_event->child_count);
75615+ atomic64_add_unchecked(child_event->total_time_enabled,
75616 &parent_event->child_total_time_enabled);
75617- atomic64_add(child_event->total_time_running,
75618+ atomic64_add_unchecked(child_event->total_time_running,
75619 &parent_event->child_total_time_running);
75620
75621 /*
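The events/core.c hunks rename sysctl_perf_event_paranoid and introduce level 3, at which perf_event_open() is refused outright for anything lacking CAP_SYS_ADMIN (the EACCES early-out added to the syscall above). Assuming the procfs name is unchanged by the rename, the effective level can be read back directly — level 3 is a grsecurity addition, not vanilla 3.10:

    #include <stdio.h>

    int main(void)
    {
        int level;
        FILE *f = fopen("/proc/sys/kernel/perf_event_paranoid", "r");
        if (!f) { perror("fopen"); return 1; }
        if (fscanf(f, "%d", &level) == 1)
            printf("perf_event_paranoid = %d%s\n", level,
                   level >= 3 ? " (unprivileged perf fully disabled)" : "");
        fclose(f);
        return 0;
    }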
75622diff --git a/kernel/events/internal.h b/kernel/events/internal.h
75623index ca65997..cc8cee4 100644
75624--- a/kernel/events/internal.h
75625+++ b/kernel/events/internal.h
75626@@ -81,10 +81,10 @@ static inline unsigned long perf_data_size(struct ring_buffer *rb)
75627 return rb->nr_pages << (PAGE_SHIFT + page_order(rb));
75628 }
75629
75630-#define DEFINE_OUTPUT_COPY(func_name, memcpy_func) \
75631+#define DEFINE_OUTPUT_COPY(func_name, memcpy_func, user) \
75632 static inline unsigned int \
75633 func_name(struct perf_output_handle *handle, \
75634- const void *buf, unsigned int len) \
75635+ const void user *buf, unsigned int len) \
75636 { \
75637 unsigned long size, written; \
75638 \
75639@@ -116,17 +116,17 @@ static inline int memcpy_common(void *dst, const void *src, size_t n)
75640 return n;
75641 }
75642
75643-DEFINE_OUTPUT_COPY(__output_copy, memcpy_common)
75644+DEFINE_OUTPUT_COPY(__output_copy, memcpy_common, )
75645
75646 #define MEMCPY_SKIP(dst, src, n) (n)
75647
75648-DEFINE_OUTPUT_COPY(__output_skip, MEMCPY_SKIP)
75649+DEFINE_OUTPUT_COPY(__output_skip, MEMCPY_SKIP, )
75650
75651 #ifndef arch_perf_out_copy_user
75652 #define arch_perf_out_copy_user __copy_from_user_inatomic
75653 #endif
75654
75655-DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user)
75656+DEFINE_OUTPUT_COPY(__output_copy_user, arch_perf_out_copy_user, __user)
75657
75658 /* Callchain handling */
75659 extern struct perf_callchain_entry *
75660diff --git a/kernel/exit.c b/kernel/exit.c
75661index 7bb73f9..d7978ed 100644
75662--- a/kernel/exit.c
75663+++ b/kernel/exit.c
75664@@ -172,6 +172,10 @@ void release_task(struct task_struct * p)
75665 struct task_struct *leader;
75666 int zap_leader;
75667 repeat:
75668+#ifdef CONFIG_NET
75669+ gr_del_task_from_ip_table(p);
75670+#endif
75671+
75672 /* don't need to get the RCU readlock here - the process is dead and
75673 * can't be modifying its own credentials. But shut RCU-lockdep up */
75674 rcu_read_lock();
75675@@ -340,7 +344,7 @@ int allow_signal(int sig)
75676 * know it'll be handled, so that they don't get converted to
75677 * SIGKILL or just silently dropped.
75678 */
75679- current->sighand->action[(sig)-1].sa.sa_handler = (void __user *)2;
75680+ current->sighand->action[(sig)-1].sa.sa_handler = (__force void __user *)2;
75681 recalc_sigpending();
75682 spin_unlock_irq(&current->sighand->siglock);
75683 return 0;
75684@@ -709,6 +713,8 @@ void do_exit(long code)
75685 struct task_struct *tsk = current;
75686 int group_dead;
75687
75688+ set_fs(USER_DS);
75689+
75690 profile_task_exit(tsk);
75691
75692 WARN_ON(blk_needs_flush_plug(tsk));
75693@@ -725,7 +731,6 @@ void do_exit(long code)
75694 * mm_release()->clear_child_tid() from writing to a user-controlled
75695 * kernel address.
75696 */
75697- set_fs(USER_DS);
75698
75699 ptrace_event(PTRACE_EVENT_EXIT, code);
75700
75701@@ -784,6 +789,9 @@ void do_exit(long code)
75702 tsk->exit_code = code;
75703 taskstats_exit(tsk, group_dead);
75704
75705+ gr_acl_handle_psacct(tsk, code);
75706+ gr_acl_handle_exit();
75707+
75708 exit_mm(tsk);
75709
75710 if (group_dead)
75711@@ -905,7 +913,7 @@ SYSCALL_DEFINE1(exit, int, error_code)
75712 * Take down every thread in the group. This is called by fatal signals
75713 * as well as by sys_exit_group (below).
75714 */
75715-void
75716+__noreturn void
75717 do_group_exit(int exit_code)
75718 {
75719 struct signal_struct *sig = current->signal;
75720diff --git a/kernel/fork.c b/kernel/fork.c
75721index 987b28a..e0102b2 100644
75722--- a/kernel/fork.c
75723+++ b/kernel/fork.c
75724@@ -319,7 +319,7 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
75725 *stackend = STACK_END_MAGIC; /* for overflow detection */
75726
75727 #ifdef CONFIG_CC_STACKPROTECTOR
75728- tsk->stack_canary = get_random_int();
75729+ tsk->stack_canary = pax_get_random_long();
75730 #endif
75731
75732 /*
75733@@ -345,13 +345,81 @@ free_tsk:
75734 }
75735
75736 #ifdef CONFIG_MMU
75737+static struct vm_area_struct *dup_vma(struct mm_struct *mm, struct mm_struct *oldmm, struct vm_area_struct *mpnt)
75738+{
75739+ struct vm_area_struct *tmp;
75740+ unsigned long charge;
75741+ struct mempolicy *pol;
75742+ struct file *file;
75743+
75744+ charge = 0;
75745+ if (mpnt->vm_flags & VM_ACCOUNT) {
75746+ unsigned long len = vma_pages(mpnt);
75747+
75748+ if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
75749+ goto fail_nomem;
75750+ charge = len;
75751+ }
75752+ tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
75753+ if (!tmp)
75754+ goto fail_nomem;
75755+ *tmp = *mpnt;
75756+ tmp->vm_mm = mm;
75757+ INIT_LIST_HEAD(&tmp->anon_vma_chain);
75758+ pol = mpol_dup(vma_policy(mpnt));
75759+ if (IS_ERR(pol))
75760+ goto fail_nomem_policy;
75761+ vma_set_policy(tmp, pol);
75762+ if (anon_vma_fork(tmp, mpnt))
75763+ goto fail_nomem_anon_vma_fork;
75764+ tmp->vm_flags &= ~VM_LOCKED;
75765+ tmp->vm_next = tmp->vm_prev = NULL;
75766+ tmp->vm_mirror = NULL;
75767+ file = tmp->vm_file;
75768+ if (file) {
75769+ struct inode *inode = file_inode(file);
75770+ struct address_space *mapping = file->f_mapping;
75771+
75772+ get_file(file);
75773+ if (tmp->vm_flags & VM_DENYWRITE)
75774+ atomic_dec(&inode->i_writecount);
75775+ mutex_lock(&mapping->i_mmap_mutex);
75776+ if (tmp->vm_flags & VM_SHARED)
75777+ mapping->i_mmap_writable++;
75778+ flush_dcache_mmap_lock(mapping);
75779+ /* insert tmp into the share list, just after mpnt */
75780+ if (unlikely(tmp->vm_flags & VM_NONLINEAR))
75781+ vma_nonlinear_insert(tmp, &mapping->i_mmap_nonlinear);
75782+ else
75783+ vma_interval_tree_insert_after(tmp, mpnt, &mapping->i_mmap);
75784+ flush_dcache_mmap_unlock(mapping);
75785+ mutex_unlock(&mapping->i_mmap_mutex);
75786+ }
75787+
75788+ /*
75789+ * Clear hugetlb-related page reserves for children. This only
75790+ * affects MAP_PRIVATE mappings. Faults generated by the child
75791+ * are not guaranteed to succeed, even if read-only
75792+ */
75793+ if (is_vm_hugetlb_page(tmp))
75794+ reset_vma_resv_huge_pages(tmp);
75795+
75796+ return tmp;
75797+
75798+fail_nomem_anon_vma_fork:
75799+ mpol_put(pol);
75800+fail_nomem_policy:
75801+ kmem_cache_free(vm_area_cachep, tmp);
75802+fail_nomem:
75803+ vm_unacct_memory(charge);
75804+ return NULL;
75805+}
75806+
75807 static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
75808 {
75809 struct vm_area_struct *mpnt, *tmp, *prev, **pprev;
75810 struct rb_node **rb_link, *rb_parent;
75811 int retval;
75812- unsigned long charge;
75813- struct mempolicy *pol;
75814
75815 uprobe_start_dup_mmap();
75816 down_write(&oldmm->mmap_sem);
75817@@ -365,8 +433,8 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
75818 mm->locked_vm = 0;
75819 mm->mmap = NULL;
75820 mm->mmap_cache = NULL;
75821- mm->free_area_cache = oldmm->mmap_base;
75822- mm->cached_hole_size = ~0UL;
75823+ mm->free_area_cache = oldmm->free_area_cache;
75824+ mm->cached_hole_size = oldmm->cached_hole_size;
75825 mm->map_count = 0;
75826 cpumask_clear(mm_cpumask(mm));
75827 mm->mm_rb = RB_ROOT;
75828@@ -382,57 +450,15 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
75829
75830 prev = NULL;
75831 for (mpnt = oldmm->mmap; mpnt; mpnt = mpnt->vm_next) {
75832- struct file *file;
75833-
75834 if (mpnt->vm_flags & VM_DONTCOPY) {
75835 vm_stat_account(mm, mpnt->vm_flags, mpnt->vm_file,
75836 -vma_pages(mpnt));
75837 continue;
75838 }
75839- charge = 0;
75840- if (mpnt->vm_flags & VM_ACCOUNT) {
75841- unsigned long len = vma_pages(mpnt);
75842-
75843- if (security_vm_enough_memory_mm(oldmm, len)) /* sic */
75844- goto fail_nomem;
75845- charge = len;
75846- }
75847- tmp = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
75848- if (!tmp)
75849- goto fail_nomem;
75850- *tmp = *mpnt;
75851- INIT_LIST_HEAD(&tmp->anon_vma_chain);
75852- pol = mpol_dup(vma_policy(mpnt));
75853- retval = PTR_ERR(pol);
75854- if (IS_ERR(pol))
75855- goto fail_nomem_policy;
75856- vma_set_policy(tmp, pol);
75857- tmp->vm_mm = mm;
75858- if (anon_vma_fork(tmp, mpnt))
75859- goto fail_nomem_anon_vma_fork;
75860- tmp->vm_flags &= ~VM_LOCKED;
75861- tmp->vm_next = tmp->vm_prev = NULL;
75862- file = tmp->vm_file;
75863- if (file) {
75864- struct inode *inode = file_inode(file);
75865- struct address_space *mapping = file->f_mapping;
75866-
75867- get_file(file);
75868- if (tmp->vm_flags & VM_DENYWRITE)
75869- atomic_dec(&inode->i_writecount);
75870- mutex_lock(&mapping->i_mmap_mutex);
75871- if (tmp->vm_flags & VM_SHARED)
75872- mapping->i_mmap_writable++;
75873- flush_dcache_mmap_lock(mapping);
75874- /* insert tmp into the share list, just after mpnt */
75875- if (unlikely(tmp->vm_flags & VM_NONLINEAR))
75876- vma_nonlinear_insert(tmp,
75877- &mapping->i_mmap_nonlinear);
75878- else
75879- vma_interval_tree_insert_after(tmp, mpnt,
75880- &mapping->i_mmap);
75881- flush_dcache_mmap_unlock(mapping);
75882- mutex_unlock(&mapping->i_mmap_mutex);
75883+ tmp = dup_vma(mm, oldmm, mpnt);
75884+ if (!tmp) {
75885+ retval = -ENOMEM;
75886+ goto out;
75887 }
75888
75889 /*
75890@@ -464,6 +490,31 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm)
75891 if (retval)
75892 goto out;
75893 }
75894+
75895+#ifdef CONFIG_PAX_SEGMEXEC
75896+ if (oldmm->pax_flags & MF_PAX_SEGMEXEC) {
75897+ struct vm_area_struct *mpnt_m;
75898+
75899+ for (mpnt = oldmm->mmap, mpnt_m = mm->mmap; mpnt; mpnt = mpnt->vm_next, mpnt_m = mpnt_m->vm_next) {
75900+ BUG_ON(!mpnt_m || mpnt_m->vm_mirror || mpnt->vm_mm != oldmm || mpnt_m->vm_mm != mm);
75901+
75902+ if (!mpnt->vm_mirror)
75903+ continue;
75904+
75905+ if (mpnt->vm_end <= SEGMEXEC_TASK_SIZE) {
75906+ BUG_ON(mpnt->vm_mirror->vm_mirror != mpnt);
75907+ mpnt->vm_mirror = mpnt_m;
75908+ } else {
75909+ BUG_ON(mpnt->vm_mirror->vm_mirror == mpnt || mpnt->vm_mirror->vm_mirror->vm_mm != mm);
75910+ mpnt_m->vm_mirror = mpnt->vm_mirror->vm_mirror;
75911+ mpnt_m->vm_mirror->vm_mirror = mpnt_m;
75912+ mpnt->vm_mirror->vm_mirror = mpnt;
75913+ }
75914+ }
75915+ BUG_ON(mpnt_m);
75916+ }
75917+#endif
75918+
75919 /* a new mm has just been created */
75920 arch_dup_mmap(oldmm, mm);
75921 retval = 0;
75922@@ -473,14 +524,6 @@ out:
75923 up_write(&oldmm->mmap_sem);
75924 uprobe_end_dup_mmap();
75925 return retval;
75926-fail_nomem_anon_vma_fork:
75927- mpol_put(pol);
75928-fail_nomem_policy:
75929- kmem_cache_free(vm_area_cachep, tmp);
75930-fail_nomem:
75931- retval = -ENOMEM;
75932- vm_unacct_memory(charge);
75933- goto out;
75934 }
75935
75936 static inline int mm_alloc_pgd(struct mm_struct *mm)
75937@@ -695,8 +738,8 @@ struct mm_struct *mm_access(struct task_struct *task, unsigned int mode)
75938 return ERR_PTR(err);
75939
75940 mm = get_task_mm(task);
75941- if (mm && mm != current->mm &&
75942- !ptrace_may_access(task, mode)) {
75943+ if (mm && ((mm != current->mm && !ptrace_may_access(task, mode)) ||
75944+ (mode == PTRACE_MODE_ATTACH && (gr_handle_proc_ptrace(task) || gr_acl_handle_procpidmem(task))))) {
75945 mmput(mm);
75946 mm = ERR_PTR(-EACCES);
75947 }
75948@@ -918,13 +961,20 @@ static int copy_fs(unsigned long clone_flags, struct task_struct *tsk)
75949 spin_unlock(&fs->lock);
75950 return -EAGAIN;
75951 }
75952- fs->users++;
75953+ atomic_inc(&fs->users);
75954 spin_unlock(&fs->lock);
75955 return 0;
75956 }
75957 tsk->fs = copy_fs_struct(fs);
75958 if (!tsk->fs)
75959 return -ENOMEM;
75960+ /* Carry through gr_chroot_dentry and is_chrooted instead
75961+ of recomputing it here. Already copied when the task struct
75962+ is duplicated. This allows pivot_root to not be treated as
75963+ a chroot
75964+ */
75965+ //gr_set_chroot_entries(tsk, &tsk->fs->root);
75966+
75967 return 0;
75968 }
75969
75970@@ -1197,10 +1247,13 @@ static struct task_struct *copy_process(unsigned long clone_flags,
75971 DEBUG_LOCKS_WARN_ON(!p->softirqs_enabled);
75972 #endif
75973 retval = -EAGAIN;
75974+
75975+ gr_learn_resource(p, RLIMIT_NPROC, atomic_read(&p->real_cred->user->processes), 0);
75976+
75977 if (atomic_read(&p->real_cred->user->processes) >=
75978 task_rlimit(p, RLIMIT_NPROC)) {
75979- if (!capable(CAP_SYS_ADMIN) && !capable(CAP_SYS_RESOURCE) &&
75980- p->real_cred->user != INIT_USER)
75981+ if (p->real_cred->user != INIT_USER &&
75982+ !capable(CAP_SYS_RESOURCE) && !capable(CAP_SYS_ADMIN))
75983 goto bad_fork_free;
75984 }
75985 current->flags &= ~PF_NPROC_EXCEEDED;
75986@@ -1446,6 +1499,11 @@ static struct task_struct *copy_process(unsigned long clone_flags,
75987 goto bad_fork_free_pid;
75988 }
75989
75990+ /* synchronizes with gr_set_acls()
75991+ we need to call this past the point of no return for fork()
75992+ */
75993+ gr_copy_label(p);
75994+
75995 if (clone_flags & CLONE_THREAD) {
75996 current->signal->nr_threads++;
75997 atomic_inc(&current->signal->live);
75998@@ -1529,6 +1587,8 @@ bad_fork_cleanup_count:
75999 bad_fork_free:
76000 free_task(p);
76001 fork_out:
76002+ gr_log_forkfail(retval);
76003+
76004 return ERR_PTR(retval);
76005 }
76006
76007@@ -1579,6 +1639,23 @@ long do_fork(unsigned long clone_flags,
76008 return -EINVAL;
76009 }
76010
76011+#ifdef CONFIG_GRKERNSEC
76012+ if (clone_flags & CLONE_NEWUSER) {
76013+ /*
76014+ * This doesn't really inspire confidence:
76015+ * http://marc.info/?l=linux-kernel&m=135543612731939&w=2
76016+ * http://marc.info/?l=linux-kernel&m=135545831607095&w=2
76017+ * Increases kernel attack surface in areas developers
76018+ * previously cared little about ("low importance due
76019+		 * to requiring 'root' capability")
76020+ * To be removed when this code receives *proper* review
76021+ */
76022+ if (!capable(CAP_SYS_ADMIN) || !capable(CAP_SETUID) ||
76023+ !capable(CAP_SETGID))
76024+ return -EPERM;
76025+ }
76026+#endif
76027+
76028 /*
76029 * Determine whether and which event to report to ptracer. When
76030 * called from kernel_thread or CLONE_UNTRACED is explicitly
76031@@ -1613,6 +1690,8 @@ long do_fork(unsigned long clone_flags,
76032 if (clone_flags & CLONE_PARENT_SETTID)
76033 put_user(nr, parent_tidptr);
76034
76035+ gr_handle_brute_check();
76036+
76037 if (clone_flags & CLONE_VFORK) {
76038 p->vfork_done = &vfork;
76039 init_completion(&vfork);
76040@@ -1723,7 +1802,7 @@ void __init proc_caches_init(void)
76041 mm_cachep = kmem_cache_create("mm_struct",
76042 sizeof(struct mm_struct), ARCH_MIN_MMSTRUCT_ALIGN,
76043 SLAB_HWCACHE_ALIGN|SLAB_PANIC|SLAB_NOTRACK, NULL);
76044- vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC);
76045+ vm_area_cachep = KMEM_CACHE(vm_area_struct, SLAB_PANIC | SLAB_NO_SANITIZE);
76046 mmap_init();
76047 nsproxy_cache_init();
76048 }
76049@@ -1763,7 +1842,7 @@ static int unshare_fs(unsigned long unshare_flags, struct fs_struct **new_fsp)
76050 return 0;
76051
76052 /* don't need lock here; in the worst case we'll do useless copy */
76053- if (fs->users == 1)
76054+ if (atomic_read(&fs->users) == 1)
76055 return 0;
76056
76057 *new_fsp = copy_fs_struct(fs);
76058@@ -1875,7 +1954,8 @@ SYSCALL_DEFINE1(unshare, unsigned long, unshare_flags)
76059 fs = current->fs;
76060 spin_lock(&fs->lock);
76061 current->fs = new_fs;
76062- if (--fs->users)
76063+ gr_set_chroot_entries(current, &current->fs->root);
76064+ if (atomic_dec_return(&fs->users))
76065 new_fs = NULL;
76066 else
76067 new_fs = fs;
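The do_fork() hunk makes unprivileged user namespaces a hard EPERM under CONFIG_GRKERNSEC, citing the then-fresh vulnerability discussions linked in the comment. Whether a running kernel permits them can be tested directly with unshare(2) — a probe, assuming glibc exposes unshare() and CLONE_NEWUSER:

    #define _GNU_SOURCE
    #include <errno.h>
    #include <sched.h>
    #include <stdio.h>

    int main(void)
    {
        /* On a grsec kernel (or with distro knobs such as
         * kernel.unprivileged_userns_clone=0) this fails with EPERM
         * for ordinary users. */
        if (unshare(CLONE_NEWUSER) != 0) {
            perror("unshare(CLONE_NEWUSER)");
            return 1;
        }
        printf("unprivileged user namespaces are allowed\n");
        return 0;
    }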
76068diff --git a/kernel/futex.c b/kernel/futex.c
76069index 49dacfb..5c6b450 100644
76070--- a/kernel/futex.c
76071+++ b/kernel/futex.c
76072@@ -54,6 +54,7 @@
76073 #include <linux/mount.h>
76074 #include <linux/pagemap.h>
76075 #include <linux/syscalls.h>
76076+#include <linux/ptrace.h>
76077 #include <linux/signal.h>
76078 #include <linux/export.h>
76079 #include <linux/magic.h>
76080@@ -242,6 +243,11 @@ get_futex_key(u32 __user *uaddr, int fshared, union futex_key *key, int rw)
76081 struct page *page, *page_head;
76082 int err, ro = 0;
76083
76084+#ifdef CONFIG_PAX_SEGMEXEC
76085+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && address >= SEGMEXEC_TASK_SIZE)
76086+ return -EFAULT;
76087+#endif
76088+
76089 /*
76090 * The futex address must be "naturally" aligned.
76091 */
76092@@ -2733,6 +2739,7 @@ static int __init futex_init(void)
76093 {
76094 u32 curval;
76095 int i;
76096+ mm_segment_t oldfs;
76097
76098 /*
76099 * This will fail and we want it. Some arch implementations do
76100@@ -2744,8 +2751,11 @@ static int __init futex_init(void)
76101 * implementation, the non-functional ones will return
76102 * -ENOSYS.
76103 */
76104+ oldfs = get_fs();
76105+ set_fs(USER_DS);
76106 if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT)
76107 futex_cmpxchg_enabled = 1;
76108+ set_fs(oldfs);
76109
76110 for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
76111 plist_head_init(&futex_queues[i].chain);
76112diff --git a/kernel/futex_compat.c b/kernel/futex_compat.c
76113index f9f44fd..29885e4 100644
76114--- a/kernel/futex_compat.c
76115+++ b/kernel/futex_compat.c
76116@@ -32,7 +32,7 @@ fetch_robust_entry(compat_uptr_t *uentry, struct robust_list __user **entry,
76117 return 0;
76118 }
76119
76120-static void __user *futex_uaddr(struct robust_list __user *entry,
76121+static void __user __intentional_overflow(-1) *futex_uaddr(struct robust_list __user *entry,
76122 compat_long_t futex_offset)
76123 {
76124 compat_uptr_t base = ptr_to_compat(entry);
76125diff --git a/kernel/gcov/base.c b/kernel/gcov/base.c
76126index 9b22d03..6295b62 100644
76127--- a/kernel/gcov/base.c
76128+++ b/kernel/gcov/base.c
76129@@ -102,11 +102,6 @@ void gcov_enable_events(void)
76130 }
76131
76132 #ifdef CONFIG_MODULES
76133-static inline int within(void *addr, void *start, unsigned long size)
76134-{
76135- return ((addr >= start) && (addr < start + size));
76136-}
76137-
76138 /* Update list and generate events when modules are unloaded. */
76139 static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
76140 void *data)
76141@@ -121,7 +116,7 @@ static int gcov_module_notifier(struct notifier_block *nb, unsigned long event,
76142 prev = NULL;
76143 /* Remove entries located in module from linked list. */
76144 for (info = gcov_info_head; info; info = info->next) {
76145- if (within(info, mod->module_core, mod->core_size)) {
76146+ if (within_module_core_rw((unsigned long)info, mod)) {
76147 if (prev)
76148 prev->next = info->next;
76149 else
76150diff --git a/kernel/hrtimer.c b/kernel/hrtimer.c
76151index fd4b13b..09a5ccb 100644
76152--- a/kernel/hrtimer.c
76153+++ b/kernel/hrtimer.c
76154@@ -1430,7 +1430,7 @@ void hrtimer_peek_ahead_timers(void)
76155 local_irq_restore(flags);
76156 }
76157
76158-static void run_hrtimer_softirq(struct softirq_action *h)
76159+static void run_hrtimer_softirq(void)
76160 {
76161 struct hrtimer_cpu_base *cpu_base = &__get_cpu_var(hrtimer_bases);
76162
76163@@ -1772,7 +1772,7 @@ static int __cpuinit hrtimer_cpu_notify(struct notifier_block *self,
76164 return NOTIFY_OK;
76165 }
76166
76167-static struct notifier_block __cpuinitdata hrtimers_nb = {
76168+static struct notifier_block hrtimers_nb = {
76169 .notifier_call = hrtimer_cpu_notify,
76170 };
76171
76172diff --git a/kernel/irq_work.c b/kernel/irq_work.c
76173index 55fcce6..0e4cf34 100644
76174--- a/kernel/irq_work.c
76175+++ b/kernel/irq_work.c
76176@@ -189,12 +189,13 @@ static int irq_work_cpu_notify(struct notifier_block *self,
76177 return NOTIFY_OK;
76178 }
76179
76180-static struct notifier_block cpu_notify;
76181+static struct notifier_block cpu_notify = {
76182+ .notifier_call = irq_work_cpu_notify,
76183+ .priority = 0,
76184+};
76185
76186 static __init int irq_work_init_cpu_notifier(void)
76187 {
76188- cpu_notify.notifier_call = irq_work_cpu_notify;
76189- cpu_notify.priority = 0;
76190 register_cpu_notifier(&cpu_notify);
76191 return 0;
76192 }
76193diff --git a/kernel/jump_label.c b/kernel/jump_label.c
76194index 60f48fa..7f3a770 100644
76195--- a/kernel/jump_label.c
76196+++ b/kernel/jump_label.c
76197@@ -13,6 +13,7 @@
76198 #include <linux/sort.h>
76199 #include <linux/err.h>
76200 #include <linux/static_key.h>
76201+#include <linux/mm.h>
76202
76203 #ifdef HAVE_JUMP_LABEL
76204
76205@@ -50,7 +51,9 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
76206
76207 size = (((unsigned long)stop - (unsigned long)start)
76208 / sizeof(struct jump_entry));
76209+ pax_open_kernel();
76210 sort(start, size, sizeof(struct jump_entry), jump_label_cmp, NULL);
76211+ pax_close_kernel();
76212 }
76213
76214 static void jump_label_update(struct static_key *key, int enable);
76215@@ -357,10 +360,12 @@ static void jump_label_invalidate_module_init(struct module *mod)
76216 struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
76217 struct jump_entry *iter;
76218
76219+ pax_open_kernel();
76220 for (iter = iter_start; iter < iter_stop; iter++) {
76221 if (within_module_init(iter->code, mod))
76222 iter->code = 0;
76223 }
76224+ pax_close_kernel();
76225 }
76226
76227 static int
76228diff --git a/kernel/kallsyms.c b/kernel/kallsyms.c
76229index 3127ad5..159d880 100644
76230--- a/kernel/kallsyms.c
76231+++ b/kernel/kallsyms.c
76232@@ -11,6 +11,9 @@
76233 * Changed the compression method from stem compression to "table lookup"
76234 * compression (see scripts/kallsyms.c for a more complete description)
76235 */
76236+#ifdef CONFIG_GRKERNSEC_HIDESYM
76237+#define __INCLUDED_BY_HIDESYM 1
76238+#endif
76239 #include <linux/kallsyms.h>
76240 #include <linux/module.h>
76241 #include <linux/init.h>
76242@@ -53,12 +56,33 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
76243
76244 static inline int is_kernel_inittext(unsigned long addr)
76245 {
76246+ if (system_state != SYSTEM_BOOTING)
76247+ return 0;
76248+
76249 if (addr >= (unsigned long)_sinittext
76250 && addr <= (unsigned long)_einittext)
76251 return 1;
76252 return 0;
76253 }
76254
76255+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
76256+#ifdef CONFIG_MODULES
76257+static inline int is_module_text(unsigned long addr)
76258+{
76259+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END)
76260+ return 1;
76261+
76262+ addr = ktla_ktva(addr);
76263+ return (unsigned long)MODULES_EXEC_VADDR <= addr && addr <= (unsigned long)MODULES_EXEC_END;
76264+}
76265+#else
76266+static inline int is_module_text(unsigned long addr)
76267+{
76268+ return 0;
76269+}
76270+#endif
76271+#endif
76272+
76273 static inline int is_kernel_text(unsigned long addr)
76274 {
76275 if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext) ||
76276@@ -69,13 +93,28 @@ static inline int is_kernel_text(unsigned long addr)
76277
76278 static inline int is_kernel(unsigned long addr)
76279 {
76280+
76281+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
76282+ if (is_kernel_text(addr) || is_kernel_inittext(addr))
76283+ return 1;
76284+
76285+ if (ktla_ktva((unsigned long)_text) <= addr && addr < (unsigned long)_end)
76286+#else
76287 if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
76288+#endif
76289+
76290 return 1;
76291 return in_gate_area_no_mm(addr);
76292 }
76293
76294 static int is_ksym_addr(unsigned long addr)
76295 {
76296+
76297+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
76298+ if (is_module_text(addr))
76299+ return 0;
76300+#endif
76301+
76302 if (all_var)
76303 return is_kernel(addr);
76304
76305@@ -480,7 +519,6 @@ static unsigned long get_ksymbol_core(struct kallsym_iter *iter)
76306
76307 static void reset_iter(struct kallsym_iter *iter, loff_t new_pos)
76308 {
76309- iter->name[0] = '\0';
76310 iter->nameoff = get_symbol_offset(new_pos);
76311 iter->pos = new_pos;
76312 }
76313@@ -528,6 +566,11 @@ static int s_show(struct seq_file *m, void *p)
76314 {
76315 struct kallsym_iter *iter = m->private;
76316
76317+#ifdef CONFIG_GRKERNSEC_HIDESYM
76318+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID))
76319+ return 0;
76320+#endif
76321+
76322 /* Some debugging symbols have no name. Ignore them. */
76323 if (!iter->name[0])
76324 return 0;
76325@@ -541,6 +584,7 @@ static int s_show(struct seq_file *m, void *p)
76326 */
76327 type = iter->exported ? toupper(iter->type) :
76328 tolower(iter->type);
76329+
76330 seq_printf(m, "%pK %c %s\t[%s]\n", (void *)iter->value,
76331 type, iter->name, iter->module_name);
76332 } else
76333@@ -566,7 +610,7 @@ static int kallsyms_open(struct inode *inode, struct file *file)
76334 struct kallsym_iter *iter;
76335 int ret;
76336
76337- iter = kmalloc(sizeof(*iter), GFP_KERNEL);
76338+ iter = kzalloc(sizeof(*iter), GFP_KERNEL);
76339 if (!iter)
76340 return -ENOMEM;
76341 reset_iter(iter, 0);
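With GRKERNSEC_HIDESYM, s_show() returns 0 for non-root readers, so /proc/kallsyms appears empty to them; with the %pK conversion, addresses print as all zeros for readers restricted by kptr_restrict. A small probe that reads the first entry and reports whether the address has been censored (a sketch assuming the usual Linux /proc layout, not part of the patch):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
        FILE *f = fopen("/proc/kallsyms", "r");
        char addr[32], type[8], name[256];

        if (!f) { perror("fopen"); return 1; }
        if (fscanf(f, "%31s %7s %255s", addr, type, name) != 3) {
            puts("no entries visible (HIDESYM hides them from non-root)");
            fclose(f);
            return 0;
        }
        /* %pK prints 0000000000000000 for unprivileged readers */
        printf("%s %s %s -> %s\n", addr, type, name,
               strspn(addr, "0") == strlen(addr) ? "censored" : "visible");
        fclose(f);
        return 0;
    }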
76342diff --git a/kernel/kcmp.c b/kernel/kcmp.c
76343index e30ac0f..3528cac 100644
76344--- a/kernel/kcmp.c
76345+++ b/kernel/kcmp.c
76346@@ -99,6 +99,10 @@ SYSCALL_DEFINE5(kcmp, pid_t, pid1, pid_t, pid2, int, type,
76347 struct task_struct *task1, *task2;
76348 int ret;
76349
76350+#ifdef CONFIG_GRKERNSEC
76351+ return -ENOSYS;
76352+#endif
76353+
76354 rcu_read_lock();
76355
76356 /*
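Under CONFIG_GRKERNSEC the kcmp() syscall fails with -ENOSYS before doing any work, exactly as if the kernel had been built without it. A quick probe (assumes a libc whose headers define SYS_kcmp, i.e. Linux >= 3.5):

    #include <errno.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
        pid_t me = getpid();
        /* type 0 == KCMP_FILE: compare fd 0 with fd 0 in our own process */
        long r = syscall(SYS_kcmp, me, me, 0, 0, 0);

        if (r == -1 && errno == ENOSYS)
            puts("kcmp: ENOSYS (disabled, e.g. by CONFIG_GRKERNSEC)");
        else
            printf("kcmp returned %ld\n", r);
        return 0;
    }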
76357diff --git a/kernel/kexec.c b/kernel/kexec.c
76358index 59f7b55..4022f65 100644
76359--- a/kernel/kexec.c
76360+++ b/kernel/kexec.c
76361@@ -1041,7 +1041,8 @@ asmlinkage long compat_sys_kexec_load(unsigned long entry,
76362 unsigned long flags)
76363 {
76364 struct compat_kexec_segment in;
76365- struct kexec_segment out, __user *ksegments;
76366+ struct kexec_segment out;
76367+ struct kexec_segment __user *ksegments;
76368 unsigned long i, result;
76369
76370 /* Don't allow clients that don't understand the native
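The kexec change splits "struct kexec_segment out, __user *ksegments;" into two declarations: in C the qualifiers and the '*' bind per declarator, so mixing a plain object and an annotated pointer in one statement is easy to misread and confuses sparse's address-space checking. The classic form of the pitfall, in plain C:

    #include <stdio.h>

    int main(void)
    {
        /* Looks like two pointers, but only a is one: the '*' binds
         * to the declarator, not to the type. */
        int *a, b;

        b = 42;
        a = &b;
        printf("*a = %d, b = %d\n", *a, b);

        /* Clearer: one declaration per line, as the patch does. */
        int *p;
        int q = 7;
        p = &q;
        printf("*p = %d\n", *p);
        return 0;
    }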
76371diff --git a/kernel/kmod.c b/kernel/kmod.c
76372index 8241906..d625f2c 100644
76373--- a/kernel/kmod.c
76374+++ b/kernel/kmod.c
76375@@ -75,7 +75,7 @@ static void free_modprobe_argv(struct subprocess_info *info)
76376 kfree(info->argv);
76377 }
76378
76379-static int call_modprobe(char *module_name, int wait)
76380+static int call_modprobe(char *module_name, char *module_param, int wait)
76381 {
76382 struct subprocess_info *info;
76383 static char *envp[] = {
76384@@ -85,7 +85,7 @@ static int call_modprobe(char *module_name, int wait)
76385 NULL
76386 };
76387
76388- char **argv = kmalloc(sizeof(char *[5]), GFP_KERNEL);
76389+ char **argv = kmalloc(sizeof(char *[6]), GFP_KERNEL);
76390 if (!argv)
76391 goto out;
76392
76393@@ -97,7 +97,8 @@ static int call_modprobe(char *module_name, int wait)
76394 argv[1] = "-q";
76395 argv[2] = "--";
76396 argv[3] = module_name; /* check free_modprobe_argv() */
76397- argv[4] = NULL;
76398+ argv[4] = module_param;
76399+ argv[5] = NULL;
76400
76401 info = call_usermodehelper_setup(modprobe_path, argv, envp, GFP_KERNEL,
76402 NULL, free_modprobe_argv, NULL);
76403@@ -129,9 +130,8 @@ out:
76404 * If module auto-loading support is disabled then this function
76405 * becomes a no-operation.
76406 */
76407-int __request_module(bool wait, const char *fmt, ...)
76408+static int ____request_module(bool wait, char *module_param, const char *fmt, va_list ap)
76409 {
76410- va_list args;
76411 char module_name[MODULE_NAME_LEN];
76412 unsigned int max_modprobes;
76413 int ret;
76414@@ -147,9 +147,7 @@ int __request_module(bool wait, const char *fmt, ...)
76415 */
76416 WARN_ON_ONCE(wait && current_is_async());
76417
76418- va_start(args, fmt);
76419- ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, args);
76420- va_end(args);
76421+ ret = vsnprintf(module_name, MODULE_NAME_LEN, fmt, ap);
76422 if (ret >= MODULE_NAME_LEN)
76423 return -ENAMETOOLONG;
76424
76425@@ -157,6 +155,20 @@ int __request_module(bool wait, const char *fmt, ...)
76426 if (ret)
76427 return ret;
76428
76429+#ifdef CONFIG_GRKERNSEC_MODHARDEN
76430+ if (uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
76431+ /* hack to work around consolekit/udisks stupidity */
76432+ read_lock(&tasklist_lock);
76433+ if (!strcmp(current->comm, "mount") &&
76434+ current->real_parent && !strncmp(current->real_parent->comm, "udisk", 5)) {
76435+ read_unlock(&tasklist_lock);
76436+ printk(KERN_ALERT "grsec: denied attempt to auto-load fs module %.64s by udisks\n", module_name);
76437+ return -EPERM;
76438+ }
76439+ read_unlock(&tasklist_lock);
76440+ }
76441+#endif
76442+
76443 /* If modprobe needs a service that is in a module, we get a recursive
76444 * loop. Limit the number of running kmod threads to max_threads/2 or
76445 * MAX_KMOD_CONCURRENT, whichever is the smaller. A cleaner method
76446@@ -185,11 +197,52 @@ int __request_module(bool wait, const char *fmt, ...)
76447
76448 trace_module_request(module_name, wait, _RET_IP_);
76449
76450- ret = call_modprobe(module_name, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
76451+ ret = call_modprobe(module_name, module_param, wait ? UMH_WAIT_PROC : UMH_WAIT_EXEC);
76452
76453 atomic_dec(&kmod_concurrent);
76454 return ret;
76455 }
76456+
76457+int ___request_module(bool wait, char *module_param, const char *fmt, ...)
76458+{
76459+ va_list args;
76460+ int ret;
76461+
76462+ va_start(args, fmt);
76463+ ret = ____request_module(wait, module_param, fmt, args);
76464+ va_end(args);
76465+
76466+ return ret;
76467+}
76468+
76469+int __request_module(bool wait, const char *fmt, ...)
76470+{
76471+ va_list args;
76472+ int ret;
76473+
76474+#ifdef CONFIG_GRKERNSEC_MODHARDEN
76475+ if (!uid_eq(current_uid(), GLOBAL_ROOT_UID)) {
76476+ char module_param[MODULE_NAME_LEN];
76477+
76478+ memset(module_param, 0, sizeof(module_param));
76479+
76480+ snprintf(module_param, sizeof(module_param) - 1, "grsec_modharden_normal%u_", GR_GLOBAL_UID(current_uid()));
76481+
76482+ va_start(args, fmt);
76483+ ret = ____request_module(wait, module_param, fmt, args);
76484+ va_end(args);
76485+
76486+ return ret;
76487+ }
76488+#endif
76489+
76490+ va_start(args, fmt);
76491+ ret = ____request_module(wait, NULL, fmt, args);
76492+ va_end(args);
76493+
76494+ return ret;
76495+}
76496+
76497 EXPORT_SYMBOL(__request_module);
76498 #endif /* CONFIG_MODULES */
76499
76500@@ -300,7 +353,7 @@ static int wait_for_helper(void *data)
76501 *
76502 * Thus the __user pointer cast is valid here.
76503 */
76504- sys_wait4(pid, (int __user *)&ret, 0, NULL);
76505+ sys_wait4(pid, (int __force_user *)&ret, 0, NULL);
76506
76507 /*
76508 * If ret is 0, either ____call_usermodehelper failed and the
76509@@ -651,7 +704,7 @@ EXPORT_SYMBOL(call_usermodehelper);
76510 static int proc_cap_handler(struct ctl_table *table, int write,
76511 void __user *buffer, size_t *lenp, loff_t *ppos)
76512 {
76513- struct ctl_table t;
76514+ ctl_table_no_const t;
76515 unsigned long cap_array[_KERNEL_CAPABILITY_U32S];
76516 kernel_cap_t new_cap;
76517 int err, i;
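The kmod refactor above is the standard varargs-forwarding pattern: the public variadic entry points (__request_module, ___request_module) capture their arguments with va_start() and hand a va_list to a single worker (____request_module), so the format string is expanded in exactly one place. A minimal sketch of the same shape, with hypothetical names:

    #include <stdarg.h>
    #include <stdio.h>

    /* worker: takes a va_list, formats once */
    static int vrequest(const char *extra, const char *fmt, va_list ap)
    {
        char name[64];

        vsnprintf(name, sizeof(name), fmt, ap);
        printf("request: name=%s extra=%s\n", name, extra ? extra : "(none)");
        return 0;
    }

    /* public variadic wrappers, mirroring __request_module and
     * ___request_module in the patch */
    static int request(const char *fmt, ...)
    {
        va_list ap;
        int ret;

        va_start(ap, fmt);
        ret = vrequest(NULL, fmt, ap);
        va_end(ap);
        return ret;
    }

    static int request_extra(const char *extra, const char *fmt, ...)
    {
        va_list ap;
        int ret;

        va_start(ap, fmt);
        ret = vrequest(extra, fmt, ap);
        va_end(ap);
        return ret;
    }

    int main(void)
    {
        request("fs-%s", "ext4");
        request_extra("grsec_modharden_normal1000_", "net-pf-%d", 10);
        return 0;
    }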
76518diff --git a/kernel/kprobes.c b/kernel/kprobes.c
76519index bddf3b2..07b90dd 100644
76520--- a/kernel/kprobes.c
76521+++ b/kernel/kprobes.c
76522@@ -185,7 +185,7 @@ static kprobe_opcode_t __kprobes *__get_insn_slot(struct kprobe_insn_cache *c)
76523 * kernel image and loaded module images reside. This is required
76524 * so x86_64 can correctly handle the %rip-relative fixups.
76525 */
76526- kip->insns = module_alloc(PAGE_SIZE);
76527+ kip->insns = module_alloc_exec(PAGE_SIZE);
76528 if (!kip->insns) {
76529 kfree(kip);
76530 return NULL;
76531@@ -225,7 +225,7 @@ static int __kprobes collect_one_slot(struct kprobe_insn_page *kip, int idx)
76532 */
76533 if (!list_is_singular(&kip->list)) {
76534 list_del(&kip->list);
76535- module_free(NULL, kip->insns);
76536+ module_free_exec(NULL, kip->insns);
76537 kfree(kip);
76538 }
76539 return 1;
76540@@ -2083,7 +2083,7 @@ static int __init init_kprobes(void)
76541 {
76542 int i, err = 0;
76543 unsigned long offset = 0, size = 0;
76544- char *modname, namebuf[128];
76545+ char *modname, namebuf[KSYM_NAME_LEN];
76546 const char *symbol_name;
76547 void *addr;
76548 struct kprobe_blackpoint *kb;
76549@@ -2168,11 +2168,11 @@ static void __kprobes report_probe(struct seq_file *pi, struct kprobe *p,
76550 kprobe_type = "k";
76551
76552 if (sym)
76553- seq_printf(pi, "%p %s %s+0x%x %s ",
76554+ seq_printf(pi, "%pK %s %s+0x%x %s ",
76555 p->addr, kprobe_type, sym, offset,
76556 (modname ? modname : " "));
76557 else
76558- seq_printf(pi, "%p %s %p ",
76559+ seq_printf(pi, "%pK %s %pK ",
76560 p->addr, kprobe_type, p->addr);
76561
76562 if (!pp)
76563@@ -2209,7 +2209,7 @@ static int __kprobes show_kprobe_addr(struct seq_file *pi, void *v)
76564 const char *sym = NULL;
76565 unsigned int i = *(loff_t *) v;
76566 unsigned long offset = 0;
76567- char *modname, namebuf[128];
76568+ char *modname, namebuf[KSYM_NAME_LEN];
76569
76570 head = &kprobe_table[i];
76571 preempt_disable();
76572diff --git a/kernel/ksysfs.c b/kernel/ksysfs.c
76573index 6ada93c..dce7d5d 100644
76574--- a/kernel/ksysfs.c
76575+++ b/kernel/ksysfs.c
76576@@ -46,6 +46,8 @@ static ssize_t uevent_helper_store(struct kobject *kobj,
76577 {
76578 if (count+1 > UEVENT_HELPER_PATH_LEN)
76579 return -ENOENT;
76580+ if (!capable(CAP_SYS_ADMIN))
76581+ return -EPERM;
76582 memcpy(uevent_helper, buf, count);
76583 uevent_helper[count] = '\0';
76584 if (count && uevent_helper[count-1] == '\n')
76585@@ -172,7 +174,7 @@ static ssize_t notes_read(struct file *filp, struct kobject *kobj,
76586 return count;
76587 }
76588
76589-static struct bin_attribute notes_attr = {
76590+static bin_attribute_no_const notes_attr __read_only = {
76591 .attr = {
76592 .name = "notes",
76593 .mode = S_IRUGO,
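The ksysfs hunk rejects uevent_helper writes with -EPERM before any data is copied unless the writer holds CAP_SYS_ADMIN; ordinary file permissions usually stop non-root at open() already, so the added check matters for writers who pass DAC but have had the capability stripped. A probe that reports which error fires (a sketch; exact errno depends on privileges and file mode):

    #include <errno.h>
    #include <fcntl.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    int main(void)
    {
        int fd = open("/sys/kernel/uevent_helper", O_WRONLY);

        if (fd < 0) {
            printf("open: %s\n", strerror(errno));   /* EACCES for non-root */
            return 1;
        }
        if (write(fd, "\n", 1) < 0)
            printf("write: %s\n", strerror(errno));  /* EPERM without CAP_SYS_ADMIN */
        else
            puts("write accepted");
        close(fd);
        return 0;
    }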
76594diff --git a/kernel/lockdep.c b/kernel/lockdep.c
76595index 1f3186b..bb7dbc6 100644
76596--- a/kernel/lockdep.c
76597+++ b/kernel/lockdep.c
76598@@ -596,6 +596,10 @@ static int static_obj(void *obj)
76599 end = (unsigned long) &_end,
76600 addr = (unsigned long) obj;
76601
76602+#ifdef CONFIG_PAX_KERNEXEC
76603+ start = ktla_ktva(start);
76604+#endif
76605+
76606 /*
76607 * static variable?
76608 */
76609@@ -736,6 +740,7 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
76610 if (!static_obj(lock->key)) {
76611 debug_locks_off();
76612 printk("INFO: trying to register non-static key.\n");
76613+ printk("lock:%pS key:%pS.\n", lock, lock->key);
76614 printk("the code is fine but needs lockdep annotation.\n");
76615 printk("turning off the locking correctness validator.\n");
76616 dump_stack();
76617@@ -3080,7 +3085,7 @@ static int __lock_acquire(struct lockdep_map *lock, unsigned int subclass,
76618 if (!class)
76619 return 0;
76620 }
76621- atomic_inc((atomic_t *)&class->ops);
76622+ atomic_inc_unchecked((atomic_unchecked_t *)&class->ops);
76623 if (very_verbose(class)) {
76624 printk("\nacquire class [%p] %s", class->key, class->name);
76625 if (class->name_version > 1)
76626diff --git a/kernel/lockdep_proc.c b/kernel/lockdep_proc.c
76627index b2c71c5..7b88d63 100644
76628--- a/kernel/lockdep_proc.c
76629+++ b/kernel/lockdep_proc.c
76630@@ -65,7 +65,7 @@ static int l_show(struct seq_file *m, void *v)
76631 return 0;
76632 }
76633
76634- seq_printf(m, "%p", class->key);
76635+ seq_printf(m, "%pK", class->key);
76636 #ifdef CONFIG_DEBUG_LOCKDEP
76637 seq_printf(m, " OPS:%8ld", class->ops);
76638 #endif
76639@@ -83,7 +83,7 @@ static int l_show(struct seq_file *m, void *v)
76640
76641 list_for_each_entry(entry, &class->locks_after, entry) {
76642 if (entry->distance == 1) {
76643- seq_printf(m, " -> [%p] ", entry->class->key);
76644+ seq_printf(m, " -> [%pK] ", entry->class->key);
76645 print_name(m, entry->class);
76646 seq_puts(m, "\n");
76647 }
76648@@ -152,7 +152,7 @@ static int lc_show(struct seq_file *m, void *v)
76649 if (!class->key)
76650 continue;
76651
76652- seq_printf(m, "[%p] ", class->key);
76653+ seq_printf(m, "[%pK] ", class->key);
76654 print_name(m, class);
76655 seq_puts(m, "\n");
76656 }
76657@@ -495,7 +495,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
76658 if (!i)
76659 seq_line(m, '-', 40-namelen, namelen);
76660
76661- snprintf(ip, sizeof(ip), "[<%p>]",
76662+ snprintf(ip, sizeof(ip), "[<%pK>]",
76663 (void *)class->contention_point[i]);
76664 seq_printf(m, "%40s %14lu %29s %pS\n",
76665 name, stats->contention_point[i],
76666@@ -510,7 +510,7 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
76667 if (!i)
76668 seq_line(m, '-', 40-namelen, namelen);
76669
76670- snprintf(ip, sizeof(ip), "[<%p>]",
76671+ snprintf(ip, sizeof(ip), "[<%pK>]",
76672 (void *)class->contending_point[i]);
76673 seq_printf(m, "%40s %14lu %29s %pS\n",
76674 name, stats->contending_point[i],
76675diff --git a/kernel/module.c b/kernel/module.c
76676index fa53db8..6f17200 100644
76677--- a/kernel/module.c
76678+++ b/kernel/module.c
76679@@ -61,6 +61,7 @@
76680 #include <linux/pfn.h>
76681 #include <linux/bsearch.h>
76682 #include <linux/fips.h>
76683+#include <linux/grsecurity.h>
76684 #include <uapi/linux/module.h>
76685 #include "module-internal.h"
76686
76687@@ -156,7 +157,8 @@ static BLOCKING_NOTIFIER_HEAD(module_notify_list);
76688
76689 /* Bounds of module allocation, for speeding __module_address.
76690 * Protected by module_mutex. */
76691-static unsigned long module_addr_min = -1UL, module_addr_max = 0;
76692+static unsigned long module_addr_min_rw = -1UL, module_addr_max_rw = 0;
76693+static unsigned long module_addr_min_rx = -1UL, module_addr_max_rx = 0;
76694
76695 int register_module_notifier(struct notifier_block * nb)
76696 {
76697@@ -323,7 +325,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
76698 return true;
76699
76700 list_for_each_entry_rcu(mod, &modules, list) {
76701- struct symsearch arr[] = {
76702+ struct symsearch modarr[] = {
76703 { mod->syms, mod->syms + mod->num_syms, mod->crcs,
76704 NOT_GPL_ONLY, false },
76705 { mod->gpl_syms, mod->gpl_syms + mod->num_gpl_syms,
76706@@ -348,7 +350,7 @@ bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
76707 if (mod->state == MODULE_STATE_UNFORMED)
76708 continue;
76709
76710- if (each_symbol_in_section(arr, ARRAY_SIZE(arr), mod, fn, data))
76711+ if (each_symbol_in_section(modarr, ARRAY_SIZE(modarr), mod, fn, data))
76712 return true;
76713 }
76714 return false;
76715@@ -485,7 +487,7 @@ static inline void __percpu *mod_percpu(struct module *mod)
76716 static int percpu_modalloc(struct module *mod,
76717 unsigned long size, unsigned long align)
76718 {
76719- if (align > PAGE_SIZE) {
76720+ if (align-1 >= PAGE_SIZE) {
76721 printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
76722 mod->name, align, PAGE_SIZE);
76723 align = PAGE_SIZE;
76724@@ -1089,7 +1091,7 @@ struct module_attribute module_uevent =
76725 static ssize_t show_coresize(struct module_attribute *mattr,
76726 struct module_kobject *mk, char *buffer)
76727 {
76728- return sprintf(buffer, "%u\n", mk->mod->core_size);
76729+ return sprintf(buffer, "%u\n", mk->mod->core_size_rx + mk->mod->core_size_rw);
76730 }
76731
76732 static struct module_attribute modinfo_coresize =
76733@@ -1098,7 +1100,7 @@ static struct module_attribute modinfo_coresize =
76734 static ssize_t show_initsize(struct module_attribute *mattr,
76735 struct module_kobject *mk, char *buffer)
76736 {
76737- return sprintf(buffer, "%u\n", mk->mod->init_size);
76738+ return sprintf(buffer, "%u\n", mk->mod->init_size_rx + mk->mod->init_size_rw);
76739 }
76740
76741 static struct module_attribute modinfo_initsize =
76742@@ -1313,7 +1315,7 @@ resolve_symbol_wait(struct module *mod,
76743 */
76744 #ifdef CONFIG_SYSFS
76745
76746-#ifdef CONFIG_KALLSYMS
76747+#if defined(CONFIG_KALLSYMS) && !defined(CONFIG_GRKERNSEC_HIDESYM)
76748 static inline bool sect_empty(const Elf_Shdr *sect)
76749 {
76750 return !(sect->sh_flags & SHF_ALLOC) || sect->sh_size == 0;
76751@@ -1453,7 +1455,7 @@ static void add_notes_attrs(struct module *mod, const struct load_info *info)
76752 {
76753 unsigned int notes, loaded, i;
76754 struct module_notes_attrs *notes_attrs;
76755- struct bin_attribute *nattr;
76756+ bin_attribute_no_const *nattr;
76757
76758 /* failed to create section attributes, so can't create notes */
76759 if (!mod->sect_attrs)
76760@@ -1565,7 +1567,7 @@ static void del_usage_links(struct module *mod)
76761 static int module_add_modinfo_attrs(struct module *mod)
76762 {
76763 struct module_attribute *attr;
76764- struct module_attribute *temp_attr;
76765+ module_attribute_no_const *temp_attr;
76766 int error = 0;
76767 int i;
76768
76769@@ -1779,21 +1781,21 @@ static void set_section_ro_nx(void *base,
76770
76771 static void unset_module_core_ro_nx(struct module *mod)
76772 {
76773- set_page_attributes(mod->module_core + mod->core_text_size,
76774- mod->module_core + mod->core_size,
76775+ set_page_attributes(mod->module_core_rw,
76776+ mod->module_core_rw + mod->core_size_rw,
76777 set_memory_x);
76778- set_page_attributes(mod->module_core,
76779- mod->module_core + mod->core_ro_size,
76780+ set_page_attributes(mod->module_core_rx,
76781+ mod->module_core_rx + mod->core_size_rx,
76782 set_memory_rw);
76783 }
76784
76785 static void unset_module_init_ro_nx(struct module *mod)
76786 {
76787- set_page_attributes(mod->module_init + mod->init_text_size,
76788- mod->module_init + mod->init_size,
76789+ set_page_attributes(mod->module_init_rw,
76790+ mod->module_init_rw + mod->init_size_rw,
76791 set_memory_x);
76792- set_page_attributes(mod->module_init,
76793- mod->module_init + mod->init_ro_size,
76794+ set_page_attributes(mod->module_init_rx,
76795+ mod->module_init_rx + mod->init_size_rx,
76796 set_memory_rw);
76797 }
76798
76799@@ -1806,14 +1808,14 @@ void set_all_modules_text_rw(void)
76800 list_for_each_entry_rcu(mod, &modules, list) {
76801 if (mod->state == MODULE_STATE_UNFORMED)
76802 continue;
76803- if ((mod->module_core) && (mod->core_text_size)) {
76804- set_page_attributes(mod->module_core,
76805- mod->module_core + mod->core_text_size,
76806+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
76807+ set_page_attributes(mod->module_core_rx,
76808+ mod->module_core_rx + mod->core_size_rx,
76809 set_memory_rw);
76810 }
76811- if ((mod->module_init) && (mod->init_text_size)) {
76812- set_page_attributes(mod->module_init,
76813- mod->module_init + mod->init_text_size,
76814+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
76815+ set_page_attributes(mod->module_init_rx,
76816+ mod->module_init_rx + mod->init_size_rx,
76817 set_memory_rw);
76818 }
76819 }
76820@@ -1829,14 +1831,14 @@ void set_all_modules_text_ro(void)
76821 list_for_each_entry_rcu(mod, &modules, list) {
76822 if (mod->state == MODULE_STATE_UNFORMED)
76823 continue;
76824- if ((mod->module_core) && (mod->core_text_size)) {
76825- set_page_attributes(mod->module_core,
76826- mod->module_core + mod->core_text_size,
76827+ if ((mod->module_core_rx) && (mod->core_size_rx)) {
76828+ set_page_attributes(mod->module_core_rx,
76829+ mod->module_core_rx + mod->core_size_rx,
76830 set_memory_ro);
76831 }
76832- if ((mod->module_init) && (mod->init_text_size)) {
76833- set_page_attributes(mod->module_init,
76834- mod->module_init + mod->init_text_size,
76835+ if ((mod->module_init_rx) && (mod->init_size_rx)) {
76836+ set_page_attributes(mod->module_init_rx,
76837+ mod->module_init_rx + mod->init_size_rx,
76838 set_memory_ro);
76839 }
76840 }
76841@@ -1887,16 +1889,19 @@ static void free_module(struct module *mod)
76842
76843 /* This may be NULL, but that's OK */
76844 unset_module_init_ro_nx(mod);
76845- module_free(mod, mod->module_init);
76846+ module_free(mod, mod->module_init_rw);
76847+ module_free_exec(mod, mod->module_init_rx);
76848 kfree(mod->args);
76849 percpu_modfree(mod);
76850
76851 /* Free lock-classes: */
76852- lockdep_free_key_range(mod->module_core, mod->core_size);
76853+ lockdep_free_key_range(mod->module_core_rx, mod->core_size_rx);
76854+ lockdep_free_key_range(mod->module_core_rw, mod->core_size_rw);
76855
76856 /* Finally, free the core (containing the module structure) */
76857 unset_module_core_ro_nx(mod);
76858- module_free(mod, mod->module_core);
76859+ module_free_exec(mod, mod->module_core_rx);
76860+ module_free(mod, mod->module_core_rw);
76861
76862 #ifdef CONFIG_MPU
76863 update_protections(current->mm);
76864@@ -1966,9 +1971,31 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
76865 int ret = 0;
76866 const struct kernel_symbol *ksym;
76867
76868+#ifdef CONFIG_GRKERNSEC_MODHARDEN
76869+ int is_fs_load = 0;
76870+ int register_filesystem_found = 0;
76871+ char *p;
76872+
76873+ p = strstr(mod->args, "grsec_modharden_fs");
76874+ if (p) {
76875+ char *endptr = p + sizeof("grsec_modharden_fs") - 1;
76876+ /* copy \0 as well */
76877+ memmove(p, endptr, strlen(mod->args) - (unsigned int)(endptr - mod->args) + 1);
76878+ is_fs_load = 1;
76879+ }
76880+#endif
76881+
76882 for (i = 1; i < symsec->sh_size / sizeof(Elf_Sym); i++) {
76883 const char *name = info->strtab + sym[i].st_name;
76884
76885+#ifdef CONFIG_GRKERNSEC_MODHARDEN
76886+ /* it's a real shame this will never get ripped and copied
76887+ upstream! ;(
76888+ */
76889+ if (is_fs_load && !strcmp(name, "register_filesystem"))
76890+ register_filesystem_found = 1;
76891+#endif
76892+
76893 switch (sym[i].st_shndx) {
76894 case SHN_COMMON:
76895 /* We compiled with -fno-common. These are not
76896@@ -1989,7 +2016,9 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
76897 ksym = resolve_symbol_wait(mod, info, name);
76898 /* Ok if resolved. */
76899 if (ksym && !IS_ERR(ksym)) {
76900+ pax_open_kernel();
76901 sym[i].st_value = ksym->value;
76902+ pax_close_kernel();
76903 break;
76904 }
76905
76906@@ -2008,11 +2037,20 @@ static int simplify_symbols(struct module *mod, const struct load_info *info)
76907 secbase = (unsigned long)mod_percpu(mod);
76908 else
76909 secbase = info->sechdrs[sym[i].st_shndx].sh_addr;
76910+ pax_open_kernel();
76911 sym[i].st_value += secbase;
76912+ pax_close_kernel();
76913 break;
76914 }
76915 }
76916
76917+#ifdef CONFIG_GRKERNSEC_MODHARDEN
76918+ if (is_fs_load && !register_filesystem_found) {
76919+ printk(KERN_ALERT "grsec: Denied attempt to load non-fs module %.64s through mount\n", mod->name);
76920+ ret = -EPERM;
76921+ }
76922+#endif
76923+
76924 return ret;
76925 }
76926
76927@@ -2096,22 +2134,12 @@ static void layout_sections(struct module *mod, struct load_info *info)
76928 || s->sh_entsize != ~0UL
76929 || strstarts(sname, ".init"))
76930 continue;
76931- s->sh_entsize = get_offset(mod, &mod->core_size, s, i);
76932+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
76933+ s->sh_entsize = get_offset(mod, &mod->core_size_rw, s, i);
76934+ else
76935+ s->sh_entsize = get_offset(mod, &mod->core_size_rx, s, i);
76936 pr_debug("\t%s\n", sname);
76937 }
76938- switch (m) {
76939- case 0: /* executable */
76940- mod->core_size = debug_align(mod->core_size);
76941- mod->core_text_size = mod->core_size;
76942- break;
76943- case 1: /* RO: text and ro-data */
76944- mod->core_size = debug_align(mod->core_size);
76945- mod->core_ro_size = mod->core_size;
76946- break;
76947- case 3: /* whole core */
76948- mod->core_size = debug_align(mod->core_size);
76949- break;
76950- }
76951 }
76952
76953 pr_debug("Init section allocation order:\n");
76954@@ -2125,23 +2153,13 @@ static void layout_sections(struct module *mod, struct load_info *info)
76955 || s->sh_entsize != ~0UL
76956 || !strstarts(sname, ".init"))
76957 continue;
76958- s->sh_entsize = (get_offset(mod, &mod->init_size, s, i)
76959- | INIT_OFFSET_MASK);
76960+ if ((s->sh_flags & SHF_WRITE) || !(s->sh_flags & SHF_ALLOC))
76961+ s->sh_entsize = get_offset(mod, &mod->init_size_rw, s, i);
76962+ else
76963+ s->sh_entsize = get_offset(mod, &mod->init_size_rx, s, i);
76964+ s->sh_entsize |= INIT_OFFSET_MASK;
76965 pr_debug("\t%s\n", sname);
76966 }
76967- switch (m) {
76968- case 0: /* executable */
76969- mod->init_size = debug_align(mod->init_size);
76970- mod->init_text_size = mod->init_size;
76971- break;
76972- case 1: /* RO: text and ro-data */
76973- mod->init_size = debug_align(mod->init_size);
76974- mod->init_ro_size = mod->init_size;
76975- break;
76976- case 3: /* whole init */
76977- mod->init_size = debug_align(mod->init_size);
76978- break;
76979- }
76980 }
76981 }
76982
76983@@ -2314,7 +2332,7 @@ static void layout_symtab(struct module *mod, struct load_info *info)
76984
76985 /* Put symbol section at end of init part of module. */
76986 symsect->sh_flags |= SHF_ALLOC;
76987- symsect->sh_entsize = get_offset(mod, &mod->init_size, symsect,
76988+ symsect->sh_entsize = get_offset(mod, &mod->init_size_rx, symsect,
76989 info->index.sym) | INIT_OFFSET_MASK;
76990 pr_debug("\t%s\n", info->secstrings + symsect->sh_name);
76991
76992@@ -2331,13 +2349,13 @@ static void layout_symtab(struct module *mod, struct load_info *info)
76993 }
76994
76995 /* Append room for core symbols at end of core part. */
76996- info->symoffs = ALIGN(mod->core_size, symsect->sh_addralign ?: 1);
76997- info->stroffs = mod->core_size = info->symoffs + ndst * sizeof(Elf_Sym);
76998- mod->core_size += strtab_size;
76999+ info->symoffs = ALIGN(mod->core_size_rx, symsect->sh_addralign ?: 1);
77000+ info->stroffs = mod->core_size_rx = info->symoffs + ndst * sizeof(Elf_Sym);
77001+ mod->core_size_rx += strtab_size;
77002
77003 /* Put string table section at end of init part of module. */
77004 strsect->sh_flags |= SHF_ALLOC;
77005- strsect->sh_entsize = get_offset(mod, &mod->init_size, strsect,
77006+ strsect->sh_entsize = get_offset(mod, &mod->init_size_rx, strsect,
77007 info->index.str) | INIT_OFFSET_MASK;
77008 pr_debug("\t%s\n", info->secstrings + strsect->sh_name);
77009 }
77010@@ -2355,12 +2373,14 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
77011 /* Make sure we get permanent strtab: don't use info->strtab. */
77012 mod->strtab = (void *)info->sechdrs[info->index.str].sh_addr;
77013
77014+ pax_open_kernel();
77015+
77016 /* Set types up while we still have access to sections. */
77017 for (i = 0; i < mod->num_symtab; i++)
77018 mod->symtab[i].st_info = elf_type(&mod->symtab[i], info);
77019
77020- mod->core_symtab = dst = mod->module_core + info->symoffs;
77021- mod->core_strtab = s = mod->module_core + info->stroffs;
77022+ mod->core_symtab = dst = mod->module_core_rx + info->symoffs;
77023+ mod->core_strtab = s = mod->module_core_rx + info->stroffs;
77024 src = mod->symtab;
77025 for (ndst = i = 0; i < mod->num_symtab; i++) {
77026 if (i == 0 ||
77027@@ -2372,6 +2392,8 @@ static void add_kallsyms(struct module *mod, const struct load_info *info)
77028 }
77029 }
77030 mod->core_num_syms = ndst;
77031+
77032+ pax_close_kernel();
77033 }
77034 #else
77035 static inline void layout_symtab(struct module *mod, struct load_info *info)
77036@@ -2405,17 +2427,33 @@ void * __weak module_alloc(unsigned long size)
77037 return vmalloc_exec(size);
77038 }
77039
77040-static void *module_alloc_update_bounds(unsigned long size)
77041+static void *module_alloc_update_bounds_rw(unsigned long size)
77042 {
77043 void *ret = module_alloc(size);
77044
77045 if (ret) {
77046 mutex_lock(&module_mutex);
77047 /* Update module bounds. */
77048- if ((unsigned long)ret < module_addr_min)
77049- module_addr_min = (unsigned long)ret;
77050- if ((unsigned long)ret + size > module_addr_max)
77051- module_addr_max = (unsigned long)ret + size;
77052+ if ((unsigned long)ret < module_addr_min_rw)
77053+ module_addr_min_rw = (unsigned long)ret;
77054+ if ((unsigned long)ret + size > module_addr_max_rw)
77055+ module_addr_max_rw = (unsigned long)ret + size;
77056+ mutex_unlock(&module_mutex);
77057+ }
77058+ return ret;
77059+}
77060+
77061+static void *module_alloc_update_bounds_rx(unsigned long size)
77062+{
77063+ void *ret = module_alloc_exec(size);
77064+
77065+ if (ret) {
77066+ mutex_lock(&module_mutex);
77067+ /* Update module bounds. */
77068+ if ((unsigned long)ret < module_addr_min_rx)
77069+ module_addr_min_rx = (unsigned long)ret;
77070+ if ((unsigned long)ret + size > module_addr_max_rx)
77071+ module_addr_max_rx = (unsigned long)ret + size;
77072 mutex_unlock(&module_mutex);
77073 }
77074 return ret;
77075@@ -2691,8 +2729,14 @@ static struct module *setup_load_info(struct load_info *info, int flags)
77076 static int check_modinfo(struct module *mod, struct load_info *info, int flags)
77077 {
77078 const char *modmagic = get_modinfo(info, "vermagic");
77079+ const char *license = get_modinfo(info, "license");
77080 int err;
77081
77082+#ifdef CONFIG_PAX_KERNEXEC_PLUGIN_METHOD_OR
77083+ if (!license || !license_is_gpl_compatible(license))
77084+ return -ENOEXEC;
77085+#endif
77086+
77087 if (flags & MODULE_INIT_IGNORE_VERMAGIC)
77088 modmagic = NULL;
77089
77090@@ -2718,7 +2762,7 @@ static int check_modinfo(struct module *mod, struct load_info *info, int flags)
77091 }
77092
77093 /* Set up license info based on the info section */
77094- set_license(mod, get_modinfo(info, "license"));
77095+ set_license(mod, license);
77096
77097 return 0;
77098 }
77099@@ -2799,7 +2843,7 @@ static int move_module(struct module *mod, struct load_info *info)
77100 void *ptr;
77101
77102 /* Do the allocs. */
77103- ptr = module_alloc_update_bounds(mod->core_size);
77104+ ptr = module_alloc_update_bounds_rw(mod->core_size_rw);
77105 /*
77106 * The pointer to this block is stored in the module structure
77107 * which is inside the block. Just mark it as not being a
77108@@ -2809,11 +2853,11 @@ static int move_module(struct module *mod, struct load_info *info)
77109 if (!ptr)
77110 return -ENOMEM;
77111
77112- memset(ptr, 0, mod->core_size);
77113- mod->module_core = ptr;
77114+ memset(ptr, 0, mod->core_size_rw);
77115+ mod->module_core_rw = ptr;
77116
77117- if (mod->init_size) {
77118- ptr = module_alloc_update_bounds(mod->init_size);
77119+ if (mod->init_size_rw) {
77120+ ptr = module_alloc_update_bounds_rw(mod->init_size_rw);
77121 /*
77122 * The pointer to this block is stored in the module structure
77123 * which is inside the block. This block doesn't need to be
77124@@ -2822,13 +2866,45 @@ static int move_module(struct module *mod, struct load_info *info)
77125 */
77126 kmemleak_ignore(ptr);
77127 if (!ptr) {
77128- module_free(mod, mod->module_core);
77129+ module_free(mod, mod->module_core_rw);
77130 return -ENOMEM;
77131 }
77132- memset(ptr, 0, mod->init_size);
77133- mod->module_init = ptr;
77134+ memset(ptr, 0, mod->init_size_rw);
77135+ mod->module_init_rw = ptr;
77136 } else
77137- mod->module_init = NULL;
77138+ mod->module_init_rw = NULL;
77139+
77140+ ptr = module_alloc_update_bounds_rx(mod->core_size_rx);
77141+ kmemleak_not_leak(ptr);
77142+ if (!ptr) {
77143+ if (mod->module_init_rw)
77144+ module_free(mod, mod->module_init_rw);
77145+ module_free(mod, mod->module_core_rw);
77146+ return -ENOMEM;
77147+ }
77148+
77149+ pax_open_kernel();
77150+ memset(ptr, 0, mod->core_size_rx);
77151+ pax_close_kernel();
77152+ mod->module_core_rx = ptr;
77153+
77154+ if (mod->init_size_rx) {
77155+ ptr = module_alloc_update_bounds_rx(mod->init_size_rx);
77156+ kmemleak_ignore(ptr);
77157+ if (!ptr && mod->init_size_rx) {
77158+ module_free_exec(mod, mod->module_core_rx);
77159+ if (mod->module_init_rw)
77160+ module_free(mod, mod->module_init_rw);
77161+ module_free(mod, mod->module_core_rw);
77162+ return -ENOMEM;
77163+ }
77164+
77165+ pax_open_kernel();
77166+ memset(ptr, 0, mod->init_size_rx);
77167+ pax_close_kernel();
77168+ mod->module_init_rx = ptr;
77169+ } else
77170+ mod->module_init_rx = NULL;
77171
77172 /* Transfer each section which specifies SHF_ALLOC */
77173 pr_debug("final section addresses:\n");
77174@@ -2839,16 +2915,45 @@ static int move_module(struct module *mod, struct load_info *info)
77175 if (!(shdr->sh_flags & SHF_ALLOC))
77176 continue;
77177
77178- if (shdr->sh_entsize & INIT_OFFSET_MASK)
77179- dest = mod->module_init
77180- + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
77181- else
77182- dest = mod->module_core + shdr->sh_entsize;
77183+ if (shdr->sh_entsize & INIT_OFFSET_MASK) {
77184+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
77185+ dest = mod->module_init_rw
77186+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
77187+ else
77188+ dest = mod->module_init_rx
77189+ + (shdr->sh_entsize & ~INIT_OFFSET_MASK);
77190+ } else {
77191+ if ((shdr->sh_flags & SHF_WRITE) || !(shdr->sh_flags & SHF_ALLOC))
77192+ dest = mod->module_core_rw + shdr->sh_entsize;
77193+ else
77194+ dest = mod->module_core_rx + shdr->sh_entsize;
77195+ }
77196+
77197+ if (shdr->sh_type != SHT_NOBITS) {
77198+
77199+#ifdef CONFIG_PAX_KERNEXEC
77200+#ifdef CONFIG_X86_64
77201+ if ((shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_EXECINSTR))
77202+ set_memory_x((unsigned long)dest, (shdr->sh_size + PAGE_SIZE) >> PAGE_SHIFT);
77203+#endif
77204+ if (!(shdr->sh_flags & SHF_WRITE) && (shdr->sh_flags & SHF_ALLOC)) {
77205+ pax_open_kernel();
77206+ memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
77207+ pax_close_kernel();
77208+ } else
77209+#endif
77210
77211- if (shdr->sh_type != SHT_NOBITS)
77212 memcpy(dest, (void *)shdr->sh_addr, shdr->sh_size);
77213+ }
77214 /* Update sh_addr to point to copy in image. */
77215- shdr->sh_addr = (unsigned long)dest;
77216+
77217+#ifdef CONFIG_PAX_KERNEXEC
77218+ if (shdr->sh_flags & SHF_EXECINSTR)
77219+ shdr->sh_addr = ktva_ktla((unsigned long)dest);
77220+ else
77221+#endif
77222+
77223+ shdr->sh_addr = (unsigned long)dest;
77224 pr_debug("\t0x%lx %s\n",
77225 (long)shdr->sh_addr, info->secstrings + shdr->sh_name);
77226 }
77227@@ -2905,12 +3010,12 @@ static void flush_module_icache(const struct module *mod)
77228 * Do it before processing of module parameters, so the module
77229 * can provide parameter accessor functions of its own.
77230 */
77231- if (mod->module_init)
77232- flush_icache_range((unsigned long)mod->module_init,
77233- (unsigned long)mod->module_init
77234- + mod->init_size);
77235- flush_icache_range((unsigned long)mod->module_core,
77236- (unsigned long)mod->module_core + mod->core_size);
77237+ if (mod->module_init_rx)
77238+ flush_icache_range((unsigned long)mod->module_init_rx,
77239+ (unsigned long)mod->module_init_rx
77240+ + mod->init_size_rx);
77241+ flush_icache_range((unsigned long)mod->module_core_rx,
77242+ (unsigned long)mod->module_core_rx + mod->core_size_rx);
77243
77244 set_fs(old_fs);
77245 }
77246@@ -2977,8 +3082,10 @@ static int alloc_module_percpu(struct module *mod, struct load_info *info)
77247 static void module_deallocate(struct module *mod, struct load_info *info)
77248 {
77249 percpu_modfree(mod);
77250- module_free(mod, mod->module_init);
77251- module_free(mod, mod->module_core);
77252+ module_free_exec(mod, mod->module_init_rx);
77253+ module_free_exec(mod, mod->module_core_rx);
77254+ module_free(mod, mod->module_init_rw);
77255+ module_free(mod, mod->module_core_rw);
77256 }
77257
77258 int __weak module_finalize(const Elf_Ehdr *hdr,
77259@@ -2991,7 +3098,9 @@ int __weak module_finalize(const Elf_Ehdr *hdr,
77260 static int post_relocation(struct module *mod, const struct load_info *info)
77261 {
77262 /* Sort exception table now relocations are done. */
77263+ pax_open_kernel();
77264 sort_extable(mod->extable, mod->extable + mod->num_exentries);
77265+ pax_close_kernel();
77266
77267 /* Copy relocated percpu area over. */
77268 percpu_modcopy(mod, (void *)info->sechdrs[info->index.pcpu].sh_addr,
77269@@ -3045,16 +3154,16 @@ static int do_init_module(struct module *mod)
77270 MODULE_STATE_COMING, mod);
77271
77272 /* Set RO and NX regions for core */
77273- set_section_ro_nx(mod->module_core,
77274- mod->core_text_size,
77275- mod->core_ro_size,
77276- mod->core_size);
77277+ set_section_ro_nx(mod->module_core_rx,
77278+ mod->core_size_rx,
77279+ mod->core_size_rx,
77280+ mod->core_size_rx);
77281
77282 /* Set RO and NX regions for init */
77283- set_section_ro_nx(mod->module_init,
77284- mod->init_text_size,
77285- mod->init_ro_size,
77286- mod->init_size);
77287+ set_section_ro_nx(mod->module_init_rx,
77288+ mod->init_size_rx,
77289+ mod->init_size_rx,
77290+ mod->init_size_rx);
77291
77292 do_mod_ctors(mod);
77293 /* Start the module */
77294@@ -3116,11 +3225,12 @@ static int do_init_module(struct module *mod)
77295 mod->strtab = mod->core_strtab;
77296 #endif
77297 unset_module_init_ro_nx(mod);
77298- module_free(mod, mod->module_init);
77299- mod->module_init = NULL;
77300- mod->init_size = 0;
77301- mod->init_ro_size = 0;
77302- mod->init_text_size = 0;
77303+ module_free(mod, mod->module_init_rw);
77304+ module_free_exec(mod, mod->module_init_rx);
77305+ mod->module_init_rw = NULL;
77306+ mod->module_init_rx = NULL;
77307+ mod->init_size_rw = 0;
77308+ mod->init_size_rx = 0;
77309 mutex_unlock(&module_mutex);
77310 wake_up_all(&module_wq);
77311
77312@@ -3252,9 +3362,38 @@ static int load_module(struct load_info *info, const char __user *uargs,
77313 if (err)
77314 goto free_unload;
77315
77316+ /* Now copy in args */
77317+ mod->args = strndup_user(uargs, ~0UL >> 1);
77318+ if (IS_ERR(mod->args)) {
77319+ err = PTR_ERR(mod->args);
77320+ goto free_unload;
77321+ }
77322+
77323 /* Set up MODINFO_ATTR fields */
77324 setup_modinfo(mod, info);
77325
77326+#ifdef CONFIG_GRKERNSEC_MODHARDEN
77327+ {
77328+ char *p, *p2;
77329+
77330+ if (strstr(mod->args, "grsec_modharden_netdev")) {
77331+ printk(KERN_ALERT "grsec: denied auto-loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%.64s instead.", mod->name);
77332+ err = -EPERM;
77333+ goto free_modinfo;
77334+ } else if ((p = strstr(mod->args, "grsec_modharden_normal"))) {
77335+ p += sizeof("grsec_modharden_normal") - 1;
77336+ p2 = strstr(p, "_");
77337+ if (p2) {
77338+ *p2 = '\0';
77339+ printk(KERN_ALERT "grsec: denied kernel module auto-load of %.64s by uid %.9s\n", mod->name, p);
77340+ *p2 = '_';
77341+ }
77342+ err = -EPERM;
77343+ goto free_modinfo;
77344+ }
77345+ }
77346+#endif
77347+
77348 /* Fix up syms, so that st_value is a pointer to location. */
77349 err = simplify_symbols(mod, info);
77350 if (err < 0)
77351@@ -3270,13 +3409,6 @@ static int load_module(struct load_info *info, const char __user *uargs,
77352
77353 flush_module_icache(mod);
77354
77355- /* Now copy in args */
77356- mod->args = strndup_user(uargs, ~0UL >> 1);
77357- if (IS_ERR(mod->args)) {
77358- err = PTR_ERR(mod->args);
77359- goto free_arch_cleanup;
77360- }
77361-
77362 dynamic_debug_setup(info->debug, info->num_debug);
77363
77364 /* Finally it's fully formed, ready to start executing. */
77365@@ -3311,11 +3443,10 @@ static int load_module(struct load_info *info, const char __user *uargs,
77366 ddebug_cleanup:
77367 dynamic_debug_remove(info->debug);
77368 synchronize_sched();
77369- kfree(mod->args);
77370- free_arch_cleanup:
77371 module_arch_cleanup(mod);
77372 free_modinfo:
77373 free_modinfo(mod);
77374+ kfree(mod->args);
77375 free_unload:
77376 module_unload_free(mod);
77377 unlink_mod:
77378@@ -3398,10 +3529,16 @@ static const char *get_ksymbol(struct module *mod,
77379 unsigned long nextval;
77380
77381 /* At worst, next value is at end of module */
77382- if (within_module_init(addr, mod))
77383- nextval = (unsigned long)mod->module_init+mod->init_text_size;
77384+ if (within_module_init_rx(addr, mod))
77385+ nextval = (unsigned long)mod->module_init_rx+mod->init_size_rx;
77386+ else if (within_module_init_rw(addr, mod))
77387+ nextval = (unsigned long)mod->module_init_rw+mod->init_size_rw;
77388+ else if (within_module_core_rx(addr, mod))
77389+ nextval = (unsigned long)mod->module_core_rx+mod->core_size_rx;
77390+ else if (within_module_core_rw(addr, mod))
77391+ nextval = (unsigned long)mod->module_core_rw+mod->core_size_rw;
77392 else
77393- nextval = (unsigned long)mod->module_core+mod->core_text_size;
77394+ return NULL;
77395
77396 /* Scan for closest preceding symbol, and next symbol. (ELF
77397 starts real symbols at 1). */
77398@@ -3654,7 +3791,7 @@ static int m_show(struct seq_file *m, void *p)
77399 return 0;
77400
77401 seq_printf(m, "%s %u",
77402- mod->name, mod->init_size + mod->core_size);
77403+ mod->name, mod->init_size_rx + mod->init_size_rw + mod->core_size_rx + mod->core_size_rw);
77404 print_unload_info(m, mod);
77405
77406 /* Informative for users. */
77407@@ -3663,7 +3800,7 @@ static int m_show(struct seq_file *m, void *p)
77408 mod->state == MODULE_STATE_COMING ? "Loading":
77409 "Live");
77410 /* Used by oprofile and other similar tools. */
77411- seq_printf(m, " 0x%pK", mod->module_core);
77412+ seq_printf(m, " 0x%pK 0x%pK", mod->module_core_rx, mod->module_core_rw);
77413
77414 /* Taints info */
77415 if (mod->taints)
77416@@ -3699,7 +3836,17 @@ static const struct file_operations proc_modules_operations = {
77417
77418 static int __init proc_modules_init(void)
77419 {
77420+#ifndef CONFIG_GRKERNSEC_HIDESYM
77421+#ifdef CONFIG_GRKERNSEC_PROC_USER
77422+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
77423+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
77424+ proc_create("modules", S_IRUSR | S_IRGRP, NULL, &proc_modules_operations);
77425+#else
77426 proc_create("modules", 0, NULL, &proc_modules_operations);
77427+#endif
77428+#else
77429+ proc_create("modules", S_IRUSR, NULL, &proc_modules_operations);
77430+#endif
77431 return 0;
77432 }
77433 module_init(proc_modules_init);
77434@@ -3760,14 +3907,14 @@ struct module *__module_address(unsigned long addr)
77435 {
77436 struct module *mod;
77437
77438- if (addr < module_addr_min || addr > module_addr_max)
77439+ if ((addr < module_addr_min_rx || addr > module_addr_max_rx) &&
77440+ (addr < module_addr_min_rw || addr > module_addr_max_rw))
77441 return NULL;
77442
77443 list_for_each_entry_rcu(mod, &modules, list) {
77444 if (mod->state == MODULE_STATE_UNFORMED)
77445 continue;
77446- if (within_module_core(addr, mod)
77447- || within_module_init(addr, mod))
77448+ if (within_module_init(addr, mod) || within_module_core(addr, mod))
77449 return mod;
77450 }
77451 return NULL;
77452@@ -3802,11 +3949,20 @@ bool is_module_text_address(unsigned long addr)
77453 */
77454 struct module *__module_text_address(unsigned long addr)
77455 {
77456- struct module *mod = __module_address(addr);
77457+ struct module *mod;
77458+
77459+#ifdef CONFIG_X86_32
77460+ addr = ktla_ktva(addr);
77461+#endif
77462+
77463+ if (addr < module_addr_min_rx || addr > module_addr_max_rx)
77464+ return NULL;
77465+
77466+ mod = __module_address(addr);
77467+
77468 if (mod) {
77469 /* Make sure it's within the text section. */
77470- if (!within(addr, mod->module_init, mod->init_text_size)
77471- && !within(addr, mod->module_core, mod->core_text_size))
77472+ if (!within_module_init_rx(addr, mod) && !within_module_core_rx(addr, mod))
77473 mod = NULL;
77474 }
77475 return mod;
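The bulk of the module.c changes split each module into two allocations, a writable region (module_core_rw) for data and an executable-but-never-writable region (module_core_rx) for text, instead of one block carved into ro/text offsets. A user-space analogue of that W^X split using two mappings (a sketch; the kernel uses module_alloc()/module_alloc_exec() and pax_open_kernel() for the initial copy):

    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>
    #include <unistd.h>

    int main(void)
    {
        long pg = sysconf(_SC_PAGESIZE);

        /* "core_rw": data stays writable, but is never executable */
        unsigned char *rw = mmap(NULL, pg, PROT_READ | PROT_WRITE,
                                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        /* "core_rx": populated while writable, then flipped to r-x */
        unsigned char *rx = mmap(NULL, pg, PROT_READ | PROT_WRITE,
                                 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
        if (rw == MAP_FAILED || rx == MAP_FAILED) { perror("mmap"); return 1; }

        memcpy(rw, "module data", 12);        /* SHF_WRITE sections go here */
        memcpy(rx, "\xc3", 1);                /* stand-in for text (x86 'ret') */
        if (mprotect(rx, pg, PROT_READ | PROT_EXEC)) {
            perror("mprotect");
            return 1;
        }

        /* Writing to rx now faults; no region is ever W and X at once. */
        printf("rw=%p (rw-), rx=%p (r-x)\n", (void *)rw, (void *)rx);
        return 0;
    }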
77476diff --git a/kernel/mutex-debug.c b/kernel/mutex-debug.c
77477index 7e3443f..b2a1e6b 100644
77478--- a/kernel/mutex-debug.c
77479+++ b/kernel/mutex-debug.c
77480@@ -49,21 +49,21 @@ void debug_mutex_free_waiter(struct mutex_waiter *waiter)
77481 }
77482
77483 void debug_mutex_add_waiter(struct mutex *lock, struct mutex_waiter *waiter,
77484- struct thread_info *ti)
77485+ struct task_struct *task)
77486 {
77487 SMP_DEBUG_LOCKS_WARN_ON(!spin_is_locked(&lock->wait_lock));
77488
77489 /* Mark the current thread as blocked on the lock: */
77490- ti->task->blocked_on = waiter;
77491+ task->blocked_on = waiter;
77492 }
77493
77494 void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
77495- struct thread_info *ti)
77496+ struct task_struct *task)
77497 {
77498 DEBUG_LOCKS_WARN_ON(list_empty(&waiter->list));
77499- DEBUG_LOCKS_WARN_ON(waiter->task != ti->task);
77500- DEBUG_LOCKS_WARN_ON(ti->task->blocked_on != waiter);
77501- ti->task->blocked_on = NULL;
77502+ DEBUG_LOCKS_WARN_ON(waiter->task != task);
77503+ DEBUG_LOCKS_WARN_ON(task->blocked_on != waiter);
77504+ task->blocked_on = NULL;
77505
77506 list_del_init(&waiter->list);
77507 waiter->task = NULL;
77508diff --git a/kernel/mutex-debug.h b/kernel/mutex-debug.h
77509index 0799fd3..d06ae3b 100644
77510--- a/kernel/mutex-debug.h
77511+++ b/kernel/mutex-debug.h
77512@@ -20,9 +20,9 @@ extern void debug_mutex_wake_waiter(struct mutex *lock,
77513 extern void debug_mutex_free_waiter(struct mutex_waiter *waiter);
77514 extern void debug_mutex_add_waiter(struct mutex *lock,
77515 struct mutex_waiter *waiter,
77516- struct thread_info *ti);
77517+ struct task_struct *task);
77518 extern void mutex_remove_waiter(struct mutex *lock, struct mutex_waiter *waiter,
77519- struct thread_info *ti);
77520+ struct task_struct *task);
77521 extern void debug_mutex_unlock(struct mutex *lock);
77522 extern void debug_mutex_init(struct mutex *lock, const char *name,
77523 struct lock_class_key *key);
77524diff --git a/kernel/mutex.c b/kernel/mutex.c
77525index ad53a66..f1bf8bc 100644
77526--- a/kernel/mutex.c
77527+++ b/kernel/mutex.c
77528@@ -134,7 +134,7 @@ void mspin_lock(struct mspin_node **lock, struct mspin_node *node)
77529 node->locked = 1;
77530 return;
77531 }
77532- ACCESS_ONCE(prev->next) = node;
77533+ ACCESS_ONCE_RW(prev->next) = node;
77534 smp_wmb();
77535 /* Wait until the lock holder passes the lock down */
77536 while (!ACCESS_ONCE(node->locked))
77537@@ -155,7 +155,7 @@ static void mspin_unlock(struct mspin_node **lock, struct mspin_node *node)
77538 while (!(next = ACCESS_ONCE(node->next)))
77539 arch_mutex_cpu_relax();
77540 }
77541- ACCESS_ONCE(next->locked) = 1;
77542+ ACCESS_ONCE_RW(next->locked) = 1;
77543 smp_wmb();
77544 }
77545
77546@@ -341,7 +341,7 @@ slowpath:
77547 spin_lock_mutex(&lock->wait_lock, flags);
77548
77549 debug_mutex_lock_common(lock, &waiter);
77550- debug_mutex_add_waiter(lock, &waiter, task_thread_info(task));
77551+ debug_mutex_add_waiter(lock, &waiter, task);
77552
77553 /* add waiting tasks to the end of the waitqueue (FIFO): */
77554 list_add_tail(&waiter.list, &lock->wait_list);
77555@@ -371,8 +371,7 @@ slowpath:
77556 * TASK_UNINTERRUPTIBLE case.)
77557 */
77558 if (unlikely(signal_pending_state(state, task))) {
77559- mutex_remove_waiter(lock, &waiter,
77560- task_thread_info(task));
77561+ mutex_remove_waiter(lock, &waiter, task);
77562 mutex_release(&lock->dep_map, 1, ip);
77563 spin_unlock_mutex(&lock->wait_lock, flags);
77564
77565@@ -391,7 +390,7 @@ slowpath:
77566 done:
77567 lock_acquired(&lock->dep_map, ip);
77568 /* got the lock - rejoice! */
77569- mutex_remove_waiter(lock, &waiter, current_thread_info());
77570+ mutex_remove_waiter(lock, &waiter, task);
77571 mutex_set_owner(lock);
77572
77573 /* set it to 0 if there are no waiters left: */
77574diff --git a/kernel/notifier.c b/kernel/notifier.c
77575index 2d5cc4c..d9ea600 100644
77576--- a/kernel/notifier.c
77577+++ b/kernel/notifier.c
77578@@ -5,6 +5,7 @@
77579 #include <linux/rcupdate.h>
77580 #include <linux/vmalloc.h>
77581 #include <linux/reboot.h>
77582+#include <linux/mm.h>
77583
77584 /*
77585 * Notifier list for kernel code which wants to be called
77586@@ -24,10 +25,12 @@ static int notifier_chain_register(struct notifier_block **nl,
77587 while ((*nl) != NULL) {
77588 if (n->priority > (*nl)->priority)
77589 break;
77590- nl = &((*nl)->next);
77591+ nl = (struct notifier_block **)&((*nl)->next);
77592 }
77593- n->next = *nl;
77594+ pax_open_kernel();
77595+ *(const void **)&n->next = *nl;
77596 rcu_assign_pointer(*nl, n);
77597+ pax_close_kernel();
77598 return 0;
77599 }
77600
77601@@ -39,10 +42,12 @@ static int notifier_chain_cond_register(struct notifier_block **nl,
77602 return 0;
77603 if (n->priority > (*nl)->priority)
77604 break;
77605- nl = &((*nl)->next);
77606+ nl = (struct notifier_block **)&((*nl)->next);
77607 }
77608- n->next = *nl;
77609+ pax_open_kernel();
77610+ *(const void **)&n->next = *nl;
77611 rcu_assign_pointer(*nl, n);
77612+ pax_close_kernel();
77613 return 0;
77614 }
77615
77616@@ -51,10 +56,12 @@ static int notifier_chain_unregister(struct notifier_block **nl,
77617 {
77618 while ((*nl) != NULL) {
77619 if ((*nl) == n) {
77620+ pax_open_kernel();
77621 rcu_assign_pointer(*nl, n->next);
77622+ pax_close_kernel();
77623 return 0;
77624 }
77625- nl = &((*nl)->next);
77626+ nl = (struct notifier_block **)&((*nl)->next);
77627 }
77628 return -ENOENT;
77629 }
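The notifier changes keep the same priority-ordered insertion but cast around the constified next pointers and bracket the actual stores with pax_open_kernel()/pax_close_kernel(), since under constify the notifier blocks live in read-only memory. The underlying idiom, walking a pointer-to-pointer to the insertion point, is worth seeing on its own (a plain user-space sketch):

    #include <stdio.h>

    struct nb {
        int priority;
        struct nb *next;
    };

    /* Insert n keeping the list sorted by descending priority, using the
     * same nl = &(*nl)->next walk as notifier_chain_register(). */
    static void chain_register(struct nb **nl, struct nb *n)
    {
        while (*nl != NULL) {
            if (n->priority > (*nl)->priority)
                break;
            nl = &(*nl)->next;
        }
        n->next = *nl;   /* the two stores the patch wraps in pax_open_kernel() */
        *nl = n;
    }

    int main(void)
    {
        struct nb a = { 10, NULL }, b = { 30, NULL }, c = { 20, NULL };
        struct nb *head = NULL;

        chain_register(&head, &a);
        chain_register(&head, &b);
        chain_register(&head, &c);
        for (struct nb *p = head; p; p = p->next)
            printf("%d ", p->priority);      /* prints: 30 20 10 */
        putchar('\n');
        return 0;
    }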
77630diff --git a/kernel/panic.c b/kernel/panic.c
77631index 167ec09..0dda5f9 100644
77632--- a/kernel/panic.c
77633+++ b/kernel/panic.c
77634@@ -400,7 +400,7 @@ static void warn_slowpath_common(const char *file, int line, void *caller,
77635 unsigned taint, struct slowpath_args *args)
77636 {
77637 printk(KERN_WARNING "------------[ cut here ]------------\n");
77638- printk(KERN_WARNING "WARNING: at %s:%d %pS()\n", file, line, caller);
77639+ printk(KERN_WARNING "WARNING: at %s:%d %pA()\n", file, line, caller);
77640
77641 if (args)
77642 vprintk(args->fmt, args->args);
77643@@ -453,7 +453,8 @@ EXPORT_SYMBOL(warn_slowpath_null);
77644 */
77645 void __stack_chk_fail(void)
77646 {
77647- panic("stack-protector: Kernel stack is corrupted in: %p\n",
77648+ dump_stack();
77649+ panic("stack-protector: Kernel stack is corrupted in: %pA\n",
77650 __builtin_return_address(0));
77651 }
77652 EXPORT_SYMBOL(__stack_chk_fail);
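The panic.c hunk makes __stack_chk_fail() dump a stack trace before panicking and prints the return address with %pA. __stack_chk_fail() is the same hook GCC's -fstack-protector emits in user space, so a deliberately broken program shows it firing (a sketch that aborts on purpose):

    /* build: gcc -fstack-protector-all -O0 smash.c && ./a.out
     * expected: "*** stack smashing detected ***" then abort,
     * the user-space counterpart of the kernel's __stack_chk_fail(). */
    #include <string.h>

    static void smash(const char *src)
    {
        char buf[8];

        strcpy(buf, src);   /* overflows buf, clobbers the stack canary */
    }

    int main(void)
    {
        smash("definitely longer than eight bytes");
        return 0;
    }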
77653diff --git a/kernel/pid.c b/kernel/pid.c
77654index 0db3e79..95b9dc2 100644
77655--- a/kernel/pid.c
77656+++ b/kernel/pid.c
77657@@ -33,6 +33,7 @@
77658 #include <linux/rculist.h>
77659 #include <linux/bootmem.h>
77660 #include <linux/hash.h>
77661+#include <linux/security.h>
77662 #include <linux/pid_namespace.h>
77663 #include <linux/init_task.h>
77664 #include <linux/syscalls.h>
77665@@ -47,7 +48,7 @@ struct pid init_struct_pid = INIT_STRUCT_PID;
77666
77667 int pid_max = PID_MAX_DEFAULT;
77668
77669-#define RESERVED_PIDS 300
77670+#define RESERVED_PIDS 500
77671
77672 int pid_max_min = RESERVED_PIDS + 1;
77673 int pid_max_max = PID_MAX_LIMIT;
77674@@ -442,10 +443,18 @@ EXPORT_SYMBOL(pid_task);
77675 */
77676 struct task_struct *find_task_by_pid_ns(pid_t nr, struct pid_namespace *ns)
77677 {
77678+ struct task_struct *task;
77679+
77680 rcu_lockdep_assert(rcu_read_lock_held(),
77681 "find_task_by_pid_ns() needs rcu_read_lock()"
77682 " protection");
77683- return pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
77684+
77685+ task = pid_task(find_pid_ns(nr, ns), PIDTYPE_PID);
77686+
77687+ if (gr_pid_is_chrooted(task))
77688+ return NULL;
77689+
77690+ return task;
77691 }
77692
77693 struct task_struct *find_task_by_vpid(pid_t vnr)
77694@@ -453,6 +462,14 @@ struct task_struct *find_task_by_vpid(pid_t vnr)
77695 return find_task_by_pid_ns(vnr, task_active_pid_ns(current));
77696 }
77697
77698+struct task_struct *find_task_by_vpid_unrestricted(pid_t vnr)
77699+{
77700+ rcu_lockdep_assert(rcu_read_lock_held(),
77701+ "find_task_by_pid_ns() needs rcu_read_lock()"
77702+ " protection");
77703+ return pid_task(find_pid_ns(vnr, task_active_pid_ns(current)), PIDTYPE_PID);
77704+}
77705+
77706 struct pid *get_task_pid(struct task_struct *task, enum pid_type type)
77707 {
77708 struct pid *pid;
77709diff --git a/kernel/pid_namespace.c b/kernel/pid_namespace.c
77710index 6917e8e..9909aeb 100644
77711--- a/kernel/pid_namespace.c
77712+++ b/kernel/pid_namespace.c
77713@@ -247,7 +247,7 @@ static int pid_ns_ctl_handler(struct ctl_table *table, int write,
77714 void __user *buffer, size_t *lenp, loff_t *ppos)
77715 {
77716 struct pid_namespace *pid_ns = task_active_pid_ns(current);
77717- struct ctl_table tmp = *table;
77718+ ctl_table_no_const tmp = *table;
77719
77720 if (write && !ns_capable(pid_ns->user_ns, CAP_SYS_ADMIN))
77721 return -EPERM;
77722diff --git a/kernel/posix-cpu-timers.c b/kernel/posix-cpu-timers.c
77723index 42670e9..8719c2f 100644
77724--- a/kernel/posix-cpu-timers.c
77725+++ b/kernel/posix-cpu-timers.c
77726@@ -1636,14 +1636,14 @@ struct k_clock clock_posix_cpu = {
77727
77728 static __init int init_posix_cpu_timers(void)
77729 {
77730- struct k_clock process = {
77731+ static struct k_clock process = {
77732 .clock_getres = process_cpu_clock_getres,
77733 .clock_get = process_cpu_clock_get,
77734 .timer_create = process_cpu_timer_create,
77735 .nsleep = process_cpu_nsleep,
77736 .nsleep_restart = process_cpu_nsleep_restart,
77737 };
77738- struct k_clock thread = {
77739+ static struct k_clock thread = {
77740 .clock_getres = thread_cpu_clock_getres,
77741 .clock_get = thread_cpu_clock_get,
77742 .timer_create = thread_cpu_timer_create,
77743diff --git a/kernel/posix-timers.c b/kernel/posix-timers.c
77744index 424c2d4..a9194f7 100644
77745--- a/kernel/posix-timers.c
77746+++ b/kernel/posix-timers.c
77747@@ -43,6 +43,7 @@
77748 #include <linux/hash.h>
77749 #include <linux/posix-clock.h>
77750 #include <linux/posix-timers.h>
77751+#include <linux/grsecurity.h>
77752 #include <linux/syscalls.h>
77753 #include <linux/wait.h>
77754 #include <linux/workqueue.h>
77755@@ -122,7 +123,7 @@ static DEFINE_SPINLOCK(hash_lock);
77756 * which we beg off on and pass to do_sys_settimeofday().
77757 */
77758
77759-static struct k_clock posix_clocks[MAX_CLOCKS];
77760+static struct k_clock *posix_clocks[MAX_CLOCKS];
77761
77762 /*
77763 * These ones are defined below.
77764@@ -275,7 +276,7 @@ static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
77765 */
77766 static __init int init_posix_timers(void)
77767 {
77768- struct k_clock clock_realtime = {
77769+ static struct k_clock clock_realtime = {
77770 .clock_getres = hrtimer_get_res,
77771 .clock_get = posix_clock_realtime_get,
77772 .clock_set = posix_clock_realtime_set,
77773@@ -287,7 +288,7 @@ static __init int init_posix_timers(void)
77774 .timer_get = common_timer_get,
77775 .timer_del = common_timer_del,
77776 };
77777- struct k_clock clock_monotonic = {
77778+ static struct k_clock clock_monotonic = {
77779 .clock_getres = hrtimer_get_res,
77780 .clock_get = posix_ktime_get_ts,
77781 .nsleep = common_nsleep,
77782@@ -297,19 +298,19 @@ static __init int init_posix_timers(void)
77783 .timer_get = common_timer_get,
77784 .timer_del = common_timer_del,
77785 };
77786- struct k_clock clock_monotonic_raw = {
77787+ static struct k_clock clock_monotonic_raw = {
77788 .clock_getres = hrtimer_get_res,
77789 .clock_get = posix_get_monotonic_raw,
77790 };
77791- struct k_clock clock_realtime_coarse = {
77792+ static struct k_clock clock_realtime_coarse = {
77793 .clock_getres = posix_get_coarse_res,
77794 .clock_get = posix_get_realtime_coarse,
77795 };
77796- struct k_clock clock_monotonic_coarse = {
77797+ static struct k_clock clock_monotonic_coarse = {
77798 .clock_getres = posix_get_coarse_res,
77799 .clock_get = posix_get_monotonic_coarse,
77800 };
77801- struct k_clock clock_tai = {
77802+ static struct k_clock clock_tai = {
77803 .clock_getres = hrtimer_get_res,
77804 .clock_get = posix_get_tai,
77805 .nsleep = common_nsleep,
77806@@ -319,7 +320,7 @@ static __init int init_posix_timers(void)
77807 .timer_get = common_timer_get,
77808 .timer_del = common_timer_del,
77809 };
77810- struct k_clock clock_boottime = {
77811+ static struct k_clock clock_boottime = {
77812 .clock_getres = hrtimer_get_res,
77813 .clock_get = posix_get_boottime,
77814 .nsleep = common_nsleep,
77815@@ -531,7 +532,7 @@ void posix_timers_register_clock(const clockid_t clock_id,
77816 return;
77817 }
77818
77819- posix_clocks[clock_id] = *new_clock;
77820+ posix_clocks[clock_id] = new_clock;
77821 }
77822 EXPORT_SYMBOL_GPL(posix_timers_register_clock);
77823
77824@@ -577,9 +578,9 @@ static struct k_clock *clockid_to_kclock(const clockid_t id)
77825 return (id & CLOCKFD_MASK) == CLOCKFD ?
77826 &clock_posix_dynamic : &clock_posix_cpu;
77827
77828- if (id >= MAX_CLOCKS || !posix_clocks[id].clock_getres)
77829+ if (id >= MAX_CLOCKS || !posix_clocks[id] || !posix_clocks[id]->clock_getres)
77830 return NULL;
77831- return &posix_clocks[id];
77832+ return posix_clocks[id];
77833 }
77834
77835 static int common_timer_create(struct k_itimer *new_timer)
77836@@ -1011,6 +1012,13 @@ SYSCALL_DEFINE2(clock_settime, const clockid_t, which_clock,
77837 if (copy_from_user(&new_tp, tp, sizeof (*tp)))
77838 return -EFAULT;
77839
77840+ /* only the CLOCK_REALTIME clock can be set; all other clocks
77841+ have their clock_set fptr set to a nosettime dummy function.
77842+ CLOCK_REALTIME has a NULL clock_set fptr, which causes it to
77843+ call common_clock_set, which calls do_sys_settimeofday, which
77844+ we hook.
77845+ */
77846+
77847 return kc->clock_set(which_clock, &new_tp);
77848 }
77849
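The posix-timers change turns posix_clocks[] from an array of struct copies into an array of pointers to static structs: registration stores one pointer instead of memcpy'ing a whole function-pointer table into writable memory, shrinking the surface an attacker could overwrite to hijack clock_set and friends. The pattern in miniature (hypothetical names):

    #include <stdio.h>

    struct k_ops {
        int (*getres)(void);
    };

    static int realtime_getres(void) { return 1; }

    /* Before: struct k_ops table[N]; registration copies the struct,
     * leaving live function pointers in writable memory.
     * After:  const struct k_ops *table[N]; only a pointer is stored
     * and the ops themselves can live in .rodata. */
    static const struct k_ops clock_realtime = { .getres = realtime_getres };
    static const struct k_ops *table[4];

    static void register_clock(int id, const struct k_ops *ops)
    {
        table[id] = ops;                   /* pointer store, no struct copy */
    }

    int main(void)
    {
        register_clock(0, &clock_realtime);
        if (table[0] && table[0]->getres)  /* NULL checks, as in clockid_to_kclock() */
            printf("res = %d\n", table[0]->getres());
        return 0;
    }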
77850diff --git a/kernel/power/process.c b/kernel/power/process.c
77851index 98088e0..aaf95c0 100644
77852--- a/kernel/power/process.c
77853+++ b/kernel/power/process.c
77854@@ -33,6 +33,7 @@ static int try_to_freeze_tasks(bool user_only)
77855 u64 elapsed_csecs64;
77856 unsigned int elapsed_csecs;
77857 bool wakeup = false;
77858+ bool timedout = false;
77859
77860 do_gettimeofday(&start);
77861
77862@@ -43,13 +44,20 @@ static int try_to_freeze_tasks(bool user_only)
77863
77864 while (true) {
77865 todo = 0;
77866+ if (time_after(jiffies, end_time))
77867+ timedout = true;
77868 read_lock(&tasklist_lock);
77869 do_each_thread(g, p) {
77870 if (p == current || !freeze_task(p))
77871 continue;
77872
77873- if (!freezer_should_skip(p))
77874+ if (!freezer_should_skip(p)) {
77875 todo++;
77876+ if (timedout) {
77877+ printk(KERN_ERR "Task refusing to freeze:\n");
77878+ sched_show_task(p);
77879+ }
77880+ }
77881 } while_each_thread(g, p);
77882 read_unlock(&tasklist_lock);
77883
77884@@ -58,7 +66,7 @@ static int try_to_freeze_tasks(bool user_only)
77885 todo += wq_busy;
77886 }
77887
77888- if (!todo || time_after(jiffies, end_time))
77889+ if (!todo || timedout)
77890 break;
77891
77892 if (pm_wakeup_pending()) {
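
The try_to_freeze_tasks() change latches the deadline into a timedout flag before the scan, so the final sweep still runs and can name every task that refuses to freeze, instead of breaking out silently the moment the deadline passes. A userspace sketch of that pattern, with still_busy() and report() standing in for freezer_should_skip()/sched_show_task():

    /* Userspace sketch of "diagnose on the final pass". */
    #include <stdbool.h>
    #include <stdio.h>
    #include <time.h>
    #include <unistd.h>

    static bool still_busy(int task) { return task == 2; }

    static void report(int task)
    {
            fprintf(stderr, "task %d refusing to stop\n", task);
    }

    int main(void)
    {
            time_t end_time = time(NULL) + 2;
            bool timedout = false;

            for (;;) {
                    int todo = 0;

                    /* Latch the timeout before scanning, so the last
                     * sweep still runs and can log every offender. */
                    if (time(NULL) > end_time)
                            timedout = true;

                    for (int task = 0; task < 4; task++) {
                            if (still_busy(task)) {
                                    todo++;
                                    if (timedout)
                                            report(task);
                            }
                    }

                    if (!todo || timedout)
                            break;
                    sleep(1);
            }
            return 0;
    }
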
77893diff --git a/kernel/printk.c b/kernel/printk.c
77894index d37d45c..ab918b3 100644
77895--- a/kernel/printk.c
77896+++ b/kernel/printk.c
77897@@ -390,6 +390,11 @@ static int check_syslog_permissions(int type, bool from_file)
77898 if (from_file && type != SYSLOG_ACTION_OPEN)
77899 return 0;
77900
77901+#ifdef CONFIG_GRKERNSEC_DMESG
77902+ if (grsec_enable_dmesg && !capable(CAP_SYSLOG) && !capable_nolog(CAP_SYS_ADMIN))
77903+ return -EPERM;
77904+#endif
77905+
77906 if (syslog_action_restricted(type)) {
77907 if (capable(CAP_SYSLOG))
77908 return 0;
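
The check_syslog_permissions() hunk puts the grsecurity gate ahead of the stock syslog_action_restricted() logic, so with CONFIG_GRKERNSEC_DMESG enabled an unprivileged reader is refused before any of the usual checks run. A small sketch of that short-circuit ordering; all names here are illustrative:

    /* Sketch of the short-circuit ordering: the hardened gate runs
     * before the stock restriction logic. */
    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool grsec_dmesg_enabled = true;

    static bool has_cap_syslog(void)    { return false; } /* unprivileged */
    static bool has_cap_sys_admin(void) { return false; }

    static int check_log_permission(void)
    {
            if (grsec_dmesg_enabled && !has_cap_syslog() && !has_cap_sys_admin())
                    return -EPERM;  /* hardened gate fires first */
            /* ...the stock syslog_action_restricted() logic would follow... */
            return 0;
    }

    int main(void)
    {
            printf("check_log_permission() = %d\n", check_log_permission());
            return 0;
    }
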
77909diff --git a/kernel/profile.c b/kernel/profile.c
77910index 0bf4007..6234708 100644
77911--- a/kernel/profile.c
77912+++ b/kernel/profile.c
77913@@ -37,7 +37,7 @@ struct profile_hit {
77914 #define NR_PROFILE_HIT (PAGE_SIZE/sizeof(struct profile_hit))
77915 #define NR_PROFILE_GRP (NR_PROFILE_HIT/PROFILE_GRPSZ)
77916
77917-static atomic_t *prof_buffer;
77918+static atomic_unchecked_t *prof_buffer;
77919 static unsigned long prof_len, prof_shift;
77920
77921 int prof_on __read_mostly;
77922@@ -260,7 +260,7 @@ static void profile_flip_buffers(void)
77923 hits[i].pc = 0;
77924 continue;
77925 }
77926- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
77927+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
77928 hits[i].hits = hits[i].pc = 0;
77929 }
77930 }
77931@@ -321,9 +321,9 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
77932 * Add the current hit(s) and flush the write-queue out
77933 * to the global buffer:
77934 */
77935- atomic_add(nr_hits, &prof_buffer[pc]);
77936+ atomic_add_unchecked(nr_hits, &prof_buffer[pc]);
77937 for (i = 0; i < NR_PROFILE_HIT; ++i) {
77938- atomic_add(hits[i].hits, &prof_buffer[hits[i].pc]);
77939+ atomic_add_unchecked(hits[i].hits, &prof_buffer[hits[i].pc]);
77940 hits[i].pc = hits[i].hits = 0;
77941 }
77942 out:
77943@@ -398,7 +398,7 @@ static void do_profile_hits(int type, void *__pc, unsigned int nr_hits)
77944 {
77945 unsigned long pc;
77946 pc = ((unsigned long)__pc - (unsigned long)_stext) >> prof_shift;
77947- atomic_add(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
77948+ atomic_add_unchecked(nr_hits, &prof_buffer[min(pc, prof_len - 1)]);
77949 }
77950 #endif /* !CONFIG_SMP */
77951
77952@@ -494,7 +494,7 @@ read_profile(struct file *file, char __user *buf, size_t count, loff_t *ppos)
77953 return -EFAULT;
77954 buf++; p++; count--; read++;
77955 }
77956- pnt = (char *)prof_buffer + p - sizeof(atomic_t);
77957+ pnt = (char *)prof_buffer + p - sizeof(atomic_unchecked_t);
77958 if (copy_to_user(buf, (void *)pnt, count))
77959 return -EFAULT;
77960 read += count;
77961@@ -525,7 +525,7 @@ static ssize_t write_profile(struct file *file, const char __user *buf,
77962 }
77963 #endif
77964 profile_discard_flip_buffers();
77965- memset(prof_buffer, 0, prof_len * sizeof(atomic_t));
77966+ memset(prof_buffer, 0, prof_len * sizeof(atomic_unchecked_t));
77967 return count;
77968 }
77969
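
prof_buffer holds pure statistics, so the patch moves it to atomic_unchecked_t: under PaX REFCOUNT, plain atomic_t arithmetic traps on overflow to stop reference-count wraps, and counters whose wraparound is harmless opt out via the _unchecked variants. A userspace sketch of the distinction; the checked add saturates here only to make the intent visible, which is not the kernel's exact trap behaviour:

    /* Sketch of checked vs. unchecked counters. PaX does the overflow
     * check in asm and raises a trap; saturating here just illustrates
     * "overflow is refused". */
    #include <limits.h>
    #include <stdio.h>

    typedef struct { volatile unsigned int v; } atomic_t;
    typedef struct { volatile unsigned int v; } atomic_unchecked_t;

    static void atomic_add(unsigned int n, atomic_t *a)
    {
            if (a->v > UINT_MAX - n)
                    a->v = UINT_MAX;        /* would overflow: refuse */
            else
                    a->v += n;
    }

    static void atomic_add_unchecked(unsigned int n, atomic_unchecked_t *a)
    {
            a->v += n;      /* statistics counter: wraparound is harmless */
    }

    int main(void)
    {
            atomic_t ref = { UINT_MAX - 1 };
            atomic_unchecked_t hits = { UINT_MAX - 1 };

            atomic_add(2, &ref);
            atomic_add_unchecked(2, &hits);
            printf("ref=%u hits=%u\n", ref.v, hits.v); /* ref saturates, hits wraps */
            return 0;
    }

The same reasoning covers the rcutorture, rtmutex-tester and autogroup counters converted later in this patch: they are event tallies, not object lifetimes, so an overflow trap on them would be a false positive.
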
77970diff --git a/kernel/ptrace.c b/kernel/ptrace.c
77971index 335a7ae..3bbbceb 100644
77972--- a/kernel/ptrace.c
77973+++ b/kernel/ptrace.c
77974@@ -326,7 +326,7 @@ static int ptrace_attach(struct task_struct *task, long request,
77975 if (seize)
77976 flags |= PT_SEIZED;
77977 rcu_read_lock();
77978- if (ns_capable(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
77979+ if (ns_capable_nolog(__task_cred(task)->user_ns, CAP_SYS_PTRACE))
77980 flags |= PT_PTRACE_CAP;
77981 rcu_read_unlock();
77982 task->ptrace = flags;
77983@@ -537,7 +537,7 @@ int ptrace_readdata(struct task_struct *tsk, unsigned long src, char __user *dst
77984 break;
77985 return -EIO;
77986 }
77987- if (copy_to_user(dst, buf, retval))
77988+ if (retval > sizeof(buf) || copy_to_user(dst, buf, retval))
77989 return -EFAULT;
77990 copied += retval;
77991 src += retval;
77992@@ -805,7 +805,7 @@ int ptrace_request(struct task_struct *child, long request,
77993 bool seized = child->ptrace & PT_SEIZED;
77994 int ret = -EIO;
77995 siginfo_t siginfo, *si;
77996- void __user *datavp = (void __user *) data;
77997+ void __user *datavp = (__force void __user *) data;
77998 unsigned long __user *datalp = datavp;
77999 unsigned long flags;
78000
78001@@ -1011,14 +1011,21 @@ SYSCALL_DEFINE4(ptrace, long, request, long, pid, unsigned long, addr,
78002 goto out;
78003 }
78004
78005+ if (gr_handle_ptrace(child, request)) {
78006+ ret = -EPERM;
78007+ goto out_put_task_struct;
78008+ }
78009+
78010 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
78011 ret = ptrace_attach(child, request, addr, data);
78012 /*
78013 * Some architectures need to do book-keeping after
78014 * a ptrace attach.
78015 */
78016- if (!ret)
78017+ if (!ret) {
78018 arch_ptrace_attach(child);
78019+ gr_audit_ptrace(child);
78020+ }
78021 goto out_put_task_struct;
78022 }
78023
78024@@ -1046,7 +1053,7 @@ int generic_ptrace_peekdata(struct task_struct *tsk, unsigned long addr,
78025 copied = access_process_vm(tsk, addr, &tmp, sizeof(tmp), 0);
78026 if (copied != sizeof(tmp))
78027 return -EIO;
78028- return put_user(tmp, (unsigned long __user *)data);
78029+ return put_user(tmp, (__force unsigned long __user *)data);
78030 }
78031
78032 int generic_ptrace_pokedata(struct task_struct *tsk, unsigned long addr,
78033@@ -1140,7 +1147,7 @@ int compat_ptrace_request(struct task_struct *child, compat_long_t request,
78034 }
78035
78036 asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
78037- compat_long_t addr, compat_long_t data)
78038+ compat_ulong_t addr, compat_ulong_t data)
78039 {
78040 struct task_struct *child;
78041 long ret;
78042@@ -1156,14 +1163,21 @@ asmlinkage long compat_sys_ptrace(compat_long_t request, compat_long_t pid,
78043 goto out;
78044 }
78045
78046+ if (gr_handle_ptrace(child, request)) {
78047+ ret = -EPERM;
78048+ goto out_put_task_struct;
78049+ }
78050+
78051 if (request == PTRACE_ATTACH || request == PTRACE_SEIZE) {
78052 ret = ptrace_attach(child, request, addr, data);
78053 /*
78054 * Some architectures need to do book-keeping after
78055 * a ptrace attach.
78056 */
78057- if (!ret)
78058+ if (!ret) {
78059 arch_ptrace_attach(child);
78060+ gr_audit_ptrace(child);
78061+ }
78062 goto out_put_task_struct;
78063 }
78064
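
The ptrace_readdata() hunk adds a bound on retval before copy_to_user(), so even a misbehaving access_process_vm() return can never over-read the stack buffer. A sketch of the same belt-and-braces check, with read_chunk() standing in for access_process_vm():

    /* Sketch of the defensive bound check added before the copy. */
    #include <stdio.h>
    #include <string.h>

    static long read_chunk(char *buf, size_t len)
    {
            size_t n = len < 128 ? len : 128;
            memset(buf, 'A', n);
            return (long)n;
    }

    static int readdata(char *dst, size_t len)
    {
            char buf[128];

            while (len > 0) {
                    size_t want = len < sizeof(buf) ? len : sizeof(buf);
                    long retval = read_chunk(buf, want);

                    if (retval <= 0)
                            return -1;
                    /* retval "cannot" exceed sizeof(buf), but bound it
                     * explicitly so a bad return can never over-read. */
                    if ((size_t)retval > sizeof(buf))
                            return -1;
                    memcpy(dst, buf, (size_t)retval);
                    dst += retval;
                    len -= (size_t)retval;
            }
            return 0;
    }

    int main(void)
    {
            char out[300];
            printf("readdata: %d\n", readdata(out, sizeof(out)));
            return 0;
    }
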
78065diff --git a/kernel/rcupdate.c b/kernel/rcupdate.c
78066index 48ab703..07561d4 100644
78067--- a/kernel/rcupdate.c
78068+++ b/kernel/rcupdate.c
78069@@ -439,10 +439,10 @@ int rcu_jiffies_till_stall_check(void)
78070 * for CONFIG_RCU_CPU_STALL_TIMEOUT.
78071 */
78072 if (till_stall_check < 3) {
78073- ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
78074+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 3;
78075 till_stall_check = 3;
78076 } else if (till_stall_check > 300) {
78077- ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
78078+ ACCESS_ONCE_RW(rcu_cpu_stall_timeout) = 300;
78079 till_stall_check = 300;
78080 }
78081 return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
78082diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
78083index a0714a5..2ab5e34 100644
78084--- a/kernel/rcutiny.c
78085+++ b/kernel/rcutiny.c
78086@@ -46,7 +46,7 @@
78087 struct rcu_ctrlblk;
78088 static void invoke_rcu_callbacks(void);
78089 static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp);
78090-static void rcu_process_callbacks(struct softirq_action *unused);
78091+static void rcu_process_callbacks(void);
78092 static void __call_rcu(struct rcu_head *head,
78093 void (*func)(struct rcu_head *rcu),
78094 struct rcu_ctrlblk *rcp);
78095@@ -312,7 +312,7 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
78096 rcu_is_callbacks_kthread()));
78097 }
78098
78099-static void rcu_process_callbacks(struct softirq_action *unused)
78100+static void rcu_process_callbacks(void)
78101 {
78102 __rcu_process_callbacks(&rcu_sched_ctrlblk);
78103 __rcu_process_callbacks(&rcu_bh_ctrlblk);
78104diff --git a/kernel/rcutiny_plugin.h b/kernel/rcutiny_plugin.h
78105index 8a23300..4255818 100644
78106--- a/kernel/rcutiny_plugin.h
78107+++ b/kernel/rcutiny_plugin.h
78108@@ -945,7 +945,7 @@ static int rcu_kthread(void *arg)
78109 have_rcu_kthread_work = morework;
78110 local_irq_restore(flags);
78111 if (work)
78112- rcu_process_callbacks(NULL);
78113+ rcu_process_callbacks();
78114 schedule_timeout_interruptible(1); /* Leave CPU for others. */
78115 }
78116
78117diff --git a/kernel/rcutorture.c b/kernel/rcutorture.c
78118index e1f3a8c..42c94a2 100644
78119--- a/kernel/rcutorture.c
78120+++ b/kernel/rcutorture.c
78121@@ -164,12 +164,12 @@ static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_count) =
78122 { 0 };
78123 static DEFINE_PER_CPU(long [RCU_TORTURE_PIPE_LEN + 1], rcu_torture_batch) =
78124 { 0 };
78125-static atomic_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
78126-static atomic_t n_rcu_torture_alloc;
78127-static atomic_t n_rcu_torture_alloc_fail;
78128-static atomic_t n_rcu_torture_free;
78129-static atomic_t n_rcu_torture_mberror;
78130-static atomic_t n_rcu_torture_error;
78131+static atomic_unchecked_t rcu_torture_wcount[RCU_TORTURE_PIPE_LEN + 1];
78132+static atomic_unchecked_t n_rcu_torture_alloc;
78133+static atomic_unchecked_t n_rcu_torture_alloc_fail;
78134+static atomic_unchecked_t n_rcu_torture_free;
78135+static atomic_unchecked_t n_rcu_torture_mberror;
78136+static atomic_unchecked_t n_rcu_torture_error;
78137 static long n_rcu_torture_barrier_error;
78138 static long n_rcu_torture_boost_ktrerror;
78139 static long n_rcu_torture_boost_rterror;
78140@@ -287,11 +287,11 @@ rcu_torture_alloc(void)
78141
78142 spin_lock_bh(&rcu_torture_lock);
78143 if (list_empty(&rcu_torture_freelist)) {
78144- atomic_inc(&n_rcu_torture_alloc_fail);
78145+ atomic_inc_unchecked(&n_rcu_torture_alloc_fail);
78146 spin_unlock_bh(&rcu_torture_lock);
78147 return NULL;
78148 }
78149- atomic_inc(&n_rcu_torture_alloc);
78150+ atomic_inc_unchecked(&n_rcu_torture_alloc);
78151 p = rcu_torture_freelist.next;
78152 list_del_init(p);
78153 spin_unlock_bh(&rcu_torture_lock);
78154@@ -304,7 +304,7 @@ rcu_torture_alloc(void)
78155 static void
78156 rcu_torture_free(struct rcu_torture *p)
78157 {
78158- atomic_inc(&n_rcu_torture_free);
78159+ atomic_inc_unchecked(&n_rcu_torture_free);
78160 spin_lock_bh(&rcu_torture_lock);
78161 list_add_tail(&p->rtort_free, &rcu_torture_freelist);
78162 spin_unlock_bh(&rcu_torture_lock);
78163@@ -424,7 +424,7 @@ rcu_torture_cb(struct rcu_head *p)
78164 i = rp->rtort_pipe_count;
78165 if (i > RCU_TORTURE_PIPE_LEN)
78166 i = RCU_TORTURE_PIPE_LEN;
78167- atomic_inc(&rcu_torture_wcount[i]);
78168+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
78169 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
78170 rp->rtort_mbtest = 0;
78171 rcu_torture_free(rp);
78172@@ -472,7 +472,7 @@ static void rcu_sync_torture_deferred_free(struct rcu_torture *p)
78173 i = rp->rtort_pipe_count;
78174 if (i > RCU_TORTURE_PIPE_LEN)
78175 i = RCU_TORTURE_PIPE_LEN;
78176- atomic_inc(&rcu_torture_wcount[i]);
78177+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
78178 if (++rp->rtort_pipe_count >= RCU_TORTURE_PIPE_LEN) {
78179 rp->rtort_mbtest = 0;
78180 list_del(&rp->rtort_free);
78181@@ -990,7 +990,7 @@ rcu_torture_writer(void *arg)
78182 i = old_rp->rtort_pipe_count;
78183 if (i > RCU_TORTURE_PIPE_LEN)
78184 i = RCU_TORTURE_PIPE_LEN;
78185- atomic_inc(&rcu_torture_wcount[i]);
78186+ atomic_inc_unchecked(&rcu_torture_wcount[i]);
78187 old_rp->rtort_pipe_count++;
78188 cur_ops->deferred_free(old_rp);
78189 }
78190@@ -1076,7 +1076,7 @@ static void rcu_torture_timer(unsigned long unused)
78191 return;
78192 }
78193 if (p->rtort_mbtest == 0)
78194- atomic_inc(&n_rcu_torture_mberror);
78195+ atomic_inc_unchecked(&n_rcu_torture_mberror);
78196 spin_lock(&rand_lock);
78197 cur_ops->read_delay(&rand);
78198 n_rcu_torture_timers++;
78199@@ -1146,7 +1146,7 @@ rcu_torture_reader(void *arg)
78200 continue;
78201 }
78202 if (p->rtort_mbtest == 0)
78203- atomic_inc(&n_rcu_torture_mberror);
78204+ atomic_inc_unchecked(&n_rcu_torture_mberror);
78205 cur_ops->read_delay(&rand);
78206 preempt_disable();
78207 pipe_count = p->rtort_pipe_count;
78208@@ -1209,11 +1209,11 @@ rcu_torture_printk(char *page)
78209 rcu_torture_current,
78210 rcu_torture_current_version,
78211 list_empty(&rcu_torture_freelist),
78212- atomic_read(&n_rcu_torture_alloc),
78213- atomic_read(&n_rcu_torture_alloc_fail),
78214- atomic_read(&n_rcu_torture_free));
78215+ atomic_read_unchecked(&n_rcu_torture_alloc),
78216+ atomic_read_unchecked(&n_rcu_torture_alloc_fail),
78217+ atomic_read_unchecked(&n_rcu_torture_free));
78218 cnt += sprintf(&page[cnt], "rtmbe: %d rtbke: %ld rtbre: %ld ",
78219- atomic_read(&n_rcu_torture_mberror),
78220+ atomic_read_unchecked(&n_rcu_torture_mberror),
78221 n_rcu_torture_boost_ktrerror,
78222 n_rcu_torture_boost_rterror);
78223 cnt += sprintf(&page[cnt], "rtbf: %ld rtb: %ld nt: %ld ",
78224@@ -1232,14 +1232,14 @@ rcu_torture_printk(char *page)
78225 n_barrier_attempts,
78226 n_rcu_torture_barrier_error);
78227 cnt += sprintf(&page[cnt], "\n%s%s ", torture_type, TORTURE_FLAG);
78228- if (atomic_read(&n_rcu_torture_mberror) != 0 ||
78229+ if (atomic_read_unchecked(&n_rcu_torture_mberror) != 0 ||
78230 n_rcu_torture_barrier_error != 0 ||
78231 n_rcu_torture_boost_ktrerror != 0 ||
78232 n_rcu_torture_boost_rterror != 0 ||
78233 n_rcu_torture_boost_failure != 0 ||
78234 i > 1) {
78235 cnt += sprintf(&page[cnt], "!!! ");
78236- atomic_inc(&n_rcu_torture_error);
78237+ atomic_inc_unchecked(&n_rcu_torture_error);
78238 WARN_ON_ONCE(1);
78239 }
78240 cnt += sprintf(&page[cnt], "Reader Pipe: ");
78241@@ -1253,7 +1253,7 @@ rcu_torture_printk(char *page)
78242 cnt += sprintf(&page[cnt], "Free-Block Circulation: ");
78243 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
78244 cnt += sprintf(&page[cnt], " %d",
78245- atomic_read(&rcu_torture_wcount[i]));
78246+ atomic_read_unchecked(&rcu_torture_wcount[i]));
78247 }
78248 cnt += sprintf(&page[cnt], "\n");
78249 if (cur_ops->stats)
78250@@ -1962,7 +1962,7 @@ rcu_torture_cleanup(void)
78251
78252 rcu_torture_stats_print(); /* -After- the stats thread is stopped! */
78253
78254- if (atomic_read(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
78255+ if (atomic_read_unchecked(&n_rcu_torture_error) || n_rcu_torture_barrier_error)
78256 rcu_torture_print_module_parms(cur_ops, "End of test: FAILURE");
78257 else if (n_online_successes != n_online_attempts ||
78258 n_offline_successes != n_offline_attempts)
78259@@ -2031,18 +2031,18 @@ rcu_torture_init(void)
78260
78261 rcu_torture_current = NULL;
78262 rcu_torture_current_version = 0;
78263- atomic_set(&n_rcu_torture_alloc, 0);
78264- atomic_set(&n_rcu_torture_alloc_fail, 0);
78265- atomic_set(&n_rcu_torture_free, 0);
78266- atomic_set(&n_rcu_torture_mberror, 0);
78267- atomic_set(&n_rcu_torture_error, 0);
78268+ atomic_set_unchecked(&n_rcu_torture_alloc, 0);
78269+ atomic_set_unchecked(&n_rcu_torture_alloc_fail, 0);
78270+ atomic_set_unchecked(&n_rcu_torture_free, 0);
78271+ atomic_set_unchecked(&n_rcu_torture_mberror, 0);
78272+ atomic_set_unchecked(&n_rcu_torture_error, 0);
78273 n_rcu_torture_barrier_error = 0;
78274 n_rcu_torture_boost_ktrerror = 0;
78275 n_rcu_torture_boost_rterror = 0;
78276 n_rcu_torture_boost_failure = 0;
78277 n_rcu_torture_boosts = 0;
78278 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++)
78279- atomic_set(&rcu_torture_wcount[i], 0);
78280+ atomic_set_unchecked(&rcu_torture_wcount[i], 0);
78281 for_each_possible_cpu(cpu) {
78282 for (i = 0; i < RCU_TORTURE_PIPE_LEN + 1; i++) {
78283 per_cpu(rcu_torture_count, cpu)[i] = 0;
78284diff --git a/kernel/rcutree.c b/kernel/rcutree.c
78285index 3538001..e379e0b 100644
78286--- a/kernel/rcutree.c
78287+++ b/kernel/rcutree.c
78288@@ -358,9 +358,9 @@ static void rcu_eqs_enter_common(struct rcu_dynticks *rdtp, long long oldval,
78289 rcu_prepare_for_idle(smp_processor_id());
78290 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
78291 smp_mb__before_atomic_inc(); /* See above. */
78292- atomic_inc(&rdtp->dynticks);
78293+ atomic_inc_unchecked(&rdtp->dynticks);
78294 smp_mb__after_atomic_inc(); /* Force ordering with next sojourn. */
78295- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
78296+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
78297
78298 /*
78299 * It is illegal to enter an extended quiescent state while
78300@@ -496,10 +496,10 @@ static void rcu_eqs_exit_common(struct rcu_dynticks *rdtp, long long oldval,
78301 int user)
78302 {
78303 smp_mb__before_atomic_inc(); /* Force ordering w/previous sojourn. */
78304- atomic_inc(&rdtp->dynticks);
78305+ atomic_inc_unchecked(&rdtp->dynticks);
78306 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
78307 smp_mb__after_atomic_inc(); /* See above. */
78308- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
78309+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
78310 rcu_cleanup_after_idle(smp_processor_id());
78311 trace_rcu_dyntick("End", oldval, rdtp->dynticks_nesting);
78312 if (!user && !is_idle_task(current)) {
78313@@ -638,14 +638,14 @@ void rcu_nmi_enter(void)
78314 struct rcu_dynticks *rdtp = &__get_cpu_var(rcu_dynticks);
78315
78316 if (rdtp->dynticks_nmi_nesting == 0 &&
78317- (atomic_read(&rdtp->dynticks) & 0x1))
78318+ (atomic_read_unchecked(&rdtp->dynticks) & 0x1))
78319 return;
78320 rdtp->dynticks_nmi_nesting++;
78321 smp_mb__before_atomic_inc(); /* Force delay from prior write. */
78322- atomic_inc(&rdtp->dynticks);
78323+ atomic_inc_unchecked(&rdtp->dynticks);
78324 /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
78325 smp_mb__after_atomic_inc(); /* See above. */
78326- WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
78327+ WARN_ON_ONCE(!(atomic_read_unchecked(&rdtp->dynticks) & 0x1));
78328 }
78329
78330 /**
78331@@ -664,9 +664,9 @@ void rcu_nmi_exit(void)
78332 return;
78333 /* CPUs seeing atomic_inc() must see prior RCU read-side crit sects */
78334 smp_mb__before_atomic_inc(); /* See above. */
78335- atomic_inc(&rdtp->dynticks);
78336+ atomic_inc_unchecked(&rdtp->dynticks);
78337 smp_mb__after_atomic_inc(); /* Force delay to next write. */
78338- WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
78339+ WARN_ON_ONCE(atomic_read_unchecked(&rdtp->dynticks) & 0x1);
78340 }
78341
78342 /**
78343@@ -680,7 +680,7 @@ int rcu_is_cpu_idle(void)
78344 int ret;
78345
78346 preempt_disable();
78347- ret = (atomic_read(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
78348+ ret = (atomic_read_unchecked(&__get_cpu_var(rcu_dynticks).dynticks) & 0x1) == 0;
78349 preempt_enable();
78350 return ret;
78351 }
78352@@ -748,7 +748,7 @@ static int rcu_is_cpu_rrupt_from_idle(void)
78353 */
78354 static int dyntick_save_progress_counter(struct rcu_data *rdp)
78355 {
78356- rdp->dynticks_snap = atomic_add_return(0, &rdp->dynticks->dynticks);
78357+ rdp->dynticks_snap = atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
78358 return (rdp->dynticks_snap & 0x1) == 0;
78359 }
78360
78361@@ -763,7 +763,7 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp)
78362 unsigned int curr;
78363 unsigned int snap;
78364
78365- curr = (unsigned int)atomic_add_return(0, &rdp->dynticks->dynticks);
78366+ curr = (unsigned int)atomic_add_return_unchecked(0, &rdp->dynticks->dynticks);
78367 snap = (unsigned int)rdp->dynticks_snap;
78368
78369 /*
78370@@ -1440,9 +1440,9 @@ static int rcu_gp_init(struct rcu_state *rsp)
78371 rdp = this_cpu_ptr(rsp->rda);
78372 rcu_preempt_check_blocked_tasks(rnp);
78373 rnp->qsmask = rnp->qsmaskinit;
78374- ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
78375+ ACCESS_ONCE_RW(rnp->gpnum) = rsp->gpnum;
78376 WARN_ON_ONCE(rnp->completed != rsp->completed);
78377- ACCESS_ONCE(rnp->completed) = rsp->completed;
78378+ ACCESS_ONCE_RW(rnp->completed) = rsp->completed;
78379 if (rnp == rdp->mynode)
78380 rcu_start_gp_per_cpu(rsp, rnp, rdp);
78381 rcu_preempt_boost_start_gp(rnp);
78382@@ -1524,7 +1524,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
78383 */
78384 rcu_for_each_node_breadth_first(rsp, rnp) {
78385 raw_spin_lock_irq(&rnp->lock);
78386- ACCESS_ONCE(rnp->completed) = rsp->gpnum;
78387+ ACCESS_ONCE_RW(rnp->completed) = rsp->gpnum;
78388 rdp = this_cpu_ptr(rsp->rda);
78389 if (rnp == rdp->mynode)
78390 __rcu_process_gp_end(rsp, rnp, rdp);
78391@@ -1855,7 +1855,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
78392 rsp->qlen += rdp->qlen;
78393 rdp->n_cbs_orphaned += rdp->qlen;
78394 rdp->qlen_lazy = 0;
78395- ACCESS_ONCE(rdp->qlen) = 0;
78396+ ACCESS_ONCE_RW(rdp->qlen) = 0;
78397 }
78398
78399 /*
78400@@ -2101,7 +2101,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
78401 }
78402 smp_mb(); /* List handling before counting for rcu_barrier(). */
78403 rdp->qlen_lazy -= count_lazy;
78404- ACCESS_ONCE(rdp->qlen) -= count;
78405+ ACCESS_ONCE_RW(rdp->qlen) -= count;
78406 rdp->n_cbs_invoked += count;
78407
78408 /* Reinstate batch limit if we have worked down the excess. */
78409@@ -2295,7 +2295,7 @@ __rcu_process_callbacks(struct rcu_state *rsp)
78410 /*
78411 * Do RCU core processing for the current CPU.
78412 */
78413-static void rcu_process_callbacks(struct softirq_action *unused)
78414+static void rcu_process_callbacks(void)
78415 {
78416 struct rcu_state *rsp;
78417
78418@@ -2419,7 +2419,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
78419 local_irq_restore(flags);
78420 return;
78421 }
78422- ACCESS_ONCE(rdp->qlen)++;
78423+ ACCESS_ONCE_RW(rdp->qlen)++;
78424 if (lazy)
78425 rdp->qlen_lazy++;
78426 else
78427@@ -2628,11 +2628,11 @@ void synchronize_sched_expedited(void)
78428 * counter wrap on a 32-bit system. Quite a few more CPUs would of
78429 * course be required on a 64-bit system.
78430 */
78431- if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
78432+ if (ULONG_CMP_GE((ulong)atomic_long_read_unchecked(&rsp->expedited_start),
78433 (ulong)atomic_long_read(&rsp->expedited_done) +
78434 ULONG_MAX / 8)) {
78435 synchronize_sched();
78436- atomic_long_inc(&rsp->expedited_wrap);
78437+ atomic_long_inc_unchecked(&rsp->expedited_wrap);
78438 return;
78439 }
78440
78441@@ -2640,7 +2640,7 @@ void synchronize_sched_expedited(void)
78442 * Take a ticket. Note that atomic_inc_return() implies a
78443 * full memory barrier.
78444 */
78445- snap = atomic_long_inc_return(&rsp->expedited_start);
78446+ snap = atomic_long_inc_return_unchecked(&rsp->expedited_start);
78447 firstsnap = snap;
78448 get_online_cpus();
78449 WARN_ON_ONCE(cpu_is_offline(raw_smp_processor_id()));
78450@@ -2653,14 +2653,14 @@ void synchronize_sched_expedited(void)
78451 synchronize_sched_expedited_cpu_stop,
78452 NULL) == -EAGAIN) {
78453 put_online_cpus();
78454- atomic_long_inc(&rsp->expedited_tryfail);
78455+ atomic_long_inc_unchecked(&rsp->expedited_tryfail);
78456
78457 /* Check to see if someone else did our work for us. */
78458 s = atomic_long_read(&rsp->expedited_done);
78459 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
78460 /* ensure test happens before caller kfree */
78461 smp_mb__before_atomic_inc(); /* ^^^ */
78462- atomic_long_inc(&rsp->expedited_workdone1);
78463+ atomic_long_inc_unchecked(&rsp->expedited_workdone1);
78464 return;
78465 }
78466
78467@@ -2669,7 +2669,7 @@ void synchronize_sched_expedited(void)
78468 udelay(trycount * num_online_cpus());
78469 } else {
78470 wait_rcu_gp(call_rcu_sched);
78471- atomic_long_inc(&rsp->expedited_normal);
78472+ atomic_long_inc_unchecked(&rsp->expedited_normal);
78473 return;
78474 }
78475
78476@@ -2678,7 +2678,7 @@ void synchronize_sched_expedited(void)
78477 if (ULONG_CMP_GE((ulong)s, (ulong)firstsnap)) {
78478 /* ensure test happens before caller kfree */
78479 smp_mb__before_atomic_inc(); /* ^^^ */
78480- atomic_long_inc(&rsp->expedited_workdone2);
78481+ atomic_long_inc_unchecked(&rsp->expedited_workdone2);
78482 return;
78483 }
78484
78485@@ -2690,10 +2690,10 @@ void synchronize_sched_expedited(void)
78486 * period works for us.
78487 */
78488 get_online_cpus();
78489- snap = atomic_long_read(&rsp->expedited_start);
78490+ snap = atomic_long_read_unchecked(&rsp->expedited_start);
78491 smp_mb(); /* ensure read is before try_stop_cpus(). */
78492 }
78493- atomic_long_inc(&rsp->expedited_stoppedcpus);
78494+ atomic_long_inc_unchecked(&rsp->expedited_stoppedcpus);
78495
78496 /*
78497 * Everyone up to our most recent fetch is covered by our grace
78498@@ -2702,16 +2702,16 @@ void synchronize_sched_expedited(void)
78499 * than we did already did their update.
78500 */
78501 do {
78502- atomic_long_inc(&rsp->expedited_done_tries);
78503+ atomic_long_inc_unchecked(&rsp->expedited_done_tries);
78504 s = atomic_long_read(&rsp->expedited_done);
78505 if (ULONG_CMP_GE((ulong)s, (ulong)snap)) {
78506 /* ensure test happens before caller kfree */
78507 smp_mb__before_atomic_inc(); /* ^^^ */
78508- atomic_long_inc(&rsp->expedited_done_lost);
78509+ atomic_long_inc_unchecked(&rsp->expedited_done_lost);
78510 break;
78511 }
78512 } while (atomic_long_cmpxchg(&rsp->expedited_done, s, snap) != s);
78513- atomic_long_inc(&rsp->expedited_done_exit);
78514+ atomic_long_inc_unchecked(&rsp->expedited_done_exit);
78515
78516 put_online_cpus();
78517 }
78518@@ -2893,7 +2893,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
78519 * ACCESS_ONCE() to prevent the compiler from speculating
78520 * the increment to precede the early-exit check.
78521 */
78522- ACCESS_ONCE(rsp->n_barrier_done)++;
78523+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
78524 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
78525 _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
78526 smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
78527@@ -2943,7 +2943,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
78528
78529 /* Increment ->n_barrier_done to prevent duplicate work. */
78530 smp_mb(); /* Keep increment after above mechanism. */
78531- ACCESS_ONCE(rsp->n_barrier_done)++;
78532+ ACCESS_ONCE_RW(rsp->n_barrier_done)++;
78533 WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
78534 _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
78535 smp_mb(); /* Keep increment before caller's subsequent code. */
78536@@ -2988,10 +2988,10 @@ rcu_boot_init_percpu_data(int cpu, struct rcu_state *rsp)
78537 rdp->grpmask = 1UL << (cpu - rdp->mynode->grplo);
78538 init_callback_list(rdp);
78539 rdp->qlen_lazy = 0;
78540- ACCESS_ONCE(rdp->qlen) = 0;
78541+ ACCESS_ONCE_RW(rdp->qlen) = 0;
78542 rdp->dynticks = &per_cpu(rcu_dynticks, cpu);
78543 WARN_ON_ONCE(rdp->dynticks->dynticks_nesting != DYNTICK_TASK_EXIT_IDLE);
78544- WARN_ON_ONCE(atomic_read(&rdp->dynticks->dynticks) != 1);
78545+ WARN_ON_ONCE(atomic_read_unchecked(&rdp->dynticks->dynticks) != 1);
78546 rdp->cpu = cpu;
78547 rdp->rsp = rsp;
78548 rcu_boot_init_nocb_percpu_data(rdp);
78549@@ -3024,8 +3024,8 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp, int preemptible)
78550 rdp->blimit = blimit;
78551 init_callback_list(rdp); /* Re-enable callbacks on this CPU. */
78552 rdp->dynticks->dynticks_nesting = DYNTICK_TASK_EXIT_IDLE;
78553- atomic_set(&rdp->dynticks->dynticks,
78554- (atomic_read(&rdp->dynticks->dynticks) & ~0x1) + 1);
78555+ atomic_set_unchecked(&rdp->dynticks->dynticks,
78556+ (atomic_read_unchecked(&rdp->dynticks->dynticks) & ~0x1) + 1);
78557 raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
78558
78559 /* Add CPU to rcu_node bitmasks. */
78560@@ -3120,7 +3120,7 @@ static int __init rcu_spawn_gp_kthread(void)
78561 struct task_struct *t;
78562
78563 for_each_rcu_flavor(rsp) {
78564- t = kthread_run(rcu_gp_kthread, rsp, rsp->name);
78565+ t = kthread_run(rcu_gp_kthread, rsp, "%s", rsp->name);
78566 BUG_ON(IS_ERR(t));
78567 rnp = rcu_get_root(rsp);
78568 raw_spin_lock_irqsave(&rnp->lock, flags);
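
The ACCESS_ONCE to ACCESS_ONCE_RW conversions throughout rcutree.c follow from PaX defining ACCESS_ONCE with a const-qualified cast, so reads keep the old name and intended writes must use the _RW form. A simplified sketch of the macro pair, showing only the cast difference, not the full kernel definitions:

    /* Simplified sketch of the ACCESS_ONCE / ACCESS_ONCE_RW split. */
    #include <stdio.h>

    #define ACCESS_ONCE(x)    (*(const volatile __typeof__(x) *)&(x))
    #define ACCESS_ONCE_RW(x) (*(volatile __typeof__(x) *)&(x))

    int main(void)
    {
            unsigned long qlen = 0;

            ACCESS_ONCE_RW(qlen) = 5;               /* write: non-const lvalue */
            unsigned long snap = ACCESS_ONCE(qlen); /* read through const */
            /* ACCESS_ONCE(qlen) = 6; would fail: assignment to const */
            printf("snap = %lu\n", snap);
            return 0;
    }
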
78569diff --git a/kernel/rcutree.h b/kernel/rcutree.h
78570index 4df5034..5ee93f2 100644
78571--- a/kernel/rcutree.h
78572+++ b/kernel/rcutree.h
78573@@ -87,7 +87,7 @@ struct rcu_dynticks {
78574 long long dynticks_nesting; /* Track irq/process nesting level. */
78575 /* Process level is worth LLONG_MAX/2. */
78576 int dynticks_nmi_nesting; /* Track NMI nesting level. */
78577- atomic_t dynticks; /* Even value for idle, else odd. */
78578+ atomic_unchecked_t dynticks;/* Even value for idle, else odd. */
78579 #ifdef CONFIG_RCU_FAST_NO_HZ
78580 bool all_lazy; /* Are all CPU's CBs lazy? */
78581 unsigned long nonlazy_posted;
78582@@ -414,17 +414,17 @@ struct rcu_state {
78583 /* _rcu_barrier(). */
78584 /* End of fields guarded by barrier_mutex. */
78585
78586- atomic_long_t expedited_start; /* Starting ticket. */
78587- atomic_long_t expedited_done; /* Done ticket. */
78588- atomic_long_t expedited_wrap; /* # near-wrap incidents. */
78589- atomic_long_t expedited_tryfail; /* # acquisition failures. */
78590- atomic_long_t expedited_workdone1; /* # done by others #1. */
78591- atomic_long_t expedited_workdone2; /* # done by others #2. */
78592- atomic_long_t expedited_normal; /* # fallbacks to normal. */
78593- atomic_long_t expedited_stoppedcpus; /* # successful stop_cpus. */
78594- atomic_long_t expedited_done_tries; /* # tries to update _done. */
78595- atomic_long_t expedited_done_lost; /* # times beaten to _done. */
78596- atomic_long_t expedited_done_exit; /* # times exited _done loop. */
78597+ atomic_long_unchecked_t expedited_start; /* Starting ticket. */
78598+ atomic_long_t expedited_done; /* Done ticket. */
78599+ atomic_long_unchecked_t expedited_wrap; /* # near-wrap incidents. */
78600+ atomic_long_unchecked_t expedited_tryfail; /* # acquisition failures. */
78601+ atomic_long_unchecked_t expedited_workdone1; /* # done by others #1. */
78602+ atomic_long_unchecked_t expedited_workdone2; /* # done by others #2. */
78603+ atomic_long_unchecked_t expedited_normal; /* # fallbacks to normal. */
78604+ atomic_long_unchecked_t expedited_stoppedcpus; /* # successful stop_cpus. */
78605+ atomic_long_unchecked_t expedited_done_tries; /* # tries to update _done. */
78606+ atomic_long_unchecked_t expedited_done_lost; /* # times beaten to _done. */
78607+ atomic_long_unchecked_t expedited_done_exit; /* # times exited _done loop. */
78608
78609 unsigned long jiffies_force_qs; /* Time at which to invoke */
78610 /* force_quiescent_state(). */
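
Note which field keeps its checked type above: expedited_done still feeds atomic_long_cmpxchg() and the wrap-tolerant ticket comparisons in synchronize_sched_expedited(), while the pure event counters around it become atomic_long_unchecked_t. The comparison itself is ULONG_CMP_GE(), shown below as defined in rcupdate.h, with a quick demonstration of why a wrapped ticket still orders correctly:

    /* ULONG_CMP_GE as defined in rcupdate.h, plus a demonstration that
     * a wrapped ticket still compares as "later". */
    #include <limits.h>
    #include <stdio.h>

    #define ULONG_CMP_GE(a, b) (ULONG_MAX / 2 >= (a) - (b))

    int main(void)
    {
            unsigned long done = ULONG_MAX; /* ticket about to wrap */
            unsigned long snap = done + 2;  /* wraps around to 1 */

            printf("snap >= done: %d\n", ULONG_CMP_GE(snap, done)); /* 1 */
            printf("done >= snap: %d\n", ULONG_CMP_GE(done, snap)); /* 0 */
            return 0;
    }
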
78611diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
78612index 3db5a37..b395fb35 100644
78613--- a/kernel/rcutree_plugin.h
78614+++ b/kernel/rcutree_plugin.h
78615@@ -903,7 +903,7 @@ void synchronize_rcu_expedited(void)
78616
78617 /* Clean up and exit. */
78618 smp_mb(); /* ensure expedited GP seen before counter increment. */
78619- ACCESS_ONCE(sync_rcu_preempt_exp_count)++;
78620+ ACCESS_ONCE_RW(sync_rcu_preempt_exp_count)++;
78621 unlock_mb_ret:
78622 mutex_unlock(&sync_rcu_preempt_exp_mutex);
78623 mb_ret:
78624@@ -1451,7 +1451,7 @@ static void rcu_boost_kthread_setaffinity(struct rcu_node *rnp, int outgoingcpu)
78625 free_cpumask_var(cm);
78626 }
78627
78628-static struct smp_hotplug_thread rcu_cpu_thread_spec = {
78629+static struct smp_hotplug_thread rcu_cpu_thread_spec __read_only = {
78630 .store = &rcu_cpu_kthread_task,
78631 .thread_should_run = rcu_cpu_kthread_should_run,
78632 .thread_fn = rcu_cpu_kthread,
78633@@ -1916,7 +1916,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
78634 print_cpu_stall_fast_no_hz(fast_no_hz, cpu);
78635 printk(KERN_ERR "\t%d: (%lu %s) idle=%03x/%llx/%d softirq=%u/%u %s\n",
78636 cpu, ticks_value, ticks_title,
78637- atomic_read(&rdtp->dynticks) & 0xfff,
78638+ atomic_read_unchecked(&rdtp->dynticks) & 0xfff,
78639 rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
78640 rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
78641 fast_no_hz);
78642@@ -2079,7 +2079,7 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
78643
78644 /* Enqueue the callback on the nocb list and update counts. */
78645 old_rhpp = xchg(&rdp->nocb_tail, rhtp);
78646- ACCESS_ONCE(*old_rhpp) = rhp;
78647+ ACCESS_ONCE_RW(*old_rhpp) = rhp;
78648 atomic_long_add(rhcount, &rdp->nocb_q_count);
78649 atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
78650
78651@@ -2219,12 +2219,12 @@ static int rcu_nocb_kthread(void *arg)
78652 * Extract queued callbacks, update counts, and wait
78653 * for a grace period to elapse.
78654 */
78655- ACCESS_ONCE(rdp->nocb_head) = NULL;
78656+ ACCESS_ONCE_RW(rdp->nocb_head) = NULL;
78657 tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
78658 c = atomic_long_xchg(&rdp->nocb_q_count, 0);
78659 cl = atomic_long_xchg(&rdp->nocb_q_count_lazy, 0);
78660- ACCESS_ONCE(rdp->nocb_p_count) += c;
78661- ACCESS_ONCE(rdp->nocb_p_count_lazy) += cl;
78662+ ACCESS_ONCE_RW(rdp->nocb_p_count) += c;
78663+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) += cl;
78664 rcu_nocb_wait_gp(rdp);
78665
78666 /* Each pass through the following loop invokes a callback. */
78667@@ -2246,8 +2246,8 @@ static int rcu_nocb_kthread(void *arg)
78668 list = next;
78669 }
78670 trace_rcu_batch_end(rdp->rsp->name, c, !!list, 0, 0, 1);
78671- ACCESS_ONCE(rdp->nocb_p_count) -= c;
78672- ACCESS_ONCE(rdp->nocb_p_count_lazy) -= cl;
78673+ ACCESS_ONCE_RW(rdp->nocb_p_count) -= c;
78674+ ACCESS_ONCE_RW(rdp->nocb_p_count_lazy) -= cl;
78675 rdp->n_nocbs_invoked += c;
78676 }
78677 return 0;
78678@@ -2274,7 +2274,7 @@ static void __init rcu_spawn_nocb_kthreads(struct rcu_state *rsp)
78679 t = kthread_run(rcu_nocb_kthread, rdp,
78680 "rcuo%c/%d", rsp->abbr, cpu);
78681 BUG_ON(IS_ERR(t));
78682- ACCESS_ONCE(rdp->nocb_kthread) = t;
78683+ ACCESS_ONCE_RW(rdp->nocb_kthread) = t;
78684 }
78685 }
78686
78687diff --git a/kernel/rcutree_trace.c b/kernel/rcutree_trace.c
78688index cf6c174..a8f4b50 100644
78689--- a/kernel/rcutree_trace.c
78690+++ b/kernel/rcutree_trace.c
78691@@ -121,7 +121,7 @@ static void print_one_rcu_data(struct seq_file *m, struct rcu_data *rdp)
78692 ulong2long(rdp->completed), ulong2long(rdp->gpnum),
78693 rdp->passed_quiesce, rdp->qs_pending);
78694 seq_printf(m, " dt=%d/%llx/%d df=%lu",
78695- atomic_read(&rdp->dynticks->dynticks),
78696+ atomic_read_unchecked(&rdp->dynticks->dynticks),
78697 rdp->dynticks->dynticks_nesting,
78698 rdp->dynticks->dynticks_nmi_nesting,
78699 rdp->dynticks_fqs);
78700@@ -182,17 +182,17 @@ static int show_rcuexp(struct seq_file *m, void *v)
78701 struct rcu_state *rsp = (struct rcu_state *)m->private;
78702
78703 seq_printf(m, "s=%lu d=%lu w=%lu tf=%lu wd1=%lu wd2=%lu n=%lu sc=%lu dt=%lu dl=%lu dx=%lu\n",
78704- atomic_long_read(&rsp->expedited_start),
78705+ atomic_long_read_unchecked(&rsp->expedited_start),
78706 atomic_long_read(&rsp->expedited_done),
78707- atomic_long_read(&rsp->expedited_wrap),
78708- atomic_long_read(&rsp->expedited_tryfail),
78709- atomic_long_read(&rsp->expedited_workdone1),
78710- atomic_long_read(&rsp->expedited_workdone2),
78711- atomic_long_read(&rsp->expedited_normal),
78712- atomic_long_read(&rsp->expedited_stoppedcpus),
78713- atomic_long_read(&rsp->expedited_done_tries),
78714- atomic_long_read(&rsp->expedited_done_lost),
78715- atomic_long_read(&rsp->expedited_done_exit));
78716+ atomic_long_read_unchecked(&rsp->expedited_wrap),
78717+ atomic_long_read_unchecked(&rsp->expedited_tryfail),
78718+ atomic_long_read_unchecked(&rsp->expedited_workdone1),
78719+ atomic_long_read_unchecked(&rsp->expedited_workdone2),
78720+ atomic_long_read_unchecked(&rsp->expedited_normal),
78721+ atomic_long_read_unchecked(&rsp->expedited_stoppedcpus),
78722+ atomic_long_read_unchecked(&rsp->expedited_done_tries),
78723+ atomic_long_read_unchecked(&rsp->expedited_done_lost),
78724+ atomic_long_read_unchecked(&rsp->expedited_done_exit));
78725 return 0;
78726 }
78727
78728diff --git a/kernel/resource.c b/kernel/resource.c
78729index d738698..5f8e60a 100644
78730--- a/kernel/resource.c
78731+++ b/kernel/resource.c
78732@@ -152,8 +152,18 @@ static const struct file_operations proc_iomem_operations = {
78733
78734 static int __init ioresources_init(void)
78735 {
78736+#ifdef CONFIG_GRKERNSEC_PROC_ADD
78737+#ifdef CONFIG_GRKERNSEC_PROC_USER
78738+ proc_create("ioports", S_IRUSR, NULL, &proc_ioports_operations);
78739+ proc_create("iomem", S_IRUSR, NULL, &proc_iomem_operations);
78740+#elif defined(CONFIG_GRKERNSEC_PROC_USERGROUP)
78741+ proc_create("ioports", S_IRUSR | S_IRGRP, NULL, &proc_ioports_operations);
78742+ proc_create("iomem", S_IRUSR | S_IRGRP, NULL, &proc_iomem_operations);
78743+#endif
78744+#else
78745 proc_create("ioports", 0, NULL, &proc_ioports_operations);
78746 proc_create("iomem", 0, NULL, &proc_iomem_operations);
78747+#endif
78748 return 0;
78749 }
78750 __initcall(ioresources_init);
78751diff --git a/kernel/rtmutex-tester.c b/kernel/rtmutex-tester.c
78752index 1d96dd0..994ff19 100644
78753--- a/kernel/rtmutex-tester.c
78754+++ b/kernel/rtmutex-tester.c
78755@@ -22,7 +22,7 @@
78756 #define MAX_RT_TEST_MUTEXES 8
78757
78758 static spinlock_t rttest_lock;
78759-static atomic_t rttest_event;
78760+static atomic_unchecked_t rttest_event;
78761
78762 struct test_thread_data {
78763 int opcode;
78764@@ -63,7 +63,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
78765
78766 case RTTEST_LOCKCONT:
78767 td->mutexes[td->opdata] = 1;
78768- td->event = atomic_add_return(1, &rttest_event);
78769+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78770 return 0;
78771
78772 case RTTEST_RESET:
78773@@ -76,7 +76,7 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
78774 return 0;
78775
78776 case RTTEST_RESETEVENT:
78777- atomic_set(&rttest_event, 0);
78778+ atomic_set_unchecked(&rttest_event, 0);
78779 return 0;
78780
78781 default:
78782@@ -93,9 +93,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
78783 return ret;
78784
78785 td->mutexes[id] = 1;
78786- td->event = atomic_add_return(1, &rttest_event);
78787+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78788 rt_mutex_lock(&mutexes[id]);
78789- td->event = atomic_add_return(1, &rttest_event);
78790+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78791 td->mutexes[id] = 4;
78792 return 0;
78793
78794@@ -106,9 +106,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
78795 return ret;
78796
78797 td->mutexes[id] = 1;
78798- td->event = atomic_add_return(1, &rttest_event);
78799+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78800 ret = rt_mutex_lock_interruptible(&mutexes[id], 0);
78801- td->event = atomic_add_return(1, &rttest_event);
78802+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78803 td->mutexes[id] = ret ? 0 : 4;
78804 return ret ? -EINTR : 0;
78805
78806@@ -117,9 +117,9 @@ static int handle_op(struct test_thread_data *td, int lockwakeup)
78807 if (id < 0 || id >= MAX_RT_TEST_MUTEXES || td->mutexes[id] != 4)
78808 return ret;
78809
78810- td->event = atomic_add_return(1, &rttest_event);
78811+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78812 rt_mutex_unlock(&mutexes[id]);
78813- td->event = atomic_add_return(1, &rttest_event);
78814+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78815 td->mutexes[id] = 0;
78816 return 0;
78817
78818@@ -166,7 +166,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
78819 break;
78820
78821 td->mutexes[dat] = 2;
78822- td->event = atomic_add_return(1, &rttest_event);
78823+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78824 break;
78825
78826 default:
78827@@ -186,7 +186,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
78828 return;
78829
78830 td->mutexes[dat] = 3;
78831- td->event = atomic_add_return(1, &rttest_event);
78832+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78833 break;
78834
78835 case RTTEST_LOCKNOWAIT:
78836@@ -198,7 +198,7 @@ void schedule_rt_mutex_test(struct rt_mutex *mutex)
78837 return;
78838
78839 td->mutexes[dat] = 1;
78840- td->event = atomic_add_return(1, &rttest_event);
78841+ td->event = atomic_add_return_unchecked(1, &rttest_event);
78842 return;
78843
78844 default:
78845diff --git a/kernel/sched/auto_group.c b/kernel/sched/auto_group.c
78846index 64de5f8..7735e12 100644
78847--- a/kernel/sched/auto_group.c
78848+++ b/kernel/sched/auto_group.c
78849@@ -11,7 +11,7 @@
78850
78851 unsigned int __read_mostly sysctl_sched_autogroup_enabled = 1;
78852 static struct autogroup autogroup_default;
78853-static atomic_t autogroup_seq_nr;
78854+static atomic_unchecked_t autogroup_seq_nr;
78855
78856 void __init autogroup_init(struct task_struct *init_task)
78857 {
78858@@ -81,7 +81,7 @@ static inline struct autogroup *autogroup_create(void)
78859
78860 kref_init(&ag->kref);
78861 init_rwsem(&ag->lock);
78862- ag->id = atomic_inc_return(&autogroup_seq_nr);
78863+ ag->id = atomic_inc_return_unchecked(&autogroup_seq_nr);
78864 ag->tg = tg;
78865 #ifdef CONFIG_RT_GROUP_SCHED
78866 /*
78867diff --git a/kernel/sched/core.c b/kernel/sched/core.c
78868index e8b3350..d83d44e 100644
78869--- a/kernel/sched/core.c
78870+++ b/kernel/sched/core.c
78871@@ -3440,7 +3440,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible);
78872 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
78873 * positive (at least 1, or number of jiffies left till timeout) if completed.
78874 */
78875-long __sched
78876+long __sched __intentional_overflow(-1)
78877 wait_for_completion_interruptible_timeout(struct completion *x,
78878 unsigned long timeout)
78879 {
78880@@ -3457,7 +3457,7 @@ EXPORT_SYMBOL(wait_for_completion_interruptible_timeout);
78881 *
78882 * The return value is -ERESTARTSYS if interrupted, 0 if completed.
78883 */
78884-int __sched wait_for_completion_killable(struct completion *x)
78885+int __sched __intentional_overflow(-1) wait_for_completion_killable(struct completion *x)
78886 {
78887 long t = wait_for_common(x, MAX_SCHEDULE_TIMEOUT, TASK_KILLABLE);
78888 if (t == -ERESTARTSYS)
78889@@ -3478,7 +3478,7 @@ EXPORT_SYMBOL(wait_for_completion_killable);
78890 * The return value is -ERESTARTSYS if interrupted, 0 if timed out,
78891 * positive (at least 1, or number of jiffies left till timeout) if completed.
78892 */
78893-long __sched
78894+long __sched __intentional_overflow(-1)
78895 wait_for_completion_killable_timeout(struct completion *x,
78896 unsigned long timeout)
78897 {
78898@@ -3704,6 +3704,8 @@ int can_nice(const struct task_struct *p, const int nice)
78899 /* convert nice value [19,-20] to rlimit style value [1,40] */
78900 int nice_rlim = 20 - nice;
78901
78902+ gr_learn_resource(p, RLIMIT_NICE, nice_rlim, 1);
78903+
78904 return (nice_rlim <= task_rlimit(p, RLIMIT_NICE) ||
78905 capable(CAP_SYS_NICE));
78906 }
78907@@ -3737,7 +3739,8 @@ SYSCALL_DEFINE1(nice, int, increment)
78908 if (nice > 19)
78909 nice = 19;
78910
78911- if (increment < 0 && !can_nice(current, nice))
78912+ if (increment < 0 && (!can_nice(current, nice) ||
78913+ gr_handle_chroot_nice()))
78914 return -EPERM;
78915
78916 retval = security_task_setnice(current, nice);
78917@@ -3891,6 +3894,7 @@ recheck:
78918 unsigned long rlim_rtprio =
78919 task_rlimit(p, RLIMIT_RTPRIO);
78920
78921+ gr_learn_resource(p, RLIMIT_RTPRIO, param->sched_priority, 1);
78922 /* can't set/change the rt policy */
78923 if (policy != p->policy && !rlim_rtprio)
78924 return -EPERM;
78925@@ -4988,7 +4992,7 @@ static void migrate_tasks(unsigned int dead_cpu)
78926
78927 #if defined(CONFIG_SCHED_DEBUG) && defined(CONFIG_SYSCTL)
78928
78929-static struct ctl_table sd_ctl_dir[] = {
78930+static ctl_table_no_const sd_ctl_dir[] __read_only = {
78931 {
78932 .procname = "sched_domain",
78933 .mode = 0555,
78934@@ -5005,17 +5009,17 @@ static struct ctl_table sd_ctl_root[] = {
78935 {}
78936 };
78937
78938-static struct ctl_table *sd_alloc_ctl_entry(int n)
78939+static ctl_table_no_const *sd_alloc_ctl_entry(int n)
78940 {
78941- struct ctl_table *entry =
78942+ ctl_table_no_const *entry =
78943 kcalloc(n, sizeof(struct ctl_table), GFP_KERNEL);
78944
78945 return entry;
78946 }
78947
78948-static void sd_free_ctl_entry(struct ctl_table **tablep)
78949+static void sd_free_ctl_entry(ctl_table_no_const *tablep)
78950 {
78951- struct ctl_table *entry;
78952+ ctl_table_no_const *entry;
78953
78954 /*
78955 * In the intermediate directories, both the child directory and
78956@@ -5023,22 +5027,25 @@ static void sd_free_ctl_entry(struct ctl_table **tablep)
78957 * will always be set. In the lowest directory the names are
78958 * static strings and all have proc handlers.
78959 */
78960- for (entry = *tablep; entry->mode; entry++) {
78961- if (entry->child)
78962- sd_free_ctl_entry(&entry->child);
78963+ for (entry = tablep; entry->mode; entry++) {
78964+ if (entry->child) {
78965+ sd_free_ctl_entry(entry->child);
78966+ pax_open_kernel();
78967+ entry->child = NULL;
78968+ pax_close_kernel();
78969+ }
78970 if (entry->proc_handler == NULL)
78971 kfree(entry->procname);
78972 }
78973
78974- kfree(*tablep);
78975- *tablep = NULL;
78976+ kfree(tablep);
78977 }
78978
78979 static int min_load_idx = 0;
78980 static int max_load_idx = CPU_LOAD_IDX_MAX-1;
78981
78982 static void
78983-set_table_entry(struct ctl_table *entry,
78984+set_table_entry(ctl_table_no_const *entry,
78985 const char *procname, void *data, int maxlen,
78986 umode_t mode, proc_handler *proc_handler,
78987 bool load_idx)
78988@@ -5058,7 +5065,7 @@ set_table_entry(struct ctl_table *entry,
78989 static struct ctl_table *
78990 sd_alloc_ctl_domain_table(struct sched_domain *sd)
78991 {
78992- struct ctl_table *table = sd_alloc_ctl_entry(13);
78993+ ctl_table_no_const *table = sd_alloc_ctl_entry(13);
78994
78995 if (table == NULL)
78996 return NULL;
78997@@ -5093,9 +5100,9 @@ sd_alloc_ctl_domain_table(struct sched_domain *sd)
78998 return table;
78999 }
79000
79001-static ctl_table *sd_alloc_ctl_cpu_table(int cpu)
79002+static ctl_table_no_const *sd_alloc_ctl_cpu_table(int cpu)
79003 {
79004- struct ctl_table *entry, *table;
79005+ ctl_table_no_const *entry, *table;
79006 struct sched_domain *sd;
79007 int domain_num = 0, i;
79008 char buf[32];
79009@@ -5122,11 +5129,13 @@ static struct ctl_table_header *sd_sysctl_header;
79010 static void register_sched_domain_sysctl(void)
79011 {
79012 int i, cpu_num = num_possible_cpus();
79013- struct ctl_table *entry = sd_alloc_ctl_entry(cpu_num + 1);
79014+ ctl_table_no_const *entry = sd_alloc_ctl_entry(cpu_num + 1);
79015 char buf[32];
79016
79017 WARN_ON(sd_ctl_dir[0].child);
79018+ pax_open_kernel();
79019 sd_ctl_dir[0].child = entry;
79020+ pax_close_kernel();
79021
79022 if (entry == NULL)
79023 return;
79024@@ -5149,8 +5158,12 @@ static void unregister_sched_domain_sysctl(void)
79025 if (sd_sysctl_header)
79026 unregister_sysctl_table(sd_sysctl_header);
79027 sd_sysctl_header = NULL;
79028- if (sd_ctl_dir[0].child)
79029- sd_free_ctl_entry(&sd_ctl_dir[0].child);
79030+ if (sd_ctl_dir[0].child) {
79031+ sd_free_ctl_entry(sd_ctl_dir[0].child);
79032+ pax_open_kernel();
79033+ sd_ctl_dir[0].child = NULL;
79034+ pax_close_kernel();
79035+ }
79036 }
79037 #else
79038 static void register_sched_domain_sysctl(void)
79039@@ -5249,7 +5262,7 @@ migration_call(struct notifier_block *nfb, unsigned long action, void *hcpu)
79040 * happens before everything else. This has to be lower priority than
79041 * the notifier in the perf_event subsystem, though.
79042 */
79043-static struct notifier_block __cpuinitdata migration_notifier = {
79044+static struct notifier_block migration_notifier = {
79045 .notifier_call = migration_call,
79046 .priority = CPU_PRI_MIGRATION,
79047 };
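
Because sd_ctl_dir is now __read_only, the two places that store into sd_ctl_dir[0].child bracket the write with pax_open_kernel()/pax_close_kernel(), which briefly lift kernel write protection. A userspace analogy using mprotect(); the kernel variant toggles CR0.WP and page tables rather than mprotect, and the 4096-byte page size and helper names are assumptions of the sketch:

    /* Userspace analogy only: data stays read-only and write permission
     * is raised just around intended updates. Error checks omitted. */
    #include <stdio.h>
    #include <string.h>
    #include <sys/mman.h>

    static char *page;

    static void open_kernel(void)  { mprotect(page, 4096, PROT_READ | PROT_WRITE); }
    static void close_kernel(void) { mprotect(page, 4096, PROT_READ); }

    int main(void)
    {
            page = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
            strcpy(page, "child = entry");
            close_kernel();         /* stray writes now fault */

            open_kernel();          /* deliberate update window */
            strcpy(page, "child = NULL");
            close_kernel();

            puts(page);
            return 0;
    }

The same open/close bracketing appears again in unregister_sched_domain_sysctl() above, and the pax_list_add()/pax_list_del() conversions in the smpboot hunk further down serve the same end for list nodes embedded in read-only objects.
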
79048diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
79049index c61a614..d7f3d7e 100644
79050--- a/kernel/sched/fair.c
79051+++ b/kernel/sched/fair.c
79052@@ -831,7 +831,7 @@ void task_numa_fault(int node, int pages, bool migrated)
79053
79054 static void reset_ptenuma_scan(struct task_struct *p)
79055 {
79056- ACCESS_ONCE(p->mm->numa_scan_seq)++;
79057+ ACCESS_ONCE_RW(p->mm->numa_scan_seq)++;
79058 p->mm->numa_scan_offset = 0;
79059 }
79060
79061@@ -5686,7 +5686,7 @@ static void nohz_idle_balance(int this_cpu, enum cpu_idle_type idle) { }
79062 * run_rebalance_domains is triggered when needed from the scheduler tick.
79063 * Also triggered for nohz idle balancing (with nohz_balancing_kick set).
79064 */
79065-static void run_rebalance_domains(struct softirq_action *h)
79066+static void run_rebalance_domains(void)
79067 {
79068 int this_cpu = smp_processor_id();
79069 struct rq *this_rq = cpu_rq(this_cpu);
79070diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
79071index ce39224d..0e09343 100644
79072--- a/kernel/sched/sched.h
79073+++ b/kernel/sched/sched.h
79074@@ -1009,7 +1009,7 @@ struct sched_class {
79075 #ifdef CONFIG_FAIR_GROUP_SCHED
79076 void (*task_move_group) (struct task_struct *p, int on_rq);
79077 #endif
79078-};
79079+} __do_const;
79080
79081 #define sched_class_highest (&stop_sched_class)
79082 #define for_each_class(class) \
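
The __do_const on struct sched_class tells the constify plugin to treat every instance as const, keeping this function-pointer table out of writable memory. The plain-C equivalent is simply declaring the instance const, as in this illustrative sketch:

    /* Plain-C equivalent of what __do_const enforces: an ops table
     * whose function pointers end up in .rodata. Names illustrative. */
    #include <stdio.h>

    struct ops {
            void (*run)(void);
    };

    static void real_run(void) { puts("run"); }

    /* The plugin forces the const even if a definition omits it; here
     * it is written by hand. */
    static const struct ops my_ops = { .run = real_run };

    int main(void)
    {
            my_ops.run();
            /* my_ops.run = other; would not compile */
            return 0;
    }
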
79083diff --git a/kernel/signal.c b/kernel/signal.c
79084index 113411b..17190e2 100644
79085--- a/kernel/signal.c
79086+++ b/kernel/signal.c
79087@@ -51,12 +51,12 @@ static struct kmem_cache *sigqueue_cachep;
79088
79089 int print_fatal_signals __read_mostly;
79090
79091-static void __user *sig_handler(struct task_struct *t, int sig)
79092+static __sighandler_t sig_handler(struct task_struct *t, int sig)
79093 {
79094 return t->sighand->action[sig - 1].sa.sa_handler;
79095 }
79096
79097-static int sig_handler_ignored(void __user *handler, int sig)
79098+static int sig_handler_ignored(__sighandler_t handler, int sig)
79099 {
79100 /* Is it explicitly or implicitly ignored? */
79101 return handler == SIG_IGN ||
79102@@ -65,7 +65,7 @@ static int sig_handler_ignored(void __user *handler, int sig)
79103
79104 static int sig_task_ignored(struct task_struct *t, int sig, bool force)
79105 {
79106- void __user *handler;
79107+ __sighandler_t handler;
79108
79109 handler = sig_handler(t, sig);
79110
79111@@ -369,6 +369,9 @@ __sigqueue_alloc(int sig, struct task_struct *t, gfp_t flags, int override_rlimi
79112 atomic_inc(&user->sigpending);
79113 rcu_read_unlock();
79114
79115+ if (!override_rlimit)
79116+ gr_learn_resource(t, RLIMIT_SIGPENDING, atomic_read(&user->sigpending), 1);
79117+
79118 if (override_rlimit ||
79119 atomic_read(&user->sigpending) <=
79120 task_rlimit(t, RLIMIT_SIGPENDING)) {
79121@@ -496,7 +499,7 @@ flush_signal_handlers(struct task_struct *t, int force_default)
79122
79123 int unhandled_signal(struct task_struct *tsk, int sig)
79124 {
79125- void __user *handler = tsk->sighand->action[sig-1].sa.sa_handler;
79126+ __sighandler_t handler = tsk->sighand->action[sig-1].sa.sa_handler;
79127 if (is_global_init(tsk))
79128 return 1;
79129 if (handler != SIG_IGN && handler != SIG_DFL)
79130@@ -816,6 +819,13 @@ static int check_kill_permission(int sig, struct siginfo *info,
79131 }
79132 }
79133
79134+ /* allow glibc communication via tgkill to other threads in our
79135+ thread group */
79136+ if ((info == SEND_SIG_NOINFO || info->si_code != SI_TKILL ||
79137+ sig != (SIGRTMIN+1) || task_tgid_vnr(t) != info->si_pid)
79138+ && gr_handle_signal(t, sig))
79139+ return -EPERM;
79140+
79141 return security_task_kill(t, info, sig, 0);
79142 }
79143
79144@@ -1199,7 +1209,7 @@ __group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
79145 return send_signal(sig, info, p, 1);
79146 }
79147
79148-static int
79149+int
79150 specific_send_sig_info(int sig, struct siginfo *info, struct task_struct *t)
79151 {
79152 return send_signal(sig, info, t, 0);
79153@@ -1236,6 +1246,7 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
79154 unsigned long int flags;
79155 int ret, blocked, ignored;
79156 struct k_sigaction *action;
79157+ int is_unhandled = 0;
79158
79159 spin_lock_irqsave(&t->sighand->siglock, flags);
79160 action = &t->sighand->action[sig-1];
79161@@ -1250,9 +1261,18 @@ force_sig_info(int sig, struct siginfo *info, struct task_struct *t)
79162 }
79163 if (action->sa.sa_handler == SIG_DFL)
79164 t->signal->flags &= ~SIGNAL_UNKILLABLE;
79165+ if (action->sa.sa_handler == SIG_IGN || action->sa.sa_handler == SIG_DFL)
79166+ is_unhandled = 1;
79167 ret = specific_send_sig_info(sig, info, t);
79168 spin_unlock_irqrestore(&t->sighand->siglock, flags);
79169
79170+ /* Only deal with unhandled signals; Java etc. trigger SIGSEGV during
79171+ normal operation. */
79172+ if (is_unhandled) {
79173+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, t);
79174+ gr_handle_crash(t, sig);
79175+ }
79176+
79177 return ret;
79178 }
79179
79180@@ -1319,8 +1339,11 @@ int group_send_sig_info(int sig, struct siginfo *info, struct task_struct *p)
79181 ret = check_kill_permission(sig, info, p);
79182 rcu_read_unlock();
79183
79184- if (!ret && sig)
79185+ if (!ret && sig) {
79186 ret = do_send_sig_info(sig, info, p, true);
79187+ if (!ret)
79188+ gr_log_signal(sig, !is_si_special(info) ? info->si_addr : NULL, p);
79189+ }
79190
79191 return ret;
79192 }
79193@@ -2926,7 +2949,15 @@ do_send_specific(pid_t tgid, pid_t pid, int sig, struct siginfo *info)
79194 int error = -ESRCH;
79195
79196 rcu_read_lock();
79197- p = find_task_by_vpid(pid);
79198+#ifdef CONFIG_GRKERNSEC_CHROOT_FINDTASK
79199+ /* allow glibc communication via tgkill to other threads in our
79200+ thread group */
79201+ if (grsec_enable_chroot_findtask && info->si_code == SI_TKILL &&
79202+ sig == (SIGRTMIN+1) && tgid == info->si_pid)
79203+ p = find_task_by_vpid_unrestricted(pid);
79204+ else
79205+#endif
79206+ p = find_task_by_vpid(pid);
79207 if (p && (tgid <= 0 || task_tgid_vnr(p) == tgid)) {
79208 error = check_kill_permission(sig, info, p);
79209 /*
79210@@ -3240,8 +3271,8 @@ COMPAT_SYSCALL_DEFINE2(sigaltstack,
79211 }
79212 seg = get_fs();
79213 set_fs(KERNEL_DS);
79214- ret = do_sigaltstack((stack_t __force __user *) (uss_ptr ? &uss : NULL),
79215- (stack_t __force __user *) &uoss,
79216+ ret = do_sigaltstack((stack_t __force_user *) (uss_ptr ? &uss : NULL),
79217+ (stack_t __force_user *) &uoss,
79218 compat_user_stack_pointer());
79219 set_fs(seg);
79220 if (ret >= 0 && uoss_ptr) {
79221diff --git a/kernel/smp.c b/kernel/smp.c
79222index 4dba0f7..fe9f773 100644
79223--- a/kernel/smp.c
79224+++ b/kernel/smp.c
79225@@ -73,7 +73,7 @@ hotplug_cfd(struct notifier_block *nfb, unsigned long action, void *hcpu)
79226 return NOTIFY_OK;
79227 }
79228
79229-static struct notifier_block __cpuinitdata hotplug_cfd_notifier = {
79230+static struct notifier_block hotplug_cfd_notifier = {
79231 .notifier_call = hotplug_cfd,
79232 };
79233
79234diff --git a/kernel/smpboot.c b/kernel/smpboot.c
79235index 02fc5c9..e54c335 100644
79236--- a/kernel/smpboot.c
79237+++ b/kernel/smpboot.c
79238@@ -288,7 +288,7 @@ int smpboot_register_percpu_thread(struct smp_hotplug_thread *plug_thread)
79239 }
79240 smpboot_unpark_thread(plug_thread, cpu);
79241 }
79242- list_add(&plug_thread->list, &hotplug_threads);
79243+ pax_list_add(&plug_thread->list, &hotplug_threads);
79244 out:
79245 mutex_unlock(&smpboot_threads_lock);
79246 return ret;
79247@@ -305,7 +305,7 @@ void smpboot_unregister_percpu_thread(struct smp_hotplug_thread *plug_thread)
79248 {
79249 get_online_cpus();
79250 mutex_lock(&smpboot_threads_lock);
79251- list_del(&plug_thread->list);
79252+ pax_list_del(&plug_thread->list);
79253 smpboot_destroy_threads(plug_thread);
79254 mutex_unlock(&smpboot_threads_lock);
79255 put_online_cpus();
79256diff --git a/kernel/softirq.c b/kernel/softirq.c
79257index 3d6833f..da6d93d 100644
79258--- a/kernel/softirq.c
79259+++ b/kernel/softirq.c
79260@@ -53,11 +53,11 @@ irq_cpustat_t irq_stat[NR_CPUS] ____cacheline_aligned;
79261 EXPORT_SYMBOL(irq_stat);
79262 #endif
79263
79264-static struct softirq_action softirq_vec[NR_SOFTIRQS] __cacheline_aligned_in_smp;
79265+static struct softirq_action softirq_vec[NR_SOFTIRQS] __read_only __aligned(PAGE_SIZE);
79266
79267 DEFINE_PER_CPU(struct task_struct *, ksoftirqd);
79268
79269-char *softirq_to_name[NR_SOFTIRQS] = {
79270+const char * const softirq_to_name[NR_SOFTIRQS] = {
79271 "HI", "TIMER", "NET_TX", "NET_RX", "BLOCK", "BLOCK_IOPOLL",
79272 "TASKLET", "SCHED", "HRTIMER", "RCU"
79273 };
79274@@ -250,7 +250,7 @@ restart:
79275 kstat_incr_softirqs_this_cpu(vec_nr);
79276
79277 trace_softirq_entry(vec_nr);
79278- h->action(h);
79279+ h->action();
79280 trace_softirq_exit(vec_nr);
79281 if (unlikely(prev_count != preempt_count())) {
79282 printk(KERN_ERR "huh, entered softirq %u %s %p"
79283@@ -405,7 +405,7 @@ void __raise_softirq_irqoff(unsigned int nr)
79284 or_softirq_pending(1UL << nr);
79285 }
79286
79287-void open_softirq(int nr, void (*action)(struct softirq_action *))
79288+void __init open_softirq(int nr, void (*action)(void))
79289 {
79290 softirq_vec[nr].action = action;
79291 }
79292@@ -461,7 +461,7 @@ void __tasklet_hi_schedule_first(struct tasklet_struct *t)
79293
79294 EXPORT_SYMBOL(__tasklet_hi_schedule_first);
79295
79296-static void tasklet_action(struct softirq_action *a)
79297+static void tasklet_action(void)
79298 {
79299 struct tasklet_struct *list;
79300
79301@@ -496,7 +496,7 @@ static void tasklet_action(struct softirq_action *a)
79302 }
79303 }
79304
79305-static void tasklet_hi_action(struct softirq_action *a)
79306+static void tasklet_hi_action(void)
79307 {
79308 struct tasklet_struct *list;
79309
79310@@ -730,7 +730,7 @@ static int __cpuinit remote_softirq_cpu_notify(struct notifier_block *self,
79311 return NOTIFY_OK;
79312 }
79313
79314-static struct notifier_block __cpuinitdata remote_softirq_cpu_notifier = {
79315+static struct notifier_block remote_softirq_cpu_notifier = {
79316 .notifier_call = remote_softirq_cpu_notify,
79317 };
79318
79319@@ -847,11 +847,11 @@ static int __cpuinit cpu_callback(struct notifier_block *nfb,
79320 return NOTIFY_OK;
79321 }
79322
79323-static struct notifier_block __cpuinitdata cpu_nfb = {
79324+static struct notifier_block cpu_nfb = {
79325 .notifier_call = cpu_callback
79326 };
79327
79328-static struct smp_hotplug_thread softirq_threads = {
79329+static struct smp_hotplug_thread softirq_threads __read_only = {
79330 .store = &ksoftirqd,
79331 .thread_should_run = ksoftirqd_should_run,
79332 .thread_fn = run_ksoftirqd,
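
The softirq changes constify the dispatch table: handlers lose the unused struct softirq_action argument, registration becomes __init-only, and the vector is page-aligned so it can be made read-only. A userspace analog of the end state, using a const function-pointer table (the kernel instead write-protects the page once init is done):

/* A const table in .rodata: any later store through it is a compile-time
 * error, the userspace counterpart of the __read_only protection. */
#include <stdio.h>

enum { HI, TIMER, NR_SOFTIRQS_DEMO };

static void hi_action(void)    { puts("HI softirq analog"); }
static void timer_action(void) { puts("TIMER softirq analog"); }

static void (*const softirq_vec_demo[NR_SOFTIRQS_DEMO])(void) = {
	[HI]    = hi_action,
	[TIMER] = timer_action,
};

int main(void)
{
	for (int i = 0; i < NR_SOFTIRQS_DEMO; i++)
		softirq_vec_demo[i]();   /* h->action() with no argument */
	return 0;
}
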
79333diff --git a/kernel/srcu.c b/kernel/srcu.c
79334index 01d5ccb..cdcbee6 100644
79335--- a/kernel/srcu.c
79336+++ b/kernel/srcu.c
79337@@ -300,9 +300,9 @@ int __srcu_read_lock(struct srcu_struct *sp)
79338
79339 idx = ACCESS_ONCE(sp->completed) & 0x1;
79340 preempt_disable();
79341- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
79342+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->c[idx]) += 1;
79343 smp_mb(); /* B */ /* Avoid leaking the critical section. */
79344- ACCESS_ONCE(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
79345+ ACCESS_ONCE_RW(this_cpu_ptr(sp->per_cpu_ref)->seq[idx]) += 1;
79346 preempt_enable();
79347 return idx;
79348 }
79349diff --git a/kernel/sys.c b/kernel/sys.c
79350index 2bbd9a7..0875671 100644
79351--- a/kernel/sys.c
79352+++ b/kernel/sys.c
79353@@ -163,6 +163,12 @@ static int set_one_prio(struct task_struct *p, int niceval, int error)
79354 error = -EACCES;
79355 goto out;
79356 }
79357+
79358+ if (gr_handle_chroot_setpriority(p, niceval)) {
79359+ error = -EACCES;
79360+ goto out;
79361+ }
79362+
79363 no_nice = security_task_setnice(p, niceval);
79364 if (no_nice) {
79365 error = no_nice;
79366@@ -626,6 +632,9 @@ SYSCALL_DEFINE2(setregid, gid_t, rgid, gid_t, egid)
79367 goto error;
79368 }
79369
79370+ if (gr_check_group_change(new->gid, new->egid, INVALID_GID))
79371+ goto error;
79372+
79373 if (rgid != (gid_t) -1 ||
79374 (egid != (gid_t) -1 && !gid_eq(kegid, old->gid)))
79375 new->sgid = new->egid;
79376@@ -661,6 +670,10 @@ SYSCALL_DEFINE1(setgid, gid_t, gid)
79377 old = current_cred();
79378
79379 retval = -EPERM;
79380+
79381+ if (gr_check_group_change(kgid, kgid, kgid))
79382+ goto error;
79383+
79384 if (nsown_capable(CAP_SETGID))
79385 new->gid = new->egid = new->sgid = new->fsgid = kgid;
79386 else if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->sgid))
79387@@ -678,7 +691,7 @@ error:
79388 /*
79389 * change the user struct in a credentials set to match the new UID
79390 */
79391-static int set_user(struct cred *new)
79392+int set_user(struct cred *new)
79393 {
79394 struct user_struct *new_user;
79395
79396@@ -758,6 +771,9 @@ SYSCALL_DEFINE2(setreuid, uid_t, ruid, uid_t, euid)
79397 goto error;
79398 }
79399
79400+ if (gr_check_user_change(new->uid, new->euid, INVALID_UID))
79401+ goto error;
79402+
79403 if (!uid_eq(new->uid, old->uid)) {
79404 retval = set_user(new);
79405 if (retval < 0)
79406@@ -808,6 +824,12 @@ SYSCALL_DEFINE1(setuid, uid_t, uid)
79407 old = current_cred();
79408
79409 retval = -EPERM;
79410+
79411+ if (gr_check_crash_uid(kuid))
79412+ goto error;
79413+ if (gr_check_user_change(kuid, kuid, kuid))
79414+ goto error;
79415+
79416 if (nsown_capable(CAP_SETUID)) {
79417 new->suid = new->uid = kuid;
79418 if (!uid_eq(kuid, old->uid)) {
79419@@ -877,6 +899,9 @@ SYSCALL_DEFINE3(setresuid, uid_t, ruid, uid_t, euid, uid_t, suid)
79420 goto error;
79421 }
79422
79423+ if (gr_check_user_change(kruid, keuid, INVALID_UID))
79424+ goto error;
79425+
79426 if (ruid != (uid_t) -1) {
79427 new->uid = kruid;
79428 if (!uid_eq(kruid, old->uid)) {
79429@@ -959,6 +984,9 @@ SYSCALL_DEFINE3(setresgid, gid_t, rgid, gid_t, egid, gid_t, sgid)
79430 goto error;
79431 }
79432
79433+ if (gr_check_group_change(krgid, kegid, INVALID_GID))
79434+ goto error;
79435+
79436 if (rgid != (gid_t) -1)
79437 new->gid = krgid;
79438 if (egid != (gid_t) -1)
79439@@ -1020,12 +1048,16 @@ SYSCALL_DEFINE1(setfsuid, uid_t, uid)
79440 uid_eq(kuid, old->suid) || uid_eq(kuid, old->fsuid) ||
79441 nsown_capable(CAP_SETUID)) {
79442 if (!uid_eq(kuid, old->fsuid)) {
79443+ if (gr_check_user_change(INVALID_UID, INVALID_UID, kuid))
79444+ goto error;
79445+
79446 new->fsuid = kuid;
79447 if (security_task_fix_setuid(new, old, LSM_SETID_FS) == 0)
79448 goto change_okay;
79449 }
79450 }
79451
79452+error:
79453 abort_creds(new);
79454 return old_fsuid;
79455
79456@@ -1058,12 +1090,16 @@ SYSCALL_DEFINE1(setfsgid, gid_t, gid)
79457 if (gid_eq(kgid, old->gid) || gid_eq(kgid, old->egid) ||
79458 gid_eq(kgid, old->sgid) || gid_eq(kgid, old->fsgid) ||
79459 nsown_capable(CAP_SETGID)) {
79460+ if (gr_check_group_change(INVALID_GID, INVALID_GID, kgid))
79461+ goto error;
79462+
79463 if (!gid_eq(kgid, old->fsgid)) {
79464 new->fsgid = kgid;
79465 goto change_okay;
79466 }
79467 }
79468
79469+error:
79470 abort_creds(new);
79471 return old_fsgid;
79472
79473@@ -1432,19 +1468,19 @@ SYSCALL_DEFINE1(olduname, struct oldold_utsname __user *, name)
79474 return -EFAULT;
79475
79476 down_read(&uts_sem);
79477- error = __copy_to_user(&name->sysname, &utsname()->sysname,
79478+ error = __copy_to_user(name->sysname, &utsname()->sysname,
79479 __OLD_UTS_LEN);
79480 error |= __put_user(0, name->sysname + __OLD_UTS_LEN);
79481- error |= __copy_to_user(&name->nodename, &utsname()->nodename,
79482+ error |= __copy_to_user(name->nodename, &utsname()->nodename,
79483 __OLD_UTS_LEN);
79484 error |= __put_user(0, name->nodename + __OLD_UTS_LEN);
79485- error |= __copy_to_user(&name->release, &utsname()->release,
79486+ error |= __copy_to_user(name->release, &utsname()->release,
79487 __OLD_UTS_LEN);
79488 error |= __put_user(0, name->release + __OLD_UTS_LEN);
79489- error |= __copy_to_user(&name->version, &utsname()->version,
79490+ error |= __copy_to_user(name->version, &utsname()->version,
79491 __OLD_UTS_LEN);
79492 error |= __put_user(0, name->version + __OLD_UTS_LEN);
79493- error |= __copy_to_user(&name->machine, &utsname()->machine,
79494+ error |= __copy_to_user(name->machine, &utsname()->machine,
79495 __OLD_UTS_LEN);
79496 error |= __put_user(0, name->machine + __OLD_UTS_LEN);
79497 up_read(&uts_sem);
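
The olduname hunk only drops '&' operators, but it is not cosmetic: for an array member, &name->sysname and name->sysname share an address yet differ in pointee type and size, and the hardened user-copy checks in this patch look at the pointee size. A two-line illustration (a shortened struct stands in for oldold_utsname):

#include <stdio.h>

struct oldold_utsname_demo {
	char sysname[9];
};

int main(void)
{
	struct oldold_utsname_demo u;

	/* same address... */
	printf("%p %p\n", (void *)&u.sysname, (void *)u.sysname);

	/* ...but different pointee sizes: char (*)[9] vs char * */
	printf("sizeof *&u.sysname = %zu, sizeof *u.sysname = %zu\n",
	       sizeof(*&u.sysname), sizeof(*u.sysname));
	return 0;
}
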
79498@@ -1646,6 +1682,13 @@ int do_prlimit(struct task_struct *tsk, unsigned int resource,
79499 */
79500 new_rlim->rlim_cur = 1;
79501 }
79502+ /* Handle the case where a fork and setuid occur and then RLIMIT_NPROC
79503+ is changed to a lower value. Since tasks can be created by the same
79504+ user in between this limit change and an execve by this task, force
79505+ a recheck only for this task by setting PF_NPROC_EXCEEDED
79506+ */
79507+ if (resource == RLIMIT_NPROC && tsk->real_cred->user != INIT_USER)
79508+ tsk->flags |= PF_NPROC_EXCEEDED;
79509 }
79510 if (!retval) {
79511 if (old_rlim)
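
Several of the sys.c hooks funnel a denial to the existing "return old value" paths, which matters because setfsuid()/setfsgid() have no error return. The conventional userspace pattern, calling twice and comparing, detects such a denial naturally; a small sketch (uid 65534 as 'nobody' is an assumption about the target system):

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/fsuid.h>
#include <unistd.h>

int main(void)
{
	uid_t target = 65534;            /* assumed 'nobody' */
	uid_t prev = setfsuid(target);   /* returns old fsuid, success or not */
	uid_t now  = setfsuid(target);   /* second call reads current value */

	printf("previous fsuid %u, fsuid now %u (%s)\n",
	       (unsigned)prev, (unsigned)now,
	       now == target ? "changed" : "denied");
	return 0;
}
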
79512diff --git a/kernel/sysctl.c b/kernel/sysctl.c
79513index 9edcf45..713c960 100644
79514--- a/kernel/sysctl.c
79515+++ b/kernel/sysctl.c
79516@@ -93,7 +93,6 @@
79517
79518
79519 #if defined(CONFIG_SYSCTL)
79520-
79521 /* External variables not in a header file. */
79522 extern int sysctl_overcommit_memory;
79523 extern int sysctl_overcommit_ratio;
79524@@ -119,18 +118,18 @@ extern int blk_iopoll_enabled;
79525
79526 /* Constants used for minimum and maximum */
79527 #ifdef CONFIG_LOCKUP_DETECTOR
79528-static int sixty = 60;
79529-static int neg_one = -1;
79530+static int sixty __read_only = 60;
79531 #endif
79532
79533-static int zero;
79534-static int __maybe_unused one = 1;
79535-static int __maybe_unused two = 2;
79536-static int __maybe_unused three = 3;
79537-static unsigned long one_ul = 1;
79538-static int one_hundred = 100;
79539+static int neg_one __read_only = -1;
79540+static int zero __read_only = 0;
79541+static int __maybe_unused one __read_only = 1;
79542+static int __maybe_unused two __read_only = 2;
79543+static int __maybe_unused three __read_only = 3;
79544+static unsigned long one_ul __read_only = 1;
79545+static int one_hundred __read_only = 100;
79546 #ifdef CONFIG_PRINTK
79547-static int ten_thousand = 10000;
79548+static int ten_thousand __read_only = 10000;
79549 #endif
79550
79551 /* this is needed for the proc_doulongvec_minmax of vm_dirty_bytes */
79552@@ -177,10 +176,8 @@ static int proc_taint(struct ctl_table *table, int write,
79553 void __user *buffer, size_t *lenp, loff_t *ppos);
79554 #endif
79555
79556-#ifdef CONFIG_PRINTK
79557 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
79558 void __user *buffer, size_t *lenp, loff_t *ppos);
79559-#endif
79560
79561 static int proc_dointvec_minmax_coredump(struct ctl_table *table, int write,
79562 void __user *buffer, size_t *lenp, loff_t *ppos);
79563@@ -211,6 +208,8 @@ static int sysrq_sysctl_handler(ctl_table *table, int write,
79564
79565 #endif
79566
79567+extern struct ctl_table grsecurity_table[];
79568+
79569 static struct ctl_table kern_table[];
79570 static struct ctl_table vm_table[];
79571 static struct ctl_table fs_table[];
79572@@ -225,6 +224,20 @@ extern struct ctl_table epoll_table[];
79573 int sysctl_legacy_va_layout;
79574 #endif
79575
79576+#ifdef CONFIG_PAX_SOFTMODE
79577+static ctl_table pax_table[] = {
79578+ {
79579+ .procname = "softmode",
79580+ .data = &pax_softmode,
79581+ .maxlen = sizeof(unsigned int),
79582+ .mode = 0600,
79583+ .proc_handler = &proc_dointvec,
79584+ },
79585+
79586+ { }
79587+};
79588+#endif
79589+
79590 /* The default sysctl tables: */
79591
79592 static struct ctl_table sysctl_base_table[] = {
79593@@ -273,6 +286,22 @@ static int max_extfrag_threshold = 1000;
79594 #endif
79595
79596 static struct ctl_table kern_table[] = {
79597+#if defined(CONFIG_GRKERNSEC_SYSCTL) || defined(CONFIG_GRKERNSEC_ROFS)
79598+ {
79599+ .procname = "grsecurity",
79600+ .mode = 0500,
79601+ .child = grsecurity_table,
79602+ },
79603+#endif
79604+
79605+#ifdef CONFIG_PAX_SOFTMODE
79606+ {
79607+ .procname = "pax",
79608+ .mode = 0500,
79609+ .child = pax_table,
79610+ },
79611+#endif
79612+
79613 {
79614 .procname = "sched_child_runs_first",
79615 .data = &sysctl_sched_child_runs_first,
79616@@ -607,7 +636,7 @@ static struct ctl_table kern_table[] = {
79617 .data = &modprobe_path,
79618 .maxlen = KMOD_PATH_LEN,
79619 .mode = 0644,
79620- .proc_handler = proc_dostring,
79621+ .proc_handler = proc_dostring_modpriv,
79622 },
79623 {
79624 .procname = "modules_disabled",
79625@@ -774,16 +803,20 @@ static struct ctl_table kern_table[] = {
79626 .extra1 = &zero,
79627 .extra2 = &one,
79628 },
79629+#endif
79630 {
79631 .procname = "kptr_restrict",
79632 .data = &kptr_restrict,
79633 .maxlen = sizeof(int),
79634 .mode = 0644,
79635 .proc_handler = proc_dointvec_minmax_sysadmin,
79636+#ifdef CONFIG_GRKERNSEC_HIDESYM
79637+ .extra1 = &two,
79638+#else
79639 .extra1 = &zero,
79640+#endif
79641 .extra2 = &two,
79642 },
79643-#endif
79644 {
79645 .procname = "ngroups_max",
79646 .data = &ngroups_max,
79647@@ -1025,10 +1058,17 @@ static struct ctl_table kern_table[] = {
79648 */
79649 {
79650 .procname = "perf_event_paranoid",
79651- .data = &sysctl_perf_event_paranoid,
79652- .maxlen = sizeof(sysctl_perf_event_paranoid),
79653+ .data = &sysctl_perf_event_legitimately_concerned,
79654+ .maxlen = sizeof(sysctl_perf_event_legitimately_concerned),
79655 .mode = 0644,
79656- .proc_handler = proc_dointvec,
79657+ /* go ahead, be a hero */
79658+ .proc_handler = proc_dointvec_minmax_sysadmin,
79659+ .extra1 = &neg_one,
79660+#ifdef CONFIG_GRKERNSEC_PERF_HARDEN
79661+ .extra2 = &three,
79662+#else
79663+ .extra2 = &two,
79664+#endif
79665 },
79666 {
79667 .procname = "perf_event_mlock_kb",
79668@@ -1282,6 +1322,13 @@ static struct ctl_table vm_table[] = {
79669 .proc_handler = proc_dointvec_minmax,
79670 .extra1 = &zero,
79671 },
79672+ {
79673+ .procname = "heap_stack_gap",
79674+ .data = &sysctl_heap_stack_gap,
79675+ .maxlen = sizeof(sysctl_heap_stack_gap),
79676+ .mode = 0644,
79677+ .proc_handler = proc_doulongvec_minmax,
79678+ },
79679 #else
79680 {
79681 .procname = "nr_trim_pages",
79682@@ -1746,6 +1793,16 @@ int proc_dostring(struct ctl_table *table, int write,
79683 buffer, lenp, ppos);
79684 }
79685
79686+int proc_dostring_modpriv(struct ctl_table *table, int write,
79687+ void __user *buffer, size_t *lenp, loff_t *ppos)
79688+{
79689+ if (write && !capable(CAP_SYS_MODULE))
79690+ return -EPERM;
79691+
79692+ return _proc_do_string(table->data, table->maxlen, write,
79693+ buffer, lenp, ppos);
79694+}
79695+
79696 static size_t proc_skip_spaces(char **buf)
79697 {
79698 size_t ret;
79699@@ -1851,6 +1908,8 @@ static int proc_put_long(void __user **buf, size_t *size, unsigned long val,
79700 len = strlen(tmp);
79701 if (len > *size)
79702 len = *size;
79703+ if (len > sizeof(tmp))
79704+ len = sizeof(tmp);
79705 if (copy_to_user(*buf, tmp, len))
79706 return -EFAULT;
79707 *size -= len;
79708@@ -2015,7 +2074,7 @@ int proc_dointvec(struct ctl_table *table, int write,
79709 static int proc_taint(struct ctl_table *table, int write,
79710 void __user *buffer, size_t *lenp, loff_t *ppos)
79711 {
79712- struct ctl_table t;
79713+ ctl_table_no_const t;
79714 unsigned long tmptaint = get_taint();
79715 int err;
79716
79717@@ -2043,7 +2102,6 @@ static int proc_taint(struct ctl_table *table, int write,
79718 return err;
79719 }
79720
79721-#ifdef CONFIG_PRINTK
79722 static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
79723 void __user *buffer, size_t *lenp, loff_t *ppos)
79724 {
79725@@ -2052,7 +2110,6 @@ static int proc_dointvec_minmax_sysadmin(struct ctl_table *table, int write,
79726
79727 return proc_dointvec_minmax(table, write, buffer, lenp, ppos);
79728 }
79729-#endif
79730
79731 struct do_proc_dointvec_minmax_conv_param {
79732 int *min;
79733@@ -2199,8 +2256,11 @@ static int __do_proc_doulongvec_minmax(void *data, struct ctl_table *table, int
79734 *i = val;
79735 } else {
79736 val = convdiv * (*i) / convmul;
79737- if (!first)
79738+ if (!first) {
79739 err = proc_put_char(&buffer, &left, '\t');
79740+ if (err)
79741+ break;
79742+ }
79743 err = proc_put_long(&buffer, &left, val, false);
79744 if (err)
79745 break;
79746@@ -2592,6 +2652,12 @@ int proc_dostring(struct ctl_table *table, int write,
79747 return -ENOSYS;
79748 }
79749
79750+int proc_dostring_modpriv(struct ctl_table *table, int write,
79751+ void __user *buffer, size_t *lenp, loff_t *ppos)
79752+{
79753+ return -ENOSYS;
79754+}
79755+
79756 int proc_dointvec(struct ctl_table *table, int write,
79757 void __user *buffer, size_t *lenp, loff_t *ppos)
79758 {
79759@@ -2648,5 +2714,6 @@ EXPORT_SYMBOL(proc_dointvec_minmax);
79760 EXPORT_SYMBOL(proc_dointvec_userhz_jiffies);
79761 EXPORT_SYMBOL(proc_dointvec_ms_jiffies);
79762 EXPORT_SYMBOL(proc_dostring);
79763+EXPORT_SYMBOL(proc_dostring_modpriv);
79764 EXPORT_SYMBOL(proc_doulongvec_minmax);
79765 EXPORT_SYMBOL(proc_doulongvec_ms_jiffies_minmax);
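
With GRKERNSEC_HIDESYM the kptr_restrict minimum above is pinned to 2 and writes go through the sysadmin-gated handler. Checking the effective policy needs nothing more than reading the sysctl file; a quick sketch:

#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/kptr_restrict", "r");
	int val;

	if (!f || fscanf(f, "%d", &val) != 1) {
		perror("kptr_restrict");
		return 1;
	}
	fclose(f);
	/* On a HIDESYM kernel this should never print 0 or 1. */
	printf("kptr_restrict = %d (%%pK prints %s)\n",
	       val, val ? "zeroed/limited pointers" : "real pointers");
	return 0;
}
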
79766diff --git a/kernel/taskstats.c b/kernel/taskstats.c
79767index 145bb4d..b2aa969 100644
79768--- a/kernel/taskstats.c
79769+++ b/kernel/taskstats.c
79770@@ -28,9 +28,12 @@
79771 #include <linux/fs.h>
79772 #include <linux/file.h>
79773 #include <linux/pid_namespace.h>
79774+#include <linux/grsecurity.h>
79775 #include <net/genetlink.h>
79776 #include <linux/atomic.h>
79777
79778+extern int gr_is_taskstats_denied(int pid);
79779+
79780 /*
79781 * Maximum length of a cpumask that can be specified in
79782 * the TASKSTATS_CMD_ATTR_REGISTER/DEREGISTER_CPUMASK attribute
79783@@ -570,6 +573,9 @@ err:
79784
79785 static int taskstats_user_cmd(struct sk_buff *skb, struct genl_info *info)
79786 {
79787+ if (gr_is_taskstats_denied(current->pid))
79788+ return -EACCES;
79789+
79790 if (info->attrs[TASKSTATS_CMD_ATTR_REGISTER_CPUMASK])
79791 return cmd_attr_register_cpumask(info);
79792 else if (info->attrs[TASKSTATS_CMD_ATTR_DEREGISTER_CPUMASK])
79793diff --git a/kernel/time.c b/kernel/time.c
79794index d3617db..c98bbe9 100644
79795--- a/kernel/time.c
79796+++ b/kernel/time.c
79797@@ -172,6 +172,11 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
79798 return error;
79799
79800 if (tz) {
79801+ /* we log in do_settimeofday called below, so don't log twice
79802+ */
79803+ if (!tv)
79804+ gr_log_timechange();
79805+
79806 sys_tz = *tz;
79807 update_vsyscall_tz();
79808 if (firsttime) {
79809@@ -502,7 +507,7 @@ EXPORT_SYMBOL(usecs_to_jiffies);
79810 * The >> (NSEC_JIFFIE_SC - SEC_JIFFIE_SC) converts the scaled nsec
79811 * value to a scaled second value.
79812 */
79813-unsigned long
79814+unsigned long __intentional_overflow(-1)
79815 timespec_to_jiffies(const struct timespec *value)
79816 {
79817 unsigned long sec = value->tv_sec;
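
The time.c hunk covers the one path do_settimeofday() never sees: a timezone-only update, where tv is NULL. A minimal demonstration of that call shape (requires CAP_SYS_TIME; an ordinary user gets EPERM, which is enough to see the path):

#include <stdio.h>
#include <sys/time.h>

int main(void)
{
	struct timezone tz;

	if (gettimeofday(NULL, &tz) != 0) {  /* read current kernel tz */
		perror("gettimeofday");
		return 1;
	}
	/* Write it straight back: tv == NULL means "timezone only",
	 * the exact case the added gr_log_timechange() covers. */
	if (settimeofday(NULL, &tz) != 0)
		perror("settimeofday(NULL, &tz)");
	else
		printf("timezone rewritten: minuteswest=%d\n", tz.tz_minuteswest);
	return 0;
}
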
79818diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
79819index f11d83b..d016d91 100644
79820--- a/kernel/time/alarmtimer.c
79821+++ b/kernel/time/alarmtimer.c
79822@@ -750,7 +750,7 @@ static int __init alarmtimer_init(void)
79823 struct platform_device *pdev;
79824 int error = 0;
79825 int i;
79826- struct k_clock alarm_clock = {
79827+ static struct k_clock alarm_clock = {
79828 .clock_getres = alarm_clock_getres,
79829 .clock_get = alarm_clock_get,
79830 .timer_create = alarm_timer_create,
79831diff --git a/kernel/time/timekeeping.c b/kernel/time/timekeeping.c
79832index baeeb5c..c22704a 100644
79833--- a/kernel/time/timekeeping.c
79834+++ b/kernel/time/timekeeping.c
79835@@ -15,6 +15,7 @@
79836 #include <linux/init.h>
79837 #include <linux/mm.h>
79838 #include <linux/sched.h>
79839+#include <linux/grsecurity.h>
79840 #include <linux/syscore_ops.h>
79841 #include <linux/clocksource.h>
79842 #include <linux/jiffies.h>
79843@@ -495,6 +496,8 @@ int do_settimeofday(const struct timespec *tv)
79844 if (!timespec_valid_strict(tv))
79845 return -EINVAL;
79846
79847+ gr_log_timechange();
79848+
79849 raw_spin_lock_irqsave(&timekeeper_lock, flags);
79850 write_seqcount_begin(&timekeeper_seq);
79851
79852diff --git a/kernel/time/timer_list.c b/kernel/time/timer_list.c
79853index 3bdf283..cc68d83 100644
79854--- a/kernel/time/timer_list.c
79855+++ b/kernel/time/timer_list.c
79856@@ -45,12 +45,16 @@ DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
79857
79858 static void print_name_offset(struct seq_file *m, void *sym)
79859 {
79860+#ifdef CONFIG_GRKERNSEC_HIDESYM
79861+ SEQ_printf(m, "<%p>", NULL);
79862+#else
79863 char symname[KSYM_NAME_LEN];
79864
79865 if (lookup_symbol_name((unsigned long)sym, symname) < 0)
79866 SEQ_printf(m, "<%pK>", sym);
79867 else
79868 SEQ_printf(m, "%s", symname);
79869+#endif
79870 }
79871
79872 static void
79873@@ -119,7 +123,11 @@ next_one:
79874 static void
79875 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
79876 {
79877+#ifdef CONFIG_GRKERNSEC_HIDESYM
79878+ SEQ_printf(m, " .base: %p\n", NULL);
79879+#else
79880 SEQ_printf(m, " .base: %pK\n", base);
79881+#endif
79882 SEQ_printf(m, " .index: %d\n",
79883 base->index);
79884 SEQ_printf(m, " .resolution: %Lu nsecs\n",
79885@@ -355,7 +363,11 @@ static int __init init_timer_list_procfs(void)
79886 {
79887 struct proc_dir_entry *pe;
79888
79889+#ifdef CONFIG_GRKERNSEC_PROC_ADD
79890+ pe = proc_create("timer_list", 0400, NULL, &timer_list_fops);
79891+#else
79892 pe = proc_create("timer_list", 0444, NULL, &timer_list_fops);
79893+#endif
79894 if (!pe)
79895 return -ENOMEM;
79896 return 0;
79897diff --git a/kernel/time/timer_stats.c b/kernel/time/timer_stats.c
79898index 0b537f2..40d6c20 100644
79899--- a/kernel/time/timer_stats.c
79900+++ b/kernel/time/timer_stats.c
79901@@ -116,7 +116,7 @@ static ktime_t time_start, time_stop;
79902 static unsigned long nr_entries;
79903 static struct entry entries[MAX_ENTRIES];
79904
79905-static atomic_t overflow_count;
79906+static atomic_unchecked_t overflow_count;
79907
79908 /*
79909 * The entries are in a hash-table, for fast lookup:
79910@@ -140,7 +140,7 @@ static void reset_entries(void)
79911 nr_entries = 0;
79912 memset(entries, 0, sizeof(entries));
79913 memset(tstat_hash_table, 0, sizeof(tstat_hash_table));
79914- atomic_set(&overflow_count, 0);
79915+ atomic_set_unchecked(&overflow_count, 0);
79916 }
79917
79918 static struct entry *alloc_entry(void)
79919@@ -261,7 +261,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
79920 if (likely(entry))
79921 entry->count++;
79922 else
79923- atomic_inc(&overflow_count);
79924+ atomic_inc_unchecked(&overflow_count);
79925
79926 out_unlock:
79927 raw_spin_unlock_irqrestore(lock, flags);
79928@@ -269,12 +269,16 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
79929
79930 static void print_name_offset(struct seq_file *m, unsigned long addr)
79931 {
79932+#ifdef CONFIG_GRKERNSEC_HIDESYM
79933+ seq_printf(m, "<%p>", NULL);
79934+#else
79935 char symname[KSYM_NAME_LEN];
79936
79937 if (lookup_symbol_name(addr, symname) < 0)
79938- seq_printf(m, "<%p>", (void *)addr);
79939+ seq_printf(m, "<%pK>", (void *)addr);
79940 else
79941 seq_printf(m, "%s", symname);
79942+#endif
79943 }
79944
79945 static int tstats_show(struct seq_file *m, void *v)
79946@@ -300,9 +304,9 @@ static int tstats_show(struct seq_file *m, void *v)
79947
79948 seq_puts(m, "Timer Stats Version: v0.2\n");
79949 seq_printf(m, "Sample period: %ld.%03ld s\n", period.tv_sec, ms);
79950- if (atomic_read(&overflow_count))
79951+ if (atomic_read_unchecked(&overflow_count))
79952 seq_printf(m, "Overflow: %d entries\n",
79953- atomic_read(&overflow_count));
79954+ atomic_read_unchecked(&overflow_count));
79955
79956 for (i = 0; i < nr_entries; i++) {
79957 entry = entries + i;
79958@@ -417,7 +421,11 @@ static int __init init_tstats_procfs(void)
79959 {
79960 struct proc_dir_entry *pe;
79961
79962+#ifdef CONFIG_GRKERNSEC_PROC_ADD
79963+ pe = proc_create("timer_stats", 0600, NULL, &tstats_fops);
79964+#else
79965 pe = proc_create("timer_stats", 0644, NULL, &tstats_fops);
79966+#endif
79967 if (!pe)
79968 return -ENOMEM;
79969 return 0;
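
Both timer_list.c and timer_stats.c apply the same HIDESYM pattern: under the option, print_name_offset() emits a NULL pointer instead of a symbolized kernel address. A userspace analog of that compile-time switch:

#include <stdio.h>

#define DEMO_HIDESYM 1   /* flip to 0 to see the raw address */

static void print_name_offset_demo(const void *sym)
{
#if DEMO_HIDESYM
	printf("<%p>\n", (void *)NULL);  /* redacted, leaks nothing */
#else
	printf("<%p>\n", (void *)sym);   /* would expose the address */
#endif
}

int main(void)
{
	static long some_symbol;         /* stand-in for a kernel symbol */
	print_name_offset_demo(&some_symbol);
	return 0;
}
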
79970diff --git a/kernel/timer.c b/kernel/timer.c
79971index 15bc1b4..32da49c 100644
79972--- a/kernel/timer.c
79973+++ b/kernel/timer.c
79974@@ -1366,7 +1366,7 @@ void update_process_times(int user_tick)
79975 /*
79976 * This function runs timers and the timer-tq in bottom half context.
79977 */
79978-static void run_timer_softirq(struct softirq_action *h)
79979+static void run_timer_softirq(void)
79980 {
79981 struct tvec_base *base = __this_cpu_read(tvec_bases);
79982
79983@@ -1429,7 +1429,7 @@ static void process_timeout(unsigned long __data)
79984 *
79985 * In all cases the return value is guaranteed to be non-negative.
79986 */
79987-signed long __sched schedule_timeout(signed long timeout)
79988+signed long __sched __intentional_overflow(-1) schedule_timeout(signed long timeout)
79989 {
79990 struct timer_list timer;
79991 unsigned long expire;
79992@@ -1635,7 +1635,7 @@ static int __cpuinit timer_cpu_notify(struct notifier_block *self,
79993 return NOTIFY_OK;
79994 }
79995
79996-static struct notifier_block __cpuinitdata timers_nb = {
79997+static struct notifier_block timers_nb = {
79998 .notifier_call = timer_cpu_notify,
79999 };
80000
80001diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
80002index b8b8560..75b1a09 100644
80003--- a/kernel/trace/blktrace.c
80004+++ b/kernel/trace/blktrace.c
80005@@ -317,7 +317,7 @@ static ssize_t blk_dropped_read(struct file *filp, char __user *buffer,
80006 struct blk_trace *bt = filp->private_data;
80007 char buf[16];
80008
80009- snprintf(buf, sizeof(buf), "%u\n", atomic_read(&bt->dropped));
80010+ snprintf(buf, sizeof(buf), "%u\n", atomic_read_unchecked(&bt->dropped));
80011
80012 return simple_read_from_buffer(buffer, count, ppos, buf, strlen(buf));
80013 }
80014@@ -375,7 +375,7 @@ static int blk_subbuf_start_callback(struct rchan_buf *buf, void *subbuf,
80015 return 1;
80016
80017 bt = buf->chan->private_data;
80018- atomic_inc(&bt->dropped);
80019+ atomic_inc_unchecked(&bt->dropped);
80020 return 0;
80021 }
80022
80023@@ -476,7 +476,7 @@ int do_blk_trace_setup(struct request_queue *q, char *name, dev_t dev,
80024
80025 bt->dir = dir;
80026 bt->dev = dev;
80027- atomic_set(&bt->dropped, 0);
80028+ atomic_set_unchecked(&bt->dropped, 0);
80029
80030 ret = -EIO;
80031 bt->dropped_file = debugfs_create_file("dropped", 0444, dir, bt,
80032diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c
80033index 6c508ff..ee55a13 100644
80034--- a/kernel/trace/ftrace.c
80035+++ b/kernel/trace/ftrace.c
80036@@ -1915,12 +1915,17 @@ ftrace_code_disable(struct module *mod, struct dyn_ftrace *rec)
80037 if (unlikely(ftrace_disabled))
80038 return 0;
80039
80040+ ret = ftrace_arch_code_modify_prepare();
80041+ FTRACE_WARN_ON(ret);
80042+ if (ret)
80043+ return 0;
80044+
80045 ret = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
80046+ FTRACE_WARN_ON(ftrace_arch_code_modify_post_process());
80047 if (ret) {
80048 ftrace_bug(ret, ip);
80049- return 0;
80050 }
80051- return 1;
80052+ return ret ? 0 : 1;
80053 }
80054
80055 /*
80056@@ -3931,8 +3936,10 @@ static int ftrace_process_locs(struct module *mod,
80057 if (!count)
80058 return 0;
80059
80060+ pax_open_kernel();
80061 sort(start, count, sizeof(*start),
80062 ftrace_cmp_ips, ftrace_swap_ips);
80063+ pax_close_kernel();
80064
80065 start_pg = ftrace_allocate_pages(count);
80066 if (!start_pg)
80067@@ -4655,8 +4662,6 @@ ftrace_enable_sysctl(struct ctl_table *table, int write,
80068 #ifdef CONFIG_FUNCTION_GRAPH_TRACER
80069
80070 static int ftrace_graph_active;
80071-static struct notifier_block ftrace_suspend_notifier;
80072-
80073 int ftrace_graph_entry_stub(struct ftrace_graph_ent *trace)
80074 {
80075 return 0;
80076@@ -4800,6 +4805,10 @@ ftrace_suspend_notifier_call(struct notifier_block *bl, unsigned long state,
80077 return NOTIFY_DONE;
80078 }
80079
80080+static struct notifier_block ftrace_suspend_notifier = {
80081+ .notifier_call = ftrace_suspend_notifier_call
80082+};
80083+
80084 int register_ftrace_graph(trace_func_graph_ret_t retfunc,
80085 trace_func_graph_ent_t entryfunc)
80086 {
80087@@ -4813,7 +4822,6 @@ int register_ftrace_graph(trace_func_graph_ret_t retfunc,
80088 goto out;
80089 }
80090
80091- ftrace_suspend_notifier.notifier_call = ftrace_suspend_notifier_call;
80092 register_pm_notifier(&ftrace_suspend_notifier);
80093
80094 ftrace_graph_active++;
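
The ftrace_code_disable() change earlier in this file fixes an error-handling hole: the text mapping must be prepared before ftrace_make_nop() and always restored afterwards, with success reported only when the modification itself worked. A runnable sketch of the corrected flow, with stub functions standing in for the arch hooks:

#include <stdio.h>

static int prepare(void)      { puts("prepare: make text writable"); return 0; }
static int modify(void)       { puts("modify: patch call site");     return 0; }
static int post_process(void) { puts("post: restore protections");   return 0; }

static int code_disable_demo(void)
{
	int ret = prepare();
	if (ret) {
		fprintf(stderr, "prepare failed (%d)\n", ret);
		return 0;           /* 0 == failure, as in the hunk */
	}

	ret = modify();
	post_process();             /* runs on success *and* failure */

	if (ret)
		fprintf(stderr, "modify failed (%d)\n", ret);
	return ret ? 0 : 1;         /* 1 only on full success */
}

int main(void)
{
	printf("result: %d\n", code_disable_demo());
	return 0;
}
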
80095diff --git a/kernel/trace/ring_buffer.c b/kernel/trace/ring_buffer.c
80096index e444ff8..438b8f4 100644
80097--- a/kernel/trace/ring_buffer.c
80098+++ b/kernel/trace/ring_buffer.c
80099@@ -352,9 +352,9 @@ struct buffer_data_page {
80100 */
80101 struct buffer_page {
80102 struct list_head list; /* list of buffer pages */
80103- local_t write; /* index for next write */
80104+ local_unchecked_t write; /* index for next write */
80105 unsigned read; /* index for next read */
80106- local_t entries; /* entries on this page */
80107+ local_unchecked_t entries; /* entries on this page */
80108 unsigned long real_end; /* real end of data */
80109 struct buffer_data_page *page; /* Actual data page */
80110 };
80111@@ -473,8 +473,8 @@ struct ring_buffer_per_cpu {
80112 unsigned long last_overrun;
80113 local_t entries_bytes;
80114 local_t entries;
80115- local_t overrun;
80116- local_t commit_overrun;
80117+ local_unchecked_t overrun;
80118+ local_unchecked_t commit_overrun;
80119 local_t dropped_events;
80120 local_t committing;
80121 local_t commits;
80122@@ -992,8 +992,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
80123 *
80124 * We add a counter to the write field to denote this.
80125 */
80126- old_write = local_add_return(RB_WRITE_INTCNT, &next_page->write);
80127- old_entries = local_add_return(RB_WRITE_INTCNT, &next_page->entries);
80128+ old_write = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->write);
80129+ old_entries = local_add_return_unchecked(RB_WRITE_INTCNT, &next_page->entries);
80130
80131 /*
80132 * Just make sure we have seen our old_write and synchronize
80133@@ -1021,8 +1021,8 @@ static int rb_tail_page_update(struct ring_buffer_per_cpu *cpu_buffer,
80134 * cmpxchg to only update if an interrupt did not already
80135 * do it for us. If the cmpxchg fails, we don't care.
80136 */
80137- (void)local_cmpxchg(&next_page->write, old_write, val);
80138- (void)local_cmpxchg(&next_page->entries, old_entries, eval);
80139+ (void)local_cmpxchg_unchecked(&next_page->write, old_write, val);
80140+ (void)local_cmpxchg_unchecked(&next_page->entries, old_entries, eval);
80141
80142 /*
80143 * No need to worry about races with clearing out the commit.
80144@@ -1386,12 +1386,12 @@ static void rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer);
80145
80146 static inline unsigned long rb_page_entries(struct buffer_page *bpage)
80147 {
80148- return local_read(&bpage->entries) & RB_WRITE_MASK;
80149+ return local_read_unchecked(&bpage->entries) & RB_WRITE_MASK;
80150 }
80151
80152 static inline unsigned long rb_page_write(struct buffer_page *bpage)
80153 {
80154- return local_read(&bpage->write) & RB_WRITE_MASK;
80155+ return local_read_unchecked(&bpage->write) & RB_WRITE_MASK;
80156 }
80157
80158 static int
80159@@ -1486,7 +1486,7 @@ rb_remove_pages(struct ring_buffer_per_cpu *cpu_buffer, unsigned int nr_pages)
80160 * bytes consumed in ring buffer from here.
80161 * Increment overrun to account for the lost events.
80162 */
80163- local_add(page_entries, &cpu_buffer->overrun);
80164+ local_add_unchecked(page_entries, &cpu_buffer->overrun);
80165 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
80166 }
80167
80168@@ -2063,7 +2063,7 @@ rb_handle_head_page(struct ring_buffer_per_cpu *cpu_buffer,
80169 * it is our responsibility to update
80170 * the counters.
80171 */
80172- local_add(entries, &cpu_buffer->overrun);
80173+ local_add_unchecked(entries, &cpu_buffer->overrun);
80174 local_sub(BUF_PAGE_SIZE, &cpu_buffer->entries_bytes);
80175
80176 /*
80177@@ -2213,7 +2213,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
80178 if (tail == BUF_PAGE_SIZE)
80179 tail_page->real_end = 0;
80180
80181- local_sub(length, &tail_page->write);
80182+ local_sub_unchecked(length, &tail_page->write);
80183 return;
80184 }
80185
80186@@ -2248,7 +2248,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
80187 rb_event_set_padding(event);
80188
80189 /* Set the write back to the previous setting */
80190- local_sub(length, &tail_page->write);
80191+ local_sub_unchecked(length, &tail_page->write);
80192 return;
80193 }
80194
80195@@ -2260,7 +2260,7 @@ rb_reset_tail(struct ring_buffer_per_cpu *cpu_buffer,
80196
80197 /* Set write to end of buffer */
80198 length = (tail + length) - BUF_PAGE_SIZE;
80199- local_sub(length, &tail_page->write);
80200+ local_sub_unchecked(length, &tail_page->write);
80201 }
80202
80203 /*
80204@@ -2286,7 +2286,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
80205 * about it.
80206 */
80207 if (unlikely(next_page == commit_page)) {
80208- local_inc(&cpu_buffer->commit_overrun);
80209+ local_inc_unchecked(&cpu_buffer->commit_overrun);
80210 goto out_reset;
80211 }
80212
80213@@ -2342,7 +2342,7 @@ rb_move_tail(struct ring_buffer_per_cpu *cpu_buffer,
80214 cpu_buffer->tail_page) &&
80215 (cpu_buffer->commit_page ==
80216 cpu_buffer->reader_page))) {
80217- local_inc(&cpu_buffer->commit_overrun);
80218+ local_inc_unchecked(&cpu_buffer->commit_overrun);
80219 goto out_reset;
80220 }
80221 }
80222@@ -2390,7 +2390,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
80223 length += RB_LEN_TIME_EXTEND;
80224
80225 tail_page = cpu_buffer->tail_page;
80226- write = local_add_return(length, &tail_page->write);
80227+ write = local_add_return_unchecked(length, &tail_page->write);
80228
80229 /* set write to only the index of the write */
80230 write &= RB_WRITE_MASK;
80231@@ -2407,7 +2407,7 @@ __rb_reserve_next(struct ring_buffer_per_cpu *cpu_buffer,
80232 kmemcheck_annotate_bitfield(event, bitfield);
80233 rb_update_event(cpu_buffer, event, length, add_timestamp, delta);
80234
80235- local_inc(&tail_page->entries);
80236+ local_inc_unchecked(&tail_page->entries);
80237
80238 /*
80239 * If this is the first commit on the page, then update
80240@@ -2440,7 +2440,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
80241
80242 if (bpage->page == (void *)addr && rb_page_write(bpage) == old_index) {
80243 unsigned long write_mask =
80244- local_read(&bpage->write) & ~RB_WRITE_MASK;
80245+ local_read_unchecked(&bpage->write) & ~RB_WRITE_MASK;
80246 unsigned long event_length = rb_event_length(event);
80247 /*
80248 * This is on the tail page. It is possible that
80249@@ -2450,7 +2450,7 @@ rb_try_to_discard(struct ring_buffer_per_cpu *cpu_buffer,
80250 */
80251 old_index += write_mask;
80252 new_index += write_mask;
80253- index = local_cmpxchg(&bpage->write, old_index, new_index);
80254+ index = local_cmpxchg_unchecked(&bpage->write, old_index, new_index);
80255 if (index == old_index) {
80256 /* update counters */
80257 local_sub(event_length, &cpu_buffer->entries_bytes);
80258@@ -2842,7 +2842,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
80259
80260 /* Do the likely case first */
80261 if (likely(bpage->page == (void *)addr)) {
80262- local_dec(&bpage->entries);
80263+ local_dec_unchecked(&bpage->entries);
80264 return;
80265 }
80266
80267@@ -2854,7 +2854,7 @@ rb_decrement_entry(struct ring_buffer_per_cpu *cpu_buffer,
80268 start = bpage;
80269 do {
80270 if (bpage->page == (void *)addr) {
80271- local_dec(&bpage->entries);
80272+ local_dec_unchecked(&bpage->entries);
80273 return;
80274 }
80275 rb_inc_page(cpu_buffer, &bpage);
80276@@ -3138,7 +3138,7 @@ static inline unsigned long
80277 rb_num_of_entries(struct ring_buffer_per_cpu *cpu_buffer)
80278 {
80279 return local_read(&cpu_buffer->entries) -
80280- (local_read(&cpu_buffer->overrun) + cpu_buffer->read);
80281+ (local_read_unchecked(&cpu_buffer->overrun) + cpu_buffer->read);
80282 }
80283
80284 /**
80285@@ -3227,7 +3227,7 @@ unsigned long ring_buffer_overrun_cpu(struct ring_buffer *buffer, int cpu)
80286 return 0;
80287
80288 cpu_buffer = buffer->buffers[cpu];
80289- ret = local_read(&cpu_buffer->overrun);
80290+ ret = local_read_unchecked(&cpu_buffer->overrun);
80291
80292 return ret;
80293 }
80294@@ -3250,7 +3250,7 @@ ring_buffer_commit_overrun_cpu(struct ring_buffer *buffer, int cpu)
80295 return 0;
80296
80297 cpu_buffer = buffer->buffers[cpu];
80298- ret = local_read(&cpu_buffer->commit_overrun);
80299+ ret = local_read_unchecked(&cpu_buffer->commit_overrun);
80300
80301 return ret;
80302 }
80303@@ -3335,7 +3335,7 @@ unsigned long ring_buffer_overruns(struct ring_buffer *buffer)
80304 /* if you care about this being correct, lock the buffer */
80305 for_each_buffer_cpu(buffer, cpu) {
80306 cpu_buffer = buffer->buffers[cpu];
80307- overruns += local_read(&cpu_buffer->overrun);
80308+ overruns += local_read_unchecked(&cpu_buffer->overrun);
80309 }
80310
80311 return overruns;
80312@@ -3511,8 +3511,8 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
80313 /*
80314 * Reset the reader page to size zero.
80315 */
80316- local_set(&cpu_buffer->reader_page->write, 0);
80317- local_set(&cpu_buffer->reader_page->entries, 0);
80318+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
80319+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
80320 local_set(&cpu_buffer->reader_page->page->commit, 0);
80321 cpu_buffer->reader_page->real_end = 0;
80322
80323@@ -3546,7 +3546,7 @@ rb_get_reader_page(struct ring_buffer_per_cpu *cpu_buffer)
80324 * want to compare with the last_overrun.
80325 */
80326 smp_mb();
80327- overwrite = local_read(&(cpu_buffer->overrun));
80328+ overwrite = local_read_unchecked(&(cpu_buffer->overrun));
80329
80330 /*
80331 * Here's the tricky part.
80332@@ -4116,8 +4116,8 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
80333
80334 cpu_buffer->head_page
80335 = list_entry(cpu_buffer->pages, struct buffer_page, list);
80336- local_set(&cpu_buffer->head_page->write, 0);
80337- local_set(&cpu_buffer->head_page->entries, 0);
80338+ local_set_unchecked(&cpu_buffer->head_page->write, 0);
80339+ local_set_unchecked(&cpu_buffer->head_page->entries, 0);
80340 local_set(&cpu_buffer->head_page->page->commit, 0);
80341
80342 cpu_buffer->head_page->read = 0;
80343@@ -4127,14 +4127,14 @@ rb_reset_cpu(struct ring_buffer_per_cpu *cpu_buffer)
80344
80345 INIT_LIST_HEAD(&cpu_buffer->reader_page->list);
80346 INIT_LIST_HEAD(&cpu_buffer->new_pages);
80347- local_set(&cpu_buffer->reader_page->write, 0);
80348- local_set(&cpu_buffer->reader_page->entries, 0);
80349+ local_set_unchecked(&cpu_buffer->reader_page->write, 0);
80350+ local_set_unchecked(&cpu_buffer->reader_page->entries, 0);
80351 local_set(&cpu_buffer->reader_page->page->commit, 0);
80352 cpu_buffer->reader_page->read = 0;
80353
80354 local_set(&cpu_buffer->entries_bytes, 0);
80355- local_set(&cpu_buffer->overrun, 0);
80356- local_set(&cpu_buffer->commit_overrun, 0);
80357+ local_set_unchecked(&cpu_buffer->overrun, 0);
80358+ local_set_unchecked(&cpu_buffer->commit_overrun, 0);
80359 local_set(&cpu_buffer->dropped_events, 0);
80360 local_set(&cpu_buffer->entries, 0);
80361 local_set(&cpu_buffer->committing, 0);
80362@@ -4538,8 +4538,8 @@ int ring_buffer_read_page(struct ring_buffer *buffer,
80363 rb_init_page(bpage);
80364 bpage = reader->page;
80365 reader->page = *data_page;
80366- local_set(&reader->write, 0);
80367- local_set(&reader->entries, 0);
80368+ local_set_unchecked(&reader->write, 0);
80369+ local_set_unchecked(&reader->entries, 0);
80370 reader->read = 0;
80371 *data_page = bpage;
80372
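
The ring-buffer conversions to *_unchecked all target statistics counters whose wraparound is benign; the point of the unchecked variants is to exempt them from PaX's overflow trap while refcount-style counters stay protected. The underlying C property being relied on:

#include <limits.h>
#include <stdio.h>

int main(void)
{
	unsigned int overrun = UINT_MAX;  /* stand-in for overrun/write/entries */

	overrun += 1;                     /* defined behavior: wraps to 0 */
	printf("after wrap: %u\n", overrun);

	/* A checked counter doing the same would trip the REFCOUNT-style
	 * detector, which is exactly what the unchecked types avoid. */
	return 0;
}
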
80373diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
80374index 0b936d8..306a7eb 100644
80375--- a/kernel/trace/trace.c
80376+++ b/kernel/trace/trace.c
80377@@ -3302,7 +3302,7 @@ int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set)
80378 return 0;
80379 }
80380
80381-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled)
80382+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled)
80383 {
80384 /* do nothing if flag is already set */
80385 if (!!(trace_flags & mask) == !!enabled)
80386diff --git a/kernel/trace/trace.h b/kernel/trace/trace.h
80387index 51b4448..7be601f 100644
80388--- a/kernel/trace/trace.h
80389+++ b/kernel/trace/trace.h
80390@@ -1035,7 +1035,7 @@ extern const char *__stop___trace_bprintk_fmt[];
80391 void trace_printk_init_buffers(void);
80392 void trace_printk_start_comm(void);
80393 int trace_keep_overwrite(struct tracer *tracer, u32 mask, int set);
80394-int set_tracer_flag(struct trace_array *tr, unsigned int mask, int enabled);
80395+int set_tracer_flag(struct trace_array *tr, unsigned long mask, int enabled);
80396
80397 /*
80398 * Normal trace_printk() and friends allocates special buffers
80399diff --git a/kernel/trace/trace_events.c b/kernel/trace/trace_events.c
80400index 6dfd48b..a6d88d0 100644
80401--- a/kernel/trace/trace_events.c
80402+++ b/kernel/trace/trace_events.c
80403@@ -1731,10 +1731,6 @@ static LIST_HEAD(ftrace_module_file_list);
80404 struct ftrace_module_file_ops {
80405 struct list_head list;
80406 struct module *mod;
80407- struct file_operations id;
80408- struct file_operations enable;
80409- struct file_operations format;
80410- struct file_operations filter;
80411 };
80412
80413 static struct ftrace_module_file_ops *
80414@@ -1775,17 +1771,12 @@ trace_create_file_ops(struct module *mod)
80415
80416 file_ops->mod = mod;
80417
80418- file_ops->id = ftrace_event_id_fops;
80419- file_ops->id.owner = mod;
80420-
80421- file_ops->enable = ftrace_enable_fops;
80422- file_ops->enable.owner = mod;
80423-
80424- file_ops->filter = ftrace_event_filter_fops;
80425- file_ops->filter.owner = mod;
80426-
80427- file_ops->format = ftrace_event_format_fops;
80428- file_ops->format.owner = mod;
80429+ pax_open_kernel();
80430+ mod->trace_id.owner = mod;
80431+ mod->trace_enable.owner = mod;
80432+ mod->trace_filter.owner = mod;
80433+ mod->trace_format.owner = mod;
80434+ pax_close_kernel();
80435
80436 list_add(&file_ops->list, &ftrace_module_file_list);
80437
80438@@ -1878,8 +1869,8 @@ __trace_add_new_mod_event(struct ftrace_event_call *call,
80439 struct ftrace_module_file_ops *file_ops)
80440 {
80441 return __trace_add_new_event(call, tr,
80442- &file_ops->id, &file_ops->enable,
80443- &file_ops->filter, &file_ops->format);
80444+ &file_ops->mod->trace_id, &file_ops->mod->trace_enable,
80445+ &file_ops->mod->trace_filter, &file_ops->mod->trace_format);
80446 }
80447
80448 #else
80449diff --git a/kernel/trace/trace_mmiotrace.c b/kernel/trace/trace_mmiotrace.c
80450index a5e8f48..a9690d2 100644
80451--- a/kernel/trace/trace_mmiotrace.c
80452+++ b/kernel/trace/trace_mmiotrace.c
80453@@ -24,7 +24,7 @@ struct header_iter {
80454 static struct trace_array *mmio_trace_array;
80455 static bool overrun_detected;
80456 static unsigned long prev_overruns;
80457-static atomic_t dropped_count;
80458+static atomic_unchecked_t dropped_count;
80459
80460 static void mmio_reset_data(struct trace_array *tr)
80461 {
80462@@ -127,7 +127,7 @@ static void mmio_close(struct trace_iterator *iter)
80463
80464 static unsigned long count_overruns(struct trace_iterator *iter)
80465 {
80466- unsigned long cnt = atomic_xchg(&dropped_count, 0);
80467+ unsigned long cnt = atomic_xchg_unchecked(&dropped_count, 0);
80468 unsigned long over = ring_buffer_overruns(iter->trace_buffer->buffer);
80469
80470 if (over > prev_overruns)
80471@@ -317,7 +317,7 @@ static void __trace_mmiotrace_rw(struct trace_array *tr,
80472 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_RW,
80473 sizeof(*entry), 0, pc);
80474 if (!event) {
80475- atomic_inc(&dropped_count);
80476+ atomic_inc_unchecked(&dropped_count);
80477 return;
80478 }
80479 entry = ring_buffer_event_data(event);
80480@@ -347,7 +347,7 @@ static void __trace_mmiotrace_map(struct trace_array *tr,
80481 event = trace_buffer_lock_reserve(buffer, TRACE_MMIO_MAP,
80482 sizeof(*entry), 0, pc);
80483 if (!event) {
80484- atomic_inc(&dropped_count);
80485+ atomic_inc_unchecked(&dropped_count);
80486 return;
80487 }
80488 entry = ring_buffer_event_data(event);
80489diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c
80490index bb922d9..2a54a257 100644
80491--- a/kernel/trace/trace_output.c
80492+++ b/kernel/trace/trace_output.c
80493@@ -294,7 +294,7 @@ int trace_seq_path(struct trace_seq *s, const struct path *path)
80494
80495 p = d_path(path, s->buffer + s->len, PAGE_SIZE - s->len);
80496 if (!IS_ERR(p)) {
80497- p = mangle_path(s->buffer + s->len, p, "\n");
80498+ p = mangle_path(s->buffer + s->len, p, "\n\\");
80499 if (p) {
80500 s->len = p - s->buffer;
80501 return 1;
80502@@ -893,14 +893,16 @@ int register_ftrace_event(struct trace_event *event)
80503 goto out;
80504 }
80505
80506+ pax_open_kernel();
80507 if (event->funcs->trace == NULL)
80508- event->funcs->trace = trace_nop_print;
80509+ *(void **)&event->funcs->trace = trace_nop_print;
80510 if (event->funcs->raw == NULL)
80511- event->funcs->raw = trace_nop_print;
80512+ *(void **)&event->funcs->raw = trace_nop_print;
80513 if (event->funcs->hex == NULL)
80514- event->funcs->hex = trace_nop_print;
80515+ *(void **)&event->funcs->hex = trace_nop_print;
80516 if (event->funcs->binary == NULL)
80517- event->funcs->binary = trace_nop_print;
80518+ *(void **)&event->funcs->binary = trace_nop_print;
80519+ pax_close_kernel();
80520
80521 key = event->type & (EVENT_HASHSIZE - 1);
80522
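
The one-character change to the mangle_path() escape set closes a spoofing gap: a path containing a literal backslash could previously masquerade as an already-escaped sequence in trace output. A simplified reimplementation of the seq_file-style octal mangling (not the kernel function itself) shows the difference:

#include <stdio.h>
#include <string.h>

static void mangle_demo(const char *path, const char *esc)
{
	for (const char *p = path; *p; p++) {
		if (strchr(esc, *p))
			printf("\\%03o", (unsigned char)*p); /* \ooo escape */
		else
			putchar(*p);
	}
	putchar('\n');
}

int main(void)
{
	/* one literal backslash, then "134name", then a real newline */
	const char *tricky = "/tmp/evil\\134name\nwith-newline";

	mangle_demo(tricky, "\n");    /* old set: backslash passes through */
	mangle_demo(tricky, "\n\\");  /* patched set: backslash escaped too */
	return 0;
}
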
80523diff --git a/kernel/trace/trace_stack.c b/kernel/trace/trace_stack.c
80524index b20428c..4845a10 100644
80525--- a/kernel/trace/trace_stack.c
80526+++ b/kernel/trace/trace_stack.c
80527@@ -68,7 +68,7 @@ check_stack(unsigned long ip, unsigned long *stack)
80528 return;
80529
80530 /* we do not handle interrupt stacks yet */
80531- if (!object_is_on_stack(stack))
80532+ if (!object_starts_on_stack(stack))
80533 return;
80534
80535 local_irq_save(flags);
80536diff --git a/kernel/user_namespace.c b/kernel/user_namespace.c
80537index d8c30db..e065e89 100644
80538--- a/kernel/user_namespace.c
80539+++ b/kernel/user_namespace.c
80540@@ -853,7 +853,7 @@ static int userns_install(struct nsproxy *nsproxy, void *ns)
80541 if (atomic_read(&current->mm->mm_users) > 1)
80542 return -EINVAL;
80543
80544- if (current->fs->users != 1)
80545+ if (atomic_read(&current->fs->users) != 1)
80546 return -EINVAL;
80547
80548 if (!ns_capable(user_ns, CAP_SYS_ADMIN))
80549diff --git a/kernel/utsname_sysctl.c b/kernel/utsname_sysctl.c
80550index 4f69f9a..7c6f8f8 100644
80551--- a/kernel/utsname_sysctl.c
80552+++ b/kernel/utsname_sysctl.c
80553@@ -47,7 +47,7 @@ static void put_uts(ctl_table *table, int write, void *which)
80554 static int proc_do_uts_string(ctl_table *table, int write,
80555 void __user *buffer, size_t *lenp, loff_t *ppos)
80556 {
80557- struct ctl_table uts_table;
80558+ ctl_table_no_const uts_table;
80559 int r;
80560 memcpy(&uts_table, table, sizeof(uts_table));
80561 uts_table.data = get_uts(table, write);
80562diff --git a/kernel/watchdog.c b/kernel/watchdog.c
80563index 05039e3..17490c7 100644
80564--- a/kernel/watchdog.c
80565+++ b/kernel/watchdog.c
80566@@ -531,7 +531,7 @@ int proc_dowatchdog(struct ctl_table *table, int write,
80567 }
80568 #endif /* CONFIG_SYSCTL */
80569
80570-static struct smp_hotplug_thread watchdog_threads = {
80571+static struct smp_hotplug_thread watchdog_threads __read_only = {
80572 .store = &softlockup_watchdog,
80573 .thread_should_run = watchdog_should_run,
80574 .thread_fn = watchdog,
80575diff --git a/kernel/workqueue.c b/kernel/workqueue.c
80576index ee8e29a..410568e 100644
80577--- a/kernel/workqueue.c
80578+++ b/kernel/workqueue.c
80579@@ -4584,7 +4584,7 @@ static void rebind_workers(struct worker_pool *pool)
80580 WARN_ON_ONCE(!(worker_flags & WORKER_UNBOUND));
80581 worker_flags |= WORKER_REBOUND;
80582 worker_flags &= ~WORKER_UNBOUND;
80583- ACCESS_ONCE(worker->flags) = worker_flags;
80584+ ACCESS_ONCE_RW(worker->flags) = worker_flags;
80585 }
80586
80587 spin_unlock_irq(&pool->lock);
80588diff --git a/lib/Kconfig.debug b/lib/Kconfig.debug
80589index 566cf2b..fdaa52c 100644
80590--- a/lib/Kconfig.debug
80591+++ b/lib/Kconfig.debug
80592@@ -549,7 +549,7 @@ config DEBUG_MUTEXES
80593
80594 config DEBUG_LOCK_ALLOC
80595 bool "Lock debugging: detect incorrect freeing of live locks"
80596- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
80597+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
80598 select DEBUG_SPINLOCK
80599 select DEBUG_MUTEXES
80600 select LOCKDEP
80601@@ -563,7 +563,7 @@ config DEBUG_LOCK_ALLOC
80602
80603 config PROVE_LOCKING
80604 bool "Lock debugging: prove locking correctness"
80605- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
80606+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
80607 select LOCKDEP
80608 select DEBUG_SPINLOCK
80609 select DEBUG_MUTEXES
80610@@ -614,7 +614,7 @@ config LOCKDEP
80611
80612 config LOCK_STAT
80613 bool "Lock usage statistics"
80614- depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT
80615+ depends on DEBUG_KERNEL && TRACE_IRQFLAGS_SUPPORT && STACKTRACE_SUPPORT && LOCKDEP_SUPPORT && !PAX_CONSTIFY_PLUGIN
80616 select LOCKDEP
80617 select DEBUG_SPINLOCK
80618 select DEBUG_MUTEXES
80619@@ -1282,6 +1282,7 @@ config LATENCYTOP
80620 depends on DEBUG_KERNEL
80621 depends on STACKTRACE_SUPPORT
80622 depends on PROC_FS
80623+ depends on !GRKERNSEC_HIDESYM
80624 select FRAME_POINTER if !MIPS && !PPC && !S390 && !MICROBLAZE && !ARM_UNWIND
80625 select KALLSYMS
80626 select KALLSYMS_ALL
80627@@ -1298,7 +1299,7 @@ config ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
80628 config DEBUG_STRICT_USER_COPY_CHECKS
80629 bool "Strict user copy size checks"
80630 depends on ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
80631- depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
80632+ depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING && !PAX_SIZE_OVERFLOW
80633 help
80634 Enabling this option turns a certain set of sanity checks for user
80635 copy operations into compile time failures.
80636@@ -1328,7 +1329,7 @@ config INTERVAL_TREE_TEST
80637
80638 config PROVIDE_OHCI1394_DMA_INIT
80639 bool "Remote debugging over FireWire early on boot"
80640- depends on PCI && X86
80641+ depends on PCI && X86 && !GRKERNSEC
80642 help
80643 If you want to debug problems which hang or crash the kernel early
80644 on boot and the crashing machine has a FireWire port, you can use
80645@@ -1357,7 +1358,7 @@ config PROVIDE_OHCI1394_DMA_INIT
80646
80647 config FIREWIRE_OHCI_REMOTE_DMA
80648 bool "Remote debugging over FireWire with firewire-ohci"
80649- depends on FIREWIRE_OHCI
80650+ depends on FIREWIRE_OHCI && !GRKERNSEC
80651 help
80652 This option lets you use the FireWire bus for remote debugging
80653 with help of the firewire-ohci driver. It enables unfiltered
80654diff --git a/lib/Makefile b/lib/Makefile
80655index c55a037..fb46e3b 100644
80656--- a/lib/Makefile
80657+++ b/lib/Makefile
80658@@ -50,7 +50,7 @@ obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
80659
80660 obj-$(CONFIG_BTREE) += btree.o
80661 obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
80662-obj-$(CONFIG_DEBUG_LIST) += list_debug.o
80663+obj-y += list_debug.o
80664 obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o
80665
80666 ifneq ($(CONFIG_HAVE_DEC_LOCK),y)
80667diff --git a/lib/bitmap.c b/lib/bitmap.c
80668index 06f7e4f..f3cf2b0 100644
80669--- a/lib/bitmap.c
80670+++ b/lib/bitmap.c
80671@@ -422,7 +422,7 @@ int __bitmap_parse(const char *buf, unsigned int buflen,
80672 {
80673 int c, old_c, totaldigits, ndigits, nchunks, nbits;
80674 u32 chunk;
80675- const char __user __force *ubuf = (const char __user __force *)buf;
80676+ const char __user *ubuf = (const char __force_user *)buf;
80677
80678 bitmap_zero(maskp, nmaskbits);
80679
80680@@ -507,7 +507,7 @@ int bitmap_parse_user(const char __user *ubuf,
80681 {
80682 if (!access_ok(VERIFY_READ, ubuf, ulen))
80683 return -EFAULT;
80684- return __bitmap_parse((const char __force *)ubuf,
80685+ return __bitmap_parse((const char __force_kernel *)ubuf,
80686 ulen, 1, maskp, nmaskbits);
80687
80688 }
80689@@ -598,7 +598,7 @@ static int __bitmap_parselist(const char *buf, unsigned int buflen,
80690 {
80691 unsigned a, b;
80692 int c, old_c, totaldigits;
80693- const char __user __force *ubuf = (const char __user __force *)buf;
80694+ const char __user *ubuf = (const char __force_user *)buf;
80695 int exp_digit, in_range;
80696
80697 totaldigits = c = 0;
80698@@ -698,7 +698,7 @@ int bitmap_parselist_user(const char __user *ubuf,
80699 {
80700 if (!access_ok(VERIFY_READ, ubuf, ulen))
80701 return -EFAULT;
80702- return __bitmap_parselist((const char __force *)ubuf,
80703+ return __bitmap_parselist((const char __force_kernel *)ubuf,
80704 ulen, 1, maskp, nmaskbits);
80705 }
80706 EXPORT_SYMBOL(bitmap_parselist_user);
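
The bitmap.c churn above is sparse-annotation only (__force_user/__force_kernel); the accepted grammar is unchanged. For reference, __bitmap_parselist() takes lists like "0-3,7"; a tiny userspace reimplementation of that grammar (one 32-bit word, minimal error handling) makes the format concrete:

#include <stdio.h>
#include <stdlib.h>

static unsigned int parse_list_demo(const char *s)
{
	unsigned int mask = 0;

	while (*s) {
		char *end;
		unsigned long a = strtoul(s, &end, 10), b;

		if (end == s)
			break;                  /* not a number: stop */
		b = a;
		if (*end == '-')                /* a range like "0-3" */
			b = strtoul(end + 1, &end, 10);
		for (unsigned long i = a; i <= b && i < 32; i++)
			mask |= 1u << i;
		if (*end != ',')
			break;                  /* end of list */
		s = end + 1;
	}
	return mask;
}

int main(void)
{
	printf("\"0-3,7\" -> 0x%x\n", parse_list_demo("0-3,7")); /* 0x8f */
	return 0;
}
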
80707diff --git a/lib/bug.c b/lib/bug.c
80708index 1686034..a9c00c8 100644
80709--- a/lib/bug.c
80710+++ b/lib/bug.c
80711@@ -134,6 +134,8 @@ enum bug_trap_type report_bug(unsigned long bugaddr, struct pt_regs *regs)
80712 return BUG_TRAP_TYPE_NONE;
80713
80714 bug = find_bug(bugaddr);
80715+ if (!bug)
80716+ return BUG_TRAP_TYPE_NONE;
80717
80718 file = NULL;
80719 line = 0;
80720diff --git a/lib/debugobjects.c b/lib/debugobjects.c
80721index 37061ed..da83f48 100644
80722--- a/lib/debugobjects.c
80723+++ b/lib/debugobjects.c
80724@@ -286,7 +286,7 @@ static void debug_object_is_on_stack(void *addr, int onstack)
80725 if (limit > 4)
80726 return;
80727
80728- is_on_stack = object_is_on_stack(addr);
80729+ is_on_stack = object_starts_on_stack(addr);
80730 if (is_on_stack == onstack)
80731 return;
80732
80733diff --git a/lib/devres.c b/lib/devres.c
80734index 8235331..5881053 100644
80735--- a/lib/devres.c
80736+++ b/lib/devres.c
80737@@ -81,7 +81,7 @@ EXPORT_SYMBOL(devm_ioremap_nocache);
80738 void devm_iounmap(struct device *dev, void __iomem *addr)
80739 {
80740 WARN_ON(devres_destroy(dev, devm_ioremap_release, devm_ioremap_match,
80741- (void *)addr));
80742+ (void __force *)addr));
80743 iounmap(addr);
80744 }
80745 EXPORT_SYMBOL(devm_iounmap);
80746@@ -224,7 +224,7 @@ void devm_ioport_unmap(struct device *dev, void __iomem *addr)
80747 {
80748 ioport_unmap(addr);
80749 WARN_ON(devres_destroy(dev, devm_ioport_map_release,
80750- devm_ioport_map_match, (void *)addr));
80751+ devm_ioport_map_match, (void __force *)addr));
80752 }
80753 EXPORT_SYMBOL(devm_ioport_unmap);
80754 #endif /* CONFIG_HAS_IOPORT */
80755diff --git a/lib/div64.c b/lib/div64.c
80756index a163b6c..9618fa5 100644
80757--- a/lib/div64.c
80758+++ b/lib/div64.c
80759@@ -59,7 +59,7 @@ uint32_t __attribute__((weak)) __div64_32(uint64_t *n, uint32_t base)
80760 EXPORT_SYMBOL(__div64_32);
80761
80762 #ifndef div_s64_rem
80763-s64 div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
80764+s64 __intentional_overflow(-1) div_s64_rem(s64 dividend, s32 divisor, s32 *remainder)
80765 {
80766 u64 quotient;
80767
80768@@ -90,7 +90,7 @@ EXPORT_SYMBOL(div_s64_rem);
80769 * 'http://www.hackersdelight.org/HDcode/newCode/divDouble.c.txt'
80770 */
80771 #ifndef div64_u64
80772-u64 div64_u64(u64 dividend, u64 divisor)
80773+u64 __intentional_overflow(-1) div64_u64(u64 dividend, u64 divisor)
80774 {
80775 u32 high = divisor >> 32;
80776 u64 quot;
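
__intentional_overflow(-1) on the division helpers is an annotation for the size_overflow plugin, not a behavioral change: 64-bit division legitimately truncates in ways the instrumentation would otherwise flag. The C semantics being annotated, in a userspace stand-in for div_s64_rem():

#include <stdint.h>
#include <stdio.h>

static int64_t div_s64_rem_demo(int64_t dividend, int32_t divisor,
				int32_t *remainder)
{
	*remainder = (int32_t)(dividend % divisor);
	return dividend / divisor;
}

int main(void)
{
	int32_t rem;
	int64_t quot = div_s64_rem_demo(-7, 2, &rem);

	/* C99 truncates toward zero: -7 / 2 == -3, remainder -1 */
	printf("quot=%lld rem=%d\n", (long long)quot, rem);
	return 0;
}
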
80777diff --git a/lib/dma-debug.c b/lib/dma-debug.c
80778index d87a17a..ac0d79a 100644
80779--- a/lib/dma-debug.c
80780+++ b/lib/dma-debug.c
80781@@ -768,7 +768,7 @@ static int dma_debug_device_change(struct notifier_block *nb, unsigned long acti
80782
80783 void dma_debug_add_bus(struct bus_type *bus)
80784 {
80785- struct notifier_block *nb;
80786+ notifier_block_no_const *nb;
80787
80788 if (global_disable)
80789 return;
80790@@ -945,7 +945,7 @@ static void check_unmap(struct dma_debug_entry *ref)
80791
80792 static void check_for_stack(struct device *dev, void *addr)
80793 {
80794- if (object_is_on_stack(addr))
80795+ if (object_starts_on_stack(addr))
80796 err_printk(dev, NULL, "DMA-API: device driver maps memory from"
80797 "stack [addr=%p]\n", addr);
80798 }
80799diff --git a/lib/inflate.c b/lib/inflate.c
80800index 013a761..c28f3fc 100644
80801--- a/lib/inflate.c
80802+++ b/lib/inflate.c
80803@@ -269,7 +269,7 @@ static void free(void *where)
80804 malloc_ptr = free_mem_ptr;
80805 }
80806 #else
80807-#define malloc(a) kmalloc(a, GFP_KERNEL)
80808+#define malloc(a) kmalloc((a), GFP_KERNEL)
80809 #define free(a) kfree(a)
80810 #endif
80811
80812diff --git a/lib/ioremap.c b/lib/ioremap.c
80813index 0c9216c..863bd89 100644
80814--- a/lib/ioremap.c
80815+++ b/lib/ioremap.c
80816@@ -38,7 +38,7 @@ static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
80817 unsigned long next;
80818
80819 phys_addr -= addr;
80820- pmd = pmd_alloc(&init_mm, pud, addr);
80821+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
80822 if (!pmd)
80823 return -ENOMEM;
80824 do {
80825@@ -56,7 +56,7 @@ static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
80826 unsigned long next;
80827
80828 phys_addr -= addr;
80829- pud = pud_alloc(&init_mm, pgd, addr);
80830+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
80831 if (!pud)
80832 return -ENOMEM;
80833 do {
80834diff --git a/lib/is_single_threaded.c b/lib/is_single_threaded.c
80835index bd2bea9..6b3c95e 100644
80836--- a/lib/is_single_threaded.c
80837+++ b/lib/is_single_threaded.c
80838@@ -22,6 +22,9 @@ bool current_is_single_threaded(void)
80839 struct task_struct *p, *t;
80840 bool ret;
80841
80842+ if (!mm)
80843+ return true;
80844+
80845 if (atomic_read(&task->signal->live) != 1)
80846 return false;
80847
80848diff --git a/lib/kobject.c b/lib/kobject.c
80849index b7e29a6..2f3ca75 100644
80850--- a/lib/kobject.c
80851+++ b/lib/kobject.c
80852@@ -805,7 +805,7 @@ static struct kset *kset_create(const char *name,
80853 kset = kzalloc(sizeof(*kset), GFP_KERNEL);
80854 if (!kset)
80855 return NULL;
80856- retval = kobject_set_name(&kset->kobj, name);
80857+ retval = kobject_set_name(&kset->kobj, "%s", name);
80858 if (retval) {
80859 kfree(kset);
80860 return NULL;
80861@@ -859,9 +859,9 @@ EXPORT_SYMBOL_GPL(kset_create_and_add);
80862
80863
80864 static DEFINE_SPINLOCK(kobj_ns_type_lock);
80865-static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES];
80866+static const struct kobj_ns_type_operations *kobj_ns_ops_tbl[KOBJ_NS_TYPES] __read_only;
80867
80868-int kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
80869+int __init kobj_ns_type_register(const struct kobj_ns_type_operations *ops)
80870 {
80871 enum kobj_ns_type type = ops->type;
80872 int error;
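The kobject.c hunk fixes a format-string hazard: a kset name can contain '%', so the name is now passed as an argument to a fixed "%s" format instead of being used as the format itself. A userspace sketch contrasting the safe and unsafe shapes:

```c
#include <stdio.h>

/* Treat the name strictly as data. */
static int set_name_safe(const char *name)
{
	return printf("%s\n", name);
}

/* Anti-pattern shown for contrast: a name containing %s or %n would be
 * interpreted as a format, reading (or writing) through bogus varargs. */
static int set_name_unsafe(const char *name)
{
	return printf(name);
}

int main(void)
{
	const char *hostile = "kset-%s-%s";

	set_name_safe(hostile);	/* prints the literal string */
	(void)set_name_unsafe;	/* never call this with untrusted input */
	return 0;
}
```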
80873diff --git a/lib/list_debug.c b/lib/list_debug.c
80874index c24c2f7..06e070b 100644
80875--- a/lib/list_debug.c
80876+++ b/lib/list_debug.c
80877@@ -11,7 +11,9 @@
80878 #include <linux/bug.h>
80879 #include <linux/kernel.h>
80880 #include <linux/rculist.h>
80881+#include <linux/mm.h>
80882
80883+#ifdef CONFIG_DEBUG_LIST
80884 /*
80885 * Insert a new entry between two known consecutive entries.
80886 *
80887@@ -19,21 +21,32 @@
80888 * the prev/next entries already!
80889 */
80890
80891-void __list_add(struct list_head *new,
80892- struct list_head *prev,
80893- struct list_head *next)
80894+static bool __list_add_debug(struct list_head *new,
80895+ struct list_head *prev,
80896+ struct list_head *next)
80897 {
80898- WARN(next->prev != prev,
80899+ if (WARN(next->prev != prev,
80900 "list_add corruption. next->prev should be "
80901 "prev (%p), but was %p. (next=%p).\n",
80902- prev, next->prev, next);
80903- WARN(prev->next != next,
80904+ prev, next->prev, next) ||
80905+ WARN(prev->next != next,
80906 "list_add corruption. prev->next should be "
80907 "next (%p), but was %p. (prev=%p).\n",
80908- next, prev->next, prev);
80909- WARN(new == prev || new == next,
80910- "list_add double add: new=%p, prev=%p, next=%p.\n",
80911- new, prev, next);
80912+ next, prev->next, prev) ||
80913+ WARN(new == prev || new == next,
80914+ "list_add double add: new=%p, prev=%p, next=%p.\n",
80915+ new, prev, next))
80916+ return false;
80917+ return true;
80918+}
80919+
80920+void __list_add(struct list_head *new,
80921+ struct list_head *prev,
80922+ struct list_head *next)
80923+{
80924+ if (!__list_add_debug(new, prev, next))
80925+ return;
80926+
80927 next->prev = new;
80928 new->next = next;
80929 new->prev = prev;
80930@@ -41,7 +54,7 @@ void __list_add(struct list_head *new,
80931 }
80932 EXPORT_SYMBOL(__list_add);
80933
80934-void __list_del_entry(struct list_head *entry)
80935+static bool __list_del_entry_debug(struct list_head *entry)
80936 {
80937 struct list_head *prev, *next;
80938
80939@@ -60,9 +73,16 @@ void __list_del_entry(struct list_head *entry)
80940 WARN(next->prev != entry,
80941 "list_del corruption. next->prev should be %p, "
80942 "but was %p\n", entry, next->prev))
80943+ return false;
80944+ return true;
80945+}
80946+
80947+void __list_del_entry(struct list_head *entry)
80948+{
80949+ if (!__list_del_entry_debug(entry))
80950 return;
80951
80952- __list_del(prev, next);
80953+ __list_del(entry->prev, entry->next);
80954 }
80955 EXPORT_SYMBOL(__list_del_entry);
80956
80957@@ -86,15 +106,85 @@ EXPORT_SYMBOL(list_del);
80958 void __list_add_rcu(struct list_head *new,
80959 struct list_head *prev, struct list_head *next)
80960 {
80961- WARN(next->prev != prev,
80962- "list_add_rcu corruption. next->prev should be prev (%p), but was %p. (next=%p).\n",
80963- prev, next->prev, next);
80964- WARN(prev->next != next,
80965- "list_add_rcu corruption. prev->next should be next (%p), but was %p. (prev=%p).\n",
80966- next, prev->next, prev);
80967+ if (!__list_add_debug(new, prev, next))
80968+ return;
80969+
80970 new->next = next;
80971 new->prev = prev;
80972 rcu_assign_pointer(list_next_rcu(prev), new);
80973 next->prev = new;
80974 }
80975 EXPORT_SYMBOL(__list_add_rcu);
80976+#endif
80977+
80978+void __pax_list_add(struct list_head *new, struct list_head *prev, struct list_head *next)
80979+{
80980+#ifdef CONFIG_DEBUG_LIST
80981+ if (!__list_add_debug(new, prev, next))
80982+ return;
80983+#endif
80984+
80985+ pax_open_kernel();
80986+ next->prev = new;
80987+ new->next = next;
80988+ new->prev = prev;
80989+ prev->next = new;
80990+ pax_close_kernel();
80991+}
80992+EXPORT_SYMBOL(__pax_list_add);
80993+
80994+void pax_list_del(struct list_head *entry)
80995+{
80996+#ifdef CONFIG_DEBUG_LIST
80997+ if (!__list_del_entry_debug(entry))
80998+ return;
80999+#endif
81000+
81001+ pax_open_kernel();
81002+ __list_del(entry->prev, entry->next);
81003+ entry->next = LIST_POISON1;
81004+ entry->prev = LIST_POISON2;
81005+ pax_close_kernel();
81006+}
81007+EXPORT_SYMBOL(pax_list_del);
81008+
81009+void pax_list_del_init(struct list_head *entry)
81010+{
81011+ pax_open_kernel();
81012+ __list_del(entry->prev, entry->next);
81013+ INIT_LIST_HEAD(entry);
81014+ pax_close_kernel();
81015+}
81016+EXPORT_SYMBOL(pax_list_del_init);
81017+
81018+void __pax_list_add_rcu(struct list_head *new,
81019+ struct list_head *prev, struct list_head *next)
81020+{
81021+#ifdef CONFIG_DEBUG_LIST
81022+ if (!__list_add_debug(new, prev, next))
81023+ return;
81024+#endif
81025+
81026+ pax_open_kernel();
81027+ new->next = next;
81028+ new->prev = prev;
81029+ rcu_assign_pointer(list_next_rcu(prev), new);
81030+ next->prev = new;
81031+ pax_close_kernel();
81032+}
81033+EXPORT_SYMBOL(__pax_list_add_rcu);
81034+
81035+void pax_list_del_rcu(struct list_head *entry)
81036+{
81037+#ifdef CONFIG_DEBUG_LIST
81038+ if (!__list_del_entry_debug(entry))
81039+ return;
81040+#endif
81041+
81042+ pax_open_kernel();
81043+ __list_del(entry->prev, entry->next);
81044+ entry->next = LIST_POISON1;
81045+ entry->prev = LIST_POISON2;
81046+ pax_close_kernel();
81047+}
81048+EXPORT_SYMBOL(pax_list_del_rcu);
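The list_debug.c rewrite hoists the corruption WARNs into bool-returning validators (__list_add_debug(), __list_del_entry_debug()) so that every linking path, including the new pax_list_* variants that bracket their writes with pax_open_kernel()/pax_close_kernel(), can refuse to touch a corrupted list instead of proceeding after the warning. A simplified userspace sketch of the validate-then-link pattern (the PaX write-window calls are omitted):

```c
#include <stdbool.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

/* Validator: complains and returns false rather than letting the caller
 * link into a corrupted list. */
static bool __list_add_debug(struct list_head *new,
			     struct list_head *prev,
			     struct list_head *next)
{
	if (next->prev != prev || prev->next != next ||
	    new == prev || new == next) {
		fprintf(stderr, "list_add corruption, refusing to link\n");
		return false;
	}
	return true;
}

static void __list_add(struct list_head *new,
		       struct list_head *prev,
		       struct list_head *next)
{
	if (!__list_add_debug(new, prev, next))
		return;			/* list left untouched */
	next->prev = new;
	new->next = next;
	new->prev = prev;
	prev->next = new;
}

int main(void)
{
	struct list_head head = { &head, &head };
	struct list_head a;

	__list_add(&a, &head, head.next);
	printf("linked: %d\n", head.next == &a && a.next == &head);
	return 0;
}
```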
81049diff --git a/lib/radix-tree.c b/lib/radix-tree.c
81050index e796429..6e38f9f 100644
81051--- a/lib/radix-tree.c
81052+++ b/lib/radix-tree.c
81053@@ -92,7 +92,7 @@ struct radix_tree_preload {
81054 int nr;
81055 struct radix_tree_node *nodes[RADIX_TREE_PRELOAD_SIZE];
81056 };
81057-static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads) = { 0, };
81058+static DEFINE_PER_CPU(struct radix_tree_preload, radix_tree_preloads);
81059
81060 static inline void *ptr_to_indirect(void *ptr)
81061 {
81062diff --git a/lib/strncpy_from_user.c b/lib/strncpy_from_user.c
81063index bb2b201..46abaf9 100644
81064--- a/lib/strncpy_from_user.c
81065+++ b/lib/strncpy_from_user.c
81066@@ -21,7 +21,7 @@
81067 */
81068 static inline long do_strncpy_from_user(char *dst, const char __user *src, long count, unsigned long max)
81069 {
81070- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
81071+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
81072 long res = 0;
81073
81074 /*
81075diff --git a/lib/strnlen_user.c b/lib/strnlen_user.c
81076index a28df52..3d55877 100644
81077--- a/lib/strnlen_user.c
81078+++ b/lib/strnlen_user.c
81079@@ -26,7 +26,7 @@
81080 */
81081 static inline long do_strnlen_user(const char __user *src, unsigned long count, unsigned long max)
81082 {
81083- const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
81084+ static const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
81085 long align, res = 0;
81086 unsigned long c;
81087
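Both strncpy_from_user.c and strnlen_user.c get the same one-word change: making the word_at_a_time constants static const gives them a single copy in .rodata instead of rebuilding the struct on the stack on every call of these hot paths. A sketch with a hypothetical stand-in struct (assumes 64-bit unsigned long):

```c
#include <stdio.h>

/* Hypothetical stand-in for the kernel's struct word_at_a_time. */
struct word_at_a_time { unsigned long one_bits, high_bits; };

#define WORD_AT_A_TIME_CONSTANTS { 0x0101010101010101UL, 0x8080808080808080UL }

static int has_zero_byte(unsigned long word)
{
	/* static const: one copy in .rodata, no per-call stack rebuild. */
	static const struct word_at_a_time c = WORD_AT_A_TIME_CONSTANTS;

	return ((word - c.one_bits) & ~word & c.high_bits) != 0;
}

int main(void)
{
	printf("zero byte: %d\n", has_zero_byte(0x4142004344454647UL));	/* 1 */
	printf("zero byte: %d\n", has_zero_byte(0x4142434445464748UL));	/* 0 */
	return 0;
}
```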
81088diff --git a/lib/swiotlb.c b/lib/swiotlb.c
81089index d23762e..e21eab2 100644
81090--- a/lib/swiotlb.c
81091+++ b/lib/swiotlb.c
81092@@ -664,7 +664,7 @@ EXPORT_SYMBOL(swiotlb_alloc_coherent);
81093
81094 void
81095 swiotlb_free_coherent(struct device *hwdev, size_t size, void *vaddr,
81096- dma_addr_t dev_addr)
81097+ dma_addr_t dev_addr, struct dma_attrs *attrs)
81098 {
81099 phys_addr_t paddr = dma_to_phys(hwdev, dev_addr);
81100
81101diff --git a/lib/usercopy.c b/lib/usercopy.c
81102index 4f5b1dd..7cab418 100644
81103--- a/lib/usercopy.c
81104+++ b/lib/usercopy.c
81105@@ -7,3 +7,9 @@ void copy_from_user_overflow(void)
81106 WARN(1, "Buffer overflow detected!\n");
81107 }
81108 EXPORT_SYMBOL(copy_from_user_overflow);
81109+
81110+void copy_to_user_overflow(void)
81111+{
81112+ WARN(1, "Buffer overflow detected!\n");
81113+}
81114+EXPORT_SYMBOL(copy_to_user_overflow);
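The usercopy.c addition supplies the write-side twin of copy_from_user_overflow(): when the compile-time size check in the copy_to_user wrappers detects an over-sized copy, the call is routed to this noisy handler instead. A userspace approximation of the trap pattern, with a runtime comparison standing in for the compile-time one:

```c
#include <stdio.h>
#include <string.h>

static void copy_to_user_overflow(void)
{
	fprintf(stderr, "Buffer overflow detected!\n");
}

/* In-kernel, the size comparison happens at compile time when both sizes
 * are constants; a runtime check stands in for it here. */
static void checked_copy_to_user(void *dst, size_t dst_size,
				 const void *src, size_t n)
{
	if (n > dst_size) {
		copy_to_user_overflow();
		return;		/* never perform the oversized copy */
	}
	memcpy(dst, src, n);
}

int main(void)
{
	char user_buf[8];

	checked_copy_to_user(user_buf, sizeof(user_buf), "0123456789abcdef", 17);
	return 0;
}
```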
81115diff --git a/lib/vsprintf.c b/lib/vsprintf.c
81116index e149c64..24aa71a 100644
81117--- a/lib/vsprintf.c
81118+++ b/lib/vsprintf.c
81119@@ -16,6 +16,9 @@
81120 * - scnprintf and vscnprintf
81121 */
81122
81123+#ifdef CONFIG_GRKERNSEC_HIDESYM
81124+#define __INCLUDED_BY_HIDESYM 1
81125+#endif
81126 #include <stdarg.h>
81127 #include <linux/module.h> /* for KSYM_SYMBOL_LEN */
81128 #include <linux/types.h>
81129@@ -981,7 +984,11 @@ char *netdev_feature_string(char *buf, char *end, const u8 *addr,
81130 return number(buf, end, *(const netdev_features_t *)addr, spec);
81131 }
81132
81133+#ifdef CONFIG_GRKERNSEC_HIDESYM
81134+int kptr_restrict __read_mostly = 2;
81135+#else
81136 int kptr_restrict __read_mostly;
81137+#endif
81138
81139 /*
81140 * Show a '%p' thing. A kernel extension is that the '%p' is followed
81141@@ -994,6 +1001,7 @@ int kptr_restrict __read_mostly;
81142 * - 'f' For simple symbolic function names without offset
81143 * - 'S' For symbolic direct pointers with offset
81144 * - 's' For symbolic direct pointers without offset
81145+ * - 'A' For symbolic direct pointers with offset approved for use with GRKERNSEC_HIDESYM
81146 * - '[FfSs]R' as above with __builtin_extract_return_addr() translation
81147 * - 'B' For backtraced symbolic direct pointers with offset
81148 * - 'R' For decoded struct resource, e.g., [mem 0x0-0x1f 64bit pref]
81149@@ -1052,12 +1060,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
81150
81151 if (!ptr && *fmt != 'K') {
81152 /*
81153- * Print (null) with the same width as a pointer so it makes
81154+ * Print (nil) with the same width as a pointer so it makes
81155 * tabular output look nice.
81156 */
81157 if (spec.field_width == -1)
81158 spec.field_width = default_width;
81159- return string(buf, end, "(null)", spec);
81160+ return string(buf, end, "(nil)", spec);
81161 }
81162
81163 switch (*fmt) {
81164@@ -1067,6 +1075,12 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
81165 /* Fallthrough */
81166 case 'S':
81167 case 's':
81168+#ifdef CONFIG_GRKERNSEC_HIDESYM
81169+ break;
81170+#else
81171+ return symbol_string(buf, end, ptr, spec, fmt);
81172+#endif
81173+ case 'A':
81174 case 'B':
81175 return symbol_string(buf, end, ptr, spec, fmt);
81176 case 'R':
81177@@ -1107,6 +1121,8 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
81178 va_end(va);
81179 return buf;
81180 }
81181+ case 'P':
81182+ break;
81183 case 'K':
81184 /*
81185 * %pK cannot be used in IRQ context because its test
81186@@ -1136,6 +1152,21 @@ char *pointer(const char *fmt, char *buf, char *end, void *ptr,
81187 return number(buf, end,
81188 (unsigned long long) *((phys_addr_t *)ptr), spec);
81189 }
81190+
81191+#ifdef CONFIG_GRKERNSEC_HIDESYM
81192+	/* 'P' = approved pointers to copy to userland,
81193+	   as in the /proc/kallsyms case, where we make it display nothing
81194+	   for non-root users and the real contents for root users.
81195+	   Also ignore 'K' pointers, since we force their NULLing for
81196+	   non-root users above.
81197+	 */
81198+ if ((unsigned long)ptr > TASK_SIZE && *fmt != 'P' && *fmt != 'K' && is_usercopy_object(buf)) {
81199+ printk(KERN_ALERT "grsec: kernel infoleak detected! Please report this log to spender@grsecurity.net.\n");
81200+ dump_stack();
81201+ ptr = NULL;
81202+ }
81203+#endif
81204+
81205 spec.flags |= SMALL;
81206 if (spec.field_width == -1) {
81207 spec.field_width = default_width;
81208@@ -1857,11 +1888,11 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
81209 typeof(type) value; \
81210 if (sizeof(type) == 8) { \
81211 args = PTR_ALIGN(args, sizeof(u32)); \
81212- *(u32 *)&value = *(u32 *)args; \
81213- *((u32 *)&value + 1) = *(u32 *)(args + 4); \
81214+ *(u32 *)&value = *(const u32 *)args; \
81215+ *((u32 *)&value + 1) = *(const u32 *)(args + 4); \
81216 } else { \
81217 args = PTR_ALIGN(args, sizeof(type)); \
81218- value = *(typeof(type) *)args; \
81219+ value = *(const typeof(type) *)args; \
81220 } \
81221 args += sizeof(type); \
81222 value; \
81223@@ -1924,7 +1955,7 @@ int bstr_printf(char *buf, size_t size, const char *fmt, const u32 *bin_buf)
81224 case FORMAT_TYPE_STR: {
81225 const char *str_arg = args;
81226 args += strlen(str_arg) + 1;
81227- str = string(str, end, (char *)str_arg, spec);
81228+ str = string(str, end, str_arg, spec);
81229 break;
81230 }
81231
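The vsprintf.c hunks implement pointer scrubbing under GRKERNSEC_HIDESYM: kptr_restrict defaults to 2, and any %p value above TASK_SIZE that is not printed via an approved specifier ('P', or 'K' which is already NULLed for non-root) is replaced with NULL and logged before it can leak a kernel address. A userspace sketch of the scrubbing decision, with kptr_restrict reduced to a plain global flag:

```c
#include <stdio.h>

static int kptr_restrict = 2;	/* the patch defaults this to 2 under HIDESYM */

static void print_pointer(const char *what, const void *ptr, int approved)
{
	if (kptr_restrict && !approved)
		ptr = NULL;	/* scrub before anything reaches the buffer */
	printf("%s: %p\n", what, ptr);
}

int main(void)
{
	int object;

	print_pointer("plain pointer", &object, 0);	/* scrubbed */
	print_pointer("approved pointer", &object, 1);	/* real value */
	return 0;
}
```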
81232diff --git a/localversion-grsec b/localversion-grsec
81233new file mode 100644
81234index 0000000..7cd6065
81235--- /dev/null
81236+++ b/localversion-grsec
81237@@ -0,0 +1 @@
81238+-grsec
81239diff --git a/mm/Kconfig b/mm/Kconfig
81240index e742d06..c56fdd8 100644
81241--- a/mm/Kconfig
81242+++ b/mm/Kconfig
81243@@ -317,10 +317,10 @@ config KSM
81244 root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
81245
81246 config DEFAULT_MMAP_MIN_ADDR
81247- int "Low address space to protect from user allocation"
81248+ int "Low address space to protect from user allocation"
81249 depends on MMU
81250- default 4096
81251- help
81252+ default 65536
81253+ help
81254 This is the portion of low virtual memory which should be protected
81255 from userspace allocation. Keeping a user from writing to low pages
81256 can help reduce the impact of kernel NULL pointer bugs.
81257@@ -351,7 +351,7 @@ config MEMORY_FAILURE
81258
81259 config HWPOISON_INJECT
81260 tristate "HWPoison pages injector"
81261- depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS
81262+ depends on MEMORY_FAILURE && DEBUG_KERNEL && PROC_FS && !GRKERNSEC
81263 select PROC_PAGE_MONITOR
81264
81265 config NOMMU_INITIAL_TRIM_EXCESS
81266diff --git a/mm/backing-dev.c b/mm/backing-dev.c
81267index 5025174..9fc1c5c 100644
81268--- a/mm/backing-dev.c
81269+++ b/mm/backing-dev.c
81270@@ -515,7 +515,6 @@ EXPORT_SYMBOL(bdi_destroy);
81271 int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
81272 unsigned int cap)
81273 {
81274- char tmp[32];
81275 int err;
81276
81277 bdi->name = name;
81278@@ -524,8 +523,7 @@ int bdi_setup_and_register(struct backing_dev_info *bdi, char *name,
81279 if (err)
81280 return err;
81281
81282- sprintf(tmp, "%.28s%s", name, "-%d");
81283- err = bdi_register(bdi, NULL, tmp, atomic_long_inc_return(&bdi_seq));
81284+ err = bdi_register(bdi, NULL, "%.28s-%ld", name, atomic_long_inc_return(&bdi_seq));
81285 if (err) {
81286 bdi_destroy(bdi);
81287 return err;
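The backing-dev.c change removes an intermediate sprintf() into a 32-byte stack buffer and instead forwards "%.28s-%ld" (note the switch from the embedded "-%d" to a proper long conversion for the atomic sequence counter) straight to bdi_register(). A sketch of the forwarding shape using vsnprintf():

```c
#include <stdarg.h>
#include <stdio.h>

/* Stand-in for bdi_register(): formats the name itself, bounded. */
static int register_name(const char *fmt, ...)
{
	char name[64];
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(name, sizeof(name), fmt, ap);	/* single, bounded formatting step */
	va_end(ap);
	return printf("registered %s\n", name) < 0 ? -1 : 0;
}

int main(void)
{
	long seq = 7;	/* plays the role of atomic_long_inc_return(&bdi_seq) */

	/* No "%.28s%s" pre-assembly into a tmp[32] on the caller side. */
	return register_name("%.28s-%ld", "mydevice", seq);
}
```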
81288diff --git a/mm/filemap.c b/mm/filemap.c
81289index 7905fe7..e60faa8 100644
81290--- a/mm/filemap.c
81291+++ b/mm/filemap.c
81292@@ -1766,7 +1766,7 @@ int generic_file_mmap(struct file * file, struct vm_area_struct * vma)
81293 struct address_space *mapping = file->f_mapping;
81294
81295 if (!mapping->a_ops->readpage)
81296- return -ENOEXEC;
81297+ return -ENODEV;
81298 file_accessed(file);
81299 vma->vm_ops = &generic_file_vm_ops;
81300 return 0;
81301@@ -2106,6 +2106,7 @@ inline int generic_write_checks(struct file *file, loff_t *pos, size_t *count, i
81302 *pos = i_size_read(inode);
81303
81304 if (limit != RLIM_INFINITY) {
81305+	gr_learn_resource(current, RLIMIT_FSIZE, *pos, 0);
81306 if (*pos >= limit) {
81307 send_sig(SIGXFSZ, current, 0);
81308 return -EFBIG;
81309diff --git a/mm/fremap.c b/mm/fremap.c
81310index 87da359..3f41cb1 100644
81311--- a/mm/fremap.c
81312+++ b/mm/fremap.c
81313@@ -158,6 +158,11 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
81314 retry:
81315 vma = find_vma(mm, start);
81316
81317+#ifdef CONFIG_PAX_SEGMEXEC
81318+ if (vma && (mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_MAYEXEC))
81319+ goto out;
81320+#endif
81321+
81322 /*
81323 * Make sure the vma is shared, that it supports prefaulting,
81324 * and that the remapped range is valid and fully within
81325diff --git a/mm/highmem.c b/mm/highmem.c
81326index b32b70c..e512eb0 100644
81327--- a/mm/highmem.c
81328+++ b/mm/highmem.c
81329@@ -138,8 +138,9 @@ static void flush_all_zero_pkmaps(void)
81330 * So no dangers, even with speculative execution.
81331 */
81332 page = pte_page(pkmap_page_table[i]);
81333+ pax_open_kernel();
81334 pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
81335-
81336+ pax_close_kernel();
81337 set_page_address(page, NULL);
81338 need_flush = 1;
81339 }
81340@@ -198,9 +199,11 @@ start:
81341 }
81342 }
81343 vaddr = PKMAP_ADDR(last_pkmap_nr);
81344+
81345+ pax_open_kernel();
81346 set_pte_at(&init_mm, vaddr,
81347 &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));
81348-
81349+ pax_close_kernel();
81350 pkmap_count[last_pkmap_nr] = 1;
81351 set_page_address(page, (void *)vaddr);
81352
81353diff --git a/mm/hugetlb.c b/mm/hugetlb.c
81354index 5cf99bf..28634c8 100644
81355--- a/mm/hugetlb.c
81356+++ b/mm/hugetlb.c
81357@@ -2022,15 +2022,17 @@ static int hugetlb_sysctl_handler_common(bool obey_mempolicy,
81358 struct hstate *h = &default_hstate;
81359 unsigned long tmp;
81360 int ret;
81361+ ctl_table_no_const hugetlb_table;
81362
81363 tmp = h->max_huge_pages;
81364
81365 if (write && h->order >= MAX_ORDER)
81366 return -EINVAL;
81367
81368- table->data = &tmp;
81369- table->maxlen = sizeof(unsigned long);
81370- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
81371+ hugetlb_table = *table;
81372+ hugetlb_table.data = &tmp;
81373+ hugetlb_table.maxlen = sizeof(unsigned long);
81374+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
81375 if (ret)
81376 goto out;
81377
81378@@ -2087,15 +2089,17 @@ int hugetlb_overcommit_handler(struct ctl_table *table, int write,
81379 struct hstate *h = &default_hstate;
81380 unsigned long tmp;
81381 int ret;
81382+ ctl_table_no_const hugetlb_table;
81383
81384 tmp = h->nr_overcommit_huge_pages;
81385
81386 if (write && h->order >= MAX_ORDER)
81387 return -EINVAL;
81388
81389- table->data = &tmp;
81390- table->maxlen = sizeof(unsigned long);
81391- ret = proc_doulongvec_minmax(table, write, buffer, length, ppos);
81392+ hugetlb_table = *table;
81393+ hugetlb_table.data = &tmp;
81394+ hugetlb_table.maxlen = sizeof(unsigned long);
81395+ ret = proc_doulongvec_minmax(&hugetlb_table, write, buffer, length, ppos);
81396 if (ret)
81397 goto out;
81398
81399@@ -2545,6 +2549,27 @@ static int unmap_ref_private(struct mm_struct *mm, struct vm_area_struct *vma,
81400 return 1;
81401 }
81402
81403+#ifdef CONFIG_PAX_SEGMEXEC
81404+static void pax_mirror_huge_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m)
81405+{
81406+ struct mm_struct *mm = vma->vm_mm;
81407+ struct vm_area_struct *vma_m;
81408+ unsigned long address_m;
81409+ pte_t *ptep_m;
81410+
81411+ vma_m = pax_find_mirror_vma(vma);
81412+ if (!vma_m)
81413+ return;
81414+
81415+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
81416+ address_m = address + SEGMEXEC_TASK_SIZE;
81417+ ptep_m = huge_pte_offset(mm, address_m & HPAGE_MASK);
81418+ get_page(page_m);
81419+ hugepage_add_anon_rmap(page_m, vma_m, address_m);
81420+ set_huge_pte_at(mm, address_m, ptep_m, make_huge_pte(vma_m, page_m, 0));
81421+}
81422+#endif
81423+
81424 /*
81425 * Hugetlb_cow() should be called with page lock of the original hugepage held.
81426 * Called with hugetlb_instantiation_mutex held and pte_page locked so we
81427@@ -2663,6 +2688,11 @@ retry_avoidcopy:
81428 make_huge_pte(vma, new_page, 1));
81429 page_remove_rmap(old_page);
81430 hugepage_add_new_anon_rmap(new_page, vma, address);
81431+
81432+#ifdef CONFIG_PAX_SEGMEXEC
81433+ pax_mirror_huge_pte(vma, address, new_page);
81434+#endif
81435+
81436 /* Make the old page be freed below */
81437 new_page = old_page;
81438 }
81439@@ -2821,6 +2851,10 @@ retry:
81440 && (vma->vm_flags & VM_SHARED)));
81441 set_huge_pte_at(mm, address, ptep, new_pte);
81442
81443+#ifdef CONFIG_PAX_SEGMEXEC
81444+ pax_mirror_huge_pte(vma, address, page);
81445+#endif
81446+
81447 if ((flags & FAULT_FLAG_WRITE) && !(vma->vm_flags & VM_SHARED)) {
81448 /* Optimization, do the COW without a second fault */
81449 ret = hugetlb_cow(mm, vma, address, ptep, new_pte, page);
81450@@ -2850,6 +2884,10 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
81451 static DEFINE_MUTEX(hugetlb_instantiation_mutex);
81452 struct hstate *h = hstate_vma(vma);
81453
81454+#ifdef CONFIG_PAX_SEGMEXEC
81455+ struct vm_area_struct *vma_m;
81456+#endif
81457+
81458 address &= huge_page_mask(h);
81459
81460 ptep = huge_pte_offset(mm, address);
81461@@ -2863,6 +2901,26 @@ int hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
81462 VM_FAULT_SET_HINDEX(hstate_index(h));
81463 }
81464
81465+#ifdef CONFIG_PAX_SEGMEXEC
81466+ vma_m = pax_find_mirror_vma(vma);
81467+ if (vma_m) {
81468+ unsigned long address_m;
81469+
81470+ if (vma->vm_start > vma_m->vm_start) {
81471+ address_m = address;
81472+ address -= SEGMEXEC_TASK_SIZE;
81473+ vma = vma_m;
81474+ h = hstate_vma(vma);
81475+ } else
81476+ address_m = address + SEGMEXEC_TASK_SIZE;
81477+
81478+ if (!huge_pte_alloc(mm, address_m, huge_page_size(h)))
81479+ return VM_FAULT_OOM;
81480+ address_m &= HPAGE_MASK;
81481+ unmap_hugepage_range(vma, address_m, address_m + HPAGE_SIZE, NULL);
81482+ }
81483+#endif
81484+
81485 ptep = huge_pte_alloc(mm, address, huge_page_size(h));
81486 if (!ptep)
81487 return VM_FAULT_OOM;
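The two hugetlb sysctl handlers above stop writing through the shared ctl_table and instead mutate a stack copy (ctl_table_no_const), which lets the global table itself be constified. A sketch of the copy-then-mutate idiom with a pared-down ctl_table:

```c
#include <stdio.h>

/* Pared-down ctl_table: just the two fields the handler rewrites. */
struct ctl_table { void *data; int maxlen; };

static int hugetlb_handler(const struct ctl_table *table, unsigned long *tmp)
{
	struct ctl_table hugetlb_table = *table;	/* mutable stack copy */

	hugetlb_table.data = tmp;
	hugetlb_table.maxlen = sizeof(*tmp);
	/* ...the copy, not the shared table, is handed on to the generic
	 * proc_doulongvec_minmax()-style parser... */
	return hugetlb_table.maxlen == sizeof(unsigned long) ? 0 : -1;
}

int main(void)
{
	static const struct ctl_table shared = { 0, 0 };	/* can live in .rodata */
	unsigned long tmp = 0;

	printf("handler: %d\n", hugetlb_handler(&shared, &tmp));
	return 0;
}
```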
81488diff --git a/mm/internal.h b/mm/internal.h
81489index 8562de0..7fdfe92 100644
81490--- a/mm/internal.h
81491+++ b/mm/internal.h
81492@@ -100,6 +100,7 @@ extern pmd_t *mm_find_pmd(struct mm_struct *mm, unsigned long address);
81493 * in mm/page_alloc.c
81494 */
81495 extern void __free_pages_bootmem(struct page *page, unsigned int order);
81496+extern void free_compound_page(struct page *page);
81497 extern void prep_compound_page(struct page *page, unsigned long order);
81498 #ifdef CONFIG_MEMORY_FAILURE
81499 extern bool is_free_buddy_page(struct page *page);
81500diff --git a/mm/kmemleak.c b/mm/kmemleak.c
81501index c8d7f31..2dbeffd 100644
81502--- a/mm/kmemleak.c
81503+++ b/mm/kmemleak.c
81504@@ -363,7 +363,7 @@ static void print_unreferenced(struct seq_file *seq,
81505
81506 for (i = 0; i < object->trace_len; i++) {
81507 void *ptr = (void *)object->trace[i];
81508- seq_printf(seq, " [<%p>] %pS\n", ptr, ptr);
81509+ seq_printf(seq, " [<%pP>] %pA\n", ptr, ptr);
81510 }
81511 }
81512
81513@@ -1851,7 +1851,7 @@ static int __init kmemleak_late_init(void)
81514 return -ENOMEM;
81515 }
81516
81517- dentry = debugfs_create_file("kmemleak", S_IRUGO, NULL, NULL,
81518+ dentry = debugfs_create_file("kmemleak", S_IRUSR, NULL, NULL,
81519 &kmemleak_fops);
81520 if (!dentry)
81521 pr_warning("Failed to create the debugfs kmemleak file\n");
81522diff --git a/mm/maccess.c b/mm/maccess.c
81523index d53adf9..03a24bf 100644
81524--- a/mm/maccess.c
81525+++ b/mm/maccess.c
81526@@ -26,7 +26,7 @@ long __probe_kernel_read(void *dst, const void *src, size_t size)
81527 set_fs(KERNEL_DS);
81528 pagefault_disable();
81529 ret = __copy_from_user_inatomic(dst,
81530- (__force const void __user *)src, size);
81531+ (const void __force_user *)src, size);
81532 pagefault_enable();
81533 set_fs(old_fs);
81534
81535@@ -53,7 +53,7 @@ long __probe_kernel_write(void *dst, const void *src, size_t size)
81536
81537 set_fs(KERNEL_DS);
81538 pagefault_disable();
81539- ret = __copy_to_user_inatomic((__force void __user *)dst, src, size);
81540+ ret = __copy_to_user_inatomic((void __force_user *)dst, src, size);
81541 pagefault_enable();
81542 set_fs(old_fs);
81543
81544diff --git a/mm/madvise.c b/mm/madvise.c
81545index 7055883..aafb1ed 100644
81546--- a/mm/madvise.c
81547+++ b/mm/madvise.c
81548@@ -51,6 +51,10 @@ static long madvise_behavior(struct vm_area_struct * vma,
81549 pgoff_t pgoff;
81550 unsigned long new_flags = vma->vm_flags;
81551
81552+#ifdef CONFIG_PAX_SEGMEXEC
81553+ struct vm_area_struct *vma_m;
81554+#endif
81555+
81556 switch (behavior) {
81557 case MADV_NORMAL:
81558 new_flags = new_flags & ~VM_RAND_READ & ~VM_SEQ_READ;
81559@@ -126,6 +130,13 @@ success:
81560 /*
81561 * vm_flags is protected by the mmap_sem held in write mode.
81562 */
81563+
81564+#ifdef CONFIG_PAX_SEGMEXEC
81565+ vma_m = pax_find_mirror_vma(vma);
81566+ if (vma_m)
81567+ vma_m->vm_flags = new_flags & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT);
81568+#endif
81569+
81570 vma->vm_flags = new_flags;
81571
81572 out:
81573@@ -274,6 +285,11 @@ static long madvise_dontneed(struct vm_area_struct * vma,
81574 struct vm_area_struct ** prev,
81575 unsigned long start, unsigned long end)
81576 {
81577+
81578+#ifdef CONFIG_PAX_SEGMEXEC
81579+ struct vm_area_struct *vma_m;
81580+#endif
81581+
81582 *prev = vma;
81583 if (vma->vm_flags & (VM_LOCKED|VM_HUGETLB|VM_PFNMAP))
81584 return -EINVAL;
81585@@ -286,6 +302,21 @@ static long madvise_dontneed(struct vm_area_struct * vma,
81586 zap_page_range(vma, start, end - start, &details);
81587 } else
81588 zap_page_range(vma, start, end - start, NULL);
81589+
81590+#ifdef CONFIG_PAX_SEGMEXEC
81591+ vma_m = pax_find_mirror_vma(vma);
81592+ if (vma_m) {
81593+ if (unlikely(vma->vm_flags & VM_NONLINEAR)) {
81594+ struct zap_details details = {
81595+ .nonlinear_vma = vma_m,
81596+ .last_index = ULONG_MAX,
81597+ };
81598+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, &details);
81599+ } else
81600+ zap_page_range(vma, start + SEGMEXEC_TASK_SIZE, end - start, NULL);
81601+ }
81602+#endif
81603+
81604 return 0;
81605 }
81606
81607@@ -485,6 +516,16 @@ SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
81608 if (end < start)
81609 return error;
81610
81611+#ifdef CONFIG_PAX_SEGMEXEC
81612+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
81613+ if (end > SEGMEXEC_TASK_SIZE)
81614+ return error;
81615+ } else
81616+#endif
81617+
81618+ if (end > TASK_SIZE)
81619+ return error;
81620+
81621 error = 0;
81622 if (end == start)
81623 return error;
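The madvise() entry check gains an upper bound: the requested range must now end at or below TASK_SIZE (or SEGMEXEC_TASK_SIZE when the task is mirrored), complementing the wrap-around check that was already there. A sketch of the clamp with illustrative limit values:

```c
#include <stdio.h>

#define TASK_SIZE          0x7ffffffff000UL	/* illustrative limits only */
#define SEGMEXEC_TASK_SIZE (TASK_SIZE / 2)

static int check_madvise_range(unsigned long start, unsigned long len,
			       int segmexec)
{
	unsigned long end = start + len;

	if (end < start)	/* arithmetic wrap, already rejected upstream */
		return -1;
	if (end > (segmexec ? SEGMEXEC_TASK_SIZE : TASK_SIZE))
		return -1;	/* new clamp: range reaches past userspace */
	return 0;
}

int main(void)
{
	printf("%d\n", check_madvise_range(0x1000, 0x1000, 0));	/* 0 */
	printf("%d\n", check_madvise_range(SEGMEXEC_TASK_SIZE, 16, 1));	/* -1 */
	return 0;
}
```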
81624diff --git a/mm/memory-failure.c b/mm/memory-failure.c
81625index ceb0c7f..b2b8e94 100644
81626--- a/mm/memory-failure.c
81627+++ b/mm/memory-failure.c
81628@@ -61,7 +61,7 @@ int sysctl_memory_failure_early_kill __read_mostly = 0;
81629
81630 int sysctl_memory_failure_recovery __read_mostly = 1;
81631
81632-atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
81633+atomic_long_unchecked_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
81634
81635 #if defined(CONFIG_HWPOISON_INJECT) || defined(CONFIG_HWPOISON_INJECT_MODULE)
81636
81637@@ -202,7 +202,7 @@ static int kill_proc(struct task_struct *t, unsigned long addr, int trapno,
81638 pfn, t->comm, t->pid);
81639 si.si_signo = SIGBUS;
81640 si.si_errno = 0;
81641- si.si_addr = (void *)addr;
81642+ si.si_addr = (void __user *)addr;
81643 #ifdef __ARCH_SI_TRAPNO
81644 si.si_trapno = trapno;
81645 #endif
81646@@ -760,7 +760,7 @@ static struct page_state {
81647 unsigned long res;
81648 char *msg;
81649 int (*action)(struct page *p, unsigned long pfn);
81650-} error_states[] = {
81651+} __do_const error_states[] = {
81652 { reserved, reserved, "reserved kernel", me_kernel },
81653 /*
81654 * free pages are specially detected outside this table:
81655@@ -1051,7 +1051,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
81656 nr_pages = 1 << compound_order(hpage);
81657 else /* normal page or thp */
81658 nr_pages = 1;
81659- atomic_long_add(nr_pages, &num_poisoned_pages);
81660+ atomic_long_add_unchecked(nr_pages, &num_poisoned_pages);
81661
81662 /*
81663 * We need/can do nothing about count=0 pages.
81664@@ -1081,7 +1081,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
81665 if (!PageHWPoison(hpage)
81666 || (hwpoison_filter(p) && TestClearPageHWPoison(p))
81667 || (p != hpage && TestSetPageHWPoison(hpage))) {
81668- atomic_long_sub(nr_pages, &num_poisoned_pages);
81669+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
81670 return 0;
81671 }
81672 set_page_hwpoison_huge_page(hpage);
81673@@ -1148,7 +1148,7 @@ int memory_failure(unsigned long pfn, int trapno, int flags)
81674 }
81675 if (hwpoison_filter(p)) {
81676 if (TestClearPageHWPoison(p))
81677- atomic_long_sub(nr_pages, &num_poisoned_pages);
81678+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
81679 unlock_page(hpage);
81680 put_page(hpage);
81681 return 0;
81682@@ -1350,7 +1350,7 @@ int unpoison_memory(unsigned long pfn)
81683 return 0;
81684 }
81685 if (TestClearPageHWPoison(p))
81686- atomic_long_sub(nr_pages, &num_poisoned_pages);
81687+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
81688 pr_info("MCE: Software-unpoisoned free page %#lx\n", pfn);
81689 return 0;
81690 }
81691@@ -1364,7 +1364,7 @@ int unpoison_memory(unsigned long pfn)
81692 */
81693 if (TestClearPageHWPoison(page)) {
81694 pr_info("MCE: Software-unpoisoned page %#lx\n", pfn);
81695- atomic_long_sub(nr_pages, &num_poisoned_pages);
81696+ atomic_long_sub_unchecked(nr_pages, &num_poisoned_pages);
81697 freeit = 1;
81698 if (PageHuge(page))
81699 clear_page_hwpoison_huge_page(page);
81700@@ -1491,7 +1491,7 @@ static int soft_offline_huge_page(struct page *page, int flags)
81701 } else {
81702 set_page_hwpoison_huge_page(hpage);
81703 dequeue_hwpoisoned_huge_page(hpage);
81704- atomic_long_add(1 << compound_trans_order(hpage),
81705+ atomic_long_add_unchecked(1 << compound_trans_order(hpage),
81706 &num_poisoned_pages);
81707 }
81708 /* keep elevated page count for bad page */
81709@@ -1552,11 +1552,11 @@ int soft_offline_page(struct page *page, int flags)
81710 if (PageHuge(page)) {
81711 set_page_hwpoison_huge_page(hpage);
81712 dequeue_hwpoisoned_huge_page(hpage);
81713- atomic_long_add(1 << compound_trans_order(hpage),
81714+ atomic_long_add_unchecked(1 << compound_trans_order(hpage),
81715 &num_poisoned_pages);
81716 } else {
81717 SetPageHWPoison(page);
81718- atomic_long_inc(&num_poisoned_pages);
81719+ atomic_long_inc_unchecked(&num_poisoned_pages);
81720 }
81721 }
81722 /* keep elevated page count for bad page */
81723@@ -1596,7 +1596,7 @@ static int __soft_offline_page(struct page *page, int flags)
81724 put_page(page);
81725 pr_info("soft_offline: %#lx: invalidated\n", pfn);
81726 SetPageHWPoison(page);
81727- atomic_long_inc(&num_poisoned_pages);
81728+ atomic_long_inc_unchecked(&num_poisoned_pages);
81729 return 0;
81730 }
81731
81732@@ -1626,7 +1626,7 @@ static int __soft_offline_page(struct page *page, int flags)
81733 ret = -EIO;
81734 } else {
81735 SetPageHWPoison(page);
81736- atomic_long_inc(&num_poisoned_pages);
81737+ atomic_long_inc_unchecked(&num_poisoned_pages);
81738 }
81739 } else {
81740 pr_info("soft offline: %#lx: isolation failed: %d, page count %d, type %lx\n",
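Throughout memory-failure.c, num_poisoned_pages becomes an atomic_long_unchecked_t: under PAX_REFCOUNT, ordinary atomics trap on overflow, which is right for reference counts but wrong for a pure statistics counter, so the counter opts out via the *_unchecked operations. A userspace sketch of the opt-out using GCC atomic builtins (the distinct type exists only to mark intent):

```c
#include <stdio.h>

/* The separate type purely marks "statistics, may wrap" so the
 * PAX_REFCOUNT overflow trap is not applied; userspace stand-in. */
typedef struct { long counter; } atomic_long_unchecked_t;

static void atomic_long_add_unchecked(long n, atomic_long_unchecked_t *a)
{
	__atomic_add_fetch(&a->counter, n, __ATOMIC_RELAXED);	/* may wrap, by design */
}

int main(void)
{
	atomic_long_unchecked_t num_poisoned_pages = { 0 };

	atomic_long_add_unchecked(4, &num_poisoned_pages);
	printf("poisoned pages: %ld\n",
	       __atomic_load_n(&num_poisoned_pages.counter, __ATOMIC_RELAXED));
	return 0;
}
```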
81741diff --git a/mm/memory.c b/mm/memory.c
81742index 61a262b..77a94d1 100644
81743--- a/mm/memory.c
81744+++ b/mm/memory.c
81745@@ -429,6 +429,7 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
81746 free_pte_range(tlb, pmd, addr);
81747 } while (pmd++, addr = next, addr != end);
81748
81749+#if !defined(CONFIG_X86_32) || !defined(CONFIG_PAX_PER_CPU_PGD)
81750 start &= PUD_MASK;
81751 if (start < floor)
81752 return;
81753@@ -443,6 +444,8 @@ static inline void free_pmd_range(struct mmu_gather *tlb, pud_t *pud,
81754 pmd = pmd_offset(pud, start);
81755 pud_clear(pud);
81756 pmd_free_tlb(tlb, pmd, start);
81757+#endif
81758+
81759 }
81760
81761 static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
81762@@ -462,6 +465,7 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
81763 free_pmd_range(tlb, pud, addr, next, floor, ceiling);
81764 } while (pud++, addr = next, addr != end);
81765
81766+#if !defined(CONFIG_X86_64) || !defined(CONFIG_PAX_PER_CPU_PGD)
81767 start &= PGDIR_MASK;
81768 if (start < floor)
81769 return;
81770@@ -476,6 +480,8 @@ static inline void free_pud_range(struct mmu_gather *tlb, pgd_t *pgd,
81771 pud = pud_offset(pgd, start);
81772 pgd_clear(pgd);
81773 pud_free_tlb(tlb, pud, start);
81774+#endif
81775+
81776 }
81777
81778 /*
81779@@ -1635,12 +1641,6 @@ no_page_table:
81780 return page;
81781 }
81782
81783-static inline int stack_guard_page(struct vm_area_struct *vma, unsigned long addr)
81784-{
81785- return stack_guard_page_start(vma, addr) ||
81786- stack_guard_page_end(vma, addr+PAGE_SIZE);
81787-}
81788-
81789 /**
81790 * __get_user_pages() - pin user pages in memory
81791 * @tsk: task_struct of target task
81792@@ -1727,10 +1727,10 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
81793
81794 i = 0;
81795
81796- do {
81797+ while (nr_pages) {
81798 struct vm_area_struct *vma;
81799
81800- vma = find_extend_vma(mm, start);
81801+ vma = find_vma(mm, start);
81802 if (!vma && in_gate_area(mm, start)) {
81803 unsigned long pg = start & PAGE_MASK;
81804 pgd_t *pgd;
81805@@ -1779,7 +1779,7 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
81806 goto next_page;
81807 }
81808
81809- if (!vma ||
81810+ if (!vma || start < vma->vm_start ||
81811 (vma->vm_flags & (VM_IO | VM_PFNMAP)) ||
81812 !(vm_flags & vma->vm_flags))
81813 return i ? : -EFAULT;
81814@@ -1808,11 +1808,6 @@ long __get_user_pages(struct task_struct *tsk, struct mm_struct *mm,
81815 int ret;
81816 unsigned int fault_flags = 0;
81817
81818- /* For mlock, just skip the stack guard page. */
81819- if (foll_flags & FOLL_MLOCK) {
81820- if (stack_guard_page(vma, start))
81821- goto next_page;
81822- }
81823 if (foll_flags & FOLL_WRITE)
81824 fault_flags |= FAULT_FLAG_WRITE;
81825 if (nonblocking)
81826@@ -1892,7 +1887,7 @@ next_page:
81827 start += page_increm * PAGE_SIZE;
81828 nr_pages -= page_increm;
81829 } while (nr_pages && start < vma->vm_end);
81830- } while (nr_pages);
81831+ }
81832 return i;
81833 }
81834 EXPORT_SYMBOL(__get_user_pages);
81835@@ -2099,6 +2094,10 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
81836 page_add_file_rmap(page);
81837 set_pte_at(mm, addr, pte, mk_pte(page, prot));
81838
81839+#ifdef CONFIG_PAX_SEGMEXEC
81840+ pax_mirror_file_pte(vma, addr, page, ptl);
81841+#endif
81842+
81843 retval = 0;
81844 pte_unmap_unlock(pte, ptl);
81845 return retval;
81846@@ -2143,9 +2142,21 @@ int vm_insert_page(struct vm_area_struct *vma, unsigned long addr,
81847 if (!page_count(page))
81848 return -EINVAL;
81849 if (!(vma->vm_flags & VM_MIXEDMAP)) {
81850+
81851+#ifdef CONFIG_PAX_SEGMEXEC
81852+ struct vm_area_struct *vma_m;
81853+#endif
81854+
81855 BUG_ON(down_read_trylock(&vma->vm_mm->mmap_sem));
81856 BUG_ON(vma->vm_flags & VM_PFNMAP);
81857 vma->vm_flags |= VM_MIXEDMAP;
81858+
81859+#ifdef CONFIG_PAX_SEGMEXEC
81860+ vma_m = pax_find_mirror_vma(vma);
81861+ if (vma_m)
81862+ vma_m->vm_flags |= VM_MIXEDMAP;
81863+#endif
81864+
81865 }
81866 return insert_page(vma, addr, page, vma->vm_page_prot);
81867 }
81868@@ -2228,6 +2239,7 @@ int vm_insert_mixed(struct vm_area_struct *vma, unsigned long addr,
81869 unsigned long pfn)
81870 {
81871 BUG_ON(!(vma->vm_flags & VM_MIXEDMAP));
81872+ BUG_ON(vma->vm_mirror);
81873
81874 if (addr < vma->vm_start || addr >= vma->vm_end)
81875 return -EFAULT;
81876@@ -2475,7 +2487,9 @@ static int apply_to_pmd_range(struct mm_struct *mm, pud_t *pud,
81877
81878 BUG_ON(pud_huge(*pud));
81879
81880- pmd = pmd_alloc(mm, pud, addr);
81881+ pmd = (mm == &init_mm) ?
81882+ pmd_alloc_kernel(mm, pud, addr) :
81883+ pmd_alloc(mm, pud, addr);
81884 if (!pmd)
81885 return -ENOMEM;
81886 do {
81887@@ -2495,7 +2509,9 @@ static int apply_to_pud_range(struct mm_struct *mm, pgd_t *pgd,
81888 unsigned long next;
81889 int err;
81890
81891- pud = pud_alloc(mm, pgd, addr);
81892+ pud = (mm == &init_mm) ?
81893+ pud_alloc_kernel(mm, pgd, addr) :
81894+ pud_alloc(mm, pgd, addr);
81895 if (!pud)
81896 return -ENOMEM;
81897 do {
81898@@ -2583,6 +2599,186 @@ static inline void cow_user_page(struct page *dst, struct page *src, unsigned lo
81899 copy_user_highpage(dst, src, va, vma);
81900 }
81901
81902+#ifdef CONFIG_PAX_SEGMEXEC
81903+static void pax_unmap_mirror_pte(struct vm_area_struct *vma, unsigned long address, pmd_t *pmd)
81904+{
81905+ struct mm_struct *mm = vma->vm_mm;
81906+ spinlock_t *ptl;
81907+ pte_t *pte, entry;
81908+
81909+ pte = pte_offset_map_lock(mm, pmd, address, &ptl);
81910+ entry = *pte;
81911+ if (!pte_present(entry)) {
81912+ if (!pte_none(entry)) {
81913+ BUG_ON(pte_file(entry));
81914+ free_swap_and_cache(pte_to_swp_entry(entry));
81915+ pte_clear_not_present_full(mm, address, pte, 0);
81916+ }
81917+ } else {
81918+ struct page *page;
81919+
81920+ flush_cache_page(vma, address, pte_pfn(entry));
81921+ entry = ptep_clear_flush(vma, address, pte);
81922+ BUG_ON(pte_dirty(entry));
81923+ page = vm_normal_page(vma, address, entry);
81924+ if (page) {
81925+ update_hiwater_rss(mm);
81926+ if (PageAnon(page))
81927+ dec_mm_counter_fast(mm, MM_ANONPAGES);
81928+ else
81929+ dec_mm_counter_fast(mm, MM_FILEPAGES);
81930+ page_remove_rmap(page);
81931+ page_cache_release(page);
81932+ }
81933+ }
81934+ pte_unmap_unlock(pte, ptl);
81935+}
81936+
81937+/* PaX: if vma is mirrored, synchronize the mirror's PTE
81938+ *
81939+ * the ptl of the lower mapped page is held on entry and is not released on exit
81940+ * or inside, in order to ensure atomic changes to the PTE states (swapout, mremap, munmap, etc.)
81941+ */
81942+static void pax_mirror_anon_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
81943+{
81944+ struct mm_struct *mm = vma->vm_mm;
81945+ unsigned long address_m;
81946+ spinlock_t *ptl_m;
81947+ struct vm_area_struct *vma_m;
81948+ pmd_t *pmd_m;
81949+ pte_t *pte_m, entry_m;
81950+
81951+ BUG_ON(!page_m || !PageAnon(page_m));
81952+
81953+ vma_m = pax_find_mirror_vma(vma);
81954+ if (!vma_m)
81955+ return;
81956+
81957+ BUG_ON(!PageLocked(page_m));
81958+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
81959+ address_m = address + SEGMEXEC_TASK_SIZE;
81960+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
81961+ pte_m = pte_offset_map(pmd_m, address_m);
81962+ ptl_m = pte_lockptr(mm, pmd_m);
81963+ if (ptl != ptl_m) {
81964+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
81965+ if (!pte_none(*pte_m))
81966+ goto out;
81967+ }
81968+
81969+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
81970+ page_cache_get(page_m);
81971+ page_add_anon_rmap(page_m, vma_m, address_m);
81972+ inc_mm_counter_fast(mm, MM_ANONPAGES);
81973+ set_pte_at(mm, address_m, pte_m, entry_m);
81974+ update_mmu_cache(vma_m, address_m, pte_m);
81975+out:
81976+ if (ptl != ptl_m)
81977+ spin_unlock(ptl_m);
81978+ pte_unmap(pte_m);
81979+ unlock_page(page_m);
81980+}
81981+
81982+void pax_mirror_file_pte(struct vm_area_struct *vma, unsigned long address, struct page *page_m, spinlock_t *ptl)
81983+{
81984+ struct mm_struct *mm = vma->vm_mm;
81985+ unsigned long address_m;
81986+ spinlock_t *ptl_m;
81987+ struct vm_area_struct *vma_m;
81988+ pmd_t *pmd_m;
81989+ pte_t *pte_m, entry_m;
81990+
81991+ BUG_ON(!page_m || PageAnon(page_m));
81992+
81993+ vma_m = pax_find_mirror_vma(vma);
81994+ if (!vma_m)
81995+ return;
81996+
81997+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
81998+ address_m = address + SEGMEXEC_TASK_SIZE;
81999+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
82000+ pte_m = pte_offset_map(pmd_m, address_m);
82001+ ptl_m = pte_lockptr(mm, pmd_m);
82002+ if (ptl != ptl_m) {
82003+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
82004+ if (!pte_none(*pte_m))
82005+ goto out;
82006+ }
82007+
82008+ entry_m = pfn_pte(page_to_pfn(page_m), vma_m->vm_page_prot);
82009+ page_cache_get(page_m);
82010+ page_add_file_rmap(page_m);
82011+ inc_mm_counter_fast(mm, MM_FILEPAGES);
82012+ set_pte_at(mm, address_m, pte_m, entry_m);
82013+ update_mmu_cache(vma_m, address_m, pte_m);
82014+out:
82015+ if (ptl != ptl_m)
82016+ spin_unlock(ptl_m);
82017+ pte_unmap(pte_m);
82018+}
82019+
82020+static void pax_mirror_pfn_pte(struct vm_area_struct *vma, unsigned long address, unsigned long pfn_m, spinlock_t *ptl)
82021+{
82022+ struct mm_struct *mm = vma->vm_mm;
82023+ unsigned long address_m;
82024+ spinlock_t *ptl_m;
82025+ struct vm_area_struct *vma_m;
82026+ pmd_t *pmd_m;
82027+ pte_t *pte_m, entry_m;
82028+
82029+ vma_m = pax_find_mirror_vma(vma);
82030+ if (!vma_m)
82031+ return;
82032+
82033+ BUG_ON(address >= SEGMEXEC_TASK_SIZE);
82034+ address_m = address + SEGMEXEC_TASK_SIZE;
82035+ pmd_m = pmd_offset(pud_offset(pgd_offset(mm, address_m), address_m), address_m);
82036+ pte_m = pte_offset_map(pmd_m, address_m);
82037+ ptl_m = pte_lockptr(mm, pmd_m);
82038+ if (ptl != ptl_m) {
82039+ spin_lock_nested(ptl_m, SINGLE_DEPTH_NESTING);
82040+ if (!pte_none(*pte_m))
82041+ goto out;
82042+ }
82043+
82044+ entry_m = pfn_pte(pfn_m, vma_m->vm_page_prot);
82045+ set_pte_at(mm, address_m, pte_m, entry_m);
82046+out:
82047+ if (ptl != ptl_m)
82048+ spin_unlock(ptl_m);
82049+ pte_unmap(pte_m);
82050+}
82051+
82052+static void pax_mirror_pte(struct vm_area_struct *vma, unsigned long address, pte_t *pte, pmd_t *pmd, spinlock_t *ptl)
82053+{
82054+ struct page *page_m;
82055+ pte_t entry;
82056+
82057+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC))
82058+ goto out;
82059+
82060+ entry = *pte;
82061+ page_m = vm_normal_page(vma, address, entry);
82062+ if (!page_m)
82063+ pax_mirror_pfn_pte(vma, address, pte_pfn(entry), ptl);
82064+ else if (PageAnon(page_m)) {
82065+ if (pax_find_mirror_vma(vma)) {
82066+ pte_unmap_unlock(pte, ptl);
82067+ lock_page(page_m);
82068+ pte = pte_offset_map_lock(vma->vm_mm, pmd, address, &ptl);
82069+ if (pte_same(entry, *pte))
82070+ pax_mirror_anon_pte(vma, address, page_m, ptl);
82071+ else
82072+ unlock_page(page_m);
82073+ }
82074+ } else
82075+ pax_mirror_file_pte(vma, address, page_m, ptl);
82076+
82077+out:
82078+ pte_unmap_unlock(pte, ptl);
82079+}
82080+#endif
82081+
82082 /*
82083 * This routine handles present pages, when users try to write
82084 * to a shared page. It is done by copying the page to a new address
82085@@ -2799,6 +2995,12 @@ gotten:
82086 */
82087 page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
82088 if (likely(pte_same(*page_table, orig_pte))) {
82089+
82090+#ifdef CONFIG_PAX_SEGMEXEC
82091+ if (pax_find_mirror_vma(vma))
82092+ BUG_ON(!trylock_page(new_page));
82093+#endif
82094+
82095 if (old_page) {
82096 if (!PageAnon(old_page)) {
82097 dec_mm_counter_fast(mm, MM_FILEPAGES);
82098@@ -2850,6 +3052,10 @@ gotten:
82099 page_remove_rmap(old_page);
82100 }
82101
82102+#ifdef CONFIG_PAX_SEGMEXEC
82103+ pax_mirror_anon_pte(vma, address, new_page, ptl);
82104+#endif
82105+
82106 /* Free the old page.. */
82107 new_page = old_page;
82108 ret |= VM_FAULT_WRITE;
82109@@ -3125,6 +3331,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
82110 swap_free(entry);
82111 if (vm_swap_full() || (vma->vm_flags & VM_LOCKED) || PageMlocked(page))
82112 try_to_free_swap(page);
82113+
82114+#ifdef CONFIG_PAX_SEGMEXEC
82115+ if ((flags & FAULT_FLAG_WRITE) || !pax_find_mirror_vma(vma))
82116+#endif
82117+
82118 unlock_page(page);
82119 if (page != swapcache) {
82120 /*
82121@@ -3148,6 +3359,11 @@ static int do_swap_page(struct mm_struct *mm, struct vm_area_struct *vma,
82122
82123 /* No need to invalidate - it was non-present before */
82124 update_mmu_cache(vma, address, page_table);
82125+
82126+#ifdef CONFIG_PAX_SEGMEXEC
82127+ pax_mirror_anon_pte(vma, address, page, ptl);
82128+#endif
82129+
82130 unlock:
82131 pte_unmap_unlock(page_table, ptl);
82132 out:
82133@@ -3167,40 +3383,6 @@ out_release:
82134 }
82135
82136 /*
82137- * This is like a special single-page "expand_{down|up}wards()",
82138- * except we must first make sure that 'address{-|+}PAGE_SIZE'
82139- * doesn't hit another vma.
82140- */
82141-static inline int check_stack_guard_page(struct vm_area_struct *vma, unsigned long address)
82142-{
82143- address &= PAGE_MASK;
82144- if ((vma->vm_flags & VM_GROWSDOWN) && address == vma->vm_start) {
82145- struct vm_area_struct *prev = vma->vm_prev;
82146-
82147- /*
82148- * Is there a mapping abutting this one below?
82149- *
82150- * That's only ok if it's the same stack mapping
82151- * that has gotten split..
82152- */
82153- if (prev && prev->vm_end == address)
82154- return prev->vm_flags & VM_GROWSDOWN ? 0 : -ENOMEM;
82155-
82156- expand_downwards(vma, address - PAGE_SIZE);
82157- }
82158- if ((vma->vm_flags & VM_GROWSUP) && address + PAGE_SIZE == vma->vm_end) {
82159- struct vm_area_struct *next = vma->vm_next;
82160-
82161- /* As VM_GROWSDOWN but s/below/above/ */
82162- if (next && next->vm_start == address + PAGE_SIZE)
82163- return next->vm_flags & VM_GROWSUP ? 0 : -ENOMEM;
82164-
82165- expand_upwards(vma, address + PAGE_SIZE);
82166- }
82167- return 0;
82168-}
82169-
82170-/*
82171 * We enter with non-exclusive mmap_sem (to exclude vma changes,
82172 * but allow concurrent faults), and pte mapped but not yet locked.
82173 * We return with mmap_sem still held, but pte unmapped and unlocked.
82174@@ -3209,27 +3391,23 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
82175 unsigned long address, pte_t *page_table, pmd_t *pmd,
82176 unsigned int flags)
82177 {
82178- struct page *page;
82179+ struct page *page = NULL;
82180 spinlock_t *ptl;
82181 pte_t entry;
82182
82183- pte_unmap(page_table);
82184-
82185- /* Check if we need to add a guard page to the stack */
82186- if (check_stack_guard_page(vma, address) < 0)
82187- return VM_FAULT_SIGBUS;
82188-
82189- /* Use the zero-page for reads */
82190 if (!(flags & FAULT_FLAG_WRITE)) {
82191 entry = pte_mkspecial(pfn_pte(my_zero_pfn(address),
82192 vma->vm_page_prot));
82193- page_table = pte_offset_map_lock(mm, pmd, address, &ptl);
82194+ ptl = pte_lockptr(mm, pmd);
82195+ spin_lock(ptl);
82196 if (!pte_none(*page_table))
82197 goto unlock;
82198 goto setpte;
82199 }
82200
82201 /* Allocate our own private page. */
82202+ pte_unmap(page_table);
82203+
82204 if (unlikely(anon_vma_prepare(vma)))
82205 goto oom;
82206 page = alloc_zeroed_user_highpage_movable(vma, address);
82207@@ -3253,6 +3431,11 @@ static int do_anonymous_page(struct mm_struct *mm, struct vm_area_struct *vma,
82208 if (!pte_none(*page_table))
82209 goto release;
82210
82211+#ifdef CONFIG_PAX_SEGMEXEC
82212+ if (pax_find_mirror_vma(vma))
82213+ BUG_ON(!trylock_page(page));
82214+#endif
82215+
82216 inc_mm_counter_fast(mm, MM_ANONPAGES);
82217 page_add_new_anon_rmap(page, vma, address);
82218 setpte:
82219@@ -3260,6 +3443,12 @@ setpte:
82220
82221 /* No need to invalidate - it was non-present before */
82222 update_mmu_cache(vma, address, page_table);
82223+
82224+#ifdef CONFIG_PAX_SEGMEXEC
82225+ if (page)
82226+ pax_mirror_anon_pte(vma, address, page, ptl);
82227+#endif
82228+
82229 unlock:
82230 pte_unmap_unlock(page_table, ptl);
82231 return 0;
82232@@ -3403,6 +3592,12 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
82233 */
82234 /* Only go through if we didn't race with anybody else... */
82235 if (likely(pte_same(*page_table, orig_pte))) {
82236+
82237+#ifdef CONFIG_PAX_SEGMEXEC
82238+ if (anon && pax_find_mirror_vma(vma))
82239+ BUG_ON(!trylock_page(page));
82240+#endif
82241+
82242 flush_icache_page(vma, page);
82243 entry = mk_pte(page, vma->vm_page_prot);
82244 if (flags & FAULT_FLAG_WRITE)
82245@@ -3422,6 +3617,14 @@ static int __do_fault(struct mm_struct *mm, struct vm_area_struct *vma,
82246
82247 /* no need to invalidate: a not-present page won't be cached */
82248 update_mmu_cache(vma, address, page_table);
82249+
82250+#ifdef CONFIG_PAX_SEGMEXEC
82251+ if (anon)
82252+ pax_mirror_anon_pte(vma, address, page, ptl);
82253+ else
82254+ pax_mirror_file_pte(vma, address, page, ptl);
82255+#endif
82256+
82257 } else {
82258 if (cow_page)
82259 mem_cgroup_uncharge_page(cow_page);
82260@@ -3743,6 +3946,12 @@ int handle_pte_fault(struct mm_struct *mm,
82261 if (flags & FAULT_FLAG_WRITE)
82262 flush_tlb_fix_spurious_fault(vma, address);
82263 }
82264+
82265+#ifdef CONFIG_PAX_SEGMEXEC
82266+ pax_mirror_pte(vma, address, pte, pmd, ptl);
82267+ return 0;
82268+#endif
82269+
82270 unlock:
82271 pte_unmap_unlock(pte, ptl);
82272 return 0;
82273@@ -3759,6 +3968,10 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
82274 pmd_t *pmd;
82275 pte_t *pte;
82276
82277+#ifdef CONFIG_PAX_SEGMEXEC
82278+ struct vm_area_struct *vma_m;
82279+#endif
82280+
82281 __set_current_state(TASK_RUNNING);
82282
82283 count_vm_event(PGFAULT);
82284@@ -3770,6 +3983,34 @@ int handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
82285 if (unlikely(is_vm_hugetlb_page(vma)))
82286 return hugetlb_fault(mm, vma, address, flags);
82287
82288+#ifdef CONFIG_PAX_SEGMEXEC
82289+ vma_m = pax_find_mirror_vma(vma);
82290+ if (vma_m) {
82291+ unsigned long address_m;
82292+ pgd_t *pgd_m;
82293+ pud_t *pud_m;
82294+ pmd_t *pmd_m;
82295+
82296+ if (vma->vm_start > vma_m->vm_start) {
82297+ address_m = address;
82298+ address -= SEGMEXEC_TASK_SIZE;
82299+ vma = vma_m;
82300+ } else
82301+ address_m = address + SEGMEXEC_TASK_SIZE;
82302+
82303+ pgd_m = pgd_offset(mm, address_m);
82304+ pud_m = pud_alloc(mm, pgd_m, address_m);
82305+ if (!pud_m)
82306+ return VM_FAULT_OOM;
82307+ pmd_m = pmd_alloc(mm, pud_m, address_m);
82308+ if (!pmd_m)
82309+ return VM_FAULT_OOM;
82310+ if (!pmd_present(*pmd_m) && __pte_alloc(mm, vma_m, pmd_m, address_m))
82311+ return VM_FAULT_OOM;
82312+ pax_unmap_mirror_pte(vma_m, address_m, pmd_m);
82313+ }
82314+#endif
82315+
82316 retry:
82317 pgd = pgd_offset(mm, address);
82318 pud = pud_alloc(mm, pgd, address);
82319@@ -3868,6 +4109,23 @@ int __pud_alloc(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
82320 spin_unlock(&mm->page_table_lock);
82321 return 0;
82322 }
82323+
82324+int __pud_alloc_kernel(struct mm_struct *mm, pgd_t *pgd, unsigned long address)
82325+{
82326+ pud_t *new = pud_alloc_one(mm, address);
82327+ if (!new)
82328+ return -ENOMEM;
82329+
82330+ smp_wmb(); /* See comment in __pte_alloc */
82331+
82332+ spin_lock(&mm->page_table_lock);
82333+ if (pgd_present(*pgd)) /* Another has populated it */
82334+ pud_free(mm, new);
82335+ else
82336+ pgd_populate_kernel(mm, pgd, new);
82337+ spin_unlock(&mm->page_table_lock);
82338+ return 0;
82339+}
82340 #endif /* __PAGETABLE_PUD_FOLDED */
82341
82342 #ifndef __PAGETABLE_PMD_FOLDED
82343@@ -3898,6 +4156,30 @@ int __pmd_alloc(struct mm_struct *mm, pud_t *pud, unsigned long address)
82344 spin_unlock(&mm->page_table_lock);
82345 return 0;
82346 }
82347+
82348+int __pmd_alloc_kernel(struct mm_struct *mm, pud_t *pud, unsigned long address)
82349+{
82350+ pmd_t *new = pmd_alloc_one(mm, address);
82351+ if (!new)
82352+ return -ENOMEM;
82353+
82354+ smp_wmb(); /* See comment in __pte_alloc */
82355+
82356+ spin_lock(&mm->page_table_lock);
82357+#ifndef __ARCH_HAS_4LEVEL_HACK
82358+ if (pud_present(*pud)) /* Another has populated it */
82359+ pmd_free(mm, new);
82360+ else
82361+ pud_populate_kernel(mm, pud, new);
82362+#else
82363+ if (pgd_present(*pud)) /* Another has populated it */
82364+ pmd_free(mm, new);
82365+ else
82366+ pgd_populate_kernel(mm, pud, new);
82367+#endif /* __ARCH_HAS_4LEVEL_HACK */
82368+ spin_unlock(&mm->page_table_lock);
82369+ return 0;
82370+}
82371 #endif /* __PAGETABLE_PMD_FOLDED */
82372
82373 #if !defined(__HAVE_ARCH_GATE_AREA)
82374@@ -3911,7 +4193,7 @@ static int __init gate_vma_init(void)
82375 gate_vma.vm_start = FIXADDR_USER_START;
82376 gate_vma.vm_end = FIXADDR_USER_END;
82377 gate_vma.vm_flags = VM_READ | VM_MAYREAD | VM_EXEC | VM_MAYEXEC;
82378- gate_vma.vm_page_prot = __P101;
82379+ gate_vma.vm_page_prot = vm_get_page_prot(gate_vma.vm_flags);
82380
82381 return 0;
82382 }
82383@@ -4045,8 +4327,8 @@ out:
82384 return ret;
82385 }
82386
82387-int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
82388- void *buf, int len, int write)
82389+ssize_t generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
82390+ void *buf, size_t len, int write)
82391 {
82392 resource_size_t phys_addr;
82393 unsigned long prot = 0;
82394@@ -4071,8 +4353,8 @@ int generic_access_phys(struct vm_area_struct *vma, unsigned long addr,
82395 * Access another process' address space as given in mm. If non-NULL, use the
82396 * given task for page fault accounting.
82397 */
82398-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
82399- unsigned long addr, void *buf, int len, int write)
82400+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
82401+ unsigned long addr, void *buf, size_t len, int write)
82402 {
82403 struct vm_area_struct *vma;
82404 void *old_buf = buf;
82405@@ -4080,7 +4362,7 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
82406 down_read(&mm->mmap_sem);
82407 /* ignore errors, just check how much was successfully transferred */
82408 while (len) {
82409- int bytes, ret, offset;
82410+ ssize_t bytes, ret, offset;
82411 void *maddr;
82412 struct page *page = NULL;
82413
82414@@ -4139,8 +4421,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
82415 *
82416 * The caller must hold a reference on @mm.
82417 */
82418-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
82419- void *buf, int len, int write)
82420+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
82421+ void *buf, size_t len, int write)
82422 {
82423 return __access_remote_vm(NULL, mm, addr, buf, len, write);
82424 }
82425@@ -4150,11 +4432,11 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
82426 * Source/target buffer must be kernel space,
82427 * Do not walk the page table directly, use get_user_pages
82428 */
82429-int access_process_vm(struct task_struct *tsk, unsigned long addr,
82430- void *buf, int len, int write)
82431+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr,
82432+ void *buf, size_t len, int write)
82433 {
82434 struct mm_struct *mm;
82435- int ret;
82436+ ssize_t ret;
82437
82438 mm = get_task_mm(tsk);
82439 if (!mm)
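The bulk of the memory.c changes is the SEGMEXEC mirroring machinery: user mappings live in the lower half of the address space, each has an executable-view twin offset by SEGMEXEC_TASK_SIZE, and the fault paths (pax_mirror_anon_pte(), pax_mirror_file_pte(), pax_mirror_pfn_pte()) replay each freshly installed PTE into the mirror under the appropriate page-table locks. The address arithmetic alone, reduced to a sketch with an illustrative split point:

```c
#include <stdio.h>

#define SEGMEXEC_TASK_SIZE 0x40000000UL	/* illustrative split, not the real value */

/* Every data mapping at addr below the split has an exec-view twin at
 * addr + SEGMEXEC_TASK_SIZE; fault handling normalizes to the lower half
 * and then replays the new PTE into the mirror. */
static unsigned long mirror_address(unsigned long addr)
{
	return addr < SEGMEXEC_TASK_SIZE ? addr + SEGMEXEC_TASK_SIZE
					 : addr - SEGMEXEC_TASK_SIZE;
}

int main(void)
{
	unsigned long fault = 0x00100040UL;

	printf("fault at %#lx, mirror PTE at %#lx\n", fault, mirror_address(fault));
	return 0;
}
```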
82440diff --git a/mm/mempolicy.c b/mm/mempolicy.c
82441index 7431001..0f8344e 100644
82442--- a/mm/mempolicy.c
82443+++ b/mm/mempolicy.c
82444@@ -708,6 +708,10 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
82445 unsigned long vmstart;
82446 unsigned long vmend;
82447
82448+#ifdef CONFIG_PAX_SEGMEXEC
82449+ struct vm_area_struct *vma_m;
82450+#endif
82451+
82452 vma = find_vma(mm, start);
82453 if (!vma || vma->vm_start > start)
82454 return -EFAULT;
82455@@ -744,9 +748,20 @@ static int mbind_range(struct mm_struct *mm, unsigned long start,
82456 if (err)
82457 goto out;
82458 }
82459+
82460 err = vma_replace_policy(vma, new_pol);
82461 if (err)
82462 goto out;
82463+
82464+#ifdef CONFIG_PAX_SEGMEXEC
82465+ vma_m = pax_find_mirror_vma(vma);
82466+ if (vma_m) {
82467+ err = vma_replace_policy(vma_m, new_pol);
82468+ if (err)
82469+ goto out;
82470+ }
82471+#endif
82472+
82473 }
82474
82475 out:
82476@@ -1202,6 +1217,17 @@ static long do_mbind(unsigned long start, unsigned long len,
82477
82478 if (end < start)
82479 return -EINVAL;
82480+
82481+#ifdef CONFIG_PAX_SEGMEXEC
82482+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
82483+ if (end > SEGMEXEC_TASK_SIZE)
82484+ return -EINVAL;
82485+ } else
82486+#endif
82487+
82488+ if (end > TASK_SIZE)
82489+ return -EINVAL;
82490+
82491 if (end == start)
82492 return 0;
82493
82494@@ -1430,8 +1456,7 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
82495 */
82496 tcred = __task_cred(task);
82497 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
82498- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
82499- !capable(CAP_SYS_NICE)) {
82500+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
82501 rcu_read_unlock();
82502 err = -EPERM;
82503 goto out_put;
82504@@ -1462,6 +1487,15 @@ SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
82505 goto out;
82506 }
82507
82508+#ifdef CONFIG_GRKERNSEC_PROC_MEMMAP
82509+ if (mm != current->mm &&
82510+ (mm->pax_flags & MF_PAX_RANDMMAP || mm->pax_flags & MF_PAX_SEGMEXEC)) {
82511+ mmput(mm);
82512+ err = -EPERM;
82513+ goto out;
82514+ }
82515+#endif
82516+
82517 err = do_migrate_pages(mm, old, new,
82518 capable(CAP_SYS_NICE) ? MPOL_MF_MOVE_ALL : MPOL_MF_MOVE);
82519
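The mempolicy.c (and, below, migrate.c) credential hunk tightens the permission test by dropping the real-uid-to-real-uid clause: a caller whose real uid merely equals the target's real uid no longer qualifies; only an effective-uid match, a real-uid/saved-uid match, or CAP_SYS_NICE does. A sketch with simplified credential structs:

```c
#include <stdbool.h>
#include <stdio.h>

struct cred { unsigned int uid, euid, suid; };

static bool may_act_on(const struct cred *c, const struct cred *t,
		       bool cap_sys_nice)
{
	if (c->euid == t->suid || c->euid == t->uid || c->uid == t->suid)
		return true;
	return cap_sys_nice;	/* real-uid == real-uid no longer qualifies */
}

int main(void)
{
	struct cred caller = { .uid = 1000, .euid = 1001, .suid = 1001 };
	struct cred target = { .uid = 1000, .euid = 500,  .suid = 500  };

	/* Same real uid, but no euid/suid relationship: now denied. */
	printf("allowed: %d\n", may_act_on(&caller, &target, false));
	return 0;
}
```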
82520diff --git a/mm/migrate.c b/mm/migrate.c
82521index 6f0c244..6d1ae32 100644
82522--- a/mm/migrate.c
82523+++ b/mm/migrate.c
82524@@ -1399,8 +1399,7 @@ SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
82525 */
82526 tcred = __task_cred(task);
82527 if (!uid_eq(cred->euid, tcred->suid) && !uid_eq(cred->euid, tcred->uid) &&
82528- !uid_eq(cred->uid, tcred->suid) && !uid_eq(cred->uid, tcred->uid) &&
82529- !capable(CAP_SYS_NICE)) {
82530+ !uid_eq(cred->uid, tcred->suid) && !capable(CAP_SYS_NICE)) {
82531 rcu_read_unlock();
82532 err = -EPERM;
82533 goto out;
82534diff --git a/mm/mlock.c b/mm/mlock.c
82535index 79b7cf7..9944291 100644
82536--- a/mm/mlock.c
82537+++ b/mm/mlock.c
82538@@ -13,6 +13,7 @@
82539 #include <linux/pagemap.h>
82540 #include <linux/mempolicy.h>
82541 #include <linux/syscalls.h>
82542+#include <linux/security.h>
82543 #include <linux/sched.h>
82544 #include <linux/export.h>
82545 #include <linux/rmap.h>
82546@@ -334,7 +335,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
82547 {
82548 unsigned long nstart, end, tmp;
82549 struct vm_area_struct * vma, * prev;
82550- int error;
82551+ int error = 0;
82552
82553 VM_BUG_ON(start & ~PAGE_MASK);
82554 VM_BUG_ON(len != PAGE_ALIGN(len));
82555@@ -343,6 +344,9 @@ static int do_mlock(unsigned long start, size_t len, int on)
82556 return -EINVAL;
82557 if (end == start)
82558 return 0;
82559+ if (end > TASK_SIZE)
82560+ return -EINVAL;
82561+
82562 vma = find_vma(current->mm, start);
82563 if (!vma || vma->vm_start > start)
82564 return -ENOMEM;
82565@@ -354,6 +358,11 @@ static int do_mlock(unsigned long start, size_t len, int on)
82566 for (nstart = start ; ; ) {
82567 vm_flags_t newflags;
82568
82569+#ifdef CONFIG_PAX_SEGMEXEC
82570+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
82571+ break;
82572+#endif
82573+
82574 /* Here we know that vma->vm_start <= nstart < vma->vm_end. */
82575
82576 newflags = vma->vm_flags & ~VM_LOCKED;
82577@@ -466,6 +475,7 @@ SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
82578 lock_limit >>= PAGE_SHIFT;
82579
82580 /* check against resource limits */
82581+ gr_learn_resource(current, RLIMIT_MEMLOCK, (current->mm->locked_vm << PAGE_SHIFT) + len, 1);
82582 if ((locked <= lock_limit) || capable(CAP_IPC_LOCK))
82583 error = do_mlock(start, len, 1);
82584 up_write(&current->mm->mmap_sem);
82585@@ -500,6 +510,11 @@ static int do_mlockall(int flags)
82586 for (vma = current->mm->mmap; vma ; vma = prev->vm_next) {
82587 vm_flags_t newflags;
82588
82589+#ifdef CONFIG_PAX_SEGMEXEC
82590+ if ((current->mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE))
82591+ break;
82592+#endif
82593+
82594 newflags = vma->vm_flags & ~VM_LOCKED;
82595 if (flags & MCL_CURRENT)
82596 newflags |= VM_LOCKED;
82597@@ -532,6 +547,7 @@ SYSCALL_DEFINE1(mlockall, int, flags)
82598 lock_limit >>= PAGE_SHIFT;
82599
82600 ret = -ENOMEM;
82601+ gr_learn_resource(current, RLIMIT_MEMLOCK, current->mm->total_vm << PAGE_SHIFT, 1);
82602 if (!(flags & MCL_CURRENT) || (current->mm->total_vm <= lock_limit) ||
82603 capable(CAP_IPC_LOCK))
82604 ret = do_mlockall(flags);
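
do_mlock() now rejects ranges extending past the task's address-space ceiling before walking any vmas. A compact userspace sketch of that validation; TASK_SIZE_DEMO is a stand-in for the per-arch TASK_SIZE, and the wrap check models the kernel's combined bounds rather than its exact control flow:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE_DEMO	4096UL
#define TASK_SIZE_DEMO	(1UL << 47)	/* assumption: x86-64-style ceiling */

static bool mlock_range_ok(uintptr_t start, size_t len)
{
	uintptr_t end = start + len;

	if (end < start)
		return false;		/* length wrapped the address space */
	if (end > TASK_SIZE_DEMO)
		return false;		/* the bound the patch adds */
	return true;
}

int main(void)
{
	printf("%d\n", mlock_range_ok(0x10000, 4 * PAGE_SIZE_DEMO));	/* ok */
	printf("%d\n", mlock_range_ok(TASK_SIZE_DEMO - PAGE_SIZE_DEMO,
				      2 * PAGE_SIZE_DEMO));		/* no */
	return 0;
}
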
82605diff --git a/mm/mmap.c b/mm/mmap.c
82606index f681e18..4c2577f 100644
82607--- a/mm/mmap.c
82608+++ b/mm/mmap.c
82609@@ -36,6 +36,7 @@
82610 #include <linux/sched/sysctl.h>
82611 #include <linux/notifier.h>
82612 #include <linux/memory.h>
82613+#include <linux/random.h>
82614
82615 #include <asm/uaccess.h>
82616 #include <asm/cacheflush.h>
82617@@ -52,6 +53,16 @@
82618 #define arch_rebalance_pgtables(addr, len) (addr)
82619 #endif
82620
82621+static inline void verify_mm_writelocked(struct mm_struct *mm)
82622+{
82623+#if defined(CONFIG_DEBUG_VM) || defined(CONFIG_PAX)
82624+ if (unlikely(down_read_trylock(&mm->mmap_sem))) {
82625+ up_read(&mm->mmap_sem);
82626+ BUG();
82627+ }
82628+#endif
82629+}
82630+
82631 static void unmap_region(struct mm_struct *mm,
82632 struct vm_area_struct *vma, struct vm_area_struct *prev,
82633 unsigned long start, unsigned long end);
82634@@ -71,16 +82,25 @@ static void unmap_region(struct mm_struct *mm,
82635 * x: (no) no x: (no) yes x: (no) yes x: (yes) yes
82636 *
82637 */
82638-pgprot_t protection_map[16] = {
82639+pgprot_t protection_map[16] __read_only = {
82640 __P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
82641 __S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
82642 };
82643
82644-pgprot_t vm_get_page_prot(unsigned long vm_flags)
82645+pgprot_t vm_get_page_prot(vm_flags_t vm_flags)
82646 {
82647- return __pgprot(pgprot_val(protection_map[vm_flags &
82648+ pgprot_t prot = __pgprot(pgprot_val(protection_map[vm_flags &
82649 (VM_READ|VM_WRITE|VM_EXEC|VM_SHARED)]) |
82650 pgprot_val(arch_vm_get_page_prot(vm_flags)));
82651+
82652+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
82653+ if (!(__supported_pte_mask & _PAGE_NX) &&
82654+ (vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC &&
82655+ (vm_flags & (VM_READ | VM_WRITE)))
82656+ prot = __pgprot(pte_val(pte_exprotect(__pte(pgprot_val(prot)))));
82657+#endif
82658+
82659+ return prot;
82660 }
82661 EXPORT_SYMBOL(vm_get_page_prot);
82662
82663@@ -89,6 +109,7 @@ int sysctl_overcommit_ratio __read_mostly = 50; /* default is 50% */
82664 int sysctl_max_map_count __read_mostly = DEFAULT_MAX_MAP_COUNT;
82665 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
82666 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
82667+unsigned long sysctl_heap_stack_gap __read_mostly = 64*1024;
82668 /*
82669 * Make sure vm_committed_as in one cacheline and not cacheline shared with
82670 * other variables. It can be updated by several CPUs frequently.
82671@@ -247,6 +268,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
82672 struct vm_area_struct *next = vma->vm_next;
82673
82674 might_sleep();
82675+ BUG_ON(vma->vm_mirror);
82676 if (vma->vm_ops && vma->vm_ops->close)
82677 vma->vm_ops->close(vma);
82678 if (vma->vm_file)
82679@@ -291,6 +313,7 @@ SYSCALL_DEFINE1(brk, unsigned long, brk)
82680 * not page aligned -Ram Gupta
82681 */
82682 rlim = rlimit(RLIMIT_DATA);
82683+ gr_learn_resource(current, RLIMIT_DATA, (brk - mm->start_brk) + (mm->end_data - mm->start_data), 1);
82684 if (rlim < RLIM_INFINITY && (brk - mm->start_brk) +
82685 (mm->end_data - mm->start_data) > rlim)
82686 goto out;
82687@@ -933,6 +956,12 @@ static int
82688 can_vma_merge_before(struct vm_area_struct *vma, unsigned long vm_flags,
82689 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
82690 {
82691+
82692+#ifdef CONFIG_PAX_SEGMEXEC
82693+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_start == SEGMEXEC_TASK_SIZE)
82694+ return 0;
82695+#endif
82696+
82697 if (is_mergeable_vma(vma, file, vm_flags) &&
82698 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
82699 if (vma->vm_pgoff == vm_pgoff)
82700@@ -952,6 +981,12 @@ static int
82701 can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
82702 struct anon_vma *anon_vma, struct file *file, pgoff_t vm_pgoff)
82703 {
82704+
82705+#ifdef CONFIG_PAX_SEGMEXEC
82706+ if ((vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) && vma->vm_end == SEGMEXEC_TASK_SIZE)
82707+ return 0;
82708+#endif
82709+
82710 if (is_mergeable_vma(vma, file, vm_flags) &&
82711 is_mergeable_anon_vma(anon_vma, vma->anon_vma, vma)) {
82712 pgoff_t vm_pglen;
82713@@ -994,13 +1029,20 @@ can_vma_merge_after(struct vm_area_struct *vma, unsigned long vm_flags,
82714 struct vm_area_struct *vma_merge(struct mm_struct *mm,
82715 struct vm_area_struct *prev, unsigned long addr,
82716 unsigned long end, unsigned long vm_flags,
82717- struct anon_vma *anon_vma, struct file *file,
82718+ struct anon_vma *anon_vma, struct file *file,
82719 pgoff_t pgoff, struct mempolicy *policy)
82720 {
82721 pgoff_t pglen = (end - addr) >> PAGE_SHIFT;
82722 struct vm_area_struct *area, *next;
82723 int err;
82724
82725+#ifdef CONFIG_PAX_SEGMEXEC
82726+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE, end_m = end + SEGMEXEC_TASK_SIZE;
82727+ struct vm_area_struct *area_m = NULL, *next_m = NULL, *prev_m = NULL;
82728+
82729+ BUG_ON((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE < end);
82730+#endif
82731+
82732 /*
82733 * We later require that vma->vm_flags == vm_flags,
82734 * so this tests vma->vm_flags & VM_SPECIAL, too.
82735@@ -1016,6 +1058,15 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
82736 if (next && next->vm_end == end) /* cases 6, 7, 8 */
82737 next = next->vm_next;
82738
82739+#ifdef CONFIG_PAX_SEGMEXEC
82740+ if (prev)
82741+ prev_m = pax_find_mirror_vma(prev);
82742+ if (area)
82743+ area_m = pax_find_mirror_vma(area);
82744+ if (next)
82745+ next_m = pax_find_mirror_vma(next);
82746+#endif
82747+
82748 /*
82749 * Can it merge with the predecessor?
82750 */
82751@@ -1035,9 +1086,24 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
82752 /* cases 1, 6 */
82753 err = vma_adjust(prev, prev->vm_start,
82754 next->vm_end, prev->vm_pgoff, NULL);
82755- } else /* cases 2, 5, 7 */
82756+
82757+#ifdef CONFIG_PAX_SEGMEXEC
82758+ if (!err && prev_m)
82759+ err = vma_adjust(prev_m, prev_m->vm_start,
82760+ next_m->vm_end, prev_m->vm_pgoff, NULL);
82761+#endif
82762+
82763+ } else { /* cases 2, 5, 7 */
82764 err = vma_adjust(prev, prev->vm_start,
82765 end, prev->vm_pgoff, NULL);
82766+
82767+#ifdef CONFIG_PAX_SEGMEXEC
82768+ if (!err && prev_m)
82769+ err = vma_adjust(prev_m, prev_m->vm_start,
82770+ end_m, prev_m->vm_pgoff, NULL);
82771+#endif
82772+
82773+ }
82774 if (err)
82775 return NULL;
82776 khugepaged_enter_vma_merge(prev);
82777@@ -1051,12 +1117,27 @@ struct vm_area_struct *vma_merge(struct mm_struct *mm,
82778 mpol_equal(policy, vma_policy(next)) &&
82779 can_vma_merge_before(next, vm_flags,
82780 anon_vma, file, pgoff+pglen)) {
82781- if (prev && addr < prev->vm_end) /* case 4 */
82782+ if (prev && addr < prev->vm_end) { /* case 4 */
82783 err = vma_adjust(prev, prev->vm_start,
82784 addr, prev->vm_pgoff, NULL);
82785- else /* cases 3, 8 */
82786+
82787+#ifdef CONFIG_PAX_SEGMEXEC
82788+ if (!err && prev_m)
82789+ err = vma_adjust(prev_m, prev_m->vm_start,
82790+ addr_m, prev_m->vm_pgoff, NULL);
82791+#endif
82792+
82793+ } else { /* cases 3, 8 */
82794 err = vma_adjust(area, addr, next->vm_end,
82795 next->vm_pgoff - pglen, NULL);
82796+
82797+#ifdef CONFIG_PAX_SEGMEXEC
82798+ if (!err && area_m)
82799+ err = vma_adjust(area_m, addr_m, next_m->vm_end,
82800+ next_m->vm_pgoff - pglen, NULL);
82801+#endif
82802+
82803+ }
82804 if (err)
82805 return NULL;
82806 khugepaged_enter_vma_merge(area);
82807@@ -1165,8 +1246,10 @@ none:
82808 void vm_stat_account(struct mm_struct *mm, unsigned long flags,
82809 struct file *file, long pages)
82810 {
82811- const unsigned long stack_flags
82812- = VM_STACK_FLAGS & (VM_GROWSUP|VM_GROWSDOWN);
82813+
82814+#ifdef CONFIG_PAX_RANDMMAP
82815+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
82816+#endif
82817
82818 mm->total_vm += pages;
82819
82820@@ -1174,7 +1257,7 @@ void vm_stat_account(struct mm_struct *mm, unsigned long flags,
82821 mm->shared_vm += pages;
82822 if ((flags & (VM_EXEC|VM_WRITE)) == VM_EXEC)
82823 mm->exec_vm += pages;
82824- } else if (flags & stack_flags)
82825+ } else if (flags & (VM_GROWSUP|VM_GROWSDOWN))
82826 mm->stack_vm += pages;
82827 }
82828 #endif /* CONFIG_PROC_FS */
82829@@ -1213,7 +1296,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
82830 * (the exception is when the underlying filesystem is noexec
82831 * mounted, in which case we dont add PROT_EXEC.)
82832 */
82833- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
82834+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
82835 if (!(file && (file->f_path.mnt->mnt_flags & MNT_NOEXEC)))
82836 prot |= PROT_EXEC;
82837
82838@@ -1239,7 +1322,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
82839 /* Obtain the address to map to. we verify (or select) it and ensure
82840 * that it represents a valid section of the address space.
82841 */
82842- addr = get_unmapped_area(file, addr, len, pgoff, flags);
82843+ addr = get_unmapped_area(file, addr, len, pgoff, flags | ((prot & PROT_EXEC) ? MAP_EXECUTABLE : 0));
82844 if (addr & ~PAGE_MASK)
82845 return addr;
82846
82847@@ -1250,6 +1333,36 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
82848 vm_flags = calc_vm_prot_bits(prot) | calc_vm_flag_bits(flags) |
82849 mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
82850
82851+#ifdef CONFIG_PAX_MPROTECT
82852+ if (mm->pax_flags & MF_PAX_MPROTECT) {
82853+#ifndef CONFIG_PAX_MPROTECT_COMPAT
82854+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC)) {
82855+ gr_log_rwxmmap(file);
82856+
82857+#ifdef CONFIG_PAX_EMUPLT
82858+ vm_flags &= ~VM_EXEC;
82859+#else
82860+ return -EPERM;
82861+#endif
82862+
82863+ }
82864+
82865+ if (!(vm_flags & VM_EXEC))
82866+ vm_flags &= ~VM_MAYEXEC;
82867+#else
82868+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
82869+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
82870+#endif
82871+ else
82872+ vm_flags &= ~VM_MAYWRITE;
82873+ }
82874+#endif
82875+
82876+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
82877+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && file)
82878+ vm_flags &= ~VM_PAGEEXEC;
82879+#endif
82880+
82881 if (flags & MAP_LOCKED)
82882 if (!can_do_mlock())
82883 return -EPERM;
82884@@ -1261,6 +1374,7 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
82885 locked += mm->locked_vm;
82886 lock_limit = rlimit(RLIMIT_MEMLOCK);
82887 lock_limit >>= PAGE_SHIFT;
82888+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
82889 if (locked > lock_limit && !capable(CAP_IPC_LOCK))
82890 return -EAGAIN;
82891 }
82892@@ -1341,6 +1455,9 @@ unsigned long do_mmap_pgoff(struct file *file, unsigned long addr,
82893 vm_flags |= VM_NORESERVE;
82894 }
82895
82896+ if (!gr_acl_handle_mmap(file, prot))
82897+ return -EACCES;
82898+
82899 addr = mmap_region(file, addr, len, vm_flags, pgoff);
82900 if (!IS_ERR_VALUE(addr) &&
82901 ((vm_flags & VM_LOCKED) ||
82902@@ -1432,7 +1549,7 @@ int vma_wants_writenotify(struct vm_area_struct *vma)
82903 vm_flags_t vm_flags = vma->vm_flags;
82904
82905 /* If it was private or non-writable, the write bit is already clear */
82906- if ((vm_flags & (VM_WRITE|VM_SHARED)) != ((VM_WRITE|VM_SHARED)))
82907+ if ((vm_flags & (VM_WRITE|VM_SHARED)) != (VM_WRITE|VM_SHARED))
82908 return 0;
82909
82910 /* The backer wishes to know when pages are first written to? */
82911@@ -1480,7 +1597,22 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
82912 unsigned long charged = 0;
82913 struct inode *inode = file ? file_inode(file) : NULL;
82914
82915+#ifdef CONFIG_PAX_SEGMEXEC
82916+ struct vm_area_struct *vma_m = NULL;
82917+#endif
82918+
82919+ /*
82920+ * mm->mmap_sem is required to protect against another thread
82921+ * changing the mappings in case we sleep.
82922+ */
82923+ verify_mm_writelocked(mm);
82924+
82925 /* Check against address space limit. */
82926+
82927+#ifdef CONFIG_PAX_RANDMMAP
82928+ if (!(mm->pax_flags & MF_PAX_RANDMMAP) || (vm_flags & (VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)))
82929+#endif
82930+
82931 if (!may_expand_vm(mm, len >> PAGE_SHIFT)) {
82932 unsigned long nr_pages;
82933
82934@@ -1499,11 +1631,10 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
82935
82936 /* Clear old maps */
82937 error = -ENOMEM;
82938-munmap_back:
82939 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
82940 if (do_munmap(mm, addr, len))
82941 return -ENOMEM;
82942- goto munmap_back;
82943+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
82944 }
82945
82946 /*
82947@@ -1534,6 +1665,16 @@ munmap_back:
82948 goto unacct_error;
82949 }
82950
82951+#ifdef CONFIG_PAX_SEGMEXEC
82952+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vm_flags & VM_EXEC)) {
82953+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
82954+ if (!vma_m) {
82955+ error = -ENOMEM;
82956+ goto free_vma;
82957+ }
82958+ }
82959+#endif
82960+
82961 vma->vm_mm = mm;
82962 vma->vm_start = addr;
82963 vma->vm_end = addr + len;
82964@@ -1558,6 +1699,13 @@ munmap_back:
82965 if (error)
82966 goto unmap_and_free_vma;
82967
82968+#if defined(CONFIG_PAX_PAGEEXEC) && defined(CONFIG_X86_32)
82969+ if ((mm->pax_flags & MF_PAX_PAGEEXEC) && !(vma->vm_flags & VM_SPECIAL)) {
82970+ vma->vm_flags |= VM_PAGEEXEC;
82971+ vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
82972+ }
82973+#endif
82974+
82975 /* Can addr have changed??
82976 *
82977 * Answer: Yes, several device drivers can do it in their
82978@@ -1596,6 +1744,11 @@ munmap_back:
82979 vma_link(mm, vma, prev, rb_link, rb_parent);
82980 file = vma->vm_file;
82981
82982+#ifdef CONFIG_PAX_SEGMEXEC
82983+ if (vma_m)
82984+ BUG_ON(pax_mirror_vma(vma_m, vma));
82985+#endif
82986+
82987 /* Once vma denies write, undo our temporary denial count */
82988 if (correct_wcount)
82989 atomic_inc(&inode->i_writecount);
82990@@ -1603,6 +1756,7 @@ out:
82991 perf_event_mmap(vma);
82992
82993 vm_stat_account(mm, vm_flags, file, len >> PAGE_SHIFT);
82994+ track_exec_limit(mm, addr, addr + len, vm_flags);
82995 if (vm_flags & VM_LOCKED) {
82996 if (!((vm_flags & VM_SPECIAL) || is_vm_hugetlb_page(vma) ||
82997 vma == get_gate_vma(current->mm)))
82998@@ -1626,6 +1780,12 @@ unmap_and_free_vma:
82999 unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
83000 charged = 0;
83001 free_vma:
83002+
83003+#ifdef CONFIG_PAX_SEGMEXEC
83004+ if (vma_m)
83005+ kmem_cache_free(vm_area_cachep, vma_m);
83006+#endif
83007+
83008 kmem_cache_free(vm_area_cachep, vma);
83009 unacct_error:
83010 if (charged)
83011@@ -1633,7 +1793,63 @@ unacct_error:
83012 return error;
83013 }
83014
83015-unsigned long unmapped_area(struct vm_unmapped_area_info *info)
83016+#ifdef CONFIG_GRKERNSEC_RAND_THREADSTACK
83017+unsigned long gr_rand_threadstack_offset(const struct mm_struct *mm, const struct file *filp, unsigned long flags)
83018+{
83019+ if ((mm->pax_flags & MF_PAX_RANDMMAP) && !filp && (flags & MAP_STACK))
83020+ return ((prandom_u32() & 0xFF) + 1) << PAGE_SHIFT;
83021+
83022+ return 0;
83023+}
83024+#endif
83025+
83026+bool check_heap_stack_gap(const struct vm_area_struct *vma, unsigned long addr, unsigned long len, unsigned long offset)
83027+{
83028+ if (!vma) {
83029+#ifdef CONFIG_STACK_GROWSUP
83030+ if (addr > sysctl_heap_stack_gap)
83031+ vma = find_vma(current->mm, addr - sysctl_heap_stack_gap);
83032+ else
83033+ vma = find_vma(current->mm, 0);
83034+ if (vma && (vma->vm_flags & VM_GROWSUP))
83035+ return false;
83036+#endif
83037+ return true;
83038+ }
83039+
83040+ if (addr + len > vma->vm_start)
83041+ return false;
83042+
83043+ if (vma->vm_flags & VM_GROWSDOWN)
83044+ return sysctl_heap_stack_gap <= vma->vm_start - addr - len;
83045+#ifdef CONFIG_STACK_GROWSUP
83046+ else if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP))
83047+ return addr - vma->vm_prev->vm_end >= sysctl_heap_stack_gap;
83048+#endif
83049+ else if (offset)
83050+ return offset <= vma->vm_start - addr - len;
83051+
83052+ return true;
83053+}
83054+
83055+unsigned long skip_heap_stack_gap(const struct vm_area_struct *vma, unsigned long len, unsigned long offset)
83056+{
83057+ if (vma->vm_start < len)
83058+ return -ENOMEM;
83059+
83060+ if (!(vma->vm_flags & VM_GROWSDOWN)) {
83061+ if (offset <= vma->vm_start - len)
83062+ return vma->vm_start - len - offset;
83063+ else
83064+ return -ENOMEM;
83065+ }
83066+
83067+ if (sysctl_heap_stack_gap <= vma->vm_start - len)
83068+ return vma->vm_start - len - sysctl_heap_stack_gap;
83069+ return -ENOMEM;
83070+}
83071+
83072+unsigned long unmapped_area(const struct vm_unmapped_area_info *info)
83073 {
83074 /*
83075 * We implement the search by looking for an rbtree node that
83076@@ -1681,11 +1897,29 @@ unsigned long unmapped_area(struct vm_unmapped_area_info *info)
83077 }
83078 }
83079
83080- gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
83081+	gap_start = vma->vm_prev ? vma->vm_prev->vm_end : 0;
83082 check_current:
83083 /* Check if current node has a suitable gap */
83084 if (gap_start > high_limit)
83085 return -ENOMEM;
83086+
83087+ if (gap_end - gap_start > info->threadstack_offset)
83088+ gap_start += info->threadstack_offset;
83089+ else
83090+ gap_start = gap_end;
83091+
83092+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
83093+ if (gap_end - gap_start > sysctl_heap_stack_gap)
83094+ gap_start += sysctl_heap_stack_gap;
83095+ else
83096+ gap_start = gap_end;
83097+ }
83098+ if (vma->vm_flags & VM_GROWSDOWN) {
83099+ if (gap_end - gap_start > sysctl_heap_stack_gap)
83100+ gap_end -= sysctl_heap_stack_gap;
83101+ else
83102+ gap_end = gap_start;
83103+ }
83104 if (gap_end >= low_limit && gap_end - gap_start >= length)
83105 goto found;
83106
83107@@ -1735,7 +1969,7 @@ found:
83108 return gap_start;
83109 }
83110
83111-unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
83112+unsigned long unmapped_area_topdown(const struct vm_unmapped_area_info *info)
83113 {
83114 struct mm_struct *mm = current->mm;
83115 struct vm_area_struct *vma;
83116@@ -1789,6 +2023,24 @@ check_current:
83117 gap_end = vma->vm_start;
83118 if (gap_end < low_limit)
83119 return -ENOMEM;
83120+
83121+ if (gap_end - gap_start > info->threadstack_offset)
83122+ gap_end -= info->threadstack_offset;
83123+ else
83124+ gap_end = gap_start;
83125+
83126+ if (vma->vm_prev && (vma->vm_prev->vm_flags & VM_GROWSUP)) {
83127+ if (gap_end - gap_start > sysctl_heap_stack_gap)
83128+ gap_start += sysctl_heap_stack_gap;
83129+ else
83130+ gap_start = gap_end;
83131+ }
83132+ if (vma->vm_flags & VM_GROWSDOWN) {
83133+ if (gap_end - gap_start > sysctl_heap_stack_gap)
83134+ gap_end -= sysctl_heap_stack_gap;
83135+ else
83136+ gap_end = gap_start;
83137+ }
83138 if (gap_start <= high_limit && gap_end - gap_start >= length)
83139 goto found;
83140
83141@@ -1852,6 +2104,7 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
83142 struct mm_struct *mm = current->mm;
83143 struct vm_area_struct *vma;
83144 struct vm_unmapped_area_info info;
83145+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
83146
83147 if (len > TASK_SIZE)
83148 return -ENOMEM;
83149@@ -1859,29 +2112,45 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
83150 if (flags & MAP_FIXED)
83151 return addr;
83152
83153+#ifdef CONFIG_PAX_RANDMMAP
83154+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
83155+#endif
83156+
83157 if (addr) {
83158 addr = PAGE_ALIGN(addr);
83159 vma = find_vma(mm, addr);
83160- if (TASK_SIZE - len >= addr &&
83161- (!vma || addr + len <= vma->vm_start))
83162+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
83163 return addr;
83164 }
83165
83166 info.flags = 0;
83167 info.length = len;
83168 info.low_limit = TASK_UNMAPPED_BASE;
83169+
83170+#ifdef CONFIG_PAX_RANDMMAP
83171+ if (mm->pax_flags & MF_PAX_RANDMMAP)
83172+ info.low_limit += mm->delta_mmap;
83173+#endif
83174+
83175 info.high_limit = TASK_SIZE;
83176 info.align_mask = 0;
83177+ info.threadstack_offset = offset;
83178 return vm_unmapped_area(&info);
83179 }
83180 #endif
83181
83182 void arch_unmap_area(struct mm_struct *mm, unsigned long addr)
83183 {
83184+
83185+#ifdef CONFIG_PAX_SEGMEXEC
83186+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
83187+ return;
83188+#endif
83189+
83190 /*
83191 * Is this a new hole at the lowest possible address?
83192 */
83193- if (addr >= TASK_UNMAPPED_BASE && addr < mm->free_area_cache)
83194+ if (addr >= mm->mmap_base && addr < mm->free_area_cache)
83195 mm->free_area_cache = addr;
83196 }
83197
83198@@ -1899,6 +2168,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
83199 struct mm_struct *mm = current->mm;
83200 unsigned long addr = addr0;
83201 struct vm_unmapped_area_info info;
83202+ unsigned long offset = gr_rand_threadstack_offset(mm, filp, flags);
83203
83204 /* requested length too big for entire address space */
83205 if (len > TASK_SIZE)
83206@@ -1907,12 +2177,15 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
83207 if (flags & MAP_FIXED)
83208 return addr;
83209
83210+#ifdef CONFIG_PAX_RANDMMAP
83211+ if (!(mm->pax_flags & MF_PAX_RANDMMAP))
83212+#endif
83213+
83214 /* requesting a specific address */
83215 if (addr) {
83216 addr = PAGE_ALIGN(addr);
83217 vma = find_vma(mm, addr);
83218- if (TASK_SIZE - len >= addr &&
83219- (!vma || addr + len <= vma->vm_start))
83220+ if (TASK_SIZE - len >= addr && check_heap_stack_gap(vma, addr, len, offset))
83221 return addr;
83222 }
83223
83224@@ -1921,6 +2194,7 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
83225 info.low_limit = PAGE_SIZE;
83226 info.high_limit = mm->mmap_base;
83227 info.align_mask = 0;
83228+ info.threadstack_offset = offset;
83229 addr = vm_unmapped_area(&info);
83230
83231 /*
83232@@ -1933,6 +2207,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
83233 VM_BUG_ON(addr != -ENOMEM);
83234 info.flags = 0;
83235 info.low_limit = TASK_UNMAPPED_BASE;
83236+
83237+#ifdef CONFIG_PAX_RANDMMAP
83238+ if (mm->pax_flags & MF_PAX_RANDMMAP)
83239+ info.low_limit += mm->delta_mmap;
83240+#endif
83241+
83242 info.high_limit = TASK_SIZE;
83243 addr = vm_unmapped_area(&info);
83244 }
83245@@ -1943,6 +2223,12 @@ arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
83246
83247 void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
83248 {
83249+
83250+#ifdef CONFIG_PAX_SEGMEXEC
83251+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && SEGMEXEC_TASK_SIZE <= addr)
83252+ return;
83253+#endif
83254+
83255 /*
83256 * Is this a new hole at the highest possible address?
83257 */
83258@@ -1950,8 +2236,10 @@ void arch_unmap_area_topdown(struct mm_struct *mm, unsigned long addr)
83259 mm->free_area_cache = addr;
83260
83261 /* dont allow allocations above current base */
83262- if (mm->free_area_cache > mm->mmap_base)
83263+ if (mm->free_area_cache > mm->mmap_base) {
83264 mm->free_area_cache = mm->mmap_base;
83265+ mm->cached_hole_size = ~0UL;
83266+ }
83267 }
83268
83269 unsigned long
83270@@ -2047,6 +2335,28 @@ find_vma_prev(struct mm_struct *mm, unsigned long addr,
83271 return vma;
83272 }
83273
83274+#ifdef CONFIG_PAX_SEGMEXEC
83275+struct vm_area_struct *pax_find_mirror_vma(struct vm_area_struct *vma)
83276+{
83277+ struct vm_area_struct *vma_m;
83278+
83279+ BUG_ON(!vma || vma->vm_start >= vma->vm_end);
83280+ if (!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC)) {
83281+ BUG_ON(vma->vm_mirror);
83282+ return NULL;
83283+ }
83284+ BUG_ON(vma->vm_start < SEGMEXEC_TASK_SIZE && SEGMEXEC_TASK_SIZE < vma->vm_end);
83285+ vma_m = vma->vm_mirror;
83286+ BUG_ON(!vma_m || vma_m->vm_mirror != vma);
83287+ BUG_ON(vma->vm_file != vma_m->vm_file);
83288+ BUG_ON(vma->vm_end - vma->vm_start != vma_m->vm_end - vma_m->vm_start);
83289+ BUG_ON(vma->vm_pgoff != vma_m->vm_pgoff);
83290+ BUG_ON(vma->anon_vma != vma_m->anon_vma && vma->anon_vma->root != vma_m->anon_vma->root);
83291+ BUG_ON((vma->vm_flags ^ vma_m->vm_flags) & ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED));
83292+ return vma_m;
83293+}
83294+#endif
83295+
83296 /*
83297 * Verify that the stack growth is acceptable and
83298 * update accounting. This is shared with both the
83299@@ -2063,6 +2373,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
83300 return -ENOMEM;
83301
83302 /* Stack limit test */
83303+ gr_learn_resource(current, RLIMIT_STACK, size, 1);
83304 if (size > ACCESS_ONCE(rlim[RLIMIT_STACK].rlim_cur))
83305 return -ENOMEM;
83306
83307@@ -2073,6 +2384,7 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
83308 locked = mm->locked_vm + grow;
83309 limit = ACCESS_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
83310 limit >>= PAGE_SHIFT;
83311+ gr_learn_resource(current, RLIMIT_MEMLOCK, locked << PAGE_SHIFT, 1);
83312 if (locked > limit && !capable(CAP_IPC_LOCK))
83313 return -ENOMEM;
83314 }
83315@@ -2102,37 +2414,48 @@ static int acct_stack_growth(struct vm_area_struct *vma, unsigned long size, uns
83316 * PA-RISC uses this for its stack; IA64 for its Register Backing Store.
83317 * vma is the last one with address > vma->vm_end. Have to extend vma.
83318 */
83319+#ifndef CONFIG_IA64
83320+static
83321+#endif
83322 int expand_upwards(struct vm_area_struct *vma, unsigned long address)
83323 {
83324 int error;
83325+ bool locknext;
83326
83327 if (!(vma->vm_flags & VM_GROWSUP))
83328 return -EFAULT;
83329
83330+ /* Also guard against wrapping around to address 0. */
83331+ if (address < PAGE_ALIGN(address+1))
83332+ address = PAGE_ALIGN(address+1);
83333+ else
83334+ return -ENOMEM;
83335+
83336 /*
83337 * We must make sure the anon_vma is allocated
83338 * so that the anon_vma locking is not a noop.
83339 */
83340 if (unlikely(anon_vma_prepare(vma)))
83341 return -ENOMEM;
83342+ locknext = vma->vm_next && (vma->vm_next->vm_flags & VM_GROWSDOWN);
83343+ if (locknext && anon_vma_prepare(vma->vm_next))
83344+ return -ENOMEM;
83345 vma_lock_anon_vma(vma);
83346+ if (locknext)
83347+ vma_lock_anon_vma(vma->vm_next);
83348
83349 /*
83350 * vma->vm_start/vm_end cannot change under us because the caller
83351 * is required to hold the mmap_sem in read mode. We need the
83352- * anon_vma lock to serialize against concurrent expand_stacks.
83353- * Also guard against wrapping around to address 0.
83354+ * anon_vma locks to serialize against concurrent expand_stacks
83355+ * and expand_upwards.
83356 */
83357- if (address < PAGE_ALIGN(address+4))
83358- address = PAGE_ALIGN(address+4);
83359- else {
83360- vma_unlock_anon_vma(vma);
83361- return -ENOMEM;
83362- }
83363 error = 0;
83364
83365 /* Somebody else might have raced and expanded it already */
83366- if (address > vma->vm_end) {
83367+ if (vma->vm_next && (vma->vm_next->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && vma->vm_next->vm_start - address < sysctl_heap_stack_gap)
83368+ error = -ENOMEM;
83369+ else if (address > vma->vm_end && (!locknext || vma->vm_next->vm_start >= address)) {
83370 unsigned long size, grow;
83371
83372 size = address - vma->vm_start;
83373@@ -2167,6 +2490,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
83374 }
83375 }
83376 }
83377+ if (locknext)
83378+ vma_unlock_anon_vma(vma->vm_next);
83379 vma_unlock_anon_vma(vma);
83380 khugepaged_enter_vma_merge(vma);
83381 validate_mm(vma->vm_mm);
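
expand_upwards() now page-aligns the target, refuses a wrap past address 0, and keeps the stack sysctl_heap_stack_gap short of the next mapping. A sketch of just those two guards; the kernel additionally restricts the gap test to accessible (VM_READ|VM_WRITE|VM_EXEC) neighbours and takes the extra anon_vma lock, both omitted here:

#include <stdbool.h>
#include <stdio.h>

#define PAGE_SIZE_DEMO	4096UL
#define HEAP_STACK_GAP	(64UL * 1024)

static unsigned long page_align(unsigned long x)
{
	return (x + PAGE_SIZE_DEMO - 1) & ~(PAGE_SIZE_DEMO - 1);
}

static bool may_expand_up(unsigned long address, bool have_next,
			  unsigned long next_start)
{
	if (address >= page_align(address + 1))
		return false;			/* alignment would wrap past 0 */
	address = page_align(address + 1);
	/* assumes address < next_start when a next mapping exists */
	if (have_next && next_start - address < HEAP_STACK_GAP)
		return false;			/* would land inside the guard gap */
	return true;
}

int main(void)
{
	printf("%d\n", may_expand_up(0x7f0000UL, true, 0x900000UL)); /* room: yes */
	printf("%d\n", may_expand_up(0x7f8000UL, true, 0x800000UL)); /* gap: no  */
	return 0;
}
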
83382@@ -2181,6 +2506,8 @@ int expand_downwards(struct vm_area_struct *vma,
83383 unsigned long address)
83384 {
83385 int error;
83386+ bool lockprev = false;
83387+ struct vm_area_struct *prev;
83388
83389 /*
83390 * We must make sure the anon_vma is allocated
83391@@ -2194,6 +2521,15 @@ int expand_downwards(struct vm_area_struct *vma,
83392 if (error)
83393 return error;
83394
83395+ prev = vma->vm_prev;
83396+#if defined(CONFIG_STACK_GROWSUP) || defined(CONFIG_IA64)
83397+ lockprev = prev && (prev->vm_flags & VM_GROWSUP);
83398+#endif
83399+ if (lockprev && anon_vma_prepare(prev))
83400+ return -ENOMEM;
83401+ if (lockprev)
83402+ vma_lock_anon_vma(prev);
83403+
83404 vma_lock_anon_vma(vma);
83405
83406 /*
83407@@ -2203,9 +2539,17 @@ int expand_downwards(struct vm_area_struct *vma,
83408 */
83409
83410 /* Somebody else might have raced and expanded it already */
83411- if (address < vma->vm_start) {
83412+ if (prev && (prev->vm_flags & (VM_READ | VM_WRITE | VM_EXEC)) && address - prev->vm_end < sysctl_heap_stack_gap)
83413+ error = -ENOMEM;
83414+ else if (address < vma->vm_start && (!lockprev || prev->vm_end <= address)) {
83415 unsigned long size, grow;
83416
83417+#ifdef CONFIG_PAX_SEGMEXEC
83418+ struct vm_area_struct *vma_m;
83419+
83420+ vma_m = pax_find_mirror_vma(vma);
83421+#endif
83422+
83423 size = vma->vm_end - address;
83424 grow = (vma->vm_start - address) >> PAGE_SHIFT;
83425
83426@@ -2230,13 +2574,27 @@ int expand_downwards(struct vm_area_struct *vma,
83427 vma->vm_pgoff -= grow;
83428 anon_vma_interval_tree_post_update_vma(vma);
83429 vma_gap_update(vma);
83430+
83431+#ifdef CONFIG_PAX_SEGMEXEC
83432+ if (vma_m) {
83433+ anon_vma_interval_tree_pre_update_vma(vma_m);
83434+ vma_m->vm_start -= grow << PAGE_SHIFT;
83435+ vma_m->vm_pgoff -= grow;
83436+ anon_vma_interval_tree_post_update_vma(vma_m);
83437+ vma_gap_update(vma_m);
83438+ }
83439+#endif
83440+
83441 spin_unlock(&vma->vm_mm->page_table_lock);
83442
83443+ track_exec_limit(vma->vm_mm, vma->vm_start, vma->vm_end, vma->vm_flags);
83444 perf_event_mmap(vma);
83445 }
83446 }
83447 }
83448 vma_unlock_anon_vma(vma);
83449+ if (lockprev)
83450+ vma_unlock_anon_vma(prev);
83451 khugepaged_enter_vma_merge(vma);
83452 validate_mm(vma->vm_mm);
83453 return error;
83454@@ -2334,6 +2692,13 @@ static void remove_vma_list(struct mm_struct *mm, struct vm_area_struct *vma)
83455 do {
83456 long nrpages = vma_pages(vma);
83457
83458+#ifdef CONFIG_PAX_SEGMEXEC
83459+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_start >= SEGMEXEC_TASK_SIZE)) {
83460+ vma = remove_vma(vma);
83461+ continue;
83462+ }
83463+#endif
83464+
83465 if (vma->vm_flags & VM_ACCOUNT)
83466 nr_accounted += nrpages;
83467 vm_stat_account(mm, vma->vm_flags, vma->vm_file, -nrpages);
83468@@ -2379,6 +2744,16 @@ detach_vmas_to_be_unmapped(struct mm_struct *mm, struct vm_area_struct *vma,
83469 insertion_point = (prev ? &prev->vm_next : &mm->mmap);
83470 vma->vm_prev = NULL;
83471 do {
83472+
83473+#ifdef CONFIG_PAX_SEGMEXEC
83474+ if (vma->vm_mirror) {
83475+ BUG_ON(!vma->vm_mirror->vm_mirror || vma->vm_mirror->vm_mirror != vma);
83476+ vma->vm_mirror->vm_mirror = NULL;
83477+ vma->vm_mirror->vm_flags &= ~VM_EXEC;
83478+ vma->vm_mirror = NULL;
83479+ }
83480+#endif
83481+
83482 vma_rb_erase(vma, &mm->mm_rb);
83483 mm->map_count--;
83484 tail_vma = vma;
83485@@ -2410,14 +2785,33 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
83486 struct vm_area_struct *new;
83487 int err = -ENOMEM;
83488
83489+#ifdef CONFIG_PAX_SEGMEXEC
83490+ struct vm_area_struct *vma_m, *new_m = NULL;
83491+ unsigned long addr_m = addr + SEGMEXEC_TASK_SIZE;
83492+#endif
83493+
83494 if (is_vm_hugetlb_page(vma) && (addr &
83495 ~(huge_page_mask(hstate_vma(vma)))))
83496 return -EINVAL;
83497
83498+#ifdef CONFIG_PAX_SEGMEXEC
83499+ vma_m = pax_find_mirror_vma(vma);
83500+#endif
83501+
83502 new = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
83503 if (!new)
83504 goto out_err;
83505
83506+#ifdef CONFIG_PAX_SEGMEXEC
83507+ if (vma_m) {
83508+ new_m = kmem_cache_alloc(vm_area_cachep, GFP_KERNEL);
83509+ if (!new_m) {
83510+ kmem_cache_free(vm_area_cachep, new);
83511+ goto out_err;
83512+ }
83513+ }
83514+#endif
83515+
83516 /* most fields are the same, copy all, and then fixup */
83517 *new = *vma;
83518
83519@@ -2430,6 +2824,22 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
83520 new->vm_pgoff += ((addr - vma->vm_start) >> PAGE_SHIFT);
83521 }
83522
83523+#ifdef CONFIG_PAX_SEGMEXEC
83524+ if (vma_m) {
83525+ *new_m = *vma_m;
83526+ INIT_LIST_HEAD(&new_m->anon_vma_chain);
83527+ new_m->vm_mirror = new;
83528+ new->vm_mirror = new_m;
83529+
83530+ if (new_below)
83531+ new_m->vm_end = addr_m;
83532+ else {
83533+ new_m->vm_start = addr_m;
83534+ new_m->vm_pgoff += ((addr_m - vma_m->vm_start) >> PAGE_SHIFT);
83535+ }
83536+ }
83537+#endif
83538+
83539 pol = mpol_dup(vma_policy(vma));
83540 if (IS_ERR(pol)) {
83541 err = PTR_ERR(pol);
83542@@ -2452,6 +2862,36 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
83543 else
83544 err = vma_adjust(vma, vma->vm_start, addr, vma->vm_pgoff, new);
83545
83546+#ifdef CONFIG_PAX_SEGMEXEC
83547+ if (!err && vma_m) {
83548+ if (anon_vma_clone(new_m, vma_m))
83549+ goto out_free_mpol;
83550+
83551+ mpol_get(pol);
83552+ vma_set_policy(new_m, pol);
83553+
83554+ if (new_m->vm_file)
83555+ get_file(new_m->vm_file);
83556+
83557+ if (new_m->vm_ops && new_m->vm_ops->open)
83558+ new_m->vm_ops->open(new_m);
83559+
83560+ if (new_below)
83561+ err = vma_adjust(vma_m, addr_m, vma_m->vm_end, vma_m->vm_pgoff +
83562+ ((addr_m - new_m->vm_start) >> PAGE_SHIFT), new_m);
83563+ else
83564+ err = vma_adjust(vma_m, vma_m->vm_start, addr_m, vma_m->vm_pgoff, new_m);
83565+
83566+ if (err) {
83567+ if (new_m->vm_ops && new_m->vm_ops->close)
83568+ new_m->vm_ops->close(new_m);
83569+ if (new_m->vm_file)
83570+ fput(new_m->vm_file);
83571+ mpol_put(pol);
83572+ }
83573+ }
83574+#endif
83575+
83576 /* Success. */
83577 if (!err)
83578 return 0;
83579@@ -2461,10 +2901,18 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
83580 new->vm_ops->close(new);
83581 if (new->vm_file)
83582 fput(new->vm_file);
83583- unlink_anon_vmas(new);
83584 out_free_mpol:
83585 mpol_put(pol);
83586 out_free_vma:
83587+
83588+#ifdef CONFIG_PAX_SEGMEXEC
83589+ if (new_m) {
83590+ unlink_anon_vmas(new_m);
83591+ kmem_cache_free(vm_area_cachep, new_m);
83592+ }
83593+#endif
83594+
83595+ unlink_anon_vmas(new);
83596 kmem_cache_free(vm_area_cachep, new);
83597 out_err:
83598 return err;
83599@@ -2477,6 +2925,15 @@ static int __split_vma(struct mm_struct * mm, struct vm_area_struct * vma,
83600 int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
83601 unsigned long addr, int new_below)
83602 {
83603+
83604+#ifdef CONFIG_PAX_SEGMEXEC
83605+ if (mm->pax_flags & MF_PAX_SEGMEXEC) {
83606+ BUG_ON(vma->vm_end > SEGMEXEC_TASK_SIZE);
83607+ if (mm->map_count >= sysctl_max_map_count-1)
83608+ return -ENOMEM;
83609+ } else
83610+#endif
83611+
83612 if (mm->map_count >= sysctl_max_map_count)
83613 return -ENOMEM;
83614
83615@@ -2488,11 +2945,30 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
83616 * work. This now handles partial unmappings.
83617 * Jeremy Fitzhardinge <jeremy@goop.org>
83618 */
83619+#ifdef CONFIG_PAX_SEGMEXEC
83620 int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
83621 {
83622+ int ret = __do_munmap(mm, start, len);
83623+ if (ret || !(mm->pax_flags & MF_PAX_SEGMEXEC))
83624+ return ret;
83625+
83626+ return __do_munmap(mm, start + SEGMEXEC_TASK_SIZE, len);
83627+}
83628+
83629+int __do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
83630+#else
83631+int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
83632+#endif
83633+{
83634 unsigned long end;
83635 struct vm_area_struct *vma, *prev, *last;
83636
83637+ /*
83638+ * mm->mmap_sem is required to protect against another thread
83639+ * changing the mappings in case we sleep.
83640+ */
83641+ verify_mm_writelocked(mm);
83642+
83643 if ((start & ~PAGE_MASK) || start > TASK_SIZE || len > TASK_SIZE-start)
83644 return -EINVAL;
83645
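
Under SEGMEXEC, do_munmap() above becomes a wrapper that replays each successful unmap at start + SEGMEXEC_TASK_SIZE, so the executable mirror is torn down together with the data mapping. A userspace model of the wrapper; SEGMEXEC_TASK_SIZE_DEMO and unmap_range() are illustrative stand-ins, not kernel API:

#include <stdio.h>

#define SEGMEXEC_TASK_SIZE_DEMO	0x60000000UL	/* illustrative 1.5 GiB split */

static int unmap_range(unsigned long start, unsigned long len)
{
	printf("unmap [%#lx, %#lx)\n", start, start + len);
	return 0;				/* pretend success */
}

static int do_munmap_segmexec(unsigned long start, unsigned long len,
			      int segmexec_enabled)
{
	int ret = unmap_range(start, len);

	if (ret || !segmexec_enabled)
		return ret;
	/* drop the mirror in the upper half of the split address space */
	return unmap_range(start + SEGMEXEC_TASK_SIZE_DEMO, len);
}

int main(void)
{
	return do_munmap_segmexec(0x10000000UL, 0x4000UL, 1);
}
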
83646@@ -2567,6 +3043,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
83647 /* Fix up all other VM information */
83648 remove_vma_list(mm, vma);
83649
83650+ track_exec_limit(mm, start, end, 0UL);
83651+
83652 return 0;
83653 }
83654
83655@@ -2575,6 +3053,13 @@ int vm_munmap(unsigned long start, size_t len)
83656 int ret;
83657 struct mm_struct *mm = current->mm;
83658
83659+
83660+#ifdef CONFIG_PAX_SEGMEXEC
83661+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) &&
83662+ (len > SEGMEXEC_TASK_SIZE || start > SEGMEXEC_TASK_SIZE-len))
83663+ return -EINVAL;
83664+#endif
83665+
83666 down_write(&mm->mmap_sem);
83667 ret = do_munmap(mm, start, len);
83668 up_write(&mm->mmap_sem);
83669@@ -2588,16 +3073,6 @@ SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
83670 return vm_munmap(addr, len);
83671 }
83672
83673-static inline void verify_mm_writelocked(struct mm_struct *mm)
83674-{
83675-#ifdef CONFIG_DEBUG_VM
83676- if (unlikely(down_read_trylock(&mm->mmap_sem))) {
83677- WARN_ON(1);
83678- up_read(&mm->mmap_sem);
83679- }
83680-#endif
83681-}
83682-
83683 /*
83684 * this is really a simplified "do_mmap". it only handles
83685 * anonymous maps. eventually we may be able to do some
83686@@ -2611,6 +3086,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
83687 struct rb_node ** rb_link, * rb_parent;
83688 pgoff_t pgoff = addr >> PAGE_SHIFT;
83689 int error;
83690+ unsigned long charged;
83691
83692 len = PAGE_ALIGN(len);
83693 if (!len)
83694@@ -2618,16 +3094,30 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
83695
83696 flags = VM_DATA_DEFAULT_FLAGS | VM_ACCOUNT | mm->def_flags;
83697
83698+#if defined(CONFIG_PAX_PAGEEXEC) || defined(CONFIG_PAX_SEGMEXEC)
83699+ if (mm->pax_flags & (MF_PAX_PAGEEXEC | MF_PAX_SEGMEXEC)) {
83700+ flags &= ~VM_EXEC;
83701+
83702+#ifdef CONFIG_PAX_MPROTECT
83703+ if (mm->pax_flags & MF_PAX_MPROTECT)
83704+ flags &= ~VM_MAYEXEC;
83705+#endif
83706+
83707+ }
83708+#endif
83709+
83710 error = get_unmapped_area(NULL, addr, len, 0, MAP_FIXED);
83711 if (error & ~PAGE_MASK)
83712 return error;
83713
83714+ charged = len >> PAGE_SHIFT;
83715+
83716 /*
83717 * mlock MCL_FUTURE?
83718 */
83719 if (mm->def_flags & VM_LOCKED) {
83720 unsigned long locked, lock_limit;
83721- locked = len >> PAGE_SHIFT;
83722+ locked = charged;
83723 locked += mm->locked_vm;
83724 lock_limit = rlimit(RLIMIT_MEMLOCK);
83725 lock_limit >>= PAGE_SHIFT;
83726@@ -2644,21 +3134,20 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
83727 /*
83728 * Clear old maps. this also does some error checking for us
83729 */
83730- munmap_back:
83731 if (find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent)) {
83732 if (do_munmap(mm, addr, len))
83733 return -ENOMEM;
83734- goto munmap_back;
83735+ BUG_ON(find_vma_links(mm, addr, addr + len, &prev, &rb_link, &rb_parent));
83736 }
83737
83738 /* Check against address space limits *after* clearing old maps... */
83739- if (!may_expand_vm(mm, len >> PAGE_SHIFT))
83740+ if (!may_expand_vm(mm, charged))
83741 return -ENOMEM;
83742
83743 if (mm->map_count > sysctl_max_map_count)
83744 return -ENOMEM;
83745
83746- if (security_vm_enough_memory_mm(mm, len >> PAGE_SHIFT))
83747+ if (security_vm_enough_memory_mm(mm, charged))
83748 return -ENOMEM;
83749
83750 /* Can we just expand an old private anonymous mapping? */
83751@@ -2672,7 +3161,7 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
83752 */
83753 vma = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
83754 if (!vma) {
83755- vm_unacct_memory(len >> PAGE_SHIFT);
83756+ vm_unacct_memory(charged);
83757 return -ENOMEM;
83758 }
83759
83760@@ -2686,9 +3175,10 @@ static unsigned long do_brk(unsigned long addr, unsigned long len)
83761 vma_link(mm, vma, prev, rb_link, rb_parent);
83762 out:
83763 perf_event_mmap(vma);
83764- mm->total_vm += len >> PAGE_SHIFT;
83765+ mm->total_vm += charged;
83766 if (flags & VM_LOCKED)
83767- mm->locked_vm += (len >> PAGE_SHIFT);
83768+ mm->locked_vm += charged;
83769+ track_exec_limit(mm, addr, addr + len, flags);
83770 return addr;
83771 }
83772
83773@@ -2750,6 +3240,7 @@ void exit_mmap(struct mm_struct *mm)
83774 while (vma) {
83775 if (vma->vm_flags & VM_ACCOUNT)
83776 nr_accounted += vma_pages(vma);
83777+ vma->vm_mirror = NULL;
83778 vma = remove_vma(vma);
83779 }
83780 vm_unacct_memory(nr_accounted);
83781@@ -2766,6 +3257,13 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
83782 struct vm_area_struct *prev;
83783 struct rb_node **rb_link, *rb_parent;
83784
83785+#ifdef CONFIG_PAX_SEGMEXEC
83786+ struct vm_area_struct *vma_m = NULL;
83787+#endif
83788+
83789+ if (security_mmap_addr(vma->vm_start))
83790+ return -EPERM;
83791+
83792 /*
83793 * The vm_pgoff of a purely anonymous vma should be irrelevant
83794 * until its first write fault, when page's anon_vma and index
83795@@ -2789,7 +3287,21 @@ int insert_vm_struct(struct mm_struct *mm, struct vm_area_struct *vma)
83796 security_vm_enough_memory_mm(mm, vma_pages(vma)))
83797 return -ENOMEM;
83798
83799+#ifdef CONFIG_PAX_SEGMEXEC
83800+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (vma->vm_flags & VM_EXEC)) {
83801+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
83802+ if (!vma_m)
83803+ return -ENOMEM;
83804+ }
83805+#endif
83806+
83807 vma_link(mm, vma, prev, rb_link, rb_parent);
83808+
83809+#ifdef CONFIG_PAX_SEGMEXEC
83810+ if (vma_m)
83811+ BUG_ON(pax_mirror_vma(vma_m, vma));
83812+#endif
83813+
83814 return 0;
83815 }
83816
83817@@ -2809,6 +3321,8 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
83818 struct mempolicy *pol;
83819 bool faulted_in_anon_vma = true;
83820
83821+ BUG_ON(vma->vm_mirror);
83822+
83823 /*
83824 * If anonymous vma has not yet been faulted, update new pgoff
83825 * to match new location, to increase its chance of merging.
83826@@ -2875,6 +3389,39 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
83827 return NULL;
83828 }
83829
83830+#ifdef CONFIG_PAX_SEGMEXEC
83831+long pax_mirror_vma(struct vm_area_struct *vma_m, struct vm_area_struct *vma)
83832+{
83833+ struct vm_area_struct *prev_m;
83834+ struct rb_node **rb_link_m, *rb_parent_m;
83835+ struct mempolicy *pol_m;
83836+
83837+ BUG_ON(!(vma->vm_mm->pax_flags & MF_PAX_SEGMEXEC) || !(vma->vm_flags & VM_EXEC));
83838+ BUG_ON(vma->vm_mirror || vma_m->vm_mirror);
83839+ BUG_ON(!mpol_equal(vma_policy(vma), vma_policy(vma_m)));
83840+ *vma_m = *vma;
83841+ INIT_LIST_HEAD(&vma_m->anon_vma_chain);
83842+ if (anon_vma_clone(vma_m, vma))
83843+ return -ENOMEM;
83844+ pol_m = vma_policy(vma_m);
83845+ mpol_get(pol_m);
83846+ vma_set_policy(vma_m, pol_m);
83847+ vma_m->vm_start += SEGMEXEC_TASK_SIZE;
83848+ vma_m->vm_end += SEGMEXEC_TASK_SIZE;
83849+ vma_m->vm_flags &= ~(VM_WRITE | VM_MAYWRITE | VM_ACCOUNT | VM_LOCKED);
83850+ vma_m->vm_page_prot = vm_get_page_prot(vma_m->vm_flags);
83851+ if (vma_m->vm_file)
83852+ get_file(vma_m->vm_file);
83853+ if (vma_m->vm_ops && vma_m->vm_ops->open)
83854+ vma_m->vm_ops->open(vma_m);
83855+ BUG_ON(find_vma_links(vma->vm_mm, vma_m->vm_start, vma_m->vm_end, &prev_m, &rb_link_m, &rb_parent_m));
83856+ vma_link(vma->vm_mm, vma_m, prev_m, rb_link_m, rb_parent_m);
83857+ vma_m->vm_mirror = vma;
83858+ vma->vm_mirror = vma_m;
83859+ return 0;
83860+}
83861+#endif
83862+
83863 /*
83864 * Return true if the calling process may expand its vm space by the passed
83865 * number of pages
83866@@ -2886,6 +3433,7 @@ int may_expand_vm(struct mm_struct *mm, unsigned long npages)
83867
83868 lim = rlimit(RLIMIT_AS) >> PAGE_SHIFT;
83869
83870+ gr_learn_resource(current, RLIMIT_AS, (cur + npages) << PAGE_SHIFT, 1);
83871 if (cur + npages > lim)
83872 return 0;
83873 return 1;
83874@@ -2956,6 +3504,22 @@ int install_special_mapping(struct mm_struct *mm,
83875 vma->vm_start = addr;
83876 vma->vm_end = addr + len;
83877
83878+#ifdef CONFIG_PAX_MPROTECT
83879+ if (mm->pax_flags & MF_PAX_MPROTECT) {
83880+#ifndef CONFIG_PAX_MPROTECT_COMPAT
83881+ if ((vm_flags & (VM_WRITE | VM_EXEC)) == (VM_WRITE | VM_EXEC))
83882+ return -EPERM;
83883+ if (!(vm_flags & VM_EXEC))
83884+ vm_flags &= ~VM_MAYEXEC;
83885+#else
83886+ if ((vm_flags & (VM_WRITE | VM_EXEC)) != VM_EXEC)
83887+ vm_flags &= ~(VM_EXEC | VM_MAYEXEC);
83888+#endif
83889+ else
83890+ vm_flags &= ~VM_MAYWRITE;
83891+ }
83892+#endif
83893+
83894 vma->vm_flags = vm_flags | mm->def_flags | VM_DONTEXPAND;
83895 vma->vm_page_prot = vm_get_page_prot(vma->vm_flags);
83896
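
The mm/mmap.c changes center on vma mirroring: pax_mirror_vma() links every executable mapping to a non-writable twin SEGMEXEC_TASK_SIZE higher, and pax_find_mirror_vma() BUG_ON()s the pairing invariants. A reduced userspace model of those invariants; the struct fields and the constant are stand-ins for the real vm_area_struct and per-arch value:

/* Model of the vma/mirror invariants enforced above: equal size, equal file
 * offset, the mirror sits exactly SEGMEXEC_TASK_SIZE higher, and the mirror
 * never carries write permission. */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

#define SEGMEXEC_TASK_SIZE_DEMO	0x60000000UL

struct demo_vma {
	unsigned long start, end, pgoff;
	int writable;
	struct demo_vma *mirror;
};

static void mirror_vma(struct demo_vma *m, struct demo_vma *v)
{
	*m = *v;
	m->start += SEGMEXEC_TASK_SIZE_DEMO;
	m->end += SEGMEXEC_TASK_SIZE_DEMO;
	m->writable = 0;		/* the exec mirror is never writable */
	m->mirror = v;
	v->mirror = m;
}

static void check_pair(const struct demo_vma *v)
{
	const struct demo_vma *m = v->mirror;

	assert(m && m->mirror == v);
	assert(m->end - m->start == v->end - v->start);
	assert(m->pgoff == v->pgoff);
	assert(m->start == v->start + SEGMEXEC_TASK_SIZE_DEMO);
	assert(!m->writable);
}

int main(void)
{
	struct demo_vma v = { 0x08048000UL, 0x08050000UL, 0UL, 1, NULL }, m;

	mirror_vma(&m, &v);
	check_pair(&v);
	puts("mirror invariants hold");
	return 0;
}
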
83897diff --git a/mm/mprotect.c b/mm/mprotect.c
83898index 94722a4..07d9926 100644
83899--- a/mm/mprotect.c
83900+++ b/mm/mprotect.c
83901@@ -23,10 +23,18 @@
83902 #include <linux/mmu_notifier.h>
83903 #include <linux/migrate.h>
83904 #include <linux/perf_event.h>
83905+#include <linux/sched/sysctl.h>
83906+
83907+#ifdef CONFIG_PAX_MPROTECT
83908+#include <linux/elf.h>
83909+#include <linux/binfmts.h>
83910+#endif
83911+
83912 #include <asm/uaccess.h>
83913 #include <asm/pgtable.h>
83914 #include <asm/cacheflush.h>
83915 #include <asm/tlbflush.h>
83916+#include <asm/mmu_context.h>
83917
83918 #ifndef pgprot_modify
83919 static inline pgprot_t pgprot_modify(pgprot_t oldprot, pgprot_t newprot)
83920@@ -233,6 +241,48 @@ unsigned long change_protection(struct vm_area_struct *vma, unsigned long start,
83921 return pages;
83922 }
83923
83924+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
83925+/* called while holding the mmap semaphore for writing, except during stack expansion */
83926+void track_exec_limit(struct mm_struct *mm, unsigned long start, unsigned long end, unsigned long prot)
83927+{
83928+ unsigned long oldlimit, newlimit = 0UL;
83929+
83930+ if (!(mm->pax_flags & MF_PAX_PAGEEXEC) || (__supported_pte_mask & _PAGE_NX))
83931+ return;
83932+
83933+ spin_lock(&mm->page_table_lock);
83934+ oldlimit = mm->context.user_cs_limit;
83935+ if ((prot & VM_EXEC) && oldlimit < end)
83936+ /* USER_CS limit moved up */
83937+ newlimit = end;
83938+ else if (!(prot & VM_EXEC) && start < oldlimit && oldlimit <= end)
83939+ /* USER_CS limit moved down */
83940+ newlimit = start;
83941+
83942+ if (newlimit) {
83943+ mm->context.user_cs_limit = newlimit;
83944+
83945+#ifdef CONFIG_SMP
83946+ wmb();
83947+ cpus_clear(mm->context.cpu_user_cs_mask);
83948+ cpu_set(smp_processor_id(), mm->context.cpu_user_cs_mask);
83949+#endif
83950+
83951+ set_user_cs(mm->context.user_cs_base, mm->context.user_cs_limit, smp_processor_id());
83952+ }
83953+ spin_unlock(&mm->page_table_lock);
83954+ if (newlimit == end) {
83955+ struct vm_area_struct *vma = find_vma(mm, oldlimit);
83956+
83957+ for (; vma && vma->vm_start < end; vma = vma->vm_next)
83958+ if (is_vm_hugetlb_page(vma))
83959+ hugetlb_change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot);
83960+ else
83961+ change_protection(vma, vma->vm_start, vma->vm_end, vma->vm_page_prot, vma_wants_writenotify(vma), 0);
83962+ }
83963+}
83964+#endif
83965+
83966 int
83967 mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
83968 unsigned long start, unsigned long end, unsigned long newflags)
83969@@ -245,11 +295,29 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
83970 int error;
83971 int dirty_accountable = 0;
83972
83973+#ifdef CONFIG_PAX_SEGMEXEC
83974+ struct vm_area_struct *vma_m = NULL;
83975+ unsigned long start_m, end_m;
83976+
83977+ start_m = start + SEGMEXEC_TASK_SIZE;
83978+ end_m = end + SEGMEXEC_TASK_SIZE;
83979+#endif
83980+
83981 if (newflags == oldflags) {
83982 *pprev = vma;
83983 return 0;
83984 }
83985
83986+ if (newflags & (VM_READ | VM_WRITE | VM_EXEC)) {
83987+ struct vm_area_struct *prev = vma->vm_prev, *next = vma->vm_next;
83988+
83989+ if (next && (next->vm_flags & VM_GROWSDOWN) && sysctl_heap_stack_gap > next->vm_start - end)
83990+ return -ENOMEM;
83991+
83992+ if (prev && (prev->vm_flags & VM_GROWSUP) && sysctl_heap_stack_gap > start - prev->vm_end)
83993+ return -ENOMEM;
83994+ }
83995+
83996 /*
83997 * If we make a private mapping writable we increase our commit;
83998 * but (without finer accounting) cannot reduce our commit if we
83999@@ -266,6 +334,42 @@ mprotect_fixup(struct vm_area_struct *vma, struct vm_area_struct **pprev,
84000 }
84001 }
84002
84003+#ifdef CONFIG_PAX_SEGMEXEC
84004+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && ((oldflags ^ newflags) & VM_EXEC)) {
84005+ if (start != vma->vm_start) {
84006+ error = split_vma(mm, vma, start, 1);
84007+ if (error)
84008+ goto fail;
84009+ BUG_ON(!*pprev || (*pprev)->vm_next == vma);
84010+ *pprev = (*pprev)->vm_next;
84011+ }
84012+
84013+ if (end != vma->vm_end) {
84014+ error = split_vma(mm, vma, end, 0);
84015+ if (error)
84016+ goto fail;
84017+ }
84018+
84019+ if (pax_find_mirror_vma(vma)) {
84020+ error = __do_munmap(mm, start_m, end_m - start_m);
84021+ if (error)
84022+ goto fail;
84023+ } else {
84024+ vma_m = kmem_cache_zalloc(vm_area_cachep, GFP_KERNEL);
84025+ if (!vma_m) {
84026+ error = -ENOMEM;
84027+ goto fail;
84028+ }
84029+ vma->vm_flags = newflags;
84030+ error = pax_mirror_vma(vma_m, vma);
84031+ if (error) {
84032+ vma->vm_flags = oldflags;
84033+ goto fail;
84034+ }
84035+ }
84036+ }
84037+#endif
84038+
84039 /*
84040 * First try to merge with previous and/or next vma.
84041 */
84042@@ -296,9 +400,21 @@ success:
84043 * vm_flags and vm_page_prot are protected by the mmap_sem
84044 * held in write mode.
84045 */
84046+
84047+#ifdef CONFIG_PAX_SEGMEXEC
84048+ if ((mm->pax_flags & MF_PAX_SEGMEXEC) && (newflags & VM_EXEC) && ((vma->vm_flags ^ newflags) & VM_READ))
84049+ pax_find_mirror_vma(vma)->vm_flags ^= VM_READ;
84050+#endif
84051+
84052 vma->vm_flags = newflags;
84053+
84054+#ifdef CONFIG_PAX_MPROTECT
84055+ if (mm->binfmt && mm->binfmt->handle_mprotect)
84056+ mm->binfmt->handle_mprotect(vma, newflags);
84057+#endif
84058+
84059 vma->vm_page_prot = pgprot_modify(vma->vm_page_prot,
84060- vm_get_page_prot(newflags));
84061+ vm_get_page_prot(vma->vm_flags));
84062
84063 if (vma_wants_writenotify(vma)) {
84064 vma->vm_page_prot = vm_get_page_prot(newflags & ~VM_SHARED);
84065@@ -337,6 +453,17 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
84066 end = start + len;
84067 if (end <= start)
84068 return -ENOMEM;
84069+
84070+#ifdef CONFIG_PAX_SEGMEXEC
84071+ if (current->mm->pax_flags & MF_PAX_SEGMEXEC) {
84072+ if (end > SEGMEXEC_TASK_SIZE)
84073+ return -EINVAL;
84074+ } else
84075+#endif
84076+
84077+ if (end > TASK_SIZE)
84078+ return -EINVAL;
84079+
84080 if (!arch_validate_prot(prot))
84081 return -EINVAL;
84082
84083@@ -344,7 +471,7 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
84084 /*
84085 * Does the application expect PROT_READ to imply PROT_EXEC:
84086 */
84087- if ((prot & PROT_READ) && (current->personality & READ_IMPLIES_EXEC))
84088+ if ((prot & (PROT_READ | PROT_WRITE)) && (current->personality & READ_IMPLIES_EXEC))
84089 prot |= PROT_EXEC;
84090
84091 vm_flags = calc_vm_prot_bits(prot);
84092@@ -376,6 +503,11 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
84093 if (start > vma->vm_start)
84094 prev = vma;
84095
84096+#ifdef CONFIG_PAX_MPROTECT
84097+ if (current->mm->binfmt && current->mm->binfmt->handle_mprotect)
84098+ current->mm->binfmt->handle_mprotect(vma, vm_flags);
84099+#endif
84100+
84101 for (nstart = start ; ; ) {
84102 unsigned long newflags;
84103
84104@@ -386,6 +518,14 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
84105
84106 /* newflags >> 4 shift VM_MAY% in place of VM_% */
84107 if ((newflags & ~(newflags >> 4)) & (VM_READ | VM_WRITE | VM_EXEC)) {
84108+ if (prot & (PROT_WRITE | PROT_EXEC))
84109+ gr_log_rwxmprotect(vma->vm_file);
84110+
84111+ error = -EACCES;
84112+ goto out;
84113+ }
84114+
84115+ if (!gr_acl_handle_mprotect(vma->vm_file, prot)) {
84116 error = -EACCES;
84117 goto out;
84118 }
84119@@ -400,6 +540,9 @@ SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
84120 error = mprotect_fixup(vma, &prev, nstart, tmp, newflags);
84121 if (error)
84122 goto out;
84123+
84124+ track_exec_limit(current->mm, nstart, tmp, vm_flags);
84125+
84126 nstart = tmp;
84127
84128 if (nstart < prev->vm_end)
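
track_exec_limit() above maintains the per-mm code-segment ceiling used on x86-32 when hardware NX is unavailable: the limit rises when an executable range ends beyond it, and drops to the range start when exec is removed across it. A userspace model of just the limit arithmetic; the segment reload and cross-CPU invalidation are omitted, and the names are stand-ins:

/* Given the current limit and a range whose protection just changed, compute
 * the new limit. Returns 0 when no change is needed, mirroring the kernel's
 * newlimit == 0 case. */
#include <stdio.h>

static unsigned long new_cs_limit(unsigned long oldlimit,
				  unsigned long start, unsigned long end,
				  int exec)
{
	if (exec && oldlimit < end)
		return end;	/* limit moves up to cover the new exec range */
	if (!exec && start < oldlimit && oldlimit <= end)
		return start;	/* exec removed across the limit: move it down */
	return 0;		/* no change required */
}

int main(void)
{
	printf("%#lx\n", new_cs_limit(0x08050000UL, 0x08050000UL, 0x08060000UL, 1));
	printf("%#lx\n", new_cs_limit(0x08060000UL, 0x08050000UL, 0x08060000UL, 0));
	return 0;
}
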
84129diff --git a/mm/mremap.c b/mm/mremap.c
84130index 463a257..c0c7a92 100644
84131--- a/mm/mremap.c
84132+++ b/mm/mremap.c
84133@@ -126,6 +126,12 @@ static void move_ptes(struct vm_area_struct *vma, pmd_t *old_pmd,
84134 continue;
84135 pte = ptep_get_and_clear(mm, old_addr, old_pte);
84136 pte = move_pte(pte, new_vma->vm_page_prot, old_addr, new_addr);
84137+
84138+#ifdef CONFIG_ARCH_TRACK_EXEC_LIMIT
84139+ if (!(__supported_pte_mask & _PAGE_NX) && (new_vma->vm_flags & (VM_PAGEEXEC | VM_EXEC)) == VM_PAGEEXEC)
84140+ pte = pte_exprotect(pte);
84141+#endif
84142+
84143 set_pte_at(mm, new_addr, new_pte, pte);
84144 }
84145
84146@@ -318,6 +324,11 @@ static struct vm_area_struct *vma_to_resize(unsigned long addr,
84147 if (is_vm_hugetlb_page(vma))
84148 goto Einval;
84149
84150+#ifdef CONFIG_PAX_SEGMEXEC
84151+ if (pax_find_mirror_vma(vma))
84152+ goto Einval;
84153+#endif
84154+
84155 /* We can't remap across vm area boundaries */
84156 if (old_len > vma->vm_end - addr)
84157 goto Efault;
84158@@ -373,20 +384,25 @@ static unsigned long mremap_to(unsigned long addr, unsigned long old_len,
84159 unsigned long ret = -EINVAL;
84160 unsigned long charged = 0;
84161 unsigned long map_flags;
84162+ unsigned long pax_task_size = TASK_SIZE;
84163
84164 if (new_addr & ~PAGE_MASK)
84165 goto out;
84166
84167- if (new_len > TASK_SIZE || new_addr > TASK_SIZE - new_len)
84168+#ifdef CONFIG_PAX_SEGMEXEC
84169+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
84170+ pax_task_size = SEGMEXEC_TASK_SIZE;
84171+#endif
84172+
84173+ pax_task_size -= PAGE_SIZE;
84174+
84175+ if (new_len > TASK_SIZE || new_addr > pax_task_size - new_len)
84176 goto out;
84177
84178 /* Check if the location we're moving into overlaps the
84179 * old location at all, and fail if it does.
84180 */
84181- if ((new_addr <= addr) && (new_addr+new_len) > addr)
84182- goto out;
84183-
84184- if ((addr <= new_addr) && (addr+old_len) > new_addr)
84185+ if (addr + old_len > new_addr && new_addr + new_len > addr)
84186 goto out;
84187
84188 ret = do_munmap(mm, new_addr, new_len);
84189@@ -455,6 +471,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
84190 unsigned long ret = -EINVAL;
84191 unsigned long charged = 0;
84192 bool locked = false;
84193+ unsigned long pax_task_size = TASK_SIZE;
84194
84195 down_write(&current->mm->mmap_sem);
84196
84197@@ -475,6 +492,17 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
84198 if (!new_len)
84199 goto out;
84200
84201+#ifdef CONFIG_PAX_SEGMEXEC
84202+ if (mm->pax_flags & MF_PAX_SEGMEXEC)
84203+ pax_task_size = SEGMEXEC_TASK_SIZE;
84204+#endif
84205+
84206+ pax_task_size -= PAGE_SIZE;
84207+
84208+ if (new_len > pax_task_size || addr > pax_task_size-new_len ||
84209+ old_len > pax_task_size || addr > pax_task_size-old_len)
84210+ goto out;
84211+
84212 if (flags & MREMAP_FIXED) {
84213 if (flags & MREMAP_MAYMOVE)
84214 ret = mremap_to(addr, old_len, new_addr, new_len,
84215@@ -524,6 +552,7 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
84216 new_addr = addr;
84217 }
84218 ret = addr;
84219+ track_exec_limit(vma->vm_mm, vma->vm_start, addr + new_len, vma->vm_flags);
84220 goto out;
84221 }
84222 }
84223@@ -547,7 +576,12 @@ SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
84224 goto out;
84225 }
84226
84227+ map_flags = vma->vm_flags;
84228 ret = move_vma(vma, addr, old_len, new_len, new_addr, &locked);
84229+ if (!(ret & ~PAGE_MASK)) {
84230+ track_exec_limit(current->mm, addr, addr + old_len, 0UL);
84231+ track_exec_limit(current->mm, new_addr, new_addr + new_len, map_flags);
84232+ }
84233 }
84234 out:
84235 if (ret & ~PAGE_MASK)
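
Note: the mremap_to() hunk above replaces two one-sided overlap tests with the single symmetric interval test addr + old_len > new_addr && new_addr + new_len > addr, the textbook "two half-open ranges intersect" predicate. A brute-force check (illustrative sketch, not from the patch) confirms the two forms reject exactly the same inputs for non-empty ranges.

/* Brute-force equivalence check of the old and new overlap tests
 * for non-empty half-open ranges. */
#include <stdio.h>

int main(void)
{
	unsigned long addr, new_addr, old_len, new_len;

	for (addr = 0; addr < 16; addr++)
	for (new_addr = 0; new_addr < 16; new_addr++)
	for (old_len = 1; old_len <= 8; old_len++)
	for (new_len = 1; new_len <= 8; new_len++) {
		int old_test = (new_addr <= addr && new_addr + new_len > addr) ||
			       (addr <= new_addr && addr + old_len > new_addr);
		int new_test = addr + old_len > new_addr &&
			       new_addr + new_len > addr;

		if (old_test != new_test) {
			printf("mismatch: addr=%lu new=%lu ol=%lu nl=%lu\n",
			       addr, new_addr, old_len, new_len);
			return 1;
		}
	}
	printf("old and new overlap tests agree on all cases\n");
	return 0;
}
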
84236diff --git a/mm/nommu.c b/mm/nommu.c
84237index 298884d..5f74980 100644
84238--- a/mm/nommu.c
84239+++ b/mm/nommu.c
84240@@ -65,7 +65,6 @@ int sysctl_max_map_count = DEFAULT_MAX_MAP_COUNT;
84241 int sysctl_nr_trim_pages = CONFIG_NOMMU_INITIAL_TRIM_EXCESS;
84242 unsigned long sysctl_user_reserve_kbytes __read_mostly = 1UL << 17; /* 128MB */
84243 unsigned long sysctl_admin_reserve_kbytes __read_mostly = 1UL << 13; /* 8MB */
84244-int heap_stack_gap = 0;
84245
84246 atomic_long_t mmap_pages_allocated;
84247
84248@@ -842,15 +841,6 @@ struct vm_area_struct *find_vma(struct mm_struct *mm, unsigned long addr)
84249 EXPORT_SYMBOL(find_vma);
84250
84251 /*
84252- * find a VMA
84253- * - we don't extend stack VMAs under NOMMU conditions
84254- */
84255-struct vm_area_struct *find_extend_vma(struct mm_struct *mm, unsigned long addr)
84256-{
84257- return find_vma(mm, addr);
84258-}
84259-
84260-/*
84261 * expand a stack to a given address
84262 * - not supported under NOMMU conditions
84263 */
84264@@ -1561,6 +1551,7 @@ int split_vma(struct mm_struct *mm, struct vm_area_struct *vma,
84265
84266 /* most fields are the same, copy all, and then fixup */
84267 *new = *vma;
84268+ INIT_LIST_HEAD(&new->anon_vma_chain);
84269 *region = *vma->vm_region;
84270 new->vm_region = region;
84271
84272@@ -1995,8 +1986,8 @@ int generic_file_remap_pages(struct vm_area_struct *vma, unsigned long addr,
84273 }
84274 EXPORT_SYMBOL(generic_file_remap_pages);
84275
84276-static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
84277- unsigned long addr, void *buf, int len, int write)
84278+static ssize_t __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
84279+ unsigned long addr, void *buf, size_t len, int write)
84280 {
84281 struct vm_area_struct *vma;
84282
84283@@ -2037,8 +2028,8 @@ static int __access_remote_vm(struct task_struct *tsk, struct mm_struct *mm,
84284 *
84285 * The caller must hold a reference on @mm.
84286 */
84287-int access_remote_vm(struct mm_struct *mm, unsigned long addr,
84288- void *buf, int len, int write)
84289+ssize_t access_remote_vm(struct mm_struct *mm, unsigned long addr,
84290+ void *buf, size_t len, int write)
84291 {
84292 return __access_remote_vm(NULL, mm, addr, buf, len, write);
84293 }
84294@@ -2047,7 +2038,7 @@ int access_remote_vm(struct mm_struct *mm, unsigned long addr,
84295 * Access another process' address space.
84296 * - source/target buffer must be kernel space
84297 */
84298-int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write)
84299+ssize_t access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, size_t len, int write)
84300 {
84301 struct mm_struct *mm;
84302
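
Note: the nommu.c hunks widen the len parameter of the remote-VM accessors from int to size_t/ssize_t. The motivation, as with the mmu variants elsewhere in this patch: a byte count above INT_MAX silently wraps negative in an int, defeating later sign and limit checks. A two-line demonstration (sketch; exact wrapped value is what common LP64 systems produce):

/* Why int is the wrong type for a byte count: lengths above INT_MAX
 * wrap negative, so a later "len < 0" or "len > limit" test misfires. */
#include <stdio.h>
#include <stddef.h>

int main(void)
{
	size_t len = 3UL * 1024 * 1024 * 1024;	/* a 3 GiB request */
	int narrow = (int)len;			/* what the old prototype saw */

	printf("size_t len = %zu, as int = %d\n", len, narrow);
	return 0;
}
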
84303diff --git a/mm/page-writeback.c b/mm/page-writeback.c
84304index 4514ad7..92eaa1c 100644
84305--- a/mm/page-writeback.c
84306+++ b/mm/page-writeback.c
84307@@ -659,7 +659,7 @@ unsigned long bdi_dirty_limit(struct backing_dev_info *bdi, unsigned long dirty)
84308 * card's bdi_dirty may rush to many times higher than bdi_setpoint.
84309 * - the bdi dirty thresh drops quickly due to change of JBOD workload
84310 */
84311-static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
84312+static unsigned long __intentional_overflow(-1) bdi_position_ratio(struct backing_dev_info *bdi,
84313 unsigned long thresh,
84314 unsigned long bg_thresh,
84315 unsigned long dirty,
84316@@ -1634,7 +1634,7 @@ ratelimit_handler(struct notifier_block *self, unsigned long action,
84317 }
84318 }
84319
84320-static struct notifier_block __cpuinitdata ratelimit_nb = {
84321+static struct notifier_block ratelimit_nb = {
84322 .notifier_call = ratelimit_handler,
84323 .next = NULL,
84324 };
84325diff --git a/mm/page_alloc.c b/mm/page_alloc.c
84326index 2ee0fd3..6e2edfb 100644
84327--- a/mm/page_alloc.c
84328+++ b/mm/page_alloc.c
84329@@ -60,6 +60,7 @@
84330 #include <linux/page-debug-flags.h>
84331 #include <linux/hugetlb.h>
84332 #include <linux/sched/rt.h>
84333+#include <linux/random.h>
84334
84335 #include <asm/tlbflush.h>
84336 #include <asm/div64.h>
84337@@ -345,7 +346,7 @@ out:
84338 * This usage means that zero-order pages may not be compound.
84339 */
84340
84341-static void free_compound_page(struct page *page)
84342+void free_compound_page(struct page *page)
84343 {
84344 __free_pages_ok(page, compound_order(page));
84345 }
84346@@ -702,6 +703,10 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
84347 int i;
84348 int bad = 0;
84349
84350+#ifdef CONFIG_PAX_MEMORY_SANITIZE
84351+ unsigned long index = 1UL << order;
84352+#endif
84353+
84354 trace_mm_page_free(page, order);
84355 kmemcheck_free_shadow(page, order);
84356
84357@@ -717,6 +722,12 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
84358 debug_check_no_obj_freed(page_address(page),
84359 PAGE_SIZE << order);
84360 }
84361+
84362+#ifdef CONFIG_PAX_MEMORY_SANITIZE
84363+ for (; index; --index)
84364+ sanitize_highpage(page + index - 1);
84365+#endif
84366+
84367 arch_free_page(page, order);
84368 kernel_map_pages(page, 1 << order, 0);
84369
84370@@ -739,6 +750,19 @@ static void __free_pages_ok(struct page *page, unsigned int order)
84371 local_irq_restore(flags);
84372 }
84373
84374+#ifdef CONFIG_PAX_LATENT_ENTROPY
84375+bool __meminitdata extra_latent_entropy;
84376+
84377+static int __init setup_pax_extra_latent_entropy(char *str)
84378+{
84379+ extra_latent_entropy = true;
84380+ return 0;
84381+}
84382+early_param("pax_extra_latent_entropy", setup_pax_extra_latent_entropy);
84383+
84384+volatile u64 latent_entropy;
84385+#endif
84386+
84387 /*
84388 * Read access to zone->managed_pages is safe because it's unsigned long,
84389 * but we still need to serialize writers. Currently all callers of
84390@@ -761,6 +785,19 @@ void __meminit __free_pages_bootmem(struct page *page, unsigned int order)
84391 set_page_count(p, 0);
84392 }
84393
84394+#ifdef CONFIG_PAX_LATENT_ENTROPY
84395+ if (extra_latent_entropy && !PageHighMem(page) && page_to_pfn(page) < 0x100000) {
84396+ u64 hash = 0;
84397+ size_t index, end = PAGE_SIZE * nr_pages / sizeof hash;
84398+ const u64 *data = lowmem_page_address(page);
84399+
84400+ for (index = 0; index < end; index++)
84401+ hash ^= hash + data[index];
84402+ latent_entropy ^= hash;
84403+ add_device_randomness((const void *)&latent_entropy, sizeof(latent_entropy));
84404+ }
84405+#endif
84406+
84407 page_zone(page)->managed_pages += 1 << order;
84408 set_page_refcounted(page);
84409 __free_pages(page, order);
84410@@ -870,8 +907,10 @@ static int prep_new_page(struct page *page, int order, gfp_t gfp_flags)
84411 arch_alloc_page(page, order);
84412 kernel_map_pages(page, 1 << order, 1);
84413
84414+#ifndef CONFIG_PAX_MEMORY_SANITIZE
84415 if (gfp_flags & __GFP_ZERO)
84416 prep_zero_page(page, order, gfp_flags);
84417+#endif
84418
84419 if (order && (gfp_flags & __GFP_COMP))
84420 prep_compound_page(page, order);
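
Note: the page_alloc.c hunks add two PaX features — sanitize-on-free (every page of a freed order-N block is wiped, which is why the __GFP_ZERO fast path is compiled out under PAX_MEMORY_SANITIZE) and PAX_LATENT_ENTROPY, which folds the contents of early boot-freed low pages into a running latent_entropy value with hash ^= hash + data[index] before feeding it to add_device_randomness(). Below is a user-space rendition of that mixing loop (sketch; this is cheap bit diffusion for seeding, not a cryptographic hash).

/* User-space rendition of the PAX_LATENT_ENTROPY folding loop. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint64_t fold(const void *buf, size_t bytes)
{
	uint64_t hash = 0, word;
	size_t i, words = bytes / sizeof(word);
	const unsigned char *p = buf;

	for (i = 0; i < words; i++) {
		memcpy(&word, p + i * sizeof(word), sizeof(word));
		hash ^= hash + word;
	}
	return hash;
}

int main(void)
{
	char page[4096] = "boot-time page contents vary from run to run";

	printf("folded value: %#llx\n",
	       (unsigned long long)fold(page, sizeof(page)));
	return 0;
}
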
84421diff --git a/mm/page_io.c b/mm/page_io.c
84422index a8a3ef4..7260a60 100644
84423--- a/mm/page_io.c
84424+++ b/mm/page_io.c
84425@@ -214,7 +214,7 @@ int __swap_writepage(struct page *page, struct writeback_control *wbc,
84426 struct file *swap_file = sis->swap_file;
84427 struct address_space *mapping = swap_file->f_mapping;
84428 struct iovec iov = {
84429- .iov_base = kmap(page),
84430+ .iov_base = (void __force_user *)kmap(page),
84431 .iov_len = PAGE_SIZE,
84432 };
84433
84434diff --git a/mm/percpu.c b/mm/percpu.c
84435index 8c8e08f..73a5cda 100644
84436--- a/mm/percpu.c
84437+++ b/mm/percpu.c
84438@@ -122,7 +122,7 @@ static unsigned int pcpu_low_unit_cpu __read_mostly;
84439 static unsigned int pcpu_high_unit_cpu __read_mostly;
84440
84441 /* the address of the first chunk which starts with the kernel static area */
84442-void *pcpu_base_addr __read_mostly;
84443+void *pcpu_base_addr __read_only;
84444 EXPORT_SYMBOL_GPL(pcpu_base_addr);
84445
84446 static const int *pcpu_unit_map __read_mostly; /* cpu -> unit */
84447diff --git a/mm/process_vm_access.c b/mm/process_vm_access.c
84448index fd26d04..0cea1b0 100644
84449--- a/mm/process_vm_access.c
84450+++ b/mm/process_vm_access.c
84451@@ -13,6 +13,7 @@
84452 #include <linux/uio.h>
84453 #include <linux/sched.h>
84454 #include <linux/highmem.h>
84455+#include <linux/security.h>
84456 #include <linux/ptrace.h>
84457 #include <linux/slab.h>
84458 #include <linux/syscalls.h>
84459@@ -258,19 +259,19 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
84460 size_t iov_l_curr_offset = 0;
84461 ssize_t iov_len;
84462
84463+ return -ENOSYS; // PaX: until properly audited
84464+
84465 /*
84466 * Work out how many pages of struct pages we're going to need
84467 * when eventually calling get_user_pages
84468 */
84469 for (i = 0; i < riovcnt; i++) {
84470 iov_len = rvec[i].iov_len;
84471- if (iov_len > 0) {
84472- nr_pages_iov = ((unsigned long)rvec[i].iov_base
84473- + iov_len)
84474- / PAGE_SIZE - (unsigned long)rvec[i].iov_base
84475- / PAGE_SIZE + 1;
84476- nr_pages = max(nr_pages, nr_pages_iov);
84477- }
84478+ if (iov_len <= 0)
84479+ continue;
84480+ nr_pages_iov = ((unsigned long)rvec[i].iov_base + iov_len) / PAGE_SIZE -
84481+ (unsigned long)rvec[i].iov_base / PAGE_SIZE + 1;
84482+ nr_pages = max(nr_pages, nr_pages_iov);
84483 }
84484
84485 if (nr_pages == 0)
84486@@ -298,6 +299,11 @@ static ssize_t process_vm_rw_core(pid_t pid, const struct iovec *lvec,
84487 goto free_proc_pages;
84488 }
84489
84490+ if (gr_handle_ptrace(task, vm_write ? PTRACE_POKETEXT : PTRACE_ATTACH)) {
84491+ rc = -EPERM;
84492+ goto put_task_struct;
84493+ }
84494+
84495 mm = mm_access(task, PTRACE_MODE_ATTACH);
84496 if (!mm || IS_ERR(mm)) {
84497 rc = IS_ERR(mm) ? PTR_ERR(mm) : -ESRCH;
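
Note: the restructured loop in process_vm_rw_core() computes how many pages an iovec spans as (base + len) / PAGE_SIZE - base / PAGE_SIZE + 1, i.e. last-page index minus first-page index plus one. With len > 0 this is a safe upper bound for sizing the pinned-pages array (it overshoots by one page when the range ends exactly on a page boundary). Worked example (sketch):

/* Pages spanned by a user range, as computed above. */
#include <stdio.h>

#define PAGE_SIZE 4096UL

static unsigned long pages_spanned(unsigned long base, unsigned long len)
{
	return (base + len) / PAGE_SIZE - base / PAGE_SIZE + 1;
}

int main(void)
{
	/* 100 bytes inside one page */
	printf("%lu\n", pages_spanned(0x1000, 100));	/* -> 1 */
	/* 100 bytes straddling a page boundary */
	printf("%lu\n", pages_spanned(0x1fe0, 100));	/* -> 2 */
	return 0;
}
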
84498diff --git a/mm/rmap.c b/mm/rmap.c
84499index 6280da8..b5c090e 100644
84500--- a/mm/rmap.c
84501+++ b/mm/rmap.c
84502@@ -163,6 +163,10 @@ int anon_vma_prepare(struct vm_area_struct *vma)
84503 struct anon_vma *anon_vma = vma->anon_vma;
84504 struct anon_vma_chain *avc;
84505
84506+#ifdef CONFIG_PAX_SEGMEXEC
84507+ struct anon_vma_chain *avc_m = NULL;
84508+#endif
84509+
84510 might_sleep();
84511 if (unlikely(!anon_vma)) {
84512 struct mm_struct *mm = vma->vm_mm;
84513@@ -172,6 +176,12 @@ int anon_vma_prepare(struct vm_area_struct *vma)
84514 if (!avc)
84515 goto out_enomem;
84516
84517+#ifdef CONFIG_PAX_SEGMEXEC
84518+ avc_m = anon_vma_chain_alloc(GFP_KERNEL);
84519+ if (!avc_m)
84520+ goto out_enomem_free_avc;
84521+#endif
84522+
84523 anon_vma = find_mergeable_anon_vma(vma);
84524 allocated = NULL;
84525 if (!anon_vma) {
84526@@ -185,6 +195,18 @@ int anon_vma_prepare(struct vm_area_struct *vma)
84527 /* page_table_lock to protect against threads */
84528 spin_lock(&mm->page_table_lock);
84529 if (likely(!vma->anon_vma)) {
84530+
84531+#ifdef CONFIG_PAX_SEGMEXEC
84532+ struct vm_area_struct *vma_m = pax_find_mirror_vma(vma);
84533+
84534+ if (vma_m) {
84535+ BUG_ON(vma_m->anon_vma);
84536+ vma_m->anon_vma = anon_vma;
84537+ anon_vma_chain_link(vma_m, avc_m, anon_vma);
84538+ avc_m = NULL;
84539+ }
84540+#endif
84541+
84542 vma->anon_vma = anon_vma;
84543 anon_vma_chain_link(vma, avc, anon_vma);
84544 allocated = NULL;
84545@@ -195,12 +217,24 @@ int anon_vma_prepare(struct vm_area_struct *vma)
84546
84547 if (unlikely(allocated))
84548 put_anon_vma(allocated);
84549+
84550+#ifdef CONFIG_PAX_SEGMEXEC
84551+ if (unlikely(avc_m))
84552+ anon_vma_chain_free(avc_m);
84553+#endif
84554+
84555 if (unlikely(avc))
84556 anon_vma_chain_free(avc);
84557 }
84558 return 0;
84559
84560 out_enomem_free_avc:
84561+
84562+#ifdef CONFIG_PAX_SEGMEXEC
84563+ if (avc_m)
84564+ anon_vma_chain_free(avc_m);
84565+#endif
84566+
84567 anon_vma_chain_free(avc);
84568 out_enomem:
84569 return -ENOMEM;
84570@@ -236,7 +270,7 @@ static inline void unlock_anon_vma_root(struct anon_vma *root)
84571 * Attach the anon_vmas from src to dst.
84572 * Returns 0 on success, -ENOMEM on failure.
84573 */
84574-int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
84575+int anon_vma_clone(struct vm_area_struct *dst, const struct vm_area_struct *src)
84576 {
84577 struct anon_vma_chain *avc, *pavc;
84578 struct anon_vma *root = NULL;
84579@@ -269,7 +303,7 @@ int anon_vma_clone(struct vm_area_struct *dst, struct vm_area_struct *src)
84580 * the corresponding VMA in the parent process is attached to.
84581 * Returns 0 on success, non-zero on failure.
84582 */
84583-int anon_vma_fork(struct vm_area_struct *vma, struct vm_area_struct *pvma)
84584+int anon_vma_fork(struct vm_area_struct *vma, const struct vm_area_struct *pvma)
84585 {
84586 struct anon_vma_chain *avc;
84587 struct anon_vma *anon_vma;
84588@@ -373,8 +407,10 @@ static void anon_vma_ctor(void *data)
84589 void __init anon_vma_init(void)
84590 {
84591 anon_vma_cachep = kmem_cache_create("anon_vma", sizeof(struct anon_vma),
84592- 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC, anon_vma_ctor);
84593- anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain, SLAB_PANIC);
84594+ 0, SLAB_DESTROY_BY_RCU|SLAB_PANIC|SLAB_NO_SANITIZE,
84595+ anon_vma_ctor);
84596+ anon_vma_chain_cachep = KMEM_CACHE(anon_vma_chain,
84597+ SLAB_PANIC|SLAB_NO_SANITIZE);
84598 }
84599
84600 /*
84601diff --git a/mm/shmem.c b/mm/shmem.c
84602index 5e6a842..b41916e 100644
84603--- a/mm/shmem.c
84604+++ b/mm/shmem.c
84605@@ -33,7 +33,7 @@
84606 #include <linux/swap.h>
84607 #include <linux/aio.h>
84608
84609-static struct vfsmount *shm_mnt;
84610+struct vfsmount *shm_mnt;
84611
84612 #ifdef CONFIG_SHMEM
84613 /*
84614@@ -77,7 +77,7 @@ static struct vfsmount *shm_mnt;
84615 #define BOGO_DIRENT_SIZE 20
84616
84617 /* Symlink up to this size is kmalloc'ed instead of using a swappable page */
84618-#define SHORT_SYMLINK_LEN 128
84619+#define SHORT_SYMLINK_LEN 64
84620
84621 /*
84622 * shmem_fallocate and shmem_writepage communicate via inode->i_private
84623@@ -2203,6 +2203,11 @@ static const struct xattr_handler *shmem_xattr_handlers[] = {
84624 static int shmem_xattr_validate(const char *name)
84625 {
84626 struct { const char *prefix; size_t len; } arr[] = {
84627+
84628+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
84629+ { XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN},
84630+#endif
84631+
84632 { XATTR_SECURITY_PREFIX, XATTR_SECURITY_PREFIX_LEN },
84633 { XATTR_TRUSTED_PREFIX, XATTR_TRUSTED_PREFIX_LEN }
84634 };
84635@@ -2258,6 +2263,15 @@ static int shmem_setxattr(struct dentry *dentry, const char *name,
84636 if (err)
84637 return err;
84638
84639+#ifdef CONFIG_PAX_XATTR_PAX_FLAGS
84640+ if (!strncmp(name, XATTR_USER_PREFIX, XATTR_USER_PREFIX_LEN)) {
84641+ if (strcmp(name, XATTR_NAME_PAX_FLAGS))
84642+ return -EOPNOTSUPP;
84643+ if (size > 8)
84644+ return -EINVAL;
84645+ }
84646+#endif
84647+
84648 return simple_xattr_set(&info->xattrs, name, value, size, flags);
84649 }
84650
84651@@ -2570,8 +2584,7 @@ int shmem_fill_super(struct super_block *sb, void *data, int silent)
84652 int err = -ENOMEM;
84653
84654 /* Round up to L1_CACHE_BYTES to resist false sharing */
84655- sbinfo = kzalloc(max((int)sizeof(struct shmem_sb_info),
84656- L1_CACHE_BYTES), GFP_KERNEL);
84657+ sbinfo = kzalloc(max(sizeof(struct shmem_sb_info), L1_CACHE_BYTES), GFP_KERNEL);
84658 if (!sbinfo)
84659 return -ENOMEM;
84660
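
Note: the shmem xattr hunks admit the user.* namespace on tmpfs only for the single name user.pax.flags (value capped at 8 bytes) when CONFIG_PAX_XATTR_PAX_FLAGS is set; any other user.* name is rejected with EOPNOTSUPP. A hedged user-space sketch of setting that marking on a file residing on a tmpfs mount — the "em" value is a placeholder; the actual flag letters follow PaX's own convention:

/* Set the PaX-flags xattr the hunk above whitelists. Only meaningful
 * on a kernel built with CONFIG_PAX_XATTR_PAX_FLAGS and for files on
 * tmpfs; values longer than 8 bytes fail with EINVAL. */
#include <stdio.h>
#include <string.h>
#include <errno.h>
#include <sys/xattr.h>

int main(int argc, char **argv)
{
	const char *path = argc > 1 ? argv[1] : "/tmp/a.out";

	if (setxattr(path, "user.pax.flags", "em", 2, 0) != 0) {
		fprintf(stderr, "setxattr: %s\n", strerror(errno));
		return 1;
	}
	puts("user.pax.flags set");
	return 0;
}
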
84661diff --git a/mm/slab.c b/mm/slab.c
84662index bd88411..2d46fd6 100644
84663--- a/mm/slab.c
84664+++ b/mm/slab.c
84665@@ -366,10 +366,12 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
84666 if ((x)->max_freeable < i) \
84667 (x)->max_freeable = i; \
84668 } while (0)
84669-#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
84670-#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
84671-#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
84672-#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
84673+#define STATS_INC_ALLOCHIT(x) atomic_inc_unchecked(&(x)->allochit)
84674+#define STATS_INC_ALLOCMISS(x) atomic_inc_unchecked(&(x)->allocmiss)
84675+#define STATS_INC_FREEHIT(x) atomic_inc_unchecked(&(x)->freehit)
84676+#define STATS_INC_FREEMISS(x) atomic_inc_unchecked(&(x)->freemiss)
84677+#define STATS_INC_SANITIZED(x) atomic_inc_unchecked(&(x)->sanitized)
84678+#define STATS_INC_NOT_SANITIZED(x) atomic_inc_unchecked(&(x)->not_sanitized)
84679 #else
84680 #define STATS_INC_ACTIVE(x) do { } while (0)
84681 #define STATS_DEC_ACTIVE(x) do { } while (0)
84682@@ -386,6 +388,8 @@ static void kmem_cache_node_init(struct kmem_cache_node *parent)
84683 #define STATS_INC_ALLOCMISS(x) do { } while (0)
84684 #define STATS_INC_FREEHIT(x) do { } while (0)
84685 #define STATS_INC_FREEMISS(x) do { } while (0)
84686+#define STATS_INC_SANITIZED(x) do { } while (0)
84687+#define STATS_INC_NOT_SANITIZED(x) do { } while (0)
84688 #endif
84689
84690 #if DEBUG
84691@@ -477,7 +481,7 @@ static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
84692 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
84693 */
84694 static inline unsigned int obj_to_index(const struct kmem_cache *cache,
84695- const struct slab *slab, void *obj)
84696+ const struct slab *slab, const void *obj)
84697 {
84698 u32 offset = (obj - slab->s_mem);
84699 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
84700@@ -1384,7 +1388,7 @@ static int __cpuinit cpuup_callback(struct notifier_block *nfb,
84701 return notifier_from_errno(err);
84702 }
84703
84704-static struct notifier_block __cpuinitdata cpucache_notifier = {
84705+static struct notifier_block cpucache_notifier = {
84706 &cpuup_callback, NULL, 0
84707 };
84708
84709@@ -1565,12 +1569,12 @@ void __init kmem_cache_init(void)
84710 */
84711
84712 kmalloc_caches[INDEX_AC] = create_kmalloc_cache("kmalloc-ac",
84713- kmalloc_size(INDEX_AC), ARCH_KMALLOC_FLAGS);
84714+ kmalloc_size(INDEX_AC), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
84715
84716 if (INDEX_AC != INDEX_NODE)
84717 kmalloc_caches[INDEX_NODE] =
84718 create_kmalloc_cache("kmalloc-node",
84719- kmalloc_size(INDEX_NODE), ARCH_KMALLOC_FLAGS);
84720+ kmalloc_size(INDEX_NODE), SLAB_USERCOPY | ARCH_KMALLOC_FLAGS);
84721
84722 slab_early_init = 0;
84723
84724@@ -3583,6 +3587,21 @@ static inline void __cache_free(struct kmem_cache *cachep, void *objp,
84725 struct array_cache *ac = cpu_cache_get(cachep);
84726
84727 check_irq_off();
84728+
84729+#ifdef CONFIG_PAX_MEMORY_SANITIZE
84730+ if (pax_sanitize_slab) {
84731+ if (!(cachep->flags & (SLAB_POISON | SLAB_NO_SANITIZE))) {
84732+ memset(objp, PAX_MEMORY_SANITIZE_VALUE, cachep->object_size);
84733+
84734+ if (cachep->ctor)
84735+ cachep->ctor(objp);
84736+
84737+ STATS_INC_SANITIZED(cachep);
84738+ } else
84739+ STATS_INC_NOT_SANITIZED(cachep);
84740+ }
84741+#endif
84742+
84743 kmemleak_free_recursive(objp, cachep->flags);
84744 objp = cache_free_debugcheck(cachep, objp, caller);
84745
84746@@ -3800,6 +3819,7 @@ void kfree(const void *objp)
84747
84748 if (unlikely(ZERO_OR_NULL_PTR(objp)))
84749 return;
84750+ VM_BUG_ON(!virt_addr_valid(objp));
84751 local_irq_save(flags);
84752 kfree_debugcheck(objp);
84753 c = virt_to_cache(objp);
84754@@ -4241,14 +4261,22 @@ void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *cachep)
84755 }
84756 /* cpu stats */
84757 {
84758- unsigned long allochit = atomic_read(&cachep->allochit);
84759- unsigned long allocmiss = atomic_read(&cachep->allocmiss);
84760- unsigned long freehit = atomic_read(&cachep->freehit);
84761- unsigned long freemiss = atomic_read(&cachep->freemiss);
84762+ unsigned long allochit = atomic_read_unchecked(&cachep->allochit);
84763+ unsigned long allocmiss = atomic_read_unchecked(&cachep->allocmiss);
84764+ unsigned long freehit = atomic_read_unchecked(&cachep->freehit);
84765+ unsigned long freemiss = atomic_read_unchecked(&cachep->freemiss);
84766
84767 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
84768 allochit, allocmiss, freehit, freemiss);
84769 }
84770+#ifdef CONFIG_PAX_MEMORY_SANITIZE
84771+ {
84772+ unsigned long sanitized = atomic_read_unchecked(&cachep->sanitized);
84773+ unsigned long not_sanitized = atomic_read_unchecked(&cachep->not_sanitized);
84774+
84775+ seq_printf(m, " : pax %6lu %6lu", sanitized, not_sanitized);
84776+ }
84777+#endif
84778 #endif
84779 }
84780
84781@@ -4476,13 +4504,71 @@ static const struct file_operations proc_slabstats_operations = {
84782 static int __init slab_proc_init(void)
84783 {
84784 #ifdef CONFIG_DEBUG_SLAB_LEAK
84785- proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
84786+ proc_create("slab_allocators", S_IRUSR, NULL, &proc_slabstats_operations);
84787 #endif
84788 return 0;
84789 }
84790 module_init(slab_proc_init);
84791 #endif
84792
84793+bool is_usercopy_object(const void *ptr)
84794+{
84795+ struct page *page;
84796+ struct kmem_cache *cachep;
84797+
84798+ if (ZERO_OR_NULL_PTR(ptr))
84799+ return false;
84800+
84801+ if (!slab_is_available())
84802+ return false;
84803+
84804+ if (!virt_addr_valid(ptr))
84805+ return false;
84806+
84807+ page = virt_to_head_page(ptr);
84808+
84809+ if (!PageSlab(page))
84810+ return false;
84811+
84812+ cachep = page->slab_cache;
84813+ return cachep->flags & SLAB_USERCOPY;
84814+}
84815+
84816+#ifdef CONFIG_PAX_USERCOPY
84817+const char *check_heap_object(const void *ptr, unsigned long n)
84818+{
84819+ struct page *page;
84820+ struct kmem_cache *cachep;
84821+ struct slab *slabp;
84822+ unsigned int objnr;
84823+ unsigned long offset;
84824+
84825+ if (ZERO_OR_NULL_PTR(ptr))
84826+ return "<null>";
84827+
84828+ if (!virt_addr_valid(ptr))
84829+ return NULL;
84830+
84831+ page = virt_to_head_page(ptr);
84832+
84833+ if (!PageSlab(page))
84834+ return NULL;
84835+
84836+ cachep = page->slab_cache;
84837+ if (!(cachep->flags & SLAB_USERCOPY))
84838+ return cachep->name;
84839+
84840+ slabp = page->slab_page;
84841+ objnr = obj_to_index(cachep, slabp, ptr);
84842+ BUG_ON(objnr >= cachep->num);
84843+ offset = ptr - index_to_obj(cachep, slabp, objnr) - obj_offset(cachep);
84844+ if (offset <= cachep->object_size && n <= cachep->object_size - offset)
84845+ return NULL;
84846+
84847+ return cachep->name;
84848+}
84849+#endif
84850+
84851 /**
84852 * ksize - get the actual amount of memory allocated for a given object
84853 * @objp: Pointer to the object
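
Note: check_heap_object() above validates a would-be usercopy as offset <= object_size && n <= object_size - offset rather than the naive offset + n <= object_size, because with an attacker-controlled n the sum can wrap around and pass the naive test. Tiny demonstration (sketch):

/* Why the usercopy bound is written n <= size - offset instead of
 * offset + n <= size: the sum can wrap to a small value and pass. */
#include <stdio.h>

int main(void)
{
	unsigned long size = 128, offset = 64;
	unsigned long n = ~0UL - 32;		/* huge, attacker-chosen */

	printf("naive: %s\n",
	       offset + n <= size ? "PASS (wrapped!)" : "fail");
	printf("safe:  %s\n",
	       (offset <= size && n <= size - offset) ? "PASS"
						      : "fail (rejected)");
	return 0;
}
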
84854diff --git a/mm/slab.h b/mm/slab.h
84855index f96b49e..db1d204 100644
84856--- a/mm/slab.h
84857+++ b/mm/slab.h
84858@@ -32,6 +32,15 @@ extern struct list_head slab_caches;
84859 /* The slab cache that manages slab cache information */
84860 extern struct kmem_cache *kmem_cache;
84861
84862+#ifdef CONFIG_PAX_MEMORY_SANITIZE
84863+#ifdef CONFIG_X86_64
84864+#define PAX_MEMORY_SANITIZE_VALUE '\xfe'
84865+#else
84866+#define PAX_MEMORY_SANITIZE_VALUE '\xff'
84867+#endif
84868+extern bool pax_sanitize_slab;
84869+#endif
84870+
84871 unsigned long calculate_alignment(unsigned long flags,
84872 unsigned long align, unsigned long size);
84873
84874@@ -67,7 +76,8 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
84875
84876 /* Legal flag mask for kmem_cache_create(), for various configurations */
84877 #define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
84878- SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS )
84879+ SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS | \
84880+ SLAB_USERCOPY | SLAB_NO_SANITIZE)
84881
84882 #if defined(CONFIG_DEBUG_SLAB)
84883 #define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
84884@@ -229,6 +239,9 @@ static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
84885 return s;
84886
84887 page = virt_to_head_page(x);
84888+
84889+ BUG_ON(!PageSlab(page));
84890+
84891 cachep = page->slab_cache;
84892 if (slab_equal_or_root(cachep, s))
84893 return cachep;
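
Note: slab.h picks 0xfe as the sanitize byte on x86_64 and 0xff elsewhere. My reading of the choice (not stated in the patch): a pointer reconstructed from 0xfe poison bytes, 0xfefefefefefefefe, is a non-canonical x86-64 address — bits 63:47 must all match — so a use-after-free dereference faults immediately, whereas 0xffffffffffffffff is a canonical kernel-space address. Quick check (sketch, assuming 48-bit virtual addresses):

/* Canonicality of the two candidate poison patterns on x86-64. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static int canonical48(uint64_t va)
{
	uint64_t top = va >> 47;		/* bits 63:47, 17 bits */

	return top == 0 || top == 0x1ffff;	/* all clear or all set */
}

int main(void)
{
	uint64_t p_fe, p_ff = ~0ULL;

	memset(&p_fe, 0xfe, sizeof(p_fe));
	printf("0x%016llx canonical=%d\n",
	       (unsigned long long)p_fe, canonical48(p_fe));	/* -> 0 */
	printf("0x%016llx canonical=%d\n",
	       (unsigned long long)p_ff, canonical48(p_ff));	/* -> 1 */
	return 0;
}
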
84894diff --git a/mm/slab_common.c b/mm/slab_common.c
84895index 2d41450..4efe6ee 100644
84896--- a/mm/slab_common.c
84897+++ b/mm/slab_common.c
84898@@ -22,11 +22,22 @@
84899
84900 #include "slab.h"
84901
84902-enum slab_state slab_state;
84903+enum slab_state slab_state __read_only;
84904 LIST_HEAD(slab_caches);
84905 DEFINE_MUTEX(slab_mutex);
84906 struct kmem_cache *kmem_cache;
84907
84908+#ifdef CONFIG_PAX_MEMORY_SANITIZE
84909+bool pax_sanitize_slab __read_only = true;
84910+static int __init pax_sanitize_slab_setup(char *str)
84911+{
84912+ pax_sanitize_slab = !!simple_strtol(str, NULL, 0);
84913+ printk("%sabled PaX slab sanitization\n", pax_sanitize_slab ? "En" : "Dis");
84914+ return 1;
84915+}
84916+__setup("pax_sanitize_slab=", pax_sanitize_slab_setup);
84917+#endif
84918+
84919 #ifdef CONFIG_DEBUG_VM
84920 static int kmem_cache_sanity_check(struct mem_cgroup *memcg, const char *name,
84921 size_t size)
84922@@ -209,7 +220,7 @@ kmem_cache_create_memcg(struct mem_cgroup *memcg, const char *name, size_t size,
84923
84924 err = __kmem_cache_create(s, flags);
84925 if (!err) {
84926- s->refcount = 1;
84927+ atomic_set(&s->refcount, 1);
84928 list_add(&s->list, &slab_caches);
84929 memcg_cache_list_add(memcg, s);
84930 } else {
84931@@ -255,8 +266,7 @@ void kmem_cache_destroy(struct kmem_cache *s)
84932
84933 get_online_cpus();
84934 mutex_lock(&slab_mutex);
84935- s->refcount--;
84936- if (!s->refcount) {
84937+ if (atomic_dec_and_test(&s->refcount)) {
84938 list_del(&s->list);
84939
84940 if (!__kmem_cache_shutdown(s)) {
84941@@ -302,7 +312,7 @@ void __init create_boot_cache(struct kmem_cache *s, const char *name, size_t siz
84942 panic("Creation of kmalloc slab %s size=%zu failed. Reason %d\n",
84943 name, size, err);
84944
84945- s->refcount = -1; /* Exempt from merging for now */
84946+ atomic_set(&s->refcount, -1); /* Exempt from merging for now */
84947 }
84948
84949 struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
84950@@ -315,7 +325,7 @@ struct kmem_cache *__init create_kmalloc_cache(const char *name, size_t size,
84951
84952 create_boot_cache(s, name, size, flags);
84953 list_add(&s->list, &slab_caches);
84954- s->refcount = 1;
84955+ atomic_set(&s->refcount, 1);
84956 return s;
84957 }
84958
84959@@ -327,6 +337,11 @@ struct kmem_cache *kmalloc_dma_caches[KMALLOC_SHIFT_HIGH + 1];
84960 EXPORT_SYMBOL(kmalloc_dma_caches);
84961 #endif
84962
84963+#ifdef CONFIG_PAX_USERCOPY_SLABS
84964+struct kmem_cache *kmalloc_usercopy_caches[KMALLOC_SHIFT_HIGH + 1];
84965+EXPORT_SYMBOL(kmalloc_usercopy_caches);
84966+#endif
84967+
84968 /*
84969 * Conversion table for small slabs sizes / 8 to the index in the
84970 * kmalloc array. This is necessary for slabs < 192 since we have non power
84971@@ -391,6 +406,13 @@ struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
84972 return kmalloc_dma_caches[index];
84973
84974 #endif
84975+
84976+#ifdef CONFIG_PAX_USERCOPY_SLABS
84977+ if (unlikely((flags & GFP_USERCOPY)))
84978+ return kmalloc_usercopy_caches[index];
84979+
84980+#endif
84981+
84982 return kmalloc_caches[index];
84983 }
84984
84985@@ -447,7 +469,7 @@ void __init create_kmalloc_caches(unsigned long flags)
84986 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
84987 if (!kmalloc_caches[i]) {
84988 kmalloc_caches[i] = create_kmalloc_cache(NULL,
84989- 1 << i, flags);
84990+ 1 << i, SLAB_USERCOPY | flags);
84991 }
84992
84993 /*
84994@@ -456,10 +478,10 @@ void __init create_kmalloc_caches(unsigned long flags)
84995 * earlier power of two caches
84996 */
84997 if (KMALLOC_MIN_SIZE <= 32 && !kmalloc_caches[1] && i == 6)
84998- kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, flags);
84999+ kmalloc_caches[1] = create_kmalloc_cache(NULL, 96, SLAB_USERCOPY | flags);
85000
85001 if (KMALLOC_MIN_SIZE <= 64 && !kmalloc_caches[2] && i == 7)
85002- kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, flags);
85003+ kmalloc_caches[2] = create_kmalloc_cache(NULL, 192, SLAB_USERCOPY | flags);
85004 }
85005
85006 /* Kmalloc array is now usable */
85007@@ -492,6 +514,23 @@ void __init create_kmalloc_caches(unsigned long flags)
85008 }
85009 }
85010 #endif
85011+
85012+#ifdef CONFIG_PAX_USERCOPY_SLABS
85013+ for (i = 0; i <= KMALLOC_SHIFT_HIGH; i++) {
85014+ struct kmem_cache *s = kmalloc_caches[i];
85015+
85016+ if (s) {
85017+ int size = kmalloc_size(i);
85018+ char *n = kasprintf(GFP_NOWAIT,
85019+ "usercopy-kmalloc-%d", size);
85020+
85021+ BUG_ON(!n);
85022+ kmalloc_usercopy_caches[i] = create_kmalloc_cache(n,
85023+ size, SLAB_USERCOPY | flags);
85024+ }
85025+ }
85026+#endif
85027+
85028 }
85029 #endif /* !CONFIG_SLOB */
85030
85031@@ -516,6 +555,9 @@ void print_slabinfo_header(struct seq_file *m)
85032 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
85033 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
85034 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
85035+#ifdef CONFIG_PAX_MEMORY_SANITIZE
85036+ seq_puts(m, " : pax <sanitized> <not_sanitized>");
85037+#endif
85038 #endif
85039 seq_putc(m, '\n');
85040 }
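
Note: the slab_common.c hunks convert kmem_cache->refcount from a plain int to atomic_t, turning "s->refcount--; if (!s->refcount)" into atomic_dec_and_test(). Here the decrement already ran under slab_mutex, so the change is presumably less about an existing race and more about enrolling the counter in PAX_REFCOUNT's overflow checking and making the pattern safe if a path ever touches it unlocked. C11 rendition of the idiom (sketch):

/* atomic_dec_and_test() == (atomic_fetch_sub(&v, 1) == 1): decrement
 * and zero-test as one atomic step, so exactly one releaser sees the
 * final reference drop. */
#include <stdatomic.h>
#include <stdio.h>

static atomic_int refcount = 3;

static int put_ref(void)
{
	return atomic_fetch_sub(&refcount, 1) == 1;
}

int main(void)
{
	while (!put_ref())
		puts("released a reference");
	puts("last reference dropped: destroy object");
	return 0;
}
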
85041diff --git a/mm/slob.c b/mm/slob.c
85042index eeed4a0..bb0e9ab 100644
85043--- a/mm/slob.c
85044+++ b/mm/slob.c
85045@@ -157,7 +157,7 @@ static void set_slob(slob_t *s, slobidx_t size, slob_t *next)
85046 /*
85047 * Return the size of a slob block.
85048 */
85049-static slobidx_t slob_units(slob_t *s)
85050+static slobidx_t slob_units(const slob_t *s)
85051 {
85052 if (s->units > 0)
85053 return s->units;
85054@@ -167,7 +167,7 @@ static slobidx_t slob_units(slob_t *s)
85055 /*
85056 * Return the next free slob block pointer after this one.
85057 */
85058-static slob_t *slob_next(slob_t *s)
85059+static slob_t *slob_next(const slob_t *s)
85060 {
85061 slob_t *base = (slob_t *)((unsigned long)s & PAGE_MASK);
85062 slobidx_t next;
85063@@ -182,14 +182,14 @@ static slob_t *slob_next(slob_t *s)
85064 /*
85065 * Returns true if s is the last free block in its page.
85066 */
85067-static int slob_last(slob_t *s)
85068+static int slob_last(const slob_t *s)
85069 {
85070 return !((unsigned long)slob_next(s) & ~PAGE_MASK);
85071 }
85072
85073-static void *slob_new_pages(gfp_t gfp, int order, int node)
85074+static struct page *slob_new_pages(gfp_t gfp, unsigned int order, int node)
85075 {
85076- void *page;
85077+ struct page *page;
85078
85079 #ifdef CONFIG_NUMA
85080 if (node != NUMA_NO_NODE)
85081@@ -201,14 +201,18 @@ static void *slob_new_pages(gfp_t gfp, int order, int node)
85082 if (!page)
85083 return NULL;
85084
85085- return page_address(page);
85086+ __SetPageSlab(page);
85087+ return page;
85088 }
85089
85090-static void slob_free_pages(void *b, int order)
85091+static void slob_free_pages(struct page *sp, int order)
85092 {
85093 if (current->reclaim_state)
85094 current->reclaim_state->reclaimed_slab += 1 << order;
85095- free_pages((unsigned long)b, order);
85096+ __ClearPageSlab(sp);
85097+ page_mapcount_reset(sp);
85098+ sp->private = 0;
85099+ __free_pages(sp, order);
85100 }
85101
85102 /*
85103@@ -313,15 +317,15 @@ static void *slob_alloc(size_t size, gfp_t gfp, int align, int node)
85104
85105 /* Not enough space: must allocate a new page */
85106 if (!b) {
85107- b = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
85108- if (!b)
85109+ sp = slob_new_pages(gfp & ~__GFP_ZERO, 0, node);
85110+ if (!sp)
85111 return NULL;
85112- sp = virt_to_page(b);
85113- __SetPageSlab(sp);
85114+ b = page_address(sp);
85115
85116 spin_lock_irqsave(&slob_lock, flags);
85117 sp->units = SLOB_UNITS(PAGE_SIZE);
85118 sp->freelist = b;
85119+ sp->private = 0;
85120 INIT_LIST_HEAD(&sp->list);
85121 set_slob(b, SLOB_UNITS(PAGE_SIZE), b + SLOB_UNITS(PAGE_SIZE));
85122 set_slob_page_free(sp, slob_list);
85123@@ -359,12 +363,15 @@ static void slob_free(void *block, int size)
85124 if (slob_page_free(sp))
85125 clear_slob_page_free(sp);
85126 spin_unlock_irqrestore(&slob_lock, flags);
85127- __ClearPageSlab(sp);
85128- page_mapcount_reset(sp);
85129- slob_free_pages(b, 0);
85130+ slob_free_pages(sp, 0);
85131 return;
85132 }
85133
85134+#ifdef CONFIG_PAX_MEMORY_SANITIZE
85135+ if (pax_sanitize_slab)
85136+ memset(block, PAX_MEMORY_SANITIZE_VALUE, size);
85137+#endif
85138+
85139 if (!slob_page_free(sp)) {
85140 /* This slob page is about to become partially free. Easy! */
85141 sp->units = units;
85142@@ -424,11 +431,10 @@ out:
85143 */
85144
85145 static __always_inline void *
85146-__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
85147+__do_kmalloc_node_align(size_t size, gfp_t gfp, int node, unsigned long caller, int align)
85148 {
85149- unsigned int *m;
85150- int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
85151- void *ret;
85152+ slob_t *m;
85153+ void *ret = NULL;
85154
85155 gfp &= gfp_allowed_mask;
85156
85157@@ -442,23 +448,41 @@ __do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
85158
85159 if (!m)
85160 return NULL;
85161- *m = size;
85162+ BUILD_BUG_ON(ARCH_KMALLOC_MINALIGN < 2 * SLOB_UNIT);
85163+ BUILD_BUG_ON(ARCH_SLAB_MINALIGN < 2 * SLOB_UNIT);
85164+ m[0].units = size;
85165+ m[1].units = align;
85166 ret = (void *)m + align;
85167
85168 trace_kmalloc_node(caller, ret,
85169 size, size + align, gfp, node);
85170 } else {
85171 unsigned int order = get_order(size);
85172+ struct page *page;
85173
85174 if (likely(order))
85175 gfp |= __GFP_COMP;
85176- ret = slob_new_pages(gfp, order, node);
85177+ page = slob_new_pages(gfp, order, node);
85178+ if (page) {
85179+ ret = page_address(page);
85180+ page->private = size;
85181+ }
85182
85183 trace_kmalloc_node(caller, ret,
85184 size, PAGE_SIZE << order, gfp, node);
85185 }
85186
85187- kmemleak_alloc(ret, size, 1, gfp);
85188+ return ret;
85189+}
85190+
85191+static __always_inline void *
85192+__do_kmalloc_node(size_t size, gfp_t gfp, int node, unsigned long caller)
85193+{
85194+ int align = max(ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
85195+ void *ret = __do_kmalloc_node_align(size, gfp, node, caller, align);
85196+
85197+ if (!ZERO_OR_NULL_PTR(ret))
85198+ kmemleak_alloc(ret, size, 1, gfp);
85199 return ret;
85200 }
85201
85202@@ -493,34 +517,112 @@ void kfree(const void *block)
85203 return;
85204 kmemleak_free(block);
85205
85206+ VM_BUG_ON(!virt_addr_valid(block));
85207 sp = virt_to_page(block);
85208- if (PageSlab(sp)) {
85209+ VM_BUG_ON(!PageSlab(sp));
85210+ if (!sp->private) {
85211 int align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
85212- unsigned int *m = (unsigned int *)(block - align);
85213- slob_free(m, *m + align);
85214- } else
85215+ slob_t *m = (slob_t *)(block - align);
85216+ slob_free(m, m[0].units + align);
85217+ } else {
85218+ __ClearPageSlab(sp);
85219+ page_mapcount_reset(sp);
85220+ sp->private = 0;
85221 __free_pages(sp, compound_order(sp));
85222+ }
85223 }
85224 EXPORT_SYMBOL(kfree);
85225
85226+bool is_usercopy_object(const void *ptr)
85227+{
85228+ if (!slab_is_available())
85229+ return false;
85230+
85231+ // PAX: TODO
85232+
85233+ return false;
85234+}
85235+
85236+#ifdef CONFIG_PAX_USERCOPY
85237+const char *check_heap_object(const void *ptr, unsigned long n)
85238+{
85239+ struct page *page;
85240+ const slob_t *free;
85241+ const void *base;
85242+ unsigned long flags;
85243+
85244+ if (ZERO_OR_NULL_PTR(ptr))
85245+ return "<null>";
85246+
85247+ if (!virt_addr_valid(ptr))
85248+ return NULL;
85249+
85250+ page = virt_to_head_page(ptr);
85251+ if (!PageSlab(page))
85252+ return NULL;
85253+
85254+ if (page->private) {
85255+ base = page;
85256+ if (base <= ptr && n <= page->private - (ptr - base))
85257+ return NULL;
85258+ return "<slob>";
85259+ }
85260+
85261+ /* some tricky double walking to find the chunk */
85262+ spin_lock_irqsave(&slob_lock, flags);
85263+ base = (void *)((unsigned long)ptr & PAGE_MASK);
85264+ free = page->freelist;
85265+
85266+ while (!slob_last(free) && (void *)free <= ptr) {
85267+ base = free + slob_units(free);
85268+ free = slob_next(free);
85269+ }
85270+
85271+ while (base < (void *)free) {
85272+ slobidx_t m = ((slob_t *)base)[0].units, align = ((slob_t *)base)[1].units;
85273+ int size = SLOB_UNIT * SLOB_UNITS(m + align);
85274+ int offset;
85275+
85276+ if (ptr < base + align)
85277+ break;
85278+
85279+ offset = ptr - base - align;
85280+ if (offset >= m) {
85281+ base += size;
85282+ continue;
85283+ }
85284+
85285+ if (n > m - offset)
85286+ break;
85287+
85288+ spin_unlock_irqrestore(&slob_lock, flags);
85289+ return NULL;
85290+ }
85291+
85292+ spin_unlock_irqrestore(&slob_lock, flags);
85293+ return "<slob>";
85294+}
85295+#endif
85296+
85297 /* can't use ksize for kmem_cache_alloc memory, only kmalloc */
85298 size_t ksize(const void *block)
85299 {
85300 struct page *sp;
85301 int align;
85302- unsigned int *m;
85303+ slob_t *m;
85304
85305 BUG_ON(!block);
85306 if (unlikely(block == ZERO_SIZE_PTR))
85307 return 0;
85308
85309 sp = virt_to_page(block);
85310- if (unlikely(!PageSlab(sp)))
85311- return PAGE_SIZE << compound_order(sp);
85312+ VM_BUG_ON(!PageSlab(sp));
85313+ if (sp->private)
85314+ return sp->private;
85315
85316 align = max_t(size_t, ARCH_KMALLOC_MINALIGN, ARCH_SLAB_MINALIGN);
85317- m = (unsigned int *)(block - align);
85318- return SLOB_UNITS(*m) * SLOB_UNIT;
85319+ m = (slob_t *)(block - align);
85320+ return SLOB_UNITS(m[0].units) * SLOB_UNIT;
85321 }
85322 EXPORT_SYMBOL(ksize);
85323
85324@@ -536,23 +638,33 @@ int __kmem_cache_create(struct kmem_cache *c, unsigned long flags)
85325
85326 void *kmem_cache_alloc_node(struct kmem_cache *c, gfp_t flags, int node)
85327 {
85328- void *b;
85329+ void *b = NULL;
85330
85331 flags &= gfp_allowed_mask;
85332
85333 lockdep_trace_alloc(flags);
85334
85335+#ifdef CONFIG_PAX_USERCOPY_SLABS
85336+ b = __do_kmalloc_node_align(c->size, flags, node, _RET_IP_, c->align);
85337+#else
85338 if (c->size < PAGE_SIZE) {
85339 b = slob_alloc(c->size, flags, c->align, node);
85340 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
85341 SLOB_UNITS(c->size) * SLOB_UNIT,
85342 flags, node);
85343 } else {
85344- b = slob_new_pages(flags, get_order(c->size), node);
85345+ struct page *sp;
85346+
85347+ sp = slob_new_pages(flags, get_order(c->size), node);
85348+ if (sp) {
85349+ b = page_address(sp);
85350+ sp->private = c->size;
85351+ }
85352 trace_kmem_cache_alloc_node(_RET_IP_, b, c->object_size,
85353 PAGE_SIZE << get_order(c->size),
85354 flags, node);
85355 }
85356+#endif
85357
85358 if (c->ctor)
85359 c->ctor(b);
85360@@ -564,10 +676,14 @@ EXPORT_SYMBOL(kmem_cache_alloc_node);
85361
85362 static void __kmem_cache_free(void *b, int size)
85363 {
85364- if (size < PAGE_SIZE)
85365+ struct page *sp;
85366+
85367+ sp = virt_to_page(b);
85368+ BUG_ON(!PageSlab(sp));
85369+ if (!sp->private)
85370 slob_free(b, size);
85371 else
85372- slob_free_pages(b, get_order(size));
85373+ slob_free_pages(sp, get_order(size));
85374 }
85375
85376 static void kmem_rcu_free(struct rcu_head *head)
85377@@ -580,17 +696,31 @@ static void kmem_rcu_free(struct rcu_head *head)
85378
85379 void kmem_cache_free(struct kmem_cache *c, void *b)
85380 {
85381+ int size = c->size;
85382+
85383+#ifdef CONFIG_PAX_USERCOPY_SLABS
85384+ if (size + c->align < PAGE_SIZE) {
85385+ size += c->align;
85386+ b -= c->align;
85387+ }
85388+#endif
85389+
85390 kmemleak_free_recursive(b, c->flags);
85391 if (unlikely(c->flags & SLAB_DESTROY_BY_RCU)) {
85392 struct slob_rcu *slob_rcu;
85393- slob_rcu = b + (c->size - sizeof(struct slob_rcu));
85394- slob_rcu->size = c->size;
85395+ slob_rcu = b + (size - sizeof(struct slob_rcu));
85396+ slob_rcu->size = size;
85397 call_rcu(&slob_rcu->head, kmem_rcu_free);
85398 } else {
85399- __kmem_cache_free(b, c->size);
85400+ __kmem_cache_free(b, size);
85401 }
85402
85403+#ifdef CONFIG_PAX_USERCOPY_SLABS
85404+ trace_kfree(_RET_IP_, b);
85405+#else
85406 trace_kmem_cache_free(_RET_IP_, b);
85407+#endif
85408+
85409 }
85410 EXPORT_SYMBOL(kmem_cache_free);
85411
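
Note: the SLOB rework stores both the size and the alignment in the two slob_t units preceding a kmalloc'd block (m[0].units = size; m[1].units = align) where the old code kept only the size in one unsigned int; the BUILD_BUG_ONs guarantee the minimum alignment can hold this two-word header. kfree() and ksize() recover the header at block - align, and the stored align word is what lets check_heap_object() above re-derive exact object extents during its freelist walk. Mock of the layout (illustrative only — the names mock_* are hypothetical and real SLOB carves blocks from page freelists; real ksize() also rounds to SLOB units):

/* Mock of the two-word SLOB header introduced by the patch. */
#include <stdio.h>
#include <stdlib.h>

#define KMALLOC_ALIGN 16		/* stands in for ARCH_KMALLOC_MINALIGN */

typedef struct { long units; } slob_t;

static void *mock_kmalloc(size_t size)
{
	slob_t *m = malloc(KMALLOC_ALIGN + size);

	if (!m)
		return NULL;
	m[0].units = (long)size;	/* what ksize() reports */
	m[1].units = KMALLOC_ALIGN;	/* lets a heap walk find extents */
	return (char *)m + KMALLOC_ALIGN;
}

static size_t mock_ksize(const void *block)
{
	const slob_t *m = (const slob_t *)((const char *)block - KMALLOC_ALIGN);

	return (size_t)m[0].units;
}

static void mock_kfree(void *block)
{
	free((char *)block - KMALLOC_ALIGN);
}

int main(void)
{
	void *p = mock_kmalloc(100);

	printf("usable size: %zu\n", mock_ksize(p));	/* -> 100 */
	mock_kfree(p);
	return 0;
}
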
85412diff --git a/mm/slub.c b/mm/slub.c
85413index 57707f0..7857bd3 100644
85414--- a/mm/slub.c
85415+++ b/mm/slub.c
85416@@ -198,7 +198,7 @@ struct track {
85417
85418 enum track_item { TRACK_ALLOC, TRACK_FREE };
85419
85420-#ifdef CONFIG_SYSFS
85421+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
85422 static int sysfs_slab_add(struct kmem_cache *);
85423 static int sysfs_slab_alias(struct kmem_cache *, const char *);
85424 static void sysfs_slab_remove(struct kmem_cache *);
85425@@ -519,7 +519,7 @@ static void print_track(const char *s, struct track *t)
85426 if (!t->addr)
85427 return;
85428
85429- printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
85430+ printk(KERN_ERR "INFO: %s in %pA age=%lu cpu=%u pid=%d\n",
85431 s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
85432 #ifdef CONFIG_STACKTRACE
85433 {
85434@@ -2594,6 +2594,14 @@ static __always_inline void slab_free(struct kmem_cache *s,
85435
85436 slab_free_hook(s, x);
85437
85438+#ifdef CONFIG_PAX_MEMORY_SANITIZE
85439+ if (pax_sanitize_slab && !(s->flags & SLAB_NO_SANITIZE)) {
85440+ memset(x, PAX_MEMORY_SANITIZE_VALUE, s->object_size);
85441+ if (s->ctor)
85442+ s->ctor(x);
85443+ }
85444+#endif
85445+
85446 redo:
85447 /*
85448 * Determine the currently cpus per cpu slab.
85449@@ -2661,7 +2669,7 @@ static int slub_min_objects;
85450 * Merge control. If this is set then no merging of slab caches will occur.
85451 * (Could be removed. This was introduced to pacify the merge skeptics.)
85452 */
85453-static int slub_nomerge;
85454+static int slub_nomerge = 1;
85455
85456 /*
85457 * Calculate the order of allocation given an slab object size.
85458@@ -2938,6 +2946,9 @@ static int calculate_sizes(struct kmem_cache *s, int forced_order)
85459 s->inuse = size;
85460
85461 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
85462+#ifdef CONFIG_PAX_MEMORY_SANITIZE
85463+ (pax_sanitize_slab && !(flags & SLAB_NO_SANITIZE)) ||
85464+#endif
85465 s->ctor)) {
85466 /*
85467 * Relocate free pointer after the object if it is not
85468@@ -3283,6 +3294,59 @@ void *__kmalloc_node(size_t size, gfp_t flags, int node)
85469 EXPORT_SYMBOL(__kmalloc_node);
85470 #endif
85471
85472+bool is_usercopy_object(const void *ptr)
85473+{
85474+ struct page *page;
85475+ struct kmem_cache *s;
85476+
85477+ if (ZERO_OR_NULL_PTR(ptr))
85478+ return false;
85479+
85480+ if (!slab_is_available())
85481+ return false;
85482+
85483+ if (!virt_addr_valid(ptr))
85484+ return false;
85485+
85486+ page = virt_to_head_page(ptr);
85487+
85488+ if (!PageSlab(page))
85489+ return false;
85490+
85491+ s = page->slab_cache;
85492+ return s->flags & SLAB_USERCOPY;
85493+}
85494+
85495+#ifdef CONFIG_PAX_USERCOPY
85496+const char *check_heap_object(const void *ptr, unsigned long n)
85497+{
85498+ struct page *page;
85499+ struct kmem_cache *s;
85500+ unsigned long offset;
85501+
85502+ if (ZERO_OR_NULL_PTR(ptr))
85503+ return "<null>";
85504+
85505+ if (!virt_addr_valid(ptr))
85506+ return NULL;
85507+
85508+ page = virt_to_head_page(ptr);
85509+
85510+ if (!PageSlab(page))
85511+ return NULL;
85512+
85513+ s = page->slab_cache;
85514+ if (!(s->flags & SLAB_USERCOPY))
85515+ return s->name;
85516+
85517+ offset = (ptr - page_address(page)) % s->size;
85518+ if (offset <= s->object_size && n <= s->object_size - offset)
85519+ return NULL;
85520+
85521+ return s->name;
85522+}
85523+#endif
85524+
85525 size_t ksize(const void *object)
85526 {
85527 struct page *page;
85528@@ -3347,6 +3411,7 @@ void kfree(const void *x)
85529 if (unlikely(ZERO_OR_NULL_PTR(x)))
85530 return;
85531
85532+ VM_BUG_ON(!virt_addr_valid(x));
85533 page = virt_to_head_page(x);
85534 if (unlikely(!PageSlab(page))) {
85535 BUG_ON(!PageCompound(page));
85536@@ -3652,7 +3717,7 @@ static int slab_unmergeable(struct kmem_cache *s)
85537 /*
85538 * We may have set a slab to be unmergeable during bootstrap.
85539 */
85540- if (s->refcount < 0)
85541+ if (atomic_read(&s->refcount) < 0)
85542 return 1;
85543
85544 return 0;
85545@@ -3710,7 +3775,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
85546
85547 s = find_mergeable(memcg, size, align, flags, name, ctor);
85548 if (s) {
85549- s->refcount++;
85550+ atomic_inc(&s->refcount);
85551 /*
85552 * Adjust the object sizes so that we clear
85553 * the complete object on kzalloc.
85554@@ -3719,7 +3784,7 @@ __kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
85555 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
85556
85557 if (sysfs_slab_alias(s, name)) {
85558- s->refcount--;
85559+ atomic_dec(&s->refcount);
85560 s = NULL;
85561 }
85562 }
85563@@ -3781,7 +3846,7 @@ static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
85564 return NOTIFY_OK;
85565 }
85566
85567-static struct notifier_block __cpuinitdata slab_notifier = {
85568+static struct notifier_block slab_notifier = {
85569 .notifier_call = slab_cpuup_callback
85570 };
85571
85572@@ -3839,7 +3904,7 @@ void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
85573 }
85574 #endif
85575
85576-#ifdef CONFIG_SYSFS
85577+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
85578 static int count_inuse(struct page *page)
85579 {
85580 return page->inuse;
85581@@ -4226,12 +4291,12 @@ static void resiliency_test(void)
85582 validate_slab_cache(kmalloc_caches[9]);
85583 }
85584 #else
85585-#ifdef CONFIG_SYSFS
85586+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
85587 static void resiliency_test(void) {};
85588 #endif
85589 #endif
85590
85591-#ifdef CONFIG_SYSFS
85592+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
85593 enum slab_stat_type {
85594 SL_ALL, /* All slabs */
85595 SL_PARTIAL, /* Only partially allocated slabs */
85596@@ -4475,7 +4540,7 @@ SLAB_ATTR_RO(ctor);
85597
85598 static ssize_t aliases_show(struct kmem_cache *s, char *buf)
85599 {
85600- return sprintf(buf, "%d\n", s->refcount - 1);
85601+ return sprintf(buf, "%d\n", atomic_read(&s->refcount) - 1);
85602 }
85603 SLAB_ATTR_RO(aliases);
85604
85605@@ -4563,6 +4628,14 @@ static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
85606 SLAB_ATTR_RO(cache_dma);
85607 #endif
85608
85609+#ifdef CONFIG_PAX_USERCOPY_SLABS
85610+static ssize_t usercopy_show(struct kmem_cache *s, char *buf)
85611+{
85612+ return sprintf(buf, "%d\n", !!(s->flags & SLAB_USERCOPY));
85613+}
85614+SLAB_ATTR_RO(usercopy);
85615+#endif
85616+
85617 static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
85618 {
85619 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
85620@@ -4897,6 +4970,9 @@ static struct attribute *slab_attrs[] = {
85621 #ifdef CONFIG_ZONE_DMA
85622 &cache_dma_attr.attr,
85623 #endif
85624+#ifdef CONFIG_PAX_USERCOPY_SLABS
85625+ &usercopy_attr.attr,
85626+#endif
85627 #ifdef CONFIG_NUMA
85628 &remote_node_defrag_ratio_attr.attr,
85629 #endif
85630@@ -5128,6 +5204,7 @@ static char *create_unique_id(struct kmem_cache *s)
85631 return name;
85632 }
85633
85634+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
85635 static int sysfs_slab_add(struct kmem_cache *s)
85636 {
85637 int err;
85638@@ -5151,7 +5228,7 @@ static int sysfs_slab_add(struct kmem_cache *s)
85639 }
85640
85641 s->kobj.kset = slab_kset;
85642- err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
85643+ err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
85644 if (err) {
85645 kobject_put(&s->kobj);
85646 return err;
85647@@ -5185,6 +5262,7 @@ static void sysfs_slab_remove(struct kmem_cache *s)
85648 kobject_del(&s->kobj);
85649 kobject_put(&s->kobj);
85650 }
85651+#endif
85652
85653 /*
85654 * Need to buffer aliases during bootup until sysfs becomes
85655@@ -5198,6 +5276,7 @@ struct saved_alias {
85656
85657 static struct saved_alias *alias_list;
85658
85659+#if defined(CONFIG_SYSFS) && !defined(CONFIG_GRKERNSEC_PROC_ADD)
85660 static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
85661 {
85662 struct saved_alias *al;
85663@@ -5220,6 +5299,7 @@ static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
85664 alias_list = al;
85665 return 0;
85666 }
85667+#endif
85668
85669 static int __init slab_sysfs_init(void)
85670 {
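
Note: in the SLUB slab_free() hunk, the object is wiped with the poison byte and then the cache constructor is re-run. My reading of why: caches with a ctor promise that free objects remain in constructed state (the same reason calculate_sizes() relocates the free pointer when sanitizing), so the wipe must be followed by reconstruction or later allocations would see poison where ctor-initialized fields are expected. Mock of the sequence (sketch; struct conn and the helpers are hypothetical):

/* Sanitize-then-reconstruct on free: the secret is gone immediately,
 * the "constructed" invariant is restored for the next user. */
#include <stdio.h>
#include <string.h>

#define POISON 0xfe

struct conn { int state; char secret[32]; };

static void conn_ctor(void *obj)
{
	struct conn *c = obj;

	memset(c, 0, sizeof(*c));
	c->state = 1;			/* constructed-state invariant */
}

static void cache_free(struct conn *c)
{
	memset(c, POISON, sizeof(*c));	/* wipe stale data */
	conn_ctor(c);			/* restore the invariant */
}

int main(void)
{
	struct conn c;

	conn_ctor(&c);
	strcpy(c.secret, "hunter2");
	cache_free(&c);
	printf("state=%d secret[0]=%#x\n", c.state, (unsigned char)c.secret[0]);
	return 0;
}
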
85671diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
85672index 27eeab3..7c3f7f2 100644
85673--- a/mm/sparse-vmemmap.c
85674+++ b/mm/sparse-vmemmap.c
85675@@ -130,7 +130,7 @@ pud_t * __meminit vmemmap_pud_populate(pgd_t *pgd, unsigned long addr, int node)
85676 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
85677 if (!p)
85678 return NULL;
85679- pud_populate(&init_mm, pud, p);
85680+ pud_populate_kernel(&init_mm, pud, p);
85681 }
85682 return pud;
85683 }
85684@@ -142,7 +142,7 @@ pgd_t * __meminit vmemmap_pgd_populate(unsigned long addr, int node)
85685 void *p = vmemmap_alloc_block(PAGE_SIZE, node);
85686 if (!p)
85687 return NULL;
85688- pgd_populate(&init_mm, pgd, p);
85689+ pgd_populate_kernel(&init_mm, pgd, p);
85690 }
85691 return pgd;
85692 }
85693diff --git a/mm/sparse.c b/mm/sparse.c
85694index 1c91f0d3..485470a 100644
85695--- a/mm/sparse.c
85696+++ b/mm/sparse.c
85697@@ -761,7 +761,7 @@ static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
85698
85699 for (i = 0; i < PAGES_PER_SECTION; i++) {
85700 if (PageHWPoison(&memmap[i])) {
85701- atomic_long_sub(1, &num_poisoned_pages);
85702+ atomic_long_sub_unchecked(1, &num_poisoned_pages);
85703 ClearPageHWPoison(&memmap[i]);
85704 }
85705 }
85706diff --git a/mm/swap.c b/mm/swap.c
85707index dfd7d71..ccdf688 100644
85708--- a/mm/swap.c
85709+++ b/mm/swap.c
85710@@ -31,6 +31,7 @@
85711 #include <linux/memcontrol.h>
85712 #include <linux/gfp.h>
85713 #include <linux/uio.h>
85714+#include <linux/hugetlb.h>
85715
85716 #include "internal.h"
85717
85718@@ -73,6 +74,8 @@ static void __put_compound_page(struct page *page)
85719
85720 __page_cache_release(page);
85721 dtor = get_compound_page_dtor(page);
85722+ if (!PageHuge(page))
85723+ BUG_ON(dtor != free_compound_page);
85724 (*dtor)(page);
85725 }
85726
85727diff --git a/mm/swapfile.c b/mm/swapfile.c
85728index 746af55b..7ac94ae 100644
85729--- a/mm/swapfile.c
85730+++ b/mm/swapfile.c
85731@@ -66,7 +66,7 @@ static DEFINE_MUTEX(swapon_mutex);
85732
85733 static DECLARE_WAIT_QUEUE_HEAD(proc_poll_wait);
85734 /* Activity counter to indicate that a swapon or swapoff has occurred */
85735-static atomic_t proc_poll_event = ATOMIC_INIT(0);
85736+static atomic_unchecked_t proc_poll_event = ATOMIC_INIT(0);
85737
85738 static inline unsigned char swap_count(unsigned char ent)
85739 {
85740@@ -1684,7 +1684,7 @@ SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
85741 }
85742 filp_close(swap_file, NULL);
85743 err = 0;
85744- atomic_inc(&proc_poll_event);
85745+ atomic_inc_unchecked(&proc_poll_event);
85746 wake_up_interruptible(&proc_poll_wait);
85747
85748 out_dput:
85749@@ -1701,8 +1701,8 @@ static unsigned swaps_poll(struct file *file, poll_table *wait)
85750
85751 poll_wait(file, &proc_poll_wait, wait);
85752
85753- if (seq->poll_event != atomic_read(&proc_poll_event)) {
85754- seq->poll_event = atomic_read(&proc_poll_event);
85755+ if (seq->poll_event != atomic_read_unchecked(&proc_poll_event)) {
85756+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
85757 return POLLIN | POLLRDNORM | POLLERR | POLLPRI;
85758 }
85759
85760@@ -1800,7 +1800,7 @@ static int swaps_open(struct inode *inode, struct file *file)
85761 return ret;
85762
85763 seq = file->private_data;
85764- seq->poll_event = atomic_read(&proc_poll_event);
85765+ seq->poll_event = atomic_read_unchecked(&proc_poll_event);
85766 return 0;
85767 }
85768
85769@@ -2143,7 +2143,7 @@ SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
85770 (frontswap_map) ? "FS" : "");
85771
85772 mutex_unlock(&swapon_mutex);
85773- atomic_inc(&proc_poll_event);
85774+ atomic_inc_unchecked(&proc_poll_event);
85775 wake_up_interruptible(&proc_poll_wait);
85776
85777 if (S_ISREG(inode->i_mode))
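
Note: proc_poll_event moves to atomic_unchecked_t here, as did the slab hit/miss statistics earlier. The distinction, as I understand PAX_REFCOUNT (hedged): plain atomic_t operations gain wraparound detection, which is right for reference counts but wrong for free-running sequence/event counters where wrap is intended and harmless, so those opt out via the _unchecked variants. Sketch of the two behaviors at the 32-bit maximum:

/* Checked vs unchecked increment at INT32_MAX (illustration only;
 * the real PAX_REFCOUNT check is an overflow trap, not this test). */
#include <stdio.h>
#include <stdint.h>

static int checked_inc(int32_t *v)
{
	if (*v == INT32_MAX)
		return -1;			/* would wrap: refuse, report */
	(*v)++;
	return 0;
}

static void unchecked_inc(int32_t *v)
{
	*v = (int32_t)((uint32_t)*v + 1);	/* wrap is intended */
}

int main(void)
{
	int32_t refs = INT32_MAX, events = INT32_MAX;

	if (checked_inc(&refs))
		puts("refcount saturation detected");
	unchecked_inc(&events);
	printf("event counter wrapped to %d, poll logic still works\n", events);
	return 0;
}
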
85778diff --git a/mm/util.c b/mm/util.c
85779index ab1424d..7c5bd5a 100644
85780--- a/mm/util.c
85781+++ b/mm/util.c
85782@@ -294,6 +294,12 @@ done:
85783 void arch_pick_mmap_layout(struct mm_struct *mm)
85784 {
85785 mm->mmap_base = TASK_UNMAPPED_BASE;
85786+
85787+#ifdef CONFIG_PAX_RANDMMAP
85788+ if (mm->pax_flags & MF_PAX_RANDMMAP)
85789+ mm->mmap_base += mm->delta_mmap;
85790+#endif
85791+
85792 mm->get_unmapped_area = arch_get_unmapped_area;
85793 mm->unmap_area = arch_unmap_area;
85794 }
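
Note: the util.c hunk shifts the nommu-style mmap base by mm->delta_mmap when MF_PAX_RANDMMAP is set, randomizing where anonymous mappings start. The effect is visible per-exec; run the sketch below twice and compare (the address also varies under stock ASLR — on a PaX kernel delta_mmap is one source of the shift):

/* Observe mmap-base randomization across runs. */
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	void *p = mmap(NULL, 4096, PROT_READ,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	printf("first anonymous mapping at %p\n", p);
	munmap(p, 4096);
	return 0;
}
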
85795diff --git a/mm/vmalloc.c b/mm/vmalloc.c
85796index d365724..6cae7c2 100644
85797--- a/mm/vmalloc.c
85798+++ b/mm/vmalloc.c
85799@@ -59,8 +59,19 @@ static void vunmap_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end)
85800
85801 pte = pte_offset_kernel(pmd, addr);
85802 do {
85803- pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
85804- WARN_ON(!pte_none(ptent) && !pte_present(ptent));
85805+
85806+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
85807+ if ((unsigned long)MODULES_EXEC_VADDR <= addr && addr < (unsigned long)MODULES_EXEC_END) {
85808+ BUG_ON(!pte_exec(*pte));
85809+ set_pte_at(&init_mm, addr, pte, pfn_pte(__pa(addr) >> PAGE_SHIFT, PAGE_KERNEL_EXEC));
85810+ continue;
85811+ }
85812+#endif
85813+
85814+ {
85815+ pte_t ptent = ptep_get_and_clear(&init_mm, addr, pte);
85816+ WARN_ON(!pte_none(ptent) && !pte_present(ptent));
85817+ }
85818 } while (pte++, addr += PAGE_SIZE, addr != end);
85819 }
85820
85821@@ -120,16 +131,29 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
85822 pte = pte_alloc_kernel(pmd, addr);
85823 if (!pte)
85824 return -ENOMEM;
85825+
85826+ pax_open_kernel();
85827 do {
85828 struct page *page = pages[*nr];
85829
85830- if (WARN_ON(!pte_none(*pte)))
85831+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
85832+ if (pgprot_val(prot) & _PAGE_NX)
85833+#endif
85834+
85835+ if (!pte_none(*pte)) {
85836+ pax_close_kernel();
85837+ WARN_ON(1);
85838 return -EBUSY;
85839- if (WARN_ON(!page))
85840+ }
85841+ if (!page) {
85842+ pax_close_kernel();
85843+ WARN_ON(1);
85844 return -ENOMEM;
85845+ }
85846 set_pte_at(&init_mm, addr, pte, mk_pte(page, prot));
85847 (*nr)++;
85848 } while (pte++, addr += PAGE_SIZE, addr != end);
85849+ pax_close_kernel();
85850 return 0;
85851 }
85852
85853@@ -139,7 +163,7 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
85854 pmd_t *pmd;
85855 unsigned long next;
85856
85857- pmd = pmd_alloc(&init_mm, pud, addr);
85858+ pmd = pmd_alloc_kernel(&init_mm, pud, addr);
85859 if (!pmd)
85860 return -ENOMEM;
85861 do {
85862@@ -156,7 +180,7 @@ static int vmap_pud_range(pgd_t *pgd, unsigned long addr,
85863 pud_t *pud;
85864 unsigned long next;
85865
85866- pud = pud_alloc(&init_mm, pgd, addr);
85867+ pud = pud_alloc_kernel(&init_mm, pgd, addr);
85868 if (!pud)
85869 return -ENOMEM;
85870 do {
85871@@ -216,6 +240,12 @@ int is_vmalloc_or_module_addr(const void *x)
85872 if (addr >= MODULES_VADDR && addr < MODULES_END)
85873 return 1;
85874 #endif
85875+
85876+#if defined(CONFIG_X86_32) && defined(CONFIG_PAX_KERNEXEC)
85877+ if (x >= (const void *)MODULES_EXEC_VADDR && x < (const void *)MODULES_EXEC_END)
85878+ return 1;
85879+#endif
85880+
85881 return is_vmalloc_addr(x);
85882 }
85883
85884@@ -236,8 +266,14 @@ struct page *vmalloc_to_page(const void *vmalloc_addr)
85885
85886 if (!pgd_none(*pgd)) {
85887 pud_t *pud = pud_offset(pgd, addr);
85888+#ifdef CONFIG_X86
85889+ if (!pud_large(*pud))
85890+#endif
85891 if (!pud_none(*pud)) {
85892 pmd_t *pmd = pmd_offset(pud, addr);
85893+#ifdef CONFIG_X86
85894+ if (!pmd_large(*pmd))
85895+#endif
85896 if (!pmd_none(*pmd)) {
85897 pte_t *ptep, pte;
85898
85899@@ -339,7 +375,7 @@ static void purge_vmap_area_lazy(void);
85900 * Allocate a region of KVA of the specified size and alignment, within the
85901 * vstart and vend.
85902 */
85903-static struct vmap_area *alloc_vmap_area(unsigned long size,
85904+static __size_overflow(1) struct vmap_area *alloc_vmap_area(unsigned long size,
85905 unsigned long align,
85906 unsigned long vstart, unsigned long vend,
85907 int node, gfp_t gfp_mask)
85908@@ -1337,6 +1373,16 @@ static struct vm_struct *__get_vm_area_node(unsigned long size,
85909 struct vm_struct *area;
85910
85911 BUG_ON(in_interrupt());
85912+
85913+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
85914+ if (flags & VM_KERNEXEC) {
85915+ if (start != VMALLOC_START || end != VMALLOC_END)
85916+ return NULL;
85917+ start = (unsigned long)MODULES_EXEC_VADDR;
85918+ end = (unsigned long)MODULES_EXEC_END;
85919+ }
85920+#endif
85921+
85922 if (flags & VM_IOREMAP) {
85923 int bit = fls(size);
85924
85925@@ -1581,6 +1627,11 @@ void *vmap(struct page **pages, unsigned int count,
85926 if (count > totalram_pages)
85927 return NULL;
85928
85929+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
85930+ if (!(pgprot_val(prot) & _PAGE_NX))
85931+ flags |= VM_KERNEXEC;
85932+#endif
85933+
85934 area = get_vm_area_caller((count << PAGE_SHIFT), flags,
85935 __builtin_return_address(0));
85936 if (!area)
85937@@ -1682,6 +1733,13 @@ void *__vmalloc_node_range(unsigned long size, unsigned long align,
85938 if (!size || (size >> PAGE_SHIFT) > totalram_pages)
85939 goto fail;
85940
85941+#if defined(CONFIG_X86) && defined(CONFIG_PAX_KERNEXEC)
85942+ if (!(pgprot_val(prot) & _PAGE_NX))
85943+ area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST | VM_KERNEXEC,
85944+ VMALLOC_START, VMALLOC_END, node, gfp_mask, caller);
85945+ else
85946+#endif
85947+
85948 area = __get_vm_area_node(size, align, VM_ALLOC | VM_UNLIST,
85949 start, end, node, gfp_mask, caller);
85950 if (!area)
85951@@ -1858,10 +1916,9 @@ EXPORT_SYMBOL(vzalloc_node);
85952 * For tight control over page level allocator and protection flags
85953 * use __vmalloc() instead.
85954 */
85955-
85956 void *vmalloc_exec(unsigned long size)
85957 {
85958- return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL_EXEC,
85959+ return __vmalloc_node(size, 1, GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL_EXEC,
85960 NUMA_NO_NODE, __builtin_return_address(0));
85961 }
85962
85963@@ -2168,6 +2225,8 @@ int remap_vmalloc_range(struct vm_area_struct *vma, void *addr,
85964 unsigned long uaddr = vma->vm_start;
85965 unsigned long usize = vma->vm_end - vma->vm_start;
85966
85967+ BUG_ON(vma->vm_mirror);
85968+
85969 if ((PAGE_SIZE-1) & (unsigned long)addr)
85970 return -EINVAL;
85971
85972@@ -2629,7 +2688,11 @@ static int s_show(struct seq_file *m, void *p)
85973 v->addr, v->addr + v->size, v->size);
85974
85975 if (v->caller)
85976+#ifdef CONFIG_GRKERNSEC_HIDESYM
85977+ seq_printf(m, " %pK", v->caller);
85978+#else
85979 seq_printf(m, " %pS", v->caller);
85980+#endif
85981
85982 if (v->nr_pages)
85983 seq_printf(m, " pages=%d", v->nr_pages);
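Note on the mm/vmalloc.c hunks above: the reworked vmap_pte_range() now takes pax_open_kernel()/pax_close_kernel() around the page-table writes, and must therefore call pax_close_kernel() on every early return (the -EBUSY and -ENOMEM paths). A minimal userspace sketch of that pairing discipline, with illustrative stand-in names rather than real kernel interfaces, expressed as the equivalent single-exit idiom:

#include <stdio.h>

/* Illustrative stand-ins for pax_open_kernel()/pax_close_kernel(). */
static void open_protection(void)  { puts("protection opened"); }
static void close_protection(void) { puts("protection closed"); }

/* Funnel every failure onto one exit path so the close call can
 * never be skipped; the patch achieves the same invariant with an
 * explicit pax_close_kernel() before each return. */
static int map_range(int fail_busy, int fail_nomem)
{
	int err = 0;

	open_protection();
	if (fail_busy)  { err = -16; goto out; }	/* -EBUSY  */
	if (fail_nomem) { err = -12; goto out; }	/* -ENOMEM */
	/* ... the protected page-table writes would go here ... */
out:
	close_protection();
	return err;
}

int main(void)
{
	printf("ok path:   %d\n", map_range(0, 0));
	printf("busy path: %d\n", map_range(1, 0));
	return 0;
}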
85984diff --git a/mm/vmstat.c b/mm/vmstat.c
85985index f42745e..62f8346 100644
85986--- a/mm/vmstat.c
85987+++ b/mm/vmstat.c
85988@@ -76,7 +76,7 @@ void vm_events_fold_cpu(int cpu)
85989 *
85990 * vm_stat contains the global counters
85991 */
85992-atomic_long_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
85993+atomic_long_unchecked_t vm_stat[NR_VM_ZONE_STAT_ITEMS] __cacheline_aligned_in_smp;
85994 EXPORT_SYMBOL(vm_stat);
85995
85996 #ifdef CONFIG_SMP
85997@@ -452,7 +452,7 @@ void refresh_cpu_vm_stats(int cpu)
85998 v = p->vm_stat_diff[i];
85999 p->vm_stat_diff[i] = 0;
86000 local_irq_restore(flags);
86001- atomic_long_add(v, &zone->vm_stat[i]);
86002+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
86003 global_diff[i] += v;
86004 #ifdef CONFIG_NUMA
86005 /* 3 seconds idle till flush */
86006@@ -490,7 +490,7 @@ void refresh_cpu_vm_stats(int cpu)
86007
86008 for (i = 0; i < NR_VM_ZONE_STAT_ITEMS; i++)
86009 if (global_diff[i])
86010- atomic_long_add(global_diff[i], &vm_stat[i]);
86011+ atomic_long_add_unchecked(global_diff[i], &vm_stat[i]);
86012 }
86013
86014 /*
86015@@ -505,8 +505,8 @@ void drain_zonestat(struct zone *zone, struct per_cpu_pageset *pset)
86016 if (pset->vm_stat_diff[i]) {
86017 int v = pset->vm_stat_diff[i];
86018 pset->vm_stat_diff[i] = 0;
86019- atomic_long_add(v, &zone->vm_stat[i]);
86020- atomic_long_add(v, &vm_stat[i]);
86021+ atomic_long_add_unchecked(v, &zone->vm_stat[i]);
86022+ atomic_long_add_unchecked(v, &vm_stat[i]);
86023 }
86024 }
86025 #endif
86026@@ -1226,7 +1226,7 @@ static int __cpuinit vmstat_cpuup_callback(struct notifier_block *nfb,
86027 return NOTIFY_OK;
86028 }
86029
86030-static struct notifier_block __cpuinitdata vmstat_notifier =
86031+static struct notifier_block vmstat_notifier =
86032 { &vmstat_cpuup_callback, NULL, 0 };
86033 #endif
86034
86035@@ -1241,10 +1241,20 @@ static int __init setup_vmstat(void)
86036 start_cpu_timer(cpu);
86037 #endif
86038 #ifdef CONFIG_PROC_FS
86039- proc_create("buddyinfo", S_IRUGO, NULL, &fragmentation_file_operations);
86040- proc_create("pagetypeinfo", S_IRUGO, NULL, &pagetypeinfo_file_ops);
86041- proc_create("vmstat", S_IRUGO, NULL, &proc_vmstat_file_operations);
86042- proc_create("zoneinfo", S_IRUGO, NULL, &proc_zoneinfo_file_operations);
86043+ {
86044+ mode_t gr_mode = S_IRUGO;
86045+#ifdef CONFIG_GRKERNSEC_PROC_ADD
86046+ gr_mode = S_IRUSR;
86047+#endif
86048+ proc_create("buddyinfo", gr_mode, NULL, &fragmentation_file_operations);
86049+ proc_create("pagetypeinfo", gr_mode, NULL, &pagetypeinfo_file_ops);
86050+#ifdef CONFIG_GRKERNSEC_PROC_USERGROUP
86051+ proc_create("vmstat", gr_mode | S_IRGRP, NULL, &proc_vmstat_file_operations);
86052+#else
86053+ proc_create("vmstat", gr_mode, NULL, &proc_vmstat_file_operations);
86054+#endif
86055+ proc_create("zoneinfo", gr_mode, NULL, &proc_zoneinfo_file_operations);
86056+ }
86057 #endif
86058 return 0;
86059 }
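Note on the mm/vmstat.c hunks above: vm_stat and the per-zone counters become atomic_long_unchecked_t because they are pure statistics that may legitimately wrap, and so should be exempt from PaX REFCOUNT-style overflow trapping. A toy userspace model of the checked/unchecked distinction, assuming a GCC/Clang-style __builtin_add_overflow and not the actual kernel implementation:

#include <stdio.h>
#include <limits.h>

/* "Checked" add: reports signed overflow the way a REFCOUNT-style
 * defense would trap it.  Returns nonzero on overflow. */
static int checked_add(long v, long inc, long *out)
{
	return __builtin_add_overflow(v, inc, out);
}

/* "Unchecked" add: statistics counter, wraps silently by doing the
 * arithmetic in unsigned (well-defined modular) form. */
static long unchecked_add(long v, long inc)
{
	return (long)((unsigned long)v + (unsigned long)inc);
}

int main(void)
{
	long stat = LONG_MAX, out;

	printf("unchecked counter wraps to %ld\n", unchecked_add(stat, 1));
	if (checked_add(stat, 1, &out))
		puts("checked counter: overflow detected, would trap");
	return 0;
}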
86060diff --git a/net/8021q/vlan.c b/net/8021q/vlan.c
86061index 9424f37..6aabf19 100644
86062--- a/net/8021q/vlan.c
86063+++ b/net/8021q/vlan.c
86064@@ -469,7 +469,7 @@ out:
86065 return NOTIFY_DONE;
86066 }
86067
86068-static struct notifier_block vlan_notifier_block __read_mostly = {
86069+static struct notifier_block vlan_notifier_block = {
86070 .notifier_call = vlan_device_event,
86071 };
86072
86073@@ -544,8 +544,7 @@ static int vlan_ioctl_handler(struct net *net, void __user *arg)
86074 err = -EPERM;
86075 if (!ns_capable(net->user_ns, CAP_NET_ADMIN))
86076 break;
86077- if ((args.u.name_type >= 0) &&
86078- (args.u.name_type < VLAN_NAME_TYPE_HIGHEST)) {
86079+ if (args.u.name_type < VLAN_NAME_TYPE_HIGHEST) {
86080 struct vlan_net *vn;
86081
86082 vn = net_generic(net, vlan_net_id);
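Note on the vlan.c hunk above: the dropped half of the condition, (args.u.name_type >= 0), is presumably a tautology because name_type is an unsigned field, so only the upper bound carries any information. A minimal sketch of the pitfall; compile with gcc -Wtype-limits to see the "comparison is always true" diagnostic:

#include <stdio.h>

int main(void)
{
	unsigned int name_type = 0u - 1u;	/* a "negative" input wraps to UINT_MAX */

	/* Always true for an unsigned operand, so the check rejects
	 * nothing; the real bound is the < VLAN_NAME_TYPE_HIGHEST test. */
	if (name_type >= 0)
		printf("tautology: %u >= 0\n", name_type);

	return 0;
}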
86083diff --git a/net/9p/mod.c b/net/9p/mod.c
86084index 6ab36ae..6f1841b 100644
86085--- a/net/9p/mod.c
86086+++ b/net/9p/mod.c
86087@@ -84,7 +84,7 @@ static LIST_HEAD(v9fs_trans_list);
86088 void v9fs_register_trans(struct p9_trans_module *m)
86089 {
86090 spin_lock(&v9fs_trans_lock);
86091- list_add_tail(&m->list, &v9fs_trans_list);
86092+ pax_list_add_tail((struct list_head *)&m->list, &v9fs_trans_list);
86093 spin_unlock(&v9fs_trans_lock);
86094 }
86095 EXPORT_SYMBOL(v9fs_register_trans);
86096@@ -97,7 +97,7 @@ EXPORT_SYMBOL(v9fs_register_trans);
86097 void v9fs_unregister_trans(struct p9_trans_module *m)
86098 {
86099 spin_lock(&v9fs_trans_lock);
86100- list_del_init(&m->list);
86101+ pax_list_del_init((struct list_head *)&m->list);
86102 spin_unlock(&v9fs_trans_lock);
86103 }
86104 EXPORT_SYMBOL(v9fs_unregister_trans);
86105diff --git a/net/9p/trans_fd.c b/net/9p/trans_fd.c
86106index 02efb25..41541a9 100644
86107--- a/net/9p/trans_fd.c
86108+++ b/net/9p/trans_fd.c
86109@@ -425,7 +425,7 @@ static int p9_fd_write(struct p9_client *client, void *v, int len)
86110 oldfs = get_fs();
86111 set_fs(get_ds());
86112 /* The cast to a user pointer is valid due to the set_fs() */
86113- ret = vfs_write(ts->wr, (__force void __user *)v, len, &ts->wr->f_pos);
86114+ ret = vfs_write(ts->wr, (void __force_user *)v, len, &ts->wr->f_pos);
86115 set_fs(oldfs);
86116
86117 if (ret <= 0 && ret != -ERESTARTSYS && ret != -EAGAIN)
86118diff --git a/net/atm/atm_misc.c b/net/atm/atm_misc.c
86119index 876fbe8..8bbea9f 100644
86120--- a/net/atm/atm_misc.c
86121+++ b/net/atm/atm_misc.c
86122@@ -17,7 +17,7 @@ int atm_charge(struct atm_vcc *vcc, int truesize)
86123 if (atomic_read(&sk_atm(vcc)->sk_rmem_alloc) <= sk_atm(vcc)->sk_rcvbuf)
86124 return 1;
86125 atm_return(vcc, truesize);
86126- atomic_inc(&vcc->stats->rx_drop);
86127+ atomic_inc_unchecked(&vcc->stats->rx_drop);
86128 return 0;
86129 }
86130 EXPORT_SYMBOL(atm_charge);
86131@@ -39,7 +39,7 @@ struct sk_buff *atm_alloc_charge(struct atm_vcc *vcc, int pdu_size,
86132 }
86133 }
86134 atm_return(vcc, guess);
86135- atomic_inc(&vcc->stats->rx_drop);
86136+ atomic_inc_unchecked(&vcc->stats->rx_drop);
86137 return NULL;
86138 }
86139 EXPORT_SYMBOL(atm_alloc_charge);
86140@@ -86,7 +86,7 @@ EXPORT_SYMBOL(atm_pcr_goal);
86141
86142 void sonet_copy_stats(struct k_sonet_stats *from, struct sonet_stats *to)
86143 {
86144-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
86145+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
86146 __SONET_ITEMS
86147 #undef __HANDLE_ITEM
86148 }
86149@@ -94,7 +94,7 @@ EXPORT_SYMBOL(sonet_copy_stats);
86150
86151 void sonet_subtract_stats(struct k_sonet_stats *from, struct sonet_stats *to)
86152 {
86153-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
86154+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i,&from->i)
86155 __SONET_ITEMS
86156 #undef __HANDLE_ITEM
86157 }
86158diff --git a/net/atm/lec.h b/net/atm/lec.h
86159index 4149db1..f2ab682 100644
86160--- a/net/atm/lec.h
86161+++ b/net/atm/lec.h
86162@@ -48,7 +48,7 @@ struct lane2_ops {
86163 const u8 *tlvs, u32 sizeoftlvs);
86164 void (*associate_indicator) (struct net_device *dev, const u8 *mac_addr,
86165 const u8 *tlvs, u32 sizeoftlvs);
86166-};
86167+} __no_const;
86168
86169 /*
86170 * ATM LAN Emulation supports both LLC & Dix Ethernet EtherType
86171diff --git a/net/atm/proc.c b/net/atm/proc.c
86172index bbb6461..cf04016 100644
86173--- a/net/atm/proc.c
86174+++ b/net/atm/proc.c
86175@@ -45,9 +45,9 @@ static void add_stats(struct seq_file *seq, const char *aal,
86176 const struct k_atm_aal_stats *stats)
86177 {
86178 seq_printf(seq, "%s ( %d %d %d %d %d )", aal,
86179- atomic_read(&stats->tx), atomic_read(&stats->tx_err),
86180- atomic_read(&stats->rx), atomic_read(&stats->rx_err),
86181- atomic_read(&stats->rx_drop));
86182+ atomic_read_unchecked(&stats->tx),atomic_read_unchecked(&stats->tx_err),
86183+ atomic_read_unchecked(&stats->rx),atomic_read_unchecked(&stats->rx_err),
86184+ atomic_read_unchecked(&stats->rx_drop));
86185 }
86186
86187 static void atm_dev_info(struct seq_file *seq, const struct atm_dev *dev)
86188diff --git a/net/atm/resources.c b/net/atm/resources.c
86189index 0447d5d..3cf4728 100644
86190--- a/net/atm/resources.c
86191+++ b/net/atm/resources.c
86192@@ -160,7 +160,7 @@ EXPORT_SYMBOL(atm_dev_deregister);
86193 static void copy_aal_stats(struct k_atm_aal_stats *from,
86194 struct atm_aal_stats *to)
86195 {
86196-#define __HANDLE_ITEM(i) to->i = atomic_read(&from->i)
86197+#define __HANDLE_ITEM(i) to->i = atomic_read_unchecked(&from->i)
86198 __AAL_STAT_ITEMS
86199 #undef __HANDLE_ITEM
86200 }
86201@@ -168,7 +168,7 @@ static void copy_aal_stats(struct k_atm_aal_stats *from,
86202 static void subtract_aal_stats(struct k_atm_aal_stats *from,
86203 struct atm_aal_stats *to)
86204 {
86205-#define __HANDLE_ITEM(i) atomic_sub(to->i, &from->i)
86206+#define __HANDLE_ITEM(i) atomic_sub_unchecked(to->i, &from->i)
86207 __AAL_STAT_ITEMS
86208 #undef __HANDLE_ITEM
86209 }
86210diff --git a/net/ax25/sysctl_net_ax25.c b/net/ax25/sysctl_net_ax25.c
86211index d5744b7..506bae3 100644
86212--- a/net/ax25/sysctl_net_ax25.c
86213+++ b/net/ax25/sysctl_net_ax25.c
86214@@ -152,7 +152,7 @@ int ax25_register_dev_sysctl(ax25_dev *ax25_dev)
86215 {
86216 char path[sizeof("net/ax25/") + IFNAMSIZ];
86217 int k;
86218- struct ctl_table *table;
86219+ ctl_table_no_const *table;
86220
86221 table = kmemdup(ax25_param_table, sizeof(ax25_param_table), GFP_KERNEL);
86222 if (!table)
86223diff --git a/net/batman-adv/bat_iv_ogm.c b/net/batman-adv/bat_iv_ogm.c
86224index f680ee1..97e3542 100644
86225--- a/net/batman-adv/bat_iv_ogm.c
86226+++ b/net/batman-adv/bat_iv_ogm.c
86227@@ -79,7 +79,7 @@ static int batadv_iv_ogm_iface_enable(struct batadv_hard_iface *hard_iface)
86228
86229 /* randomize initial seqno to avoid collision */
86230 get_random_bytes(&random_seqno, sizeof(random_seqno));
86231- atomic_set(&hard_iface->bat_iv.ogm_seqno, random_seqno);
86232+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, random_seqno);
86233
86234 hard_iface->bat_iv.ogm_buff_len = BATADV_OGM_HLEN;
86235 ogm_buff = kmalloc(hard_iface->bat_iv.ogm_buff_len, GFP_ATOMIC);
86236@@ -627,9 +627,9 @@ static void batadv_iv_ogm_schedule(struct batadv_hard_iface *hard_iface)
86237 batadv_ogm_packet = (struct batadv_ogm_packet *)(*ogm_buff);
86238
86239 /* change sequence number to network order */
86240- seqno = (uint32_t)atomic_read(&hard_iface->bat_iv.ogm_seqno);
86241+ seqno = (uint32_t)atomic_read_unchecked(&hard_iface->bat_iv.ogm_seqno);
86242 batadv_ogm_packet->seqno = htonl(seqno);
86243- atomic_inc(&hard_iface->bat_iv.ogm_seqno);
86244+ atomic_inc_unchecked(&hard_iface->bat_iv.ogm_seqno);
86245
86246 batadv_ogm_packet->ttvn = atomic_read(&bat_priv->tt.vn);
86247 batadv_ogm_packet->tt_crc = htons(bat_priv->tt.local_crc);
86248@@ -1037,7 +1037,7 @@ static void batadv_iv_ogm_process(const struct ethhdr *ethhdr,
86249 return;
86250
86251 /* could be changed by schedule_own_packet() */
86252- if_incoming_seqno = atomic_read(&if_incoming->bat_iv.ogm_seqno);
86253+ if_incoming_seqno = atomic_read_unchecked(&if_incoming->bat_iv.ogm_seqno);
86254
86255 if (batadv_ogm_packet->flags & BATADV_DIRECTLINK)
86256 has_directlink_flag = 1;
86257diff --git a/net/batman-adv/hard-interface.c b/net/batman-adv/hard-interface.c
86258index 522243a..b48c0ef 100644
86259--- a/net/batman-adv/hard-interface.c
86260+++ b/net/batman-adv/hard-interface.c
86261@@ -401,7 +401,7 @@ int batadv_hardif_enable_interface(struct batadv_hard_iface *hard_iface,
86262 hard_iface->batman_adv_ptype.dev = hard_iface->net_dev;
86263 dev_add_pack(&hard_iface->batman_adv_ptype);
86264
86265- atomic_set(&hard_iface->frag_seqno, 1);
86266+ atomic_set_unchecked(&hard_iface->frag_seqno, 1);
86267 batadv_info(hard_iface->soft_iface, "Adding interface: %s\n",
86268 hard_iface->net_dev->name);
86269
86270@@ -550,7 +550,7 @@ batadv_hardif_add_interface(struct net_device *net_dev)
86271 /* This can't be called via a bat_priv callback because
86272 * we have no bat_priv yet.
86273 */
86274- atomic_set(&hard_iface->bat_iv.ogm_seqno, 1);
86275+ atomic_set_unchecked(&hard_iface->bat_iv.ogm_seqno, 1);
86276 hard_iface->bat_iv.ogm_buff = NULL;
86277
86278 return hard_iface;
86279diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c
86280index 819dfb0..9a672d1 100644
86281--- a/net/batman-adv/soft-interface.c
86282+++ b/net/batman-adv/soft-interface.c
86283@@ -253,7 +253,7 @@ static int batadv_interface_tx(struct sk_buff *skb,
86284 primary_if->net_dev->dev_addr, ETH_ALEN);
86285
86286 /* set broadcast sequence number */
86287- seqno = atomic_inc_return(&bat_priv->bcast_seqno);
86288+ seqno = atomic_inc_return_unchecked(&bat_priv->bcast_seqno);
86289 bcast_packet->seqno = htonl(seqno);
86290
86291 batadv_add_bcast_packet_to_list(bat_priv, skb, brd_delay);
86292@@ -472,7 +472,7 @@ static int batadv_softif_init_late(struct net_device *dev)
86293 atomic_set(&bat_priv->batman_queue_left, BATADV_BATMAN_QUEUE_LEN);
86294
86295 atomic_set(&bat_priv->mesh_state, BATADV_MESH_INACTIVE);
86296- atomic_set(&bat_priv->bcast_seqno, 1);
86297+ atomic_set_unchecked(&bat_priv->bcast_seqno, 1);
86298 atomic_set(&bat_priv->tt.vn, 0);
86299 atomic_set(&bat_priv->tt.local_changes, 0);
86300 atomic_set(&bat_priv->tt.ogm_append_cnt, 0);
86301diff --git a/net/batman-adv/types.h b/net/batman-adv/types.h
86302index aba8364..50fcbb8 100644
86303--- a/net/batman-adv/types.h
86304+++ b/net/batman-adv/types.h
86305@@ -51,7 +51,7 @@
86306 struct batadv_hard_iface_bat_iv {
86307 unsigned char *ogm_buff;
86308 int ogm_buff_len;
86309- atomic_t ogm_seqno;
86310+ atomic_unchecked_t ogm_seqno;
86311 };
86312
86313 /**
86314@@ -75,7 +75,7 @@ struct batadv_hard_iface {
86315 int16_t if_num;
86316 char if_status;
86317 struct net_device *net_dev;
86318- atomic_t frag_seqno;
86319+ atomic_unchecked_t frag_seqno;
86320 struct kobject *hardif_obj;
86321 atomic_t refcount;
86322 struct packet_type batman_adv_ptype;
86323@@ -558,7 +558,7 @@ struct batadv_priv {
86324 #ifdef CONFIG_BATMAN_ADV_DEBUG
86325 atomic_t log_level;
86326 #endif
86327- atomic_t bcast_seqno;
86328+ atomic_unchecked_t bcast_seqno;
86329 atomic_t bcast_queue_left;
86330 atomic_t batman_queue_left;
86331 char num_ifaces;
86332diff --git a/net/batman-adv/unicast.c b/net/batman-adv/unicast.c
86333index 0bb3b59..ffcbf2f 100644
86334--- a/net/batman-adv/unicast.c
86335+++ b/net/batman-adv/unicast.c
86336@@ -270,7 +270,7 @@ int batadv_frag_send_skb(struct sk_buff *skb, struct batadv_priv *bat_priv,
86337 frag1->flags = BATADV_UNI_FRAG_HEAD | large_tail;
86338 frag2->flags = large_tail;
86339
86340- seqno = atomic_add_return(2, &hard_iface->frag_seqno);
86341+ seqno = atomic_add_return_unchecked(2, &hard_iface->frag_seqno);
86342 frag1->seqno = htons(seqno - 1);
86343 frag2->seqno = htons(seqno);
86344
86345diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c
86346index ace5e55..a65a1c0 100644
86347--- a/net/bluetooth/hci_core.c
86348+++ b/net/bluetooth/hci_core.c
86349@@ -2211,16 +2211,16 @@ int hci_register_dev(struct hci_dev *hdev)
86350 list_add(&hdev->list, &hci_dev_list);
86351 write_unlock(&hci_dev_list_lock);
86352
86353- hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
86354- WQ_MEM_RECLAIM, 1);
86355+ hdev->workqueue = alloc_workqueue("%s", WQ_HIGHPRI | WQ_UNBOUND |
86356+ WQ_MEM_RECLAIM, 1, hdev->name);
86357 if (!hdev->workqueue) {
86358 error = -ENOMEM;
86359 goto err;
86360 }
86361
86362- hdev->req_workqueue = alloc_workqueue(hdev->name,
86363+ hdev->req_workqueue = alloc_workqueue("%s",
86364 WQ_HIGHPRI | WQ_UNBOUND |
86365- WQ_MEM_RECLAIM, 1);
86366+ WQ_MEM_RECLAIM, 1, hdev->name);
86367 if (!hdev->req_workqueue) {
86368 destroy_workqueue(hdev->workqueue);
86369 error = -ENOMEM;
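Note on the hci_core.c hunk above: alloc_workqueue() takes a printf-style format, so passing hdev->name directly would let any format directives in the device name be interpreted; the fix passes a literal "%s" and supplies the name as an argument. The same hazard in its classic userspace form (the first call is deliberate undefined behavior for illustration; -Wformat-security flags it, and some distributions build with -Werror=format-security):

#include <stdio.h>

int main(void)
{
	/* A device name an attacker might influence. */
	const char *name = "hci%x%x";

	/* Wrong: the name is treated as a format string and the %x
	 * directives read whatever happens to be in the varargs. */
	printf(name);
	printf("\n");

	/* Right: mirrors the alloc_workqueue("%s", ..., hdev->name) fix. */
	printf("%s\n", name);
	return 0;
}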
86370diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c
86371index 9bd7d95..6c4884f 100644
86372--- a/net/bluetooth/hci_sock.c
86373+++ b/net/bluetooth/hci_sock.c
86374@@ -934,7 +934,7 @@ static int hci_sock_setsockopt(struct socket *sock, int level, int optname,
86375 uf.event_mask[1] = *((u32 *) f->event_mask + 1);
86376 }
86377
86378- len = min_t(unsigned int, len, sizeof(uf));
86379+ len = min((size_t)len, sizeof(uf));
86380 if (copy_from_user(&uf, optval, len)) {
86381 err = -EFAULT;
86382 break;
86383diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c
86384index 68843a2..30e9342 100644
86385--- a/net/bluetooth/l2cap_core.c
86386+++ b/net/bluetooth/l2cap_core.c
86387@@ -3507,8 +3507,10 @@ static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len,
86388 break;
86389
86390 case L2CAP_CONF_RFC:
86391- if (olen == sizeof(rfc))
86392- memcpy(&rfc, (void *)val, olen);
86393+ if (olen != sizeof(rfc))
86394+ break;
86395+
86396+ memcpy(&rfc, (void *)val, olen);
86397
86398 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
86399 rfc.mode != chan->mode)
86400diff --git a/net/bluetooth/l2cap_sock.c b/net/bluetooth/l2cap_sock.c
86401index 36fed40..be2eeb2 100644
86402--- a/net/bluetooth/l2cap_sock.c
86403+++ b/net/bluetooth/l2cap_sock.c
86404@@ -485,7 +485,8 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
86405 struct sock *sk = sock->sk;
86406 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
86407 struct l2cap_options opts;
86408- int len, err = 0;
86409+ int err = 0;
86410+ size_t len = optlen;
86411 u32 opt;
86412
86413 BT_DBG("sk %p", sk);
86414@@ -507,7 +508,7 @@ static int l2cap_sock_setsockopt_old(struct socket *sock, int optname,
86415 opts.max_tx = chan->max_tx;
86416 opts.txwin_size = chan->tx_win;
86417
86418- len = min_t(unsigned int, sizeof(opts), optlen);
86419+ len = min(sizeof(opts), len);
86420 if (copy_from_user((char *) &opts, optval, len)) {
86421 err = -EFAULT;
86422 break;
86423@@ -587,7 +588,8 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
86424 struct bt_security sec;
86425 struct bt_power pwr;
86426 struct l2cap_conn *conn;
86427- int len, err = 0;
86428+ int err = 0;
86429+ size_t len = optlen;
86430 u32 opt;
86431
86432 BT_DBG("sk %p", sk);
86433@@ -610,7 +612,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
86434
86435 sec.level = BT_SECURITY_LOW;
86436
86437- len = min_t(unsigned int, sizeof(sec), optlen);
86438+ len = min(sizeof(sec), len);
86439 if (copy_from_user((char *) &sec, optval, len)) {
86440 err = -EFAULT;
86441 break;
86442@@ -707,7 +709,7 @@ static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname,
86443
86444 pwr.force_active = BT_POWER_FORCE_ACTIVE_ON;
86445
86446- len = min_t(unsigned int, sizeof(pwr), optlen);
86447+ len = min(sizeof(pwr), len);
86448 if (copy_from_user((char *) &pwr, optval, len)) {
86449 err = -EFAULT;
86450 break;
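Note on the l2cap_sock.c hunks above: changing 'int len' to 'size_t len = optlen' and replacing min_t(unsigned int, ...) with min() on matching types removes mixed signed/unsigned arithmetic from the length handling. A minimal sketch of why such mixing is treacherous (builds with -Wsign-compare warnings, shown for illustration):

#include <stdio.h>

int main(void)
{
	char buf[16];
	int len = -1;			/* bogus user-supplied length */
	(void)buf;

	/* Mixed comparison: len is implicitly converted to size_t
	 * (SIZE_MAX), so the "sanity check" evaluates false even
	 * though -1 is obviously smaller than 16. */
	printf("len < sizeof(buf):       %s\n",
	       len < sizeof(buf) ? "true" : "false");	/* false */

	/* Same-type comparison behaves as intended. */
	printf("len < (int)sizeof(buf):  %s\n",
	       len < (int)sizeof(buf) ? "true" : "false");	/* true */

	/* Keeping the length in size_t end to end, as the patch does,
	 * means min(sizeof(opts), len) never mixes signedness. */
	return 0;
}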
86451diff --git a/net/bluetooth/rfcomm/sock.c b/net/bluetooth/rfcomm/sock.c
86452index 30b3721..c1bd0a0 100644
86453--- a/net/bluetooth/rfcomm/sock.c
86454+++ b/net/bluetooth/rfcomm/sock.c
86455@@ -666,7 +666,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
86456 struct sock *sk = sock->sk;
86457 struct bt_security sec;
86458 int err = 0;
86459- size_t len;
86460+ size_t len = optlen;
86461 u32 opt;
86462
86463 BT_DBG("sk %p", sk);
86464@@ -688,7 +688,7 @@ static int rfcomm_sock_setsockopt(struct socket *sock, int level, int optname, c
86465
86466 sec.level = BT_SECURITY_LOW;
86467
86468- len = min_t(unsigned int, sizeof(sec), optlen);
86469+ len = min(sizeof(sec), len);
86470 if (copy_from_user((char *) &sec, optval, len)) {
86471 err = -EFAULT;
86472 break;
86473diff --git a/net/bluetooth/rfcomm/tty.c b/net/bluetooth/rfcomm/tty.c
86474index b6e44ad..5b0d514 100644
86475--- a/net/bluetooth/rfcomm/tty.c
86476+++ b/net/bluetooth/rfcomm/tty.c
86477@@ -309,7 +309,7 @@ static void rfcomm_dev_del(struct rfcomm_dev *dev)
86478 BUG_ON(test_and_set_bit(RFCOMM_TTY_RELEASED, &dev->flags));
86479
86480 spin_lock_irqsave(&dev->port.lock, flags);
86481- if (dev->port.count > 0) {
86482+ if (atomic_read(&dev->port.count) > 0) {
86483 spin_unlock_irqrestore(&dev->port.lock, flags);
86484 return;
86485 }
86486@@ -659,10 +659,10 @@ static int rfcomm_tty_open(struct tty_struct *tty, struct file *filp)
86487 return -ENODEV;
86488
86489 BT_DBG("dev %p dst %pMR channel %d opened %d", dev, &dev->dst,
86490- dev->channel, dev->port.count);
86491+ dev->channel, atomic_read(&dev->port.count));
86492
86493 spin_lock_irqsave(&dev->port.lock, flags);
86494- if (++dev->port.count > 1) {
86495+ if (atomic_inc_return(&dev->port.count) > 1) {
86496 spin_unlock_irqrestore(&dev->port.lock, flags);
86497 return 0;
86498 }
86499@@ -727,10 +727,10 @@ static void rfcomm_tty_close(struct tty_struct *tty, struct file *filp)
86500 return;
86501
86502 BT_DBG("tty %p dev %p dlc %p opened %d", tty, dev, dev->dlc,
86503- dev->port.count);
86504+ atomic_read(&dev->port.count));
86505
86506 spin_lock_irqsave(&dev->port.lock, flags);
86507- if (!--dev->port.count) {
86508+ if (!atomic_dec_return(&dev->port.count)) {
86509 spin_unlock_irqrestore(&dev->port.lock, flags);
86510 if (dev->tty_dev->parent)
86511 device_move(dev->tty_dev, NULL, DPM_ORDER_DEV_LAST);
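Note on the rfcomm/tty.c hunks above: the open count moves to an atomic type, with atomic_inc_return()/atomic_dec_return() distinguishing the first opener and the last closer in a single operation. A self-contained C11 sketch of that pattern using stdatomic (a model of the idiom, not the kernel's atomic_t):

#include <stdio.h>
#include <stdatomic.h>

static atomic_int port_count;

/* Mirrors 'if (atomic_inc_return(&dev->port.count) > 1)':
 * returns 1 only for the first opener. */
static int port_open(void)
{
	return atomic_fetch_add(&port_count, 1) + 1 == 1;
}

/* Mirrors 'if (!atomic_dec_return(&dev->port.count))':
 * returns 1 only for the last closer. */
static int port_close(void)
{
	return atomic_fetch_sub(&port_count, 1) - 1 == 0;
}

int main(void)
{
	printf("first open?  %d\n", port_open());	/* 1 */
	printf("first open?  %d\n", port_open());	/* 0 */
	printf("last close?  %d\n", port_close());	/* 0 */
	printf("last close?  %d\n", port_close());	/* 1 */
	return 0;
}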
86512diff --git a/net/bridge/netfilter/ebtables.c b/net/bridge/netfilter/ebtables.c
86513index 3d110c4..4e1b2eb 100644
86514--- a/net/bridge/netfilter/ebtables.c
86515+++ b/net/bridge/netfilter/ebtables.c
86516@@ -1525,7 +1525,7 @@ static int do_ebt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
86517 tmp.valid_hooks = t->table->valid_hooks;
86518 }
86519 mutex_unlock(&ebt_mutex);
86520- if (copy_to_user(user, &tmp, *len) != 0){
86521+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0){
86522 BUGPRINT("c2u Didn't work\n");
86523 ret = -EFAULT;
86524 break;
86525@@ -2331,7 +2331,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
86526 goto out;
86527 tmp.valid_hooks = t->valid_hooks;
86528
86529- if (copy_to_user(user, &tmp, *len) != 0) {
86530+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
86531 ret = -EFAULT;
86532 break;
86533 }
86534@@ -2342,7 +2342,7 @@ static int compat_do_ebt_get_ctl(struct sock *sk, int cmd,
86535 tmp.entries_size = t->table->entries_size;
86536 tmp.valid_hooks = t->table->valid_hooks;
86537
86538- if (copy_to_user(user, &tmp, *len) != 0) {
86539+ if (*len > sizeof(tmp) || copy_to_user(user, &tmp, *len) != 0) {
86540 ret = -EFAULT;
86541 break;
86542 }
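Note on the ebtables.c hunks above: each copy_to_user() now refuses a user-supplied *len larger than sizeof(tmp), so the copy can never spill adjacent kernel stack memory back to userspace. A userspace model of the fixed pattern, with illustrative struct and function names:

#include <stdio.h>
#include <string.h>
#include <errno.h>

struct reply { int valid_hooks; int entries_size; };

/* Refuse any request that asks for more bytes than the source
 * object actually holds, instead of blindly copying dst_len bytes
 * from the stack (mirrors the added '*len > sizeof(tmp)' check). */
static int copy_reply(void *dst, size_t dst_len, const struct reply *src)
{
	if (dst_len > sizeof(*src))
		return -EFAULT;
	memcpy(dst, src, dst_len);
	return 0;
}

int main(void)
{
	struct reply tmp = { 3, 128 };
	char out[64];

	printf("len == sizeof(tmp): %d\n", copy_reply(out, sizeof(tmp), &tmp));	/* 0 */
	printf("len == 64:          %d\n", copy_reply(out, sizeof(out), &tmp));	/* rejected */
	return 0;
}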
86543diff --git a/net/caif/cfctrl.c b/net/caif/cfctrl.c
86544index 2bd4b58..0dc30a1 100644
86545--- a/net/caif/cfctrl.c
86546+++ b/net/caif/cfctrl.c
86547@@ -10,6 +10,7 @@
86548 #include <linux/spinlock.h>
86549 #include <linux/slab.h>
86550 #include <linux/pkt_sched.h>
86551+#include <linux/sched.h>
86552 #include <net/caif/caif_layer.h>
86553 #include <net/caif/cfpkt.h>
86554 #include <net/caif/cfctrl.h>
86555@@ -43,8 +44,8 @@ struct cflayer *cfctrl_create(void)
86556 memset(&dev_info, 0, sizeof(dev_info));
86557 dev_info.id = 0xff;
86558 cfsrvl_init(&this->serv, 0, &dev_info, false);
86559- atomic_set(&this->req_seq_no, 1);
86560- atomic_set(&this->rsp_seq_no, 1);
86561+ atomic_set_unchecked(&this->req_seq_no, 1);
86562+ atomic_set_unchecked(&this->rsp_seq_no, 1);
86563 this->serv.layer.receive = cfctrl_recv;
86564 sprintf(this->serv.layer.name, "ctrl");
86565 this->serv.layer.ctrlcmd = cfctrl_ctrlcmd;
86566@@ -130,8 +131,8 @@ static void cfctrl_insert_req(struct cfctrl *ctrl,
86567 struct cfctrl_request_info *req)
86568 {
86569 spin_lock_bh(&ctrl->info_list_lock);
86570- atomic_inc(&ctrl->req_seq_no);
86571- req->sequence_no = atomic_read(&ctrl->req_seq_no);
86572+ atomic_inc_unchecked(&ctrl->req_seq_no);
86573+ req->sequence_no = atomic_read_unchecked(&ctrl->req_seq_no);
86574 list_add_tail(&req->list, &ctrl->list);
86575 spin_unlock_bh(&ctrl->info_list_lock);
86576 }
86577@@ -149,7 +150,7 @@ static struct cfctrl_request_info *cfctrl_remove_req(struct cfctrl *ctrl,
86578 if (p != first)
86579 pr_warn("Requests are not received in order\n");
86580
86581- atomic_set(&ctrl->rsp_seq_no,
86582+ atomic_set_unchecked(&ctrl->rsp_seq_no,
86583 p->sequence_no);
86584 list_del(&p->list);
86585 goto out;
86586diff --git a/net/can/af_can.c b/net/can/af_can.c
86587index c4e5085..aa9efdf 100644
86588--- a/net/can/af_can.c
86589+++ b/net/can/af_can.c
86590@@ -862,7 +862,7 @@ static const struct net_proto_family can_family_ops = {
86591 };
86592
86593 /* notifier block for netdevice event */
86594-static struct notifier_block can_netdev_notifier __read_mostly = {
86595+static struct notifier_block can_netdev_notifier = {
86596 .notifier_call = can_notifier,
86597 };
86598
86599diff --git a/net/can/gw.c b/net/can/gw.c
86600index 3ee690e..00d581b 100644
86601--- a/net/can/gw.c
86602+++ b/net/can/gw.c
86603@@ -80,7 +80,6 @@ MODULE_PARM_DESC(max_hops,
86604 "default: " __stringify(CGW_DEFAULT_HOPS) ")");
86605
86606 static HLIST_HEAD(cgw_list);
86607-static struct notifier_block notifier;
86608
86609 static struct kmem_cache *cgw_cache __read_mostly;
86610
86611@@ -927,6 +926,10 @@ static int cgw_remove_job(struct sk_buff *skb, struct nlmsghdr *nlh)
86612 return err;
86613 }
86614
86615+static struct notifier_block notifier = {
86616+ .notifier_call = cgw_notifier
86617+};
86618+
86619 static __init int cgw_module_init(void)
86620 {
86621 /* sanitize given module parameter */
86622@@ -942,7 +945,6 @@ static __init int cgw_module_init(void)
86623 return -ENOMEM;
86624
86625 /* set notifier */
86626- notifier.notifier_call = cgw_notifier;
86627 register_netdevice_notifier(&notifier);
86628
86629 if (__rtnl_register(PF_CAN, RTM_GETROUTE, NULL, cgw_dump_jobs, NULL)) {
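Note on the can/gw.c hunk above: the notifier_block gains a designated initializer and loses its runtime '.notifier_call = ...' assignment, so the object is fully formed at compile time and never needs a post-init write (which is what allows grsecurity-style constification to place it in read-only memory). A minimal sketch of the same design choice, with a stand-in struct rather than the real notifier_block:

#include <stdio.h>

struct notifier_block_like {
	int (*notifier_call)(unsigned long event);
};

static int cgw_notifier_stub(unsigned long event)
{
	printf("event %lu\n", event);
	return 0;
}

/* Initialized at compile time; no runtime write needed, so the
 * object can be const and live in a read-only section. */
static const struct notifier_block_like notifier = {
	.notifier_call = cgw_notifier_stub,
};

int main(void)
{
	return notifier.notifier_call(42UL);
}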
86630diff --git a/net/compat.c b/net/compat.c
86631index f0a1ba6..0541331 100644
86632--- a/net/compat.c
86633+++ b/net/compat.c
86634@@ -71,9 +71,9 @@ int get_compat_msghdr(struct msghdr *kmsg, struct compat_msghdr __user *umsg)
86635 __get_user(kmsg->msg_controllen, &umsg->msg_controllen) ||
86636 __get_user(kmsg->msg_flags, &umsg->msg_flags))
86637 return -EFAULT;
86638- kmsg->msg_name = compat_ptr(tmp1);
86639- kmsg->msg_iov = compat_ptr(tmp2);
86640- kmsg->msg_control = compat_ptr(tmp3);
86641+ kmsg->msg_name = (void __force_kernel *)compat_ptr(tmp1);
86642+ kmsg->msg_iov = (void __force_kernel *)compat_ptr(tmp2);
86643+ kmsg->msg_control = (void __force_kernel *)compat_ptr(tmp3);
86644 return 0;
86645 }
86646
86647@@ -85,7 +85,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
86648
86649 if (kern_msg->msg_namelen) {
86650 if (mode == VERIFY_READ) {
86651- int err = move_addr_to_kernel(kern_msg->msg_name,
86652+ int err = move_addr_to_kernel((void __force_user *)kern_msg->msg_name,
86653 kern_msg->msg_namelen,
86654 kern_address);
86655 if (err < 0)
86656@@ -96,7 +96,7 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
86657 kern_msg->msg_name = NULL;
86658
86659 tot_len = iov_from_user_compat_to_kern(kern_iov,
86660- (struct compat_iovec __user *)kern_msg->msg_iov,
86661+ (struct compat_iovec __force_user *)kern_msg->msg_iov,
86662 kern_msg->msg_iovlen);
86663 if (tot_len >= 0)
86664 kern_msg->msg_iov = kern_iov;
86665@@ -116,20 +116,20 @@ int verify_compat_iovec(struct msghdr *kern_msg, struct iovec *kern_iov,
86666
86667 #define CMSG_COMPAT_FIRSTHDR(msg) \
86668 (((msg)->msg_controllen) >= sizeof(struct compat_cmsghdr) ? \
86669- (struct compat_cmsghdr __user *)((msg)->msg_control) : \
86670+ (struct compat_cmsghdr __force_user *)((msg)->msg_control) : \
86671 (struct compat_cmsghdr __user *)NULL)
86672
86673 #define CMSG_COMPAT_OK(ucmlen, ucmsg, mhdr) \
86674 ((ucmlen) >= sizeof(struct compat_cmsghdr) && \
86675 (ucmlen) <= (unsigned long) \
86676 ((mhdr)->msg_controllen - \
86677- ((char *)(ucmsg) - (char *)(mhdr)->msg_control)))
86678+ ((char __force_kernel *)(ucmsg) - (char *)(mhdr)->msg_control)))
86679
86680 static inline struct compat_cmsghdr __user *cmsg_compat_nxthdr(struct msghdr *msg,
86681 struct compat_cmsghdr __user *cmsg, int cmsg_len)
86682 {
86683 char __user *ptr = (char __user *)cmsg + CMSG_COMPAT_ALIGN(cmsg_len);
86684- if ((unsigned long)(ptr + 1 - (char __user *)msg->msg_control) >
86685+ if ((unsigned long)(ptr + 1 - (char __force_user *)msg->msg_control) >
86686 msg->msg_controllen)
86687 return NULL;
86688 return (struct compat_cmsghdr __user *)ptr;
86689@@ -219,7 +219,7 @@ Efault:
86690
86691 int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *data)
86692 {
86693- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
86694+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
86695 struct compat_cmsghdr cmhdr;
86696 struct compat_timeval ctv;
86697 struct compat_timespec cts[3];
86698@@ -275,7 +275,7 @@ int put_cmsg_compat(struct msghdr *kmsg, int level, int type, int len, void *dat
86699
86700 void scm_detach_fds_compat(struct msghdr *kmsg, struct scm_cookie *scm)
86701 {
86702- struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __user *) kmsg->msg_control;
86703+ struct compat_cmsghdr __user *cm = (struct compat_cmsghdr __force_user *) kmsg->msg_control;
86704 int fdmax = (kmsg->msg_controllen - sizeof(struct compat_cmsghdr)) / sizeof(int);
86705 int fdnum = scm->fp->count;
86706 struct file **fp = scm->fp->fp;
86707@@ -363,7 +363,7 @@ static int do_set_sock_timeout(struct socket *sock, int level,
86708 return -EFAULT;
86709 old_fs = get_fs();
86710 set_fs(KERNEL_DS);
86711- err = sock_setsockopt(sock, level, optname, (char *)&ktime, sizeof(ktime));
86712+ err = sock_setsockopt(sock, level, optname, (char __force_user *)&ktime, sizeof(ktime));
86713 set_fs(old_fs);
86714
86715 return err;
86716@@ -424,7 +424,7 @@ static int do_get_sock_timeout(struct socket *sock, int level, int optname,
86717 len = sizeof(ktime);
86718 old_fs = get_fs();
86719 set_fs(KERNEL_DS);
86720- err = sock_getsockopt(sock, level, optname, (char *) &ktime, &len);
86721+ err = sock_getsockopt(sock, level, optname, (char __force_user *) &ktime, (int __force_user *)&len);
86722 set_fs(old_fs);
86723
86724 if (!err) {
86725@@ -567,7 +567,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
86726 case MCAST_JOIN_GROUP:
86727 case MCAST_LEAVE_GROUP:
86728 {
86729- struct compat_group_req __user *gr32 = (void *)optval;
86730+ struct compat_group_req __user *gr32 = (void __user *)optval;
86731 struct group_req __user *kgr =
86732 compat_alloc_user_space(sizeof(struct group_req));
86733 u32 interface;
86734@@ -588,7 +588,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
86735 case MCAST_BLOCK_SOURCE:
86736 case MCAST_UNBLOCK_SOURCE:
86737 {
86738- struct compat_group_source_req __user *gsr32 = (void *)optval;
86739+ struct compat_group_source_req __user *gsr32 = (void __user *)optval;
86740 struct group_source_req __user *kgsr = compat_alloc_user_space(
86741 sizeof(struct group_source_req));
86742 u32 interface;
86743@@ -609,7 +609,7 @@ int compat_mc_setsockopt(struct sock *sock, int level, int optname,
86744 }
86745 case MCAST_MSFILTER:
86746 {
86747- struct compat_group_filter __user *gf32 = (void *)optval;
86748+ struct compat_group_filter __user *gf32 = (void __user *)optval;
86749 struct group_filter __user *kgf;
86750 u32 interface, fmode, numsrc;
86751
86752@@ -647,7 +647,7 @@ int compat_mc_getsockopt(struct sock *sock, int level, int optname,
86753 char __user *optval, int __user *optlen,
86754 int (*getsockopt)(struct sock *, int, int, char __user *, int __user *))
86755 {
86756- struct compat_group_filter __user *gf32 = (void *)optval;
86757+ struct compat_group_filter __user *gf32 = (void __user *)optval;
86758 struct group_filter __user *kgf;
86759 int __user *koptlen;
86760 u32 interface, fmode, numsrc;
86761@@ -805,7 +805,7 @@ asmlinkage long compat_sys_socketcall(int call, u32 __user *args)
86762
86763 if (call < SYS_SOCKET || call > SYS_SENDMMSG)
86764 return -EINVAL;
86765- if (copy_from_user(a, args, nas[call]))
86766+ if (nas[call] > sizeof a || copy_from_user(a, args, nas[call]))
86767 return -EFAULT;
86768 a0 = a[0];
86769 a1 = a[1];
86770diff --git a/net/core/datagram.c b/net/core/datagram.c
86771index b71423d..0360434 100644
86772--- a/net/core/datagram.c
86773+++ b/net/core/datagram.c
86774@@ -295,7 +295,7 @@ int skb_kill_datagram(struct sock *sk, struct sk_buff *skb, unsigned int flags)
86775 }
86776
86777 kfree_skb(skb);
86778- atomic_inc(&sk->sk_drops);
86779+ atomic_inc_unchecked(&sk->sk_drops);
86780 sk_mem_reclaim_partial(sk);
86781
86782 return err;
86783diff --git a/net/core/dev.c b/net/core/dev.c
86784index faebb39..a38fb42 100644
86785--- a/net/core/dev.c
86786+++ b/net/core/dev.c
86787@@ -1649,7 +1649,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
86788 {
86789 if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
86790 if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
86791- atomic_long_inc(&dev->rx_dropped);
86792+ atomic_long_inc_unchecked(&dev->rx_dropped);
86793 kfree_skb(skb);
86794 return NET_RX_DROP;
86795 }
86796@@ -1658,7 +1658,7 @@ int dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
86797 skb_orphan(skb);
86798
86799 if (unlikely(!is_skb_forwardable(dev, skb))) {
86800- atomic_long_inc(&dev->rx_dropped);
86801+ atomic_long_inc_unchecked(&dev->rx_dropped);
86802 kfree_skb(skb);
86803 return NET_RX_DROP;
86804 }
86805@@ -2404,7 +2404,7 @@ static int illegal_highdma(struct net_device *dev, struct sk_buff *skb)
86806
86807 struct dev_gso_cb {
86808 void (*destructor)(struct sk_buff *skb);
86809-};
86810+} __no_const;
86811
86812 #define DEV_GSO_CB(skb) ((struct dev_gso_cb *)(skb)->cb)
86813
86814@@ -3139,7 +3139,7 @@ enqueue:
86815
86816 local_irq_restore(flags);
86817
86818- atomic_long_inc(&skb->dev->rx_dropped);
86819+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
86820 kfree_skb(skb);
86821 return NET_RX_DROP;
86822 }
86823@@ -3211,7 +3211,7 @@ int netif_rx_ni(struct sk_buff *skb)
86824 }
86825 EXPORT_SYMBOL(netif_rx_ni);
86826
86827-static void net_tx_action(struct softirq_action *h)
86828+static void net_tx_action(void)
86829 {
86830 struct softnet_data *sd = &__get_cpu_var(softnet_data);
86831
86832@@ -3538,7 +3538,7 @@ ncls:
86833 ret = pt_prev->func(skb, skb->dev, pt_prev, orig_dev);
86834 } else {
86835 drop:
86836- atomic_long_inc(&skb->dev->rx_dropped);
86837+ atomic_long_inc_unchecked(&skb->dev->rx_dropped);
86838 kfree_skb(skb);
86839 /* Jamal, now you will not able to escape explaining
86840 * me how you were going to use this. :-)
86841@@ -4146,7 +4146,7 @@ void netif_napi_del(struct napi_struct *napi)
86842 }
86843 EXPORT_SYMBOL(netif_napi_del);
86844
86845-static void net_rx_action(struct softirq_action *h)
86846+static void net_rx_action(void)
86847 {
86848 struct softnet_data *sd = &__get_cpu_var(softnet_data);
86849 unsigned long time_limit = jiffies + 2;
86850@@ -5583,7 +5583,7 @@ struct rtnl_link_stats64 *dev_get_stats(struct net_device *dev,
86851 } else {
86852 netdev_stats_to_stats64(storage, &dev->stats);
86853 }
86854- storage->rx_dropped += atomic_long_read(&dev->rx_dropped);
86855+ storage->rx_dropped += atomic_long_read_unchecked(&dev->rx_dropped);
86856 return storage;
86857 }
86858 EXPORT_SYMBOL(dev_get_stats);
86859diff --git a/net/core/dev_ioctl.c b/net/core/dev_ioctl.c
86860index 5b7d0e1..cb960fc 100644
86861--- a/net/core/dev_ioctl.c
86862+++ b/net/core/dev_ioctl.c
86863@@ -365,9 +365,13 @@ void dev_load(struct net *net, const char *name)
86864 if (no_module && capable(CAP_NET_ADMIN))
86865 no_module = request_module("netdev-%s", name);
86866 if (no_module && capable(CAP_SYS_MODULE)) {
86867+#ifdef CONFIG_GRKERNSEC_MODHARDEN
86868+ ___request_module(true, "grsec_modharden_netdev", "%s", name);
86869+#else
86870 if (!request_module("%s", name))
86871 pr_warn("Loading kernel module for a network device with CAP_SYS_MODULE (deprecated). Use CAP_NET_ADMIN and alias netdev-%s instead.\n",
86872 name);
86873+#endif
86874 }
86875 }
86876 EXPORT_SYMBOL(dev_load);
86877diff --git a/net/core/ethtool.c b/net/core/ethtool.c
86878index ce91766..3b71cdb 100644
86879--- a/net/core/ethtool.c
86880+++ b/net/core/ethtool.c
86881@@ -1319,10 +1319,19 @@ static int ethtool_get_dump_data(struct net_device *dev,
86882 if (ret)
86883 return ret;
86884
86885- len = (tmp.len > dump.len) ? dump.len : tmp.len;
86886+ len = min(tmp.len, dump.len);
86887 if (!len)
86888 return -EFAULT;
86889
86890+ /* Don't ever let the driver think there's more space available
86891+ * than it requested with .get_dump_flag().
86892+ */
86893+ dump.len = len;
86894+
86895+ /* Always allocate enough space to hold the whole thing so that the
86896+ * driver does not need to check the length and bother with partial
86897+ * dumping.
86898+ */
86899 data = vzalloc(tmp.len);
86900 if (!data)
86901 return -ENOMEM;
86902@@ -1330,6 +1339,16 @@ static int ethtool_get_dump_data(struct net_device *dev,
86903 if (ret)
86904 goto out;
86905
86906+ /* There are two sane possibilities:
86907+ * 1. The driver's .get_dump_data() does not touch dump.len.
86908+ * 2. Or it may set dump.len to how much it really writes, which
86909+ * should be tmp.len (or len if it can do a partial dump).
86910+ * In any case respond to userspace with the actual length of data
86911+ * it's receiving.
86912+ */
86913+ WARN_ON(dump.len != len && dump.len != tmp.len);
86914+ dump.len = len;
86915+
86916 if (copy_to_user(useraddr, &dump, sizeof(dump))) {
86917 ret = -EFAULT;
86918 goto out;
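Note on the ethtool.c hunks above: the kernel clamps the length returned to userspace to min(tmp.len, dump.len) while still allocating the full driver-reported size, so the driver never has to implement partial dumps and userspace never receives more than it asked for. A userspace model of that flow, with illustrative names:

#include <stdio.h>
#include <stdlib.h>

/* 'avail' is what the driver reported via .get_dump_flag(),
 * 'want' is what userspace requested. */
static size_t dump_len(size_t want, size_t avail)
{
	return want < avail ? want : avail;
}

int main(void)
{
	size_t avail = 4096, want = 16384;
	size_t len = dump_len(want, avail);

	char *data = calloc(avail, 1);	/* vzalloc(tmp.len) analogue: full size, zeroed */
	if (!data)
		return 1;

	printf("driver buffer: %zu bytes, returned to user: %zu bytes\n",
	       avail, len);
	free(data);
	return 0;
}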
86919diff --git a/net/core/flow.c b/net/core/flow.c
86920index 7102f16..146b4bd 100644
86921--- a/net/core/flow.c
86922+++ b/net/core/flow.c
86923@@ -61,7 +61,7 @@ struct flow_cache {
86924 struct timer_list rnd_timer;
86925 };
86926
86927-atomic_t flow_cache_genid = ATOMIC_INIT(0);
86928+atomic_unchecked_t flow_cache_genid = ATOMIC_INIT(0);
86929 EXPORT_SYMBOL(flow_cache_genid);
86930 static struct flow_cache flow_cache_global;
86931 static struct kmem_cache *flow_cachep __read_mostly;
86932@@ -86,7 +86,7 @@ static void flow_cache_new_hashrnd(unsigned long arg)
86933
86934 static int flow_entry_valid(struct flow_cache_entry *fle)
86935 {
86936- if (atomic_read(&flow_cache_genid) != fle->genid)
86937+ if (atomic_read_unchecked(&flow_cache_genid) != fle->genid)
86938 return 0;
86939 if (fle->object && !fle->object->ops->check(fle->object))
86940 return 0;
86941@@ -258,7 +258,7 @@ flow_cache_lookup(struct net *net, const struct flowi *key, u16 family, u8 dir,
86942 hlist_add_head(&fle->u.hlist, &fcp->hash_table[hash]);
86943 fcp->hash_count++;
86944 }
86945- } else if (likely(fle->genid == atomic_read(&flow_cache_genid))) {
86946+ } else if (likely(fle->genid == atomic_read_unchecked(&flow_cache_genid))) {
86947 flo = fle->object;
86948 if (!flo)
86949 goto ret_object;
86950@@ -279,7 +279,7 @@ nocache:
86951 }
86952 flo = resolver(net, key, family, dir, flo, ctx);
86953 if (fle) {
86954- fle->genid = atomic_read(&flow_cache_genid);
86955+ fle->genid = atomic_read_unchecked(&flow_cache_genid);
86956 if (!IS_ERR(flo))
86957 fle->object = flo;
86958 else
86959diff --git a/net/core/iovec.c b/net/core/iovec.c
86960index de178e4..1dabd8b 100644
86961--- a/net/core/iovec.c
86962+++ b/net/core/iovec.c
86963@@ -42,7 +42,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
86964 if (m->msg_namelen) {
86965 if (mode == VERIFY_READ) {
86966 void __user *namep;
86967- namep = (void __user __force *) m->msg_name;
86968+ namep = (void __force_user *) m->msg_name;
86969 err = move_addr_to_kernel(namep, m->msg_namelen,
86970 address);
86971 if (err < 0)
86972@@ -54,7 +54,7 @@ int verify_iovec(struct msghdr *m, struct iovec *iov, struct sockaddr_storage *a
86973 }
86974
86975 size = m->msg_iovlen * sizeof(struct iovec);
86976- if (copy_from_user(iov, (void __user __force *) m->msg_iov, size))
86977+ if (copy_from_user(iov, (void __force_user *) m->msg_iov, size))
86978 return -EFAULT;
86979
86980 m->msg_iov = iov;
86981diff --git a/net/core/neighbour.c b/net/core/neighbour.c
86982index 5c56b21..8766fbf 100644
86983--- a/net/core/neighbour.c
86984+++ b/net/core/neighbour.c
86985@@ -2769,7 +2769,7 @@ static int proc_unres_qlen(ctl_table *ctl, int write, void __user *buffer,
86986 size_t *lenp, loff_t *ppos)
86987 {
86988 int size, ret;
86989- ctl_table tmp = *ctl;
86990+ ctl_table_no_const tmp = *ctl;
86991
86992 tmp.extra1 = &zero;
86993 tmp.extra2 = &unres_qlen_max;
86994diff --git a/net/core/net-procfs.c b/net/core/net-procfs.c
86995index 569d355..79cf2d0 100644
86996--- a/net/core/net-procfs.c
86997+++ b/net/core/net-procfs.c
86998@@ -271,8 +271,13 @@ static int ptype_seq_show(struct seq_file *seq, void *v)
86999 else
87000 seq_printf(seq, "%04x", ntohs(pt->type));
87001
87002+#ifdef CONFIG_GRKERNSEC_HIDESYM
87003+ seq_printf(seq, " %-8s %pf\n",
87004+ pt->dev ? pt->dev->name : "", NULL);
87005+#else
87006 seq_printf(seq, " %-8s %pf\n",
87007 pt->dev ? pt->dev->name : "", pt->func);
87008+#endif
87009 }
87010
87011 return 0;
87012diff --git a/net/core/net-sysfs.c b/net/core/net-sysfs.c
87013index 981fed3..536af34 100644
87014--- a/net/core/net-sysfs.c
87015+++ b/net/core/net-sysfs.c
87016@@ -1311,7 +1311,7 @@ void netdev_class_remove_file(struct class_attribute *class_attr)
87017 }
87018 EXPORT_SYMBOL(netdev_class_remove_file);
87019
87020-int netdev_kobject_init(void)
87021+int __init netdev_kobject_init(void)
87022 {
87023 kobj_ns_type_register(&net_ns_type_operations);
87024 return class_register(&net_class);
87025diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c
87026index f9765203..9feaef8 100644
87027--- a/net/core/net_namespace.c
87028+++ b/net/core/net_namespace.c
87029@@ -443,7 +443,7 @@ static int __register_pernet_operations(struct list_head *list,
87030 int error;
87031 LIST_HEAD(net_exit_list);
87032
87033- list_add_tail(&ops->list, list);
87034+ pax_list_add_tail((struct list_head *)&ops->list, list);
87035 if (ops->init || (ops->id && ops->size)) {
87036 for_each_net(net) {
87037 error = ops_init(ops, net);
87038@@ -456,7 +456,7 @@ static int __register_pernet_operations(struct list_head *list,
87039
87040 out_undo:
87041 /* If I have an error cleanup all namespaces I initialized */
87042- list_del(&ops->list);
87043+ pax_list_del((struct list_head *)&ops->list);
87044 ops_exit_list(ops, &net_exit_list);
87045 ops_free_list(ops, &net_exit_list);
87046 return error;
87047@@ -467,7 +467,7 @@ static void __unregister_pernet_operations(struct pernet_operations *ops)
87048 struct net *net;
87049 LIST_HEAD(net_exit_list);
87050
87051- list_del(&ops->list);
87052+ pax_list_del((struct list_head *)&ops->list);
87053 for_each_net(net)
87054 list_add_tail(&net->exit_list, &net_exit_list);
87055 ops_exit_list(ops, &net_exit_list);
87056@@ -601,7 +601,7 @@ int register_pernet_device(struct pernet_operations *ops)
87057 mutex_lock(&net_mutex);
87058 error = register_pernet_operations(&pernet_list, ops);
87059 if (!error && (first_device == &pernet_list))
87060- first_device = &ops->list;
87061+ first_device = (struct list_head *)&ops->list;
87062 mutex_unlock(&net_mutex);
87063 return error;
87064 }
87065diff --git a/net/core/rtnetlink.c b/net/core/rtnetlink.c
87066index a08bd2b..4e8f43c 100644
87067--- a/net/core/rtnetlink.c
87068+++ b/net/core/rtnetlink.c
87069@@ -58,7 +58,7 @@ struct rtnl_link {
87070 rtnl_doit_func doit;
87071 rtnl_dumpit_func dumpit;
87072 rtnl_calcit_func calcit;
87073-};
87074+} __no_const;
87075
87076 static DEFINE_MUTEX(rtnl_mutex);
87077
87078@@ -299,10 +299,13 @@ int __rtnl_link_register(struct rtnl_link_ops *ops)
87079 if (rtnl_link_ops_get(ops->kind))
87080 return -EEXIST;
87081
87082- if (!ops->dellink)
87083- ops->dellink = unregister_netdevice_queue;
87084+ if (!ops->dellink) {
87085+ pax_open_kernel();
87086+ *(void **)&ops->dellink = unregister_netdevice_queue;
87087+ pax_close_kernel();
87088+ }
87089
87090- list_add_tail(&ops->list, &link_ops);
87091+ pax_list_add_tail((struct list_head *)&ops->list, &link_ops);
87092 return 0;
87093 }
87094 EXPORT_SYMBOL_GPL(__rtnl_link_register);
87095@@ -349,7 +352,7 @@ void __rtnl_link_unregister(struct rtnl_link_ops *ops)
87096 for_each_net(net) {
87097 __rtnl_kill_links(net, ops);
87098 }
87099- list_del(&ops->list);
87100+ pax_list_del((struct list_head *)&ops->list);
87101 }
87102 EXPORT_SYMBOL_GPL(__rtnl_link_unregister);
87103
87104diff --git a/net/core/scm.c b/net/core/scm.c
87105index 03795d0..eaf7368 100644
87106--- a/net/core/scm.c
87107+++ b/net/core/scm.c
87108@@ -210,7 +210,7 @@ EXPORT_SYMBOL(__scm_send);
87109 int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
87110 {
87111 struct cmsghdr __user *cm
87112- = (__force struct cmsghdr __user *)msg->msg_control;
87113+ = (struct cmsghdr __force_user *)msg->msg_control;
87114 struct cmsghdr cmhdr;
87115 int cmlen = CMSG_LEN(len);
87116 int err;
87117@@ -233,7 +233,7 @@ int put_cmsg(struct msghdr * msg, int level, int type, int len, void *data)
87118 err = -EFAULT;
87119 if (copy_to_user(cm, &cmhdr, sizeof cmhdr))
87120 goto out;
87121- if (copy_to_user(CMSG_DATA(cm), data, cmlen - sizeof(struct cmsghdr)))
87122+ if (copy_to_user((void __force_user *)CMSG_DATA((void __force_kernel *)cm), data, cmlen - sizeof(struct cmsghdr)))
87123 goto out;
87124 cmlen = CMSG_SPACE(len);
87125 if (msg->msg_controllen < cmlen)
87126@@ -249,7 +249,7 @@ EXPORT_SYMBOL(put_cmsg);
87127 void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
87128 {
87129 struct cmsghdr __user *cm
87130- = (__force struct cmsghdr __user*)msg->msg_control;
87131+ = (struct cmsghdr __force_user *)msg->msg_control;
87132
87133 int fdmax = 0;
87134 int fdnum = scm->fp->count;
87135@@ -269,7 +269,7 @@ void scm_detach_fds(struct msghdr *msg, struct scm_cookie *scm)
87136 if (fdnum < fdmax)
87137 fdmax = fdnum;
87138
87139- for (i=0, cmfptr=(__force int __user *)CMSG_DATA(cm); i<fdmax;
87140+ for (i=0, cmfptr=(int __force_user *)CMSG_DATA((void __force_kernel *)cm); i<fdmax;
87141 i++, cmfptr++)
87142 {
87143 struct socket *sock;
87144diff --git a/net/core/skbuff.c b/net/core/skbuff.c
87145index 1c1738c..4cab7f0 100644
87146--- a/net/core/skbuff.c
87147+++ b/net/core/skbuff.c
87148@@ -3087,13 +3087,15 @@ void __init skb_init(void)
87149 skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
87150 sizeof(struct sk_buff),
87151 0,
87152- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
87153+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
87154+ SLAB_NO_SANITIZE,
87155 NULL);
87156 skbuff_fclone_cache = kmem_cache_create("skbuff_fclone_cache",
87157 (2*sizeof(struct sk_buff)) +
87158 sizeof(atomic_t),
87159 0,
87160- SLAB_HWCACHE_ALIGN|SLAB_PANIC,
87161+ SLAB_HWCACHE_ALIGN|SLAB_PANIC|
87162+ SLAB_NO_SANITIZE,
87163 NULL);
87164 }
87165
87166diff --git a/net/core/sock.c b/net/core/sock.c
87167index d6d024c..6ea7ab4 100644
87168--- a/net/core/sock.c
87169+++ b/net/core/sock.c
87170@@ -390,7 +390,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
87171 struct sk_buff_head *list = &sk->sk_receive_queue;
87172
87173 if (atomic_read(&sk->sk_rmem_alloc) >= sk->sk_rcvbuf) {
87174- atomic_inc(&sk->sk_drops);
87175+ atomic_inc_unchecked(&sk->sk_drops);
87176 trace_sock_rcvqueue_full(sk, skb);
87177 return -ENOMEM;
87178 }
87179@@ -400,7 +400,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
87180 return err;
87181
87182 if (!sk_rmem_schedule(sk, skb, skb->truesize)) {
87183- atomic_inc(&sk->sk_drops);
87184+ atomic_inc_unchecked(&sk->sk_drops);
87185 return -ENOBUFS;
87186 }
87187
87188@@ -420,7 +420,7 @@ int sock_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
87189 skb_dst_force(skb);
87190
87191 spin_lock_irqsave(&list->lock, flags);
87192- skb->dropcount = atomic_read(&sk->sk_drops);
87193+ skb->dropcount = atomic_read_unchecked(&sk->sk_drops);
87194 __skb_queue_tail(list, skb);
87195 spin_unlock_irqrestore(&list->lock, flags);
87196
87197@@ -440,7 +440,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
87198 skb->dev = NULL;
87199
87200 if (sk_rcvqueues_full(sk, skb, sk->sk_rcvbuf)) {
87201- atomic_inc(&sk->sk_drops);
87202+ atomic_inc_unchecked(&sk->sk_drops);
87203 goto discard_and_relse;
87204 }
87205 if (nested)
87206@@ -458,7 +458,7 @@ int sk_receive_skb(struct sock *sk, struct sk_buff *skb, const int nested)
87207 mutex_release(&sk->sk_lock.dep_map, 1, _RET_IP_);
87208 } else if (sk_add_backlog(sk, skb, sk->sk_rcvbuf)) {
87209 bh_unlock_sock(sk);
87210- atomic_inc(&sk->sk_drops);
87211+ atomic_inc_unchecked(&sk->sk_drops);
87212 goto discard_and_relse;
87213 }
87214
87215@@ -933,12 +933,12 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
87216 struct timeval tm;
87217 } v;
87218
87219- int lv = sizeof(int);
87220- int len;
87221+ unsigned int lv = sizeof(int);
87222+ unsigned int len;
87223
87224 if (get_user(len, optlen))
87225 return -EFAULT;
87226- if (len < 0)
87227+ if (len > INT_MAX)
87228 return -EINVAL;
87229
87230 memset(&v, 0, sizeof(v));
87231@@ -1090,11 +1090,11 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
87232
87233 case SO_PEERNAME:
87234 {
87235- char address[128];
87236+ char address[_K_SS_MAXSIZE];
87237
87238 if (sock->ops->getname(sock, (struct sockaddr *)address, &lv, 2))
87239 return -ENOTCONN;
87240- if (lv < len)
87241+ if (lv < len || sizeof address < len)
87242 return -EINVAL;
87243 if (copy_to_user(optval, address, len))
87244 return -EFAULT;
87245@@ -1161,7 +1161,7 @@ int sock_getsockopt(struct socket *sock, int level, int optname,
87246
87247 if (len > lv)
87248 len = lv;
87249- if (copy_to_user(optval, &v, len))
87250+ if (len > sizeof(v) || copy_to_user(optval, &v, len))
87251 return -EFAULT;
87252 lenout:
87253 if (put_user(len, optlen))
87254@@ -2277,7 +2277,7 @@ void sock_init_data(struct socket *sock, struct sock *sk)
87255 */
87256 smp_wmb();
87257 atomic_set(&sk->sk_refcnt, 1);
87258- atomic_set(&sk->sk_drops, 0);
87259+ atomic_set_unchecked(&sk->sk_drops, 0);
87260 }
87261 EXPORT_SYMBOL(sock_init_data);
87262
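Note on the sock.c SO_PEERNAME hunk above: the on-stack buffer is sized to _K_SS_MAXSIZE and the copy is refused unless the requested length fits both the peer name (lv) and the destination buffer (sizeof address), closing a potential stack over-read. A userspace model of the double bound, using a stand-in SS_MAXSIZE constant:

#include <stdio.h>
#include <string.h>
#include <errno.h>

#define SS_MAXSIZE 128	/* stand-in for _K_SS_MAXSIZE */

/* The copy must satisfy two bounds at once: no more than the peer
 * name actually occupies (lv), and no more than the kernel-side
 * buffer can hold (mirrors 'if (lv < len || sizeof address < len)'). */
static int get_peername(char *user_buf, size_t len,
			const char *address, size_t lv)
{
	if (lv < len || SS_MAXSIZE < len)
		return -EINVAL;
	memcpy(user_buf, address, len);
	return 0;
}

int main(void)
{
	char address[SS_MAXSIZE] = "peer";
	char out[256];

	printf("len=4:   %d\n", get_peername(out, 4, address, 16));	/* 0 */
	printf("len=200: %d\n", get_peername(out, 200, address, 16));	/* rejected */
	return 0;
}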
87263diff --git a/net/core/sock_diag.c b/net/core/sock_diag.c
87264index a0e9cf6..ef7f9ed 100644
87265--- a/net/core/sock_diag.c
87266+++ b/net/core/sock_diag.c
87267@@ -9,26 +9,33 @@
87268 #include <linux/inet_diag.h>
87269 #include <linux/sock_diag.h>
87270
87271-static const struct sock_diag_handler *sock_diag_handlers[AF_MAX];
87272+static const struct sock_diag_handler *sock_diag_handlers[AF_MAX] __read_only;
87273 static int (*inet_rcv_compat)(struct sk_buff *skb, struct nlmsghdr *nlh);
87274 static DEFINE_MUTEX(sock_diag_table_mutex);
87275
87276 int sock_diag_check_cookie(void *sk, __u32 *cookie)
87277 {
87278+#ifndef CONFIG_GRKERNSEC_HIDESYM
87279 if ((cookie[0] != INET_DIAG_NOCOOKIE ||
87280 cookie[1] != INET_DIAG_NOCOOKIE) &&
87281 ((u32)(unsigned long)sk != cookie[0] ||
87282 (u32)((((unsigned long)sk) >> 31) >> 1) != cookie[1]))
87283 return -ESTALE;
87284 else
87285+#endif
87286 return 0;
87287 }
87288 EXPORT_SYMBOL_GPL(sock_diag_check_cookie);
87289
87290 void sock_diag_save_cookie(void *sk, __u32 *cookie)
87291 {
87292+#ifdef CONFIG_GRKERNSEC_HIDESYM
87293+ cookie[0] = 0;
87294+ cookie[1] = 0;
87295+#else
87296 cookie[0] = (u32)(unsigned long)sk;
87297 cookie[1] = (u32)(((unsigned long)sk >> 31) >> 1);
87298+#endif
87299 }
87300 EXPORT_SYMBOL_GPL(sock_diag_save_cookie);
87301
87302@@ -113,8 +120,11 @@ int sock_diag_register(const struct sock_diag_handler *hndl)
87303 mutex_lock(&sock_diag_table_mutex);
87304 if (sock_diag_handlers[hndl->family])
87305 err = -EBUSY;
87306- else
87307+ else {
87308+ pax_open_kernel();
87309 sock_diag_handlers[hndl->family] = hndl;
87310+ pax_close_kernel();
87311+ }
87312 mutex_unlock(&sock_diag_table_mutex);
87313
87314 return err;
87315@@ -130,7 +140,9 @@ void sock_diag_unregister(const struct sock_diag_handler *hnld)
87316
87317 mutex_lock(&sock_diag_table_mutex);
87318 BUG_ON(sock_diag_handlers[family] != hnld);
87319+ pax_open_kernel();
87320 sock_diag_handlers[family] = NULL;
87321+ pax_close_kernel();
87322 mutex_unlock(&sock_diag_table_mutex);
87323 }
87324 EXPORT_SYMBOL_GPL(sock_diag_unregister);
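Note on the sock_diag.c hunks above: the handler table becomes __read_only, and the two registration paths bracket their single store with pax_open_kernel()/pax_close_kernel(). A POSIX userspace model of that write window using mprotect (a sketch of the concept only; the real mechanism flips CR0.WP or equivalent, not page permissions via mprotect):

#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

typedef int (*diag_handler)(void);

static diag_handler *handlers;	/* lives in a normally read-only page */
static size_t page_sz;

/* Stand-ins for pax_open_kernel()/pax_close_kernel(). */
static void open_kernel(void)  { mprotect(handlers, page_sz, PROT_READ | PROT_WRITE); }
static void close_kernel(void) { mprotect(handlers, page_sz, PROT_READ); }

static int dummy_handler(void) { return 0; }

int main(void)
{
	page_sz = (size_t)sysconf(_SC_PAGESIZE);
	handlers = mmap(NULL, page_sz, PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (handlers == MAP_FAILED)
		return 1;
	close_kernel();			/* table is read-only by default */

	open_kernel();			/* register, like sock_diag_register() */
	handlers[0] = dummy_handler;
	close_kernel();

	printf("handler returned %d\n", handlers[0]());
	/* handlers[1] = dummy_handler;  would now SIGSEGV */
	return 0;
}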
87325diff --git a/net/core/sysctl_net_core.c b/net/core/sysctl_net_core.c
87326index cfdb46a..cef55e1 100644
87327--- a/net/core/sysctl_net_core.c
87328+++ b/net/core/sysctl_net_core.c
87329@@ -28,7 +28,7 @@ static int rps_sock_flow_sysctl(ctl_table *table, int write,
87330 {
87331 unsigned int orig_size, size;
87332 int ret, i;
87333- ctl_table tmp = {
87334+ ctl_table_no_const tmp = {
87335 .data = &size,
87336 .maxlen = sizeof(size),
87337 .mode = table->mode
87338@@ -211,13 +211,12 @@ static struct ctl_table netns_core_table[] = {
87339
87340 static __net_init int sysctl_core_net_init(struct net *net)
87341 {
87342- struct ctl_table *tbl;
87343+ ctl_table_no_const *tbl = NULL;
87344
87345 net->core.sysctl_somaxconn = SOMAXCONN;
87346
87347- tbl = netns_core_table;
87348 if (!net_eq(net, &init_net)) {
87349- tbl = kmemdup(tbl, sizeof(netns_core_table), GFP_KERNEL);
87350+ tbl = kmemdup(netns_core_table, sizeof(netns_core_table), GFP_KERNEL);
87351 if (tbl == NULL)
87352 goto err_dup;
87353
87354@@ -227,17 +226,16 @@ static __net_init int sysctl_core_net_init(struct net *net)
87355 if (net->user_ns != &init_user_ns) {
87356 tbl[0].procname = NULL;
87357 }
87358- }
87359-
87360- net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
87361+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", tbl);
87362+ } else
87363+ net->core.sysctl_hdr = register_net_sysctl(net, "net/core", netns_core_table);
87364 if (net->core.sysctl_hdr == NULL)
87365 goto err_reg;
87366
87367 return 0;
87368
87369 err_reg:
87370- if (tbl != netns_core_table)
87371- kfree(tbl);
87372+ kfree(tbl);
87373 err_dup:
87374 return -ENOMEM;
87375 }
87376@@ -252,7 +250,7 @@ static __net_exit void sysctl_core_net_exit(struct net *net)
87377 kfree(tbl);
87378 }
87379
87380-static __net_initdata struct pernet_operations sysctl_core_ops = {
87381+static __net_initconst struct pernet_operations sysctl_core_ops = {
87382 .init = sysctl_core_net_init,
87383 .exit = sysctl_core_net_exit,
87384 };
87385diff --git a/net/decnet/af_decnet.c b/net/decnet/af_decnet.c
87386index c21f200..bc4565b 100644
87387--- a/net/decnet/af_decnet.c
87388+++ b/net/decnet/af_decnet.c
87389@@ -465,6 +465,7 @@ static struct proto dn_proto = {
87390 .sysctl_rmem = sysctl_decnet_rmem,
87391 .max_header = DN_MAX_NSP_DATA_HEADER + 64,
87392 .obj_size = sizeof(struct dn_sock),
87393+ .slab_flags = SLAB_USERCOPY,
87394 };
87395
87396 static struct sock *dn_alloc_sock(struct net *net, struct socket *sock, gfp_t gfp)
87397diff --git a/net/decnet/sysctl_net_decnet.c b/net/decnet/sysctl_net_decnet.c
87398index a55eecc..dd8428c 100644
87399--- a/net/decnet/sysctl_net_decnet.c
87400+++ b/net/decnet/sysctl_net_decnet.c
87401@@ -174,7 +174,7 @@ static int dn_node_address_handler(ctl_table *table, int write,
87402
87403 if (len > *lenp) len = *lenp;
87404
87405- if (copy_to_user(buffer, addr, len))
87406+ if (len > sizeof addr || copy_to_user(buffer, addr, len))
87407 return -EFAULT;
87408
87409 *lenp = len;
87410@@ -237,7 +237,7 @@ static int dn_def_dev_handler(ctl_table *table, int write,
87411
87412 if (len > *lenp) len = *lenp;
87413
87414- if (copy_to_user(buffer, devname, len))
87415+ if (len > sizeof devname || copy_to_user(buffer, devname, len))
87416 return -EFAULT;
87417
87418 *lenp = len;
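
The added `len > sizeof addr` / `len > sizeof devname` guards cap a length that ultimately derives from the caller against the size of the kernel-side buffer, so an oversized *lenp can never over-read adjacent stack memory. The same belt-and-braces check appears below in tcp_probe and the raw-socket getsockopt paths. As a plain C helper, the pattern is just:

    #include <errno.h>
    #include <string.h>

    /* Copy at most src_size bytes out; refuse oversized requests outright
     * instead of silently over-reading the source buffer. */
    static int copy_out_bounded(void *dst, const void *src,
                                size_t len, size_t src_size)
    {
        if (len > src_size)
            return -EFAULT;           /* mirrors the patch's early -EFAULT */
        memcpy(dst, src, len);
        return 0;
    }

    int main(void)
    {
        char src[16] = "decnet", dst[16];
        return copy_out_bounded(dst, src, sizeof(dst), sizeof(src)) ? 1 : 0;
    }
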
87419diff --git a/net/ipv4/af_inet.c b/net/ipv4/af_inet.c
87420index d01be2a..8976537 100644
87421--- a/net/ipv4/af_inet.c
87422+++ b/net/ipv4/af_inet.c
87423@@ -1703,13 +1703,9 @@ static int __init inet_init(void)
87424
87425 BUILD_BUG_ON(sizeof(struct inet_skb_parm) > FIELD_SIZEOF(struct sk_buff, cb));
87426
87427- sysctl_local_reserved_ports = kzalloc(65536 / 8, GFP_KERNEL);
87428- if (!sysctl_local_reserved_ports)
87429- goto out;
87430-
87431 rc = proto_register(&tcp_prot, 1);
87432 if (rc)
87433- goto out_free_reserved_ports;
87434+ goto out;
87435
87436 rc = proto_register(&udp_prot, 1);
87437 if (rc)
87438@@ -1818,8 +1814,6 @@ out_unregister_udp_proto:
87439 proto_unregister(&udp_prot);
87440 out_unregister_tcp_proto:
87441 proto_unregister(&tcp_prot);
87442-out_free_reserved_ports:
87443- kfree(sysctl_local_reserved_ports);
87444 goto out;
87445 }
87446
87447diff --git a/net/ipv4/ah4.c b/net/ipv4/ah4.c
87448index 2e7f194..0fa4d6d 100644
87449--- a/net/ipv4/ah4.c
87450+++ b/net/ipv4/ah4.c
87451@@ -420,7 +420,7 @@ static void ah4_err(struct sk_buff *skb, u32 info)
87452 return;
87453
87454 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
87455- atomic_inc(&flow_cache_genid);
87456+ atomic_inc_unchecked(&flow_cache_genid);
87457 rt_genid_bump(net);
87458
87459 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_AH, 0);
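
flow_cache_genid here, and the rid/ip_id_count, dev_addr_genid, rt_genid and sk_drops counters in later hunks, all move to atomic_*_unchecked variants. Under PaX's REFCOUNT hardening, ordinary atomic_t operations trap on overflow to catch reference-count bugs; counters that are allowed to wrap (generation numbers, statistics) must therefore opt out explicitly. A rough userspace model of the distinction (the real implementation is per-architecture asm; this is only the semantics):

    #include <limits.h>
    #include <stdio.h>

    typedef struct { int v; }          atomic_t;            /* checked  */
    typedef struct { unsigned int v; } atomic_unchecked_t;  /* wrapping */

    static void atomic_inc(atomic_t *a)
    {
        if (a->v == INT_MAX) {         /* ~ the REFCOUNT overflow trap */
            fprintf(stderr, "refcount overflow detected\n");
            return;
        }
        a->v++;
    }

    static void atomic_inc_unchecked(atomic_unchecked_t *a)
    {
        a->v++;                        /* wrap-around is intended here */
    }

    int main(void)
    {
        atomic_t ref = { INT_MAX };
        atomic_unchecked_t gen = { UINT_MAX };

        atomic_inc(&ref);              /* caught */
        atomic_inc_unchecked(&gen);    /* silently wraps to 0 */
        printf("gen = %u\n", gen.v);
        return 0;
    }
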
87460diff --git a/net/ipv4/devinet.c b/net/ipv4/devinet.c
87461index dfc39d4..0b82c4d 100644
87462--- a/net/ipv4/devinet.c
87463+++ b/net/ipv4/devinet.c
87464@@ -1529,7 +1529,7 @@ static int inet_dump_ifaddr(struct sk_buff *skb, struct netlink_callback *cb)
87465 idx = 0;
87466 head = &net->dev_index_head[h];
87467 rcu_read_lock();
87468- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
87469+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
87470 net->dev_base_seq;
87471 hlist_for_each_entry_rcu(dev, head, index_hlist) {
87472 if (idx < s_idx)
87473@@ -1840,7 +1840,7 @@ static int inet_netconf_dump_devconf(struct sk_buff *skb,
87474 idx = 0;
87475 head = &net->dev_index_head[h];
87476 rcu_read_lock();
87477- cb->seq = atomic_read(&net->ipv4.dev_addr_genid) ^
87478+ cb->seq = atomic_read_unchecked(&net->ipv4.dev_addr_genid) ^
87479 net->dev_base_seq;
87480 hlist_for_each_entry_rcu(dev, head, index_hlist) {
87481 if (idx < s_idx)
87482@@ -2065,7 +2065,7 @@ static int ipv4_doint_and_flush(ctl_table *ctl, int write,
87483 #define DEVINET_SYSCTL_FLUSHING_ENTRY(attr, name) \
87484 DEVINET_SYSCTL_COMPLEX_ENTRY(attr, name, ipv4_doint_and_flush)
87485
87486-static struct devinet_sysctl_table {
87487+static const struct devinet_sysctl_table {
87488 struct ctl_table_header *sysctl_header;
87489 struct ctl_table devinet_vars[__IPV4_DEVCONF_MAX];
87490 } devinet_sysctl = {
87491@@ -2183,7 +2183,7 @@ static __net_init int devinet_init_net(struct net *net)
87492 int err;
87493 struct ipv4_devconf *all, *dflt;
87494 #ifdef CONFIG_SYSCTL
87495- struct ctl_table *tbl = ctl_forward_entry;
87496+ ctl_table_no_const *tbl = NULL;
87497 struct ctl_table_header *forw_hdr;
87498 #endif
87499
87500@@ -2201,7 +2201,7 @@ static __net_init int devinet_init_net(struct net *net)
87501 goto err_alloc_dflt;
87502
87503 #ifdef CONFIG_SYSCTL
87504- tbl = kmemdup(tbl, sizeof(ctl_forward_entry), GFP_KERNEL);
87505+ tbl = kmemdup(ctl_forward_entry, sizeof(ctl_forward_entry), GFP_KERNEL);
87506 if (tbl == NULL)
87507 goto err_alloc_ctl;
87508
87509@@ -2221,7 +2221,10 @@ static __net_init int devinet_init_net(struct net *net)
87510 goto err_reg_dflt;
87511
87512 err = -ENOMEM;
87513- forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
87514+ if (!net_eq(net, &init_net))
87515+ forw_hdr = register_net_sysctl(net, "net/ipv4", tbl);
87516+ else
87517+ forw_hdr = register_net_sysctl(net, "net/ipv4", ctl_forward_entry);
87518 if (forw_hdr == NULL)
87519 goto err_reg_ctl;
87520 net->ipv4.forw_hdr = forw_hdr;
87521@@ -2237,8 +2240,7 @@ err_reg_ctl:
87522 err_reg_dflt:
87523 __devinet_sysctl_unregister(all);
87524 err_reg_all:
87525- if (tbl != ctl_forward_entry)
87526- kfree(tbl);
87527+ kfree(tbl);
87528 err_alloc_ctl:
87529 #endif
87530 if (dflt != &ipv4_devconf_dflt)
87531diff --git a/net/ipv4/esp4.c b/net/ipv4/esp4.c
87532index 4cfe34d..a6ba66e 100644
87533--- a/net/ipv4/esp4.c
87534+++ b/net/ipv4/esp4.c
87535@@ -503,7 +503,7 @@ static void esp4_err(struct sk_buff *skb, u32 info)
87536 return;
87537
87538 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
87539- atomic_inc(&flow_cache_genid);
87540+ atomic_inc_unchecked(&flow_cache_genid);
87541 rt_genid_bump(net);
87542
87543 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_ESP, 0);
87544diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c
87545index c7629a2..b62d139 100644
87546--- a/net/ipv4/fib_frontend.c
87547+++ b/net/ipv4/fib_frontend.c
87548@@ -1017,12 +1017,12 @@ static int fib_inetaddr_event(struct notifier_block *this, unsigned long event,
87549 #ifdef CONFIG_IP_ROUTE_MULTIPATH
87550 fib_sync_up(dev);
87551 #endif
87552- atomic_inc(&net->ipv4.dev_addr_genid);
87553+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
87554 rt_cache_flush(dev_net(dev));
87555 break;
87556 case NETDEV_DOWN:
87557 fib_del_ifaddr(ifa, NULL);
87558- atomic_inc(&net->ipv4.dev_addr_genid);
87559+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
87560 if (ifa->ifa_dev->ifa_list == NULL) {
87561 /* Last address was deleted from this interface.
87562 * Disable IP.
87563@@ -1058,7 +1058,7 @@ static int fib_netdev_event(struct notifier_block *this, unsigned long event, vo
87564 #ifdef CONFIG_IP_ROUTE_MULTIPATH
87565 fib_sync_up(dev);
87566 #endif
87567- atomic_inc(&net->ipv4.dev_addr_genid);
87568+ atomic_inc_unchecked(&net->ipv4.dev_addr_genid);
87569 rt_cache_flush(net);
87570 break;
87571 case NETDEV_DOWN:
87572diff --git a/net/ipv4/fib_semantics.c b/net/ipv4/fib_semantics.c
87573index 8f6cb7a..34507f9 100644
87574--- a/net/ipv4/fib_semantics.c
87575+++ b/net/ipv4/fib_semantics.c
87576@@ -765,7 +765,7 @@ __be32 fib_info_update_nh_saddr(struct net *net, struct fib_nh *nh)
87577 nh->nh_saddr = inet_select_addr(nh->nh_dev,
87578 nh->nh_gw,
87579 nh->nh_parent->fib_scope);
87580- nh->nh_saddr_genid = atomic_read(&net->ipv4.dev_addr_genid);
87581+ nh->nh_saddr_genid = atomic_read_unchecked(&net->ipv4.dev_addr_genid);
87582
87583 return nh->nh_saddr;
87584 }
87585diff --git a/net/ipv4/inet_connection_sock.c b/net/ipv4/inet_connection_sock.c
87586index 6acb541..9ea617d 100644
87587--- a/net/ipv4/inet_connection_sock.c
87588+++ b/net/ipv4/inet_connection_sock.c
87589@@ -37,7 +37,7 @@ struct local_ports sysctl_local_ports __read_mostly = {
87590 .range = { 32768, 61000 },
87591 };
87592
87593-unsigned long *sysctl_local_reserved_ports;
87594+unsigned long sysctl_local_reserved_ports[65536 / 8 / sizeof(unsigned long)];
87595 EXPORT_SYMBOL(sysctl_local_reserved_ports);
87596
87597 void inet_get_local_port_range(int *low, int *high)
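
Rather than allocating the reserved-ports bitmap at boot (the allocation and its error path were removed from inet_init in the af_inet.c hunk above), the patch sizes it statically: one bit per port, 65536 bits packed into unsigned longs. The ipv4_table hunk further down then points .data at the array directly instead of patching it in at init time. The sizing arithmetic:

    #include <assert.h>

    #define PORTS 65536

    /* 65536 bits -> 8192 bytes -> 1024 longs on LP64 (2048 on 32-bit) */
    static unsigned long reserved_ports[PORTS / 8 / sizeof(unsigned long)];

    int main(void)
    {
        assert(sizeof(reserved_ports) * 8 == PORTS);
        reserved_ports[0] |= 1UL << 22;   /* e.g. mark port 22 reserved */
        return 0;
    }
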
87598diff --git a/net/ipv4/inet_hashtables.c b/net/ipv4/inet_hashtables.c
87599index 6af375a..c493c74 100644
87600--- a/net/ipv4/inet_hashtables.c
87601+++ b/net/ipv4/inet_hashtables.c
87602@@ -18,12 +18,15 @@
87603 #include <linux/sched.h>
87604 #include <linux/slab.h>
87605 #include <linux/wait.h>
87606+#include <linux/security.h>
87607
87608 #include <net/inet_connection_sock.h>
87609 #include <net/inet_hashtables.h>
87610 #include <net/secure_seq.h>
87611 #include <net/ip.h>
87612
87613+extern void gr_update_task_in_ip_table(struct task_struct *task, const struct inet_sock *inet);
87614+
87615 /*
87616 * Allocate and initialize a new local port bind bucket.
87617 * The bindhash mutex for snum's hash chain must be held here.
87618@@ -554,6 +557,8 @@ ok:
87619 twrefcnt += inet_twsk_bind_unhash(tw, hinfo);
87620 spin_unlock(&head->lock);
87621
87622+ gr_update_task_in_ip_table(current, inet_sk(sk));
87623+
87624 if (tw) {
87625 inet_twsk_deschedule(tw, death_row);
87626 while (twrefcnt) {
87627diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
87628index 000e3d2..5472da3 100644
87629--- a/net/ipv4/inetpeer.c
87630+++ b/net/ipv4/inetpeer.c
87631@@ -503,8 +503,8 @@ relookup:
87632 if (p) {
87633 p->daddr = *daddr;
87634 atomic_set(&p->refcnt, 1);
87635- atomic_set(&p->rid, 0);
87636- atomic_set(&p->ip_id_count,
87637+ atomic_set_unchecked(&p->rid, 0);
87638+ atomic_set_unchecked(&p->ip_id_count,
87639 (daddr->family == AF_INET) ?
87640 secure_ip_id(daddr->addr.a4) :
87641 secure_ipv6_id(daddr->addr.a6));
87642diff --git a/net/ipv4/ip_fragment.c b/net/ipv4/ip_fragment.c
87643index b66910a..cfe416e 100644
87644--- a/net/ipv4/ip_fragment.c
87645+++ b/net/ipv4/ip_fragment.c
87646@@ -282,7 +282,7 @@ static inline int ip_frag_too_far(struct ipq *qp)
87647 return 0;
87648
87649 start = qp->rid;
87650- end = atomic_inc_return(&peer->rid);
87651+ end = atomic_inc_return_unchecked(&peer->rid);
87652 qp->rid = end;
87653
87654 rc = qp->q.fragments && (end - start) > max;
87655@@ -759,12 +759,11 @@ static struct ctl_table ip4_frags_ctl_table[] = {
87656
87657 static int __net_init ip4_frags_ns_ctl_register(struct net *net)
87658 {
87659- struct ctl_table *table;
87660+ ctl_table_no_const *table = NULL;
87661 struct ctl_table_header *hdr;
87662
87663- table = ip4_frags_ns_ctl_table;
87664 if (!net_eq(net, &init_net)) {
87665- table = kmemdup(table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
87666+ table = kmemdup(ip4_frags_ns_ctl_table, sizeof(ip4_frags_ns_ctl_table), GFP_KERNEL);
87667 if (table == NULL)
87668 goto err_alloc;
87669
87670@@ -775,9 +774,10 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
87671 /* Don't export sysctls to unprivileged users */
87672 if (net->user_ns != &init_user_ns)
87673 table[0].procname = NULL;
87674- }
87675+ hdr = register_net_sysctl(net, "net/ipv4", table);
87676+ } else
87677+ hdr = register_net_sysctl(net, "net/ipv4", ip4_frags_ns_ctl_table);
87678
87679- hdr = register_net_sysctl(net, "net/ipv4", table);
87680 if (hdr == NULL)
87681 goto err_reg;
87682
87683@@ -785,8 +785,7 @@ static int __net_init ip4_frags_ns_ctl_register(struct net *net)
87684 return 0;
87685
87686 err_reg:
87687- if (!net_eq(net, &init_net))
87688- kfree(table);
87689+ kfree(table);
87690 err_alloc:
87691 return -ENOMEM;
87692 }
87693diff --git a/net/ipv4/ip_gre.c b/net/ipv4/ip_gre.c
87694index 2a83591..68e7458 100644
87695--- a/net/ipv4/ip_gre.c
87696+++ b/net/ipv4/ip_gre.c
87697@@ -115,7 +115,7 @@ static bool log_ecn_error = true;
87698 module_param(log_ecn_error, bool, 0644);
87699 MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
87700
87701-static struct rtnl_link_ops ipgre_link_ops __read_mostly;
87702+static struct rtnl_link_ops ipgre_link_ops;
87703 static int ipgre_tunnel_init(struct net_device *dev);
87704
87705 static int ipgre_net_id __read_mostly;
87706@@ -503,10 +503,11 @@ static int ipgre_tunnel_ioctl(struct net_device *dev,
87707
87708 if (copy_from_user(&p, ifr->ifr_ifru.ifru_data, sizeof(p)))
87709 return -EFAULT;
87710- if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
87711- p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
87712- ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING))) {
87713- return -EINVAL;
87714+ if (cmd == SIOCADDTUNNEL || cmd == SIOCCHGTUNNEL) {
87715+ if (p.iph.version != 4 || p.iph.protocol != IPPROTO_GRE ||
87716+ p.iph.ihl != 5 || (p.iph.frag_off&htons(~IP_DF)) ||
87717+ ((p.i_flags|p.o_flags)&(GRE_VERSION|GRE_ROUTING)))
87718+ return -EINVAL;
87719 }
87720 p.i_flags = gre_flags_to_tnl_flags(p.i_flags);
87721 p.o_flags = gre_flags_to_tnl_flags(p.o_flags);
87722@@ -918,7 +919,7 @@ static const struct nla_policy ipgre_policy[IFLA_GRE_MAX + 1] = {
87723 [IFLA_GRE_PMTUDISC] = { .type = NLA_U8 },
87724 };
87725
87726-static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
87727+static struct rtnl_link_ops ipgre_link_ops = {
87728 .kind = "gre",
87729 .maxtype = IFLA_GRE_MAX,
87730 .policy = ipgre_policy,
87731@@ -932,7 +933,7 @@ static struct rtnl_link_ops ipgre_link_ops __read_mostly = {
87732 .fill_info = ipgre_fill_info,
87733 };
87734
87735-static struct rtnl_link_ops ipgre_tap_ops __read_mostly = {
87736+static struct rtnl_link_ops ipgre_tap_ops = {
87737 .kind = "gretap",
87738 .maxtype = IFLA_GRE_MAX,
87739 .policy = ipgre_policy,
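
The __read_mostly annotations are dropped from the rtnl_link_ops here (and, below, from vti, ipip, the ip6 tunnel ops and inet6_protocol), most plausibly because grsecurity's constification treats such ops structures as read-only after init, and an explicit .data..read_mostly section placement would conflict with that; this reading is an inference, the patch itself carries no comment. The conflict in miniature, with illustrative GCC attributes rather than the kernel macros:

    /* Two placements cannot both win: either the object sits in a
     * cache-friendly writable section, or in one the kernel can
     * write-protect after init. */
    struct link_ops { int (*setup)(void); };

    struct link_ops writable_ops
            __attribute__((section(".data.read_mostly"))) = { 0 };  /* ~ __read_mostly */

    const struct link_ops protected_ops = { 0 };                    /* constified */
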
87740diff --git a/net/ipv4/ip_sockglue.c b/net/ipv4/ip_sockglue.c
87741index d9c4f11..02b82dbc 100644
87742--- a/net/ipv4/ip_sockglue.c
87743+++ b/net/ipv4/ip_sockglue.c
87744@@ -1152,7 +1152,8 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
87745 len = min_t(unsigned int, len, opt->optlen);
87746 if (put_user(len, optlen))
87747 return -EFAULT;
87748- if (copy_to_user(optval, opt->__data, len))
87749+ if ((len > (sizeof(optbuf) - sizeof(struct ip_options))) ||
87750+ copy_to_user(optval, opt->__data, len))
87751 return -EFAULT;
87752 return 0;
87753 }
87754@@ -1283,7 +1284,7 @@ static int do_ip_getsockopt(struct sock *sk, int level, int optname,
87755 if (sk->sk_type != SOCK_STREAM)
87756 return -ENOPROTOOPT;
87757
87758- msg.msg_control = optval;
87759+ msg.msg_control = (void __force_kernel *)optval;
87760 msg.msg_controllen = len;
87761 msg.msg_flags = flags;
87762
87763diff --git a/net/ipv4/ip_vti.c b/net/ipv4/ip_vti.c
87764index c118f6b..63856c4 100644
87765--- a/net/ipv4/ip_vti.c
87766+++ b/net/ipv4/ip_vti.c
87767@@ -47,7 +47,7 @@
87768 #define HASH_SIZE 16
87769 #define HASH(addr) (((__force u32)addr^((__force u32)addr>>4))&(HASH_SIZE-1))
87770
87771-static struct rtnl_link_ops vti_link_ops __read_mostly;
87772+static struct rtnl_link_ops vti_link_ops;
87773
87774 static int vti_net_id __read_mostly;
87775 struct vti_net {
87776@@ -606,17 +606,10 @@ static int __net_init vti_fb_tunnel_init(struct net_device *dev)
87777 struct iphdr *iph = &tunnel->parms.iph;
87778 struct vti_net *ipn = net_generic(dev_net(dev), vti_net_id);
87779
87780- tunnel->dev = dev;
87781- strcpy(tunnel->parms.name, dev->name);
87782-
87783 iph->version = 4;
87784 iph->protocol = IPPROTO_IPIP;
87785 iph->ihl = 5;
87786
87787- dev->tstats = alloc_percpu(struct pcpu_tstats);
87788- if (!dev->tstats)
87789- return -ENOMEM;
87790-
87791 dev_hold(dev);
87792 rcu_assign_pointer(ipn->tunnels_wc[0], tunnel);
87793 return 0;
87794@@ -847,7 +840,7 @@ static const struct nla_policy vti_policy[IFLA_VTI_MAX + 1] = {
87795 [IFLA_VTI_REMOTE] = { .len = FIELD_SIZEOF(struct iphdr, daddr) },
87796 };
87797
87798-static struct rtnl_link_ops vti_link_ops __read_mostly = {
87799+static struct rtnl_link_ops vti_link_ops = {
87800 .kind = "vti",
87801 .maxtype = IFLA_VTI_MAX,
87802 .policy = vti_policy,
87803diff --git a/net/ipv4/ipcomp.c b/net/ipv4/ipcomp.c
87804index 59cb8c7..a72160c 100644
87805--- a/net/ipv4/ipcomp.c
87806+++ b/net/ipv4/ipcomp.c
87807@@ -48,7 +48,7 @@ static void ipcomp4_err(struct sk_buff *skb, u32 info)
87808 return;
87809
87810 if (icmp_hdr(skb)->type == ICMP_DEST_UNREACH) {
87811- atomic_inc(&flow_cache_genid);
87812+ atomic_inc_unchecked(&flow_cache_genid);
87813 rt_genid_bump(net);
87814
87815 ipv4_update_pmtu(skb, net, info, 0, 0, IPPROTO_COMP, 0);
87816diff --git a/net/ipv4/ipconfig.c b/net/ipv4/ipconfig.c
87817index efa1138..20dbba0 100644
87818--- a/net/ipv4/ipconfig.c
87819+++ b/net/ipv4/ipconfig.c
87820@@ -334,7 +334,7 @@ static int __init ic_devinet_ioctl(unsigned int cmd, struct ifreq *arg)
87821
87822 mm_segment_t oldfs = get_fs();
87823 set_fs(get_ds());
87824- res = devinet_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
87825+ res = devinet_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
87826 set_fs(oldfs);
87827 return res;
87828 }
87829@@ -345,7 +345,7 @@ static int __init ic_dev_ioctl(unsigned int cmd, struct ifreq *arg)
87830
87831 mm_segment_t oldfs = get_fs();
87832 set_fs(get_ds());
87833- res = dev_ioctl(&init_net, cmd, (struct ifreq __user *) arg);
87834+ res = dev_ioctl(&init_net, cmd, (struct ifreq __force_user *) arg);
87835 set_fs(oldfs);
87836 return res;
87837 }
87838@@ -356,7 +356,7 @@ static int __init ic_route_ioctl(unsigned int cmd, struct rtentry *arg)
87839
87840 mm_segment_t oldfs = get_fs();
87841 set_fs(get_ds());
87842- res = ip_rt_ioctl(&init_net, cmd, (void __user *) arg);
87843+ res = ip_rt_ioctl(&init_net, cmd, (void __force_user *) arg);
87844 set_fs(oldfs);
87845 return res;
87846 }
87847diff --git a/net/ipv4/ipip.c b/net/ipv4/ipip.c
87848index 77bfcce..64a55d4 100644
87849--- a/net/ipv4/ipip.c
87850+++ b/net/ipv4/ipip.c
87851@@ -124,7 +124,7 @@ MODULE_PARM_DESC(log_ecn_error, "Log packets received with corrupted ECN");
87852 static int ipip_net_id __read_mostly;
87853
87854 static int ipip_tunnel_init(struct net_device *dev);
87855-static struct rtnl_link_ops ipip_link_ops __read_mostly;
87856+static struct rtnl_link_ops ipip_link_ops;
87857
87858 static int ipip_err(struct sk_buff *skb, u32 info)
87859 {
87860@@ -404,7 +404,7 @@ static const struct nla_policy ipip_policy[IFLA_IPTUN_MAX + 1] = {
87861 [IFLA_IPTUN_PMTUDISC] = { .type = NLA_U8 },
87862 };
87863
87864-static struct rtnl_link_ops ipip_link_ops __read_mostly = {
87865+static struct rtnl_link_ops ipip_link_ops = {
87866 .kind = "ipip",
87867 .maxtype = IFLA_IPTUN_MAX,
87868 .policy = ipip_policy,
87869diff --git a/net/ipv4/netfilter/arp_tables.c b/net/ipv4/netfilter/arp_tables.c
87870index 85a4f21..1beb1f5 100644
87871--- a/net/ipv4/netfilter/arp_tables.c
87872+++ b/net/ipv4/netfilter/arp_tables.c
87873@@ -880,14 +880,14 @@ static int compat_table_info(const struct xt_table_info *info,
87874 #endif
87875
87876 static int get_info(struct net *net, void __user *user,
87877- const int *len, int compat)
87878+ int len, int compat)
87879 {
87880 char name[XT_TABLE_MAXNAMELEN];
87881 struct xt_table *t;
87882 int ret;
87883
87884- if (*len != sizeof(struct arpt_getinfo)) {
87885- duprintf("length %u != %Zu\n", *len,
87886+ if (len != sizeof(struct arpt_getinfo)) {
87887+ duprintf("length %u != %Zu\n", len,
87888 sizeof(struct arpt_getinfo));
87889 return -EINVAL;
87890 }
87891@@ -924,7 +924,7 @@ static int get_info(struct net *net, void __user *user,
87892 info.size = private->size;
87893 strcpy(info.name, name);
87894
87895- if (copy_to_user(user, &info, *len) != 0)
87896+ if (copy_to_user(user, &info, len) != 0)
87897 ret = -EFAULT;
87898 else
87899 ret = 0;
87900@@ -1683,7 +1683,7 @@ static int compat_do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user,
87901
87902 switch (cmd) {
87903 case ARPT_SO_GET_INFO:
87904- ret = get_info(sock_net(sk), user, len, 1);
87905+ ret = get_info(sock_net(sk), user, *len, 1);
87906 break;
87907 case ARPT_SO_GET_ENTRIES:
87908 ret = compat_get_entries(sock_net(sk), user, len);
87909@@ -1728,7 +1728,7 @@ static int do_arpt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len
87910
87911 switch (cmd) {
87912 case ARPT_SO_GET_INFO:
87913- ret = get_info(sock_net(sk), user, len, 0);
87914+ ret = get_info(sock_net(sk), user, *len, 0);
87915 break;
87916
87917 case ARPT_SO_GET_ENTRIES:
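
get_info() now receives the length by value, with the dereference hoisted into the two get_ctl switches; the identical conversion is applied to ip_tables and ip6_tables below. The practical effect is that the value compared against sizeof(struct arpt_getinfo) is provably the same value later handed to copy_to_user, a bound that a size-checking compiler plugin can verify locally. Shape of the converted function, condensed:

    #include <string.h>

    struct getinfo { char name[32]; unsigned int size; };

    /* len arrives by value: validated once, used as validated. */
    static int get_info(void *user_buf, int len)
    {
        struct getinfo info;

        if (len != (int)sizeof(info))
            return -1;                    /* -EINVAL in the patch */
        memset(&info, 0, sizeof(info));
        memcpy(user_buf, &info, len);     /* ~ copy_to_user(user, &info, len) */
        return 0;
    }

    int main(void)
    {
        struct getinfo out;
        return get_info(&out, sizeof(out));
    }
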
87918diff --git a/net/ipv4/netfilter/ip_tables.c b/net/ipv4/netfilter/ip_tables.c
87919index d23118d..6ad7277 100644
87920--- a/net/ipv4/netfilter/ip_tables.c
87921+++ b/net/ipv4/netfilter/ip_tables.c
87922@@ -1068,14 +1068,14 @@ static int compat_table_info(const struct xt_table_info *info,
87923 #endif
87924
87925 static int get_info(struct net *net, void __user *user,
87926- const int *len, int compat)
87927+ int len, int compat)
87928 {
87929 char name[XT_TABLE_MAXNAMELEN];
87930 struct xt_table *t;
87931 int ret;
87932
87933- if (*len != sizeof(struct ipt_getinfo)) {
87934- duprintf("length %u != %zu\n", *len,
87935+ if (len != sizeof(struct ipt_getinfo)) {
87936+ duprintf("length %u != %zu\n", len,
87937 sizeof(struct ipt_getinfo));
87938 return -EINVAL;
87939 }
87940@@ -1112,7 +1112,7 @@ static int get_info(struct net *net, void __user *user,
87941 info.size = private->size;
87942 strcpy(info.name, name);
87943
87944- if (copy_to_user(user, &info, *len) != 0)
87945+ if (copy_to_user(user, &info, len) != 0)
87946 ret = -EFAULT;
87947 else
87948 ret = 0;
87949@@ -1966,7 +1966,7 @@ compat_do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
87950
87951 switch (cmd) {
87952 case IPT_SO_GET_INFO:
87953- ret = get_info(sock_net(sk), user, len, 1);
87954+ ret = get_info(sock_net(sk), user, *len, 1);
87955 break;
87956 case IPT_SO_GET_ENTRIES:
87957 ret = compat_get_entries(sock_net(sk), user, len);
87958@@ -2013,7 +2013,7 @@ do_ipt_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
87959
87960 switch (cmd) {
87961 case IPT_SO_GET_INFO:
87962- ret = get_info(sock_net(sk), user, len, 0);
87963+ ret = get_info(sock_net(sk), user, *len, 0);
87964 break;
87965
87966 case IPT_SO_GET_ENTRIES:
87967diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
87968index 7d93d62..cbbf2a3 100644
87969--- a/net/ipv4/ping.c
87970+++ b/net/ipv4/ping.c
87971@@ -843,7 +843,7 @@ static void ping_format_sock(struct sock *sp, struct seq_file *f,
87972 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
87973 0, sock_i_ino(sp),
87974 atomic_read(&sp->sk_refcnt), sp,
87975- atomic_read(&sp->sk_drops), len);
87976+ atomic_read_unchecked(&sp->sk_drops), len);
87977 }
87978
87979 static int ping_seq_show(struct seq_file *seq, void *v)
87980diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
87981index dd44e0a..06dcca4 100644
87982--- a/net/ipv4/raw.c
87983+++ b/net/ipv4/raw.c
87984@@ -309,7 +309,7 @@ static int raw_rcv_skb(struct sock *sk, struct sk_buff *skb)
87985 int raw_rcv(struct sock *sk, struct sk_buff *skb)
87986 {
87987 if (!xfrm4_policy_check(sk, XFRM_POLICY_IN, skb)) {
87988- atomic_inc(&sk->sk_drops);
87989+ atomic_inc_unchecked(&sk->sk_drops);
87990 kfree_skb(skb);
87991 return NET_RX_DROP;
87992 }
87993@@ -745,16 +745,20 @@ static int raw_init(struct sock *sk)
87994
87995 static int raw_seticmpfilter(struct sock *sk, char __user *optval, int optlen)
87996 {
87997+ struct icmp_filter filter;
87998+
87999 if (optlen > sizeof(struct icmp_filter))
88000 optlen = sizeof(struct icmp_filter);
88001- if (copy_from_user(&raw_sk(sk)->filter, optval, optlen))
88002+ if (copy_from_user(&filter, optval, optlen))
88003 return -EFAULT;
88004+ raw_sk(sk)->filter = filter;
88005 return 0;
88006 }
88007
88008 static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *optlen)
88009 {
88010 int len, ret = -EFAULT;
88011+ struct icmp_filter filter;
88012
88013 if (get_user(len, optlen))
88014 goto out;
88015@@ -764,8 +768,8 @@ static int raw_geticmpfilter(struct sock *sk, char __user *optval, int __user *o
88016 if (len > sizeof(struct icmp_filter))
88017 len = sizeof(struct icmp_filter);
88018 ret = -EFAULT;
88019- if (put_user(len, optlen) ||
88020- copy_to_user(optval, &raw_sk(sk)->filter, len))
88021+ filter = raw_sk(sk)->filter;
88022+ if (put_user(len, optlen) || len > sizeof filter || copy_to_user(optval, &filter, len))
88023 goto out;
88024 ret = 0;
88025 out: return ret;
88026@@ -994,7 +998,7 @@ static void raw_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
88027 0, 0L, 0,
88028 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
88029 0, sock_i_ino(sp),
88030- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
88031+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
88032 }
88033
88034 static int raw_seq_show(struct seq_file *seq, void *v)
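
Both raw-socket filter paths now stage through an on-stack struct icmp_filter: setsockopt copies from userspace into the local copy and only then assigns it to the socket, so a faulting or partial copy_from_user can never leave the live filter half-written, while getsockopt snapshots the live filter and bounds len before copying out. The rawv6 hunks further down repeat the same change for icmp6_filter. The set side as a userspace model (types illustrative):

    #include <string.h>

    struct icmp_filter { unsigned int data; };
    struct raw_sock    { struct icmp_filter filter; };

    static int set_icmp_filter(struct raw_sock *sk,
                               const void *optval, size_t optlen)
    {
        struct icmp_filter tmp;

        if (optlen > sizeof(tmp))
            optlen = sizeof(tmp);
        memcpy(&tmp, optval, optlen);  /* ~ copy_from_user into the staging copy */
        sk->filter = tmp;              /* published only after a full copy */
        return 0;
    }

    int main(void)
    {
        struct raw_sock sk = { { 0 } };
        struct icmp_filter want = { 0xff };
        return set_icmp_filter(&sk, &want, sizeof(want));
    }
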
88035diff --git a/net/ipv4/route.c b/net/ipv4/route.c
88036index d35bbf0..faa3ab8 100644
88037--- a/net/ipv4/route.c
88038+++ b/net/ipv4/route.c
88039@@ -2558,34 +2558,34 @@ static struct ctl_table ipv4_route_flush_table[] = {
88040 .maxlen = sizeof(int),
88041 .mode = 0200,
88042 .proc_handler = ipv4_sysctl_rtcache_flush,
88043+ .extra1 = &init_net,
88044 },
88045 { },
88046 };
88047
88048 static __net_init int sysctl_route_net_init(struct net *net)
88049 {
88050- struct ctl_table *tbl;
88051+ ctl_table_no_const *tbl = NULL;
88052
88053- tbl = ipv4_route_flush_table;
88054 if (!net_eq(net, &init_net)) {
88055- tbl = kmemdup(tbl, sizeof(ipv4_route_flush_table), GFP_KERNEL);
88056+ tbl = kmemdup(ipv4_route_flush_table, sizeof(ipv4_route_flush_table), GFP_KERNEL);
88057 if (tbl == NULL)
88058 goto err_dup;
88059
88060 /* Don't export sysctls to unprivileged users */
88061 if (net->user_ns != &init_user_ns)
88062 tbl[0].procname = NULL;
88063- }
88064- tbl[0].extra1 = net;
88065+ tbl[0].extra1 = net;
88066+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
88067+ } else
88068+ net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", ipv4_route_flush_table);
88069
88070- net->ipv4.route_hdr = register_net_sysctl(net, "net/ipv4/route", tbl);
88071 if (net->ipv4.route_hdr == NULL)
88072 goto err_reg;
88073 return 0;
88074
88075 err_reg:
88076- if (tbl != ipv4_route_flush_table)
88077- kfree(tbl);
88078+ kfree(tbl);
88079 err_dup:
88080 return -ENOMEM;
88081 }
88082@@ -2608,7 +2608,7 @@ static __net_initdata struct pernet_operations sysctl_route_ops = {
88083
88084 static __net_init int rt_genid_init(struct net *net)
88085 {
88086- atomic_set(&net->rt_genid, 0);
88087+ atomic_set_unchecked(&net->rt_genid, 0);
88088 get_random_bytes(&net->ipv4.dev_addr_genid,
88089 sizeof(net->ipv4.dev_addr_genid));
88090 return 0;
88091diff --git a/net/ipv4/sysctl_net_ipv4.c b/net/ipv4/sysctl_net_ipv4.c
88092index fa2f63f..6554815 100644
88093--- a/net/ipv4/sysctl_net_ipv4.c
88094+++ b/net/ipv4/sysctl_net_ipv4.c
88095@@ -55,7 +55,7 @@ static int ipv4_local_port_range(ctl_table *table, int write,
88096 {
88097 int ret;
88098 int range[2];
88099- ctl_table tmp = {
88100+ ctl_table_no_const tmp = {
88101 .data = &range,
88102 .maxlen = sizeof(range),
88103 .mode = table->mode,
88104@@ -108,7 +108,7 @@ static int ipv4_ping_group_range(ctl_table *table, int write,
88105 int ret;
88106 gid_t urange[2];
88107 kgid_t low, high;
88108- ctl_table tmp = {
88109+ ctl_table_no_const tmp = {
88110 .data = &urange,
88111 .maxlen = sizeof(urange),
88112 .mode = table->mode,
88113@@ -139,7 +139,7 @@ static int proc_tcp_congestion_control(ctl_table *ctl, int write,
88114 void __user *buffer, size_t *lenp, loff_t *ppos)
88115 {
88116 char val[TCP_CA_NAME_MAX];
88117- ctl_table tbl = {
88118+ ctl_table_no_const tbl = {
88119 .data = val,
88120 .maxlen = TCP_CA_NAME_MAX,
88121 };
88122@@ -158,7 +158,7 @@ static int proc_tcp_available_congestion_control(ctl_table *ctl,
88123 void __user *buffer, size_t *lenp,
88124 loff_t *ppos)
88125 {
88126- ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX, };
88127+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX, };
88128 int ret;
88129
88130 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
88131@@ -175,7 +175,7 @@ static int proc_allowed_congestion_control(ctl_table *ctl,
88132 void __user *buffer, size_t *lenp,
88133 loff_t *ppos)
88134 {
88135- ctl_table tbl = { .maxlen = TCP_CA_BUF_MAX };
88136+ ctl_table_no_const tbl = { .maxlen = TCP_CA_BUF_MAX };
88137 int ret;
88138
88139 tbl.data = kmalloc(tbl.maxlen, GFP_USER);
88140@@ -201,15 +201,17 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
88141 struct mem_cgroup *memcg;
88142 #endif
88143
88144- ctl_table tmp = {
88145+ ctl_table_no_const tmp = {
88146 .data = &vec,
88147 .maxlen = sizeof(vec),
88148 .mode = ctl->mode,
88149 };
88150
88151 if (!write) {
88152- ctl->data = &net->ipv4.sysctl_tcp_mem;
88153- return proc_doulongvec_minmax(ctl, write, buffer, lenp, ppos);
88154+ ctl_table_no_const tcp_mem = *ctl;
88155+
88156+ tcp_mem.data = &net->ipv4.sysctl_tcp_mem;
88157+ return proc_doulongvec_minmax(&tcp_mem, write, buffer, lenp, ppos);
88158 }
88159
88160 ret = proc_doulongvec_minmax(&tmp, write, buffer, lenp, ppos);
88161@@ -236,7 +238,7 @@ static int ipv4_tcp_mem(ctl_table *ctl, int write,
88162 static int proc_tcp_fastopen_key(ctl_table *ctl, int write, void __user *buffer,
88163 size_t *lenp, loff_t *ppos)
88164 {
88165- ctl_table tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
88166+ ctl_table_no_const tbl = { .maxlen = (TCP_FASTOPEN_KEY_LENGTH * 2 + 10) };
88167 struct tcp_fastopen_context *ctxt;
88168 int ret;
88169 u32 user_key[4]; /* 16 bytes, matching TCP_FASTOPEN_KEY_LENGTH */
88170@@ -477,7 +479,7 @@ static struct ctl_table ipv4_table[] = {
88171 },
88172 {
88173 .procname = "ip_local_reserved_ports",
88174- .data = NULL, /* initialized in sysctl_ipv4_init */
88175+ .data = sysctl_local_reserved_ports,
88176 .maxlen = 65536,
88177 .mode = 0644,
88178 .proc_handler = proc_do_large_bitmap,
88179@@ -842,11 +844,10 @@ static struct ctl_table ipv4_net_table[] = {
88180
88181 static __net_init int ipv4_sysctl_init_net(struct net *net)
88182 {
88183- struct ctl_table *table;
88184+ ctl_table_no_const *table = NULL;
88185
88186- table = ipv4_net_table;
88187 if (!net_eq(net, &init_net)) {
88188- table = kmemdup(table, sizeof(ipv4_net_table), GFP_KERNEL);
88189+ table = kmemdup(ipv4_net_table, sizeof(ipv4_net_table), GFP_KERNEL);
88190 if (table == NULL)
88191 goto err_alloc;
88192
88193@@ -881,15 +882,17 @@ static __net_init int ipv4_sysctl_init_net(struct net *net)
88194
88195 tcp_init_mem(net);
88196
88197- net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
88198+ if (!net_eq(net, &init_net))
88199+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", table);
88200+ else
88201+ net->ipv4.ipv4_hdr = register_net_sysctl(net, "net/ipv4", ipv4_net_table);
88202 if (net->ipv4.ipv4_hdr == NULL)
88203 goto err_reg;
88204
88205 return 0;
88206
88207 err_reg:
88208- if (!net_eq(net, &init_net))
88209- kfree(table);
88210+ kfree(table);
88211 err_alloc:
88212 return -ENOMEM;
88213 }
88214@@ -911,16 +914,6 @@ static __net_initdata struct pernet_operations ipv4_sysctl_ops = {
88215 static __init int sysctl_ipv4_init(void)
88216 {
88217 struct ctl_table_header *hdr;
88218- struct ctl_table *i;
88219-
88220- for (i = ipv4_table; i->procname; i++) {
88221- if (strcmp(i->procname, "ip_local_reserved_ports") == 0) {
88222- i->data = sysctl_local_reserved_ports;
88223- break;
88224- }
88225- }
88226- if (!i->procname)
88227- return -EINVAL;
88228
88229 hdr = register_net_sysctl(&init_net, "net/ipv4", ipv4_table);
88230 if (hdr == NULL)
88231diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
88232index 9c62257..651cc27 100644
88233--- a/net/ipv4/tcp_input.c
88234+++ b/net/ipv4/tcp_input.c
88235@@ -4436,7 +4436,7 @@ static struct sk_buff *tcp_collapse_one(struct sock *sk, struct sk_buff *skb,
88236 * simplifies code)
88237 */
88238 static void
88239-tcp_collapse(struct sock *sk, struct sk_buff_head *list,
88240+__intentional_overflow(5,6) tcp_collapse(struct sock *sk, struct sk_buff_head *list,
88241 struct sk_buff *head, struct sk_buff *tail,
88242 u32 start, u32 end)
88243 {
88244@@ -5522,6 +5522,7 @@ discard:
88245 tcp_paws_reject(&tp->rx_opt, 0))
88246 goto discard_and_undo;
88247
88248+#ifndef CONFIG_GRKERNSEC_NO_SIMULT_CONNECT
88249 if (th->syn) {
88250 /* We see SYN without ACK. It is attempt of
88251 * simultaneous connect with crossed SYNs.
88252@@ -5572,6 +5573,7 @@ discard:
88253 goto discard;
88254 #endif
88255 }
88256+#endif
88257 /* "fifth, if neither of the SYN or RST bits is set then
88258 * drop the segment and return."
88259 */
88260@@ -5616,7 +5618,7 @@ int tcp_rcv_state_process(struct sock *sk, struct sk_buff *skb,
88261 goto discard;
88262
88263 if (th->syn) {
88264- if (th->fin)
88265+ if (th->fin || th->urg || th->psh)
88266 goto discard;
88267 if (icsk->icsk_af_ops->conn_request(sk, skb) < 0)
88268 return 1;
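
Two separate hardenings in tcp_input.c: with GRKERNSEC_NO_SIMULT_CONNECT the whole simultaneous-open branch compiles out, closing the crossed-SYN state machine to unsolicited peers, and in tcp_rcv_state_process a listener now discards a SYN that also carries FIN, URG or PSH instead of passing it to conn_request(). The flag check in isolation:

    #include <assert.h>

    struct tcp_flags { unsigned syn:1, fin:1, urg:1, psh:1; };

    /* 1 = discard before conn_request() ever sees the segment */
    static int bogus_syn(struct tcp_flags th)
    {
        return th.syn && (th.fin || th.urg || th.psh);
    }

    int main(void)
    {
        assert(!bogus_syn((struct tcp_flags){ .syn = 1 }));
        assert( bogus_syn((struct tcp_flags){ .syn = 1, .fin = 1 }));
        assert( bogus_syn((struct tcp_flags){ .syn = 1, .psh = 1 }));
        return 0;
    }
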
88269diff --git a/net/ipv4/tcp_ipv4.c b/net/ipv4/tcp_ipv4.c
88270index 7999fc5..c812f42 100644
88271--- a/net/ipv4/tcp_ipv4.c
88272+++ b/net/ipv4/tcp_ipv4.c
88273@@ -90,6 +90,10 @@ int sysctl_tcp_low_latency __read_mostly;
88274 EXPORT_SYMBOL(sysctl_tcp_low_latency);
88275
88276
88277+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88278+extern int grsec_enable_blackhole;
88279+#endif
88280+
88281 #ifdef CONFIG_TCP_MD5SIG
88282 static int tcp_v4_md5_hash_hdr(char *md5_hash, const struct tcp_md5sig_key *key,
88283 __be32 daddr, __be32 saddr, const struct tcphdr *th);
88284@@ -1855,6 +1859,9 @@ int tcp_v4_do_rcv(struct sock *sk, struct sk_buff *skb)
88285 return 0;
88286
88287 reset:
88288+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88289+ if (!grsec_enable_blackhole)
88290+#endif
88291 tcp_v4_send_reset(rsk, skb);
88292 discard:
88293 kfree_skb(skb);
88294@@ -2000,12 +2007,19 @@ int tcp_v4_rcv(struct sk_buff *skb)
88295 TCP_SKB_CB(skb)->sacked = 0;
88296
88297 sk = __inet_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
88298- if (!sk)
88299+ if (!sk) {
88300+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88301+ ret = 1;
88302+#endif
88303 goto no_tcp_socket;
88304-
88305+ }
88306 process:
88307- if (sk->sk_state == TCP_TIME_WAIT)
88308+ if (sk->sk_state == TCP_TIME_WAIT) {
88309+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88310+ ret = 2;
88311+#endif
88312 goto do_time_wait;
88313+ }
88314
88315 if (unlikely(iph->ttl < inet_sk(sk)->min_ttl)) {
88316 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
88317@@ -2058,6 +2072,10 @@ csum_error:
88318 bad_packet:
88319 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
88320 } else {
88321+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88322+ if (!grsec_enable_blackhole || (ret == 1 &&
88323+ (skb->dev->flags & IFF_LOOPBACK)))
88324+#endif
88325 tcp_v4_send_reset(NULL, skb);
88326 }
88327
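This is the core of GRKERNSEC_BLACKHOLE as applied to TCP: ret records why the lookup failed (1 = no socket, 2 = time-wait), and the RST that would normally advertise a closed port is suppressed unless the feature is off or the no-socket case arrived on a loopback device. tcp_minisocks and the UDP ICMP-unreachable path below get the same gating. The decision, condensed:

    /* Returns 1 when a reset should actually be sent. grsec_enable_blackhole
     * models the sysctl from the grsecurity side of the patch. */
    static int grsec_enable_blackhole = 1;

    static int should_send_reset(int ret, int dev_is_loopback)
    {
        if (!grsec_enable_blackhole)
            return 1;                           /* feature off: normal RSTs */
        return ret == 1 && dev_is_loopback;     /* local probes still answered */
    }

    int main(void) { return should_send_reset(1, 1) ? 0 : 1; }

To a remote scanner, a blackholed closed port is indistinguishable from a filtered one.
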
88328diff --git a/net/ipv4/tcp_minisocks.c b/net/ipv4/tcp_minisocks.c
88329index 0f01788..d52a859 100644
88330--- a/net/ipv4/tcp_minisocks.c
88331+++ b/net/ipv4/tcp_minisocks.c
88332@@ -27,6 +27,10 @@
88333 #include <net/inet_common.h>
88334 #include <net/xfrm.h>
88335
88336+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88337+extern int grsec_enable_blackhole;
88338+#endif
88339+
88340 int sysctl_tcp_syncookies __read_mostly = 1;
88341 EXPORT_SYMBOL(sysctl_tcp_syncookies);
88342
88343@@ -717,7 +721,10 @@ embryonic_reset:
88344 * avoid becoming vulnerable to outside attack aiming at
88345 * resetting legit local connections.
88346 */
88347- req->rsk_ops->send_reset(sk, skb);
88348+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88349+ if (!grsec_enable_blackhole)
88350+#endif
88351+ req->rsk_ops->send_reset(sk, skb);
88352 } else if (fastopen) { /* received a valid RST pkt */
88353 reqsk_fastopen_remove(sk, req, true);
88354 tcp_reset(sk);
88355diff --git a/net/ipv4/tcp_probe.c b/net/ipv4/tcp_probe.c
88356index d4943f6..e7a74a5 100644
88357--- a/net/ipv4/tcp_probe.c
88358+++ b/net/ipv4/tcp_probe.c
88359@@ -204,7 +204,7 @@ static ssize_t tcpprobe_read(struct file *file, char __user *buf,
88360 if (cnt + width >= len)
88361 break;
88362
88363- if (copy_to_user(buf + cnt, tbuf, width))
88364+ if (width > sizeof tbuf || copy_to_user(buf + cnt, tbuf, width))
88365 return -EFAULT;
88366 cnt += width;
88367 }
88368diff --git a/net/ipv4/tcp_timer.c b/net/ipv4/tcp_timer.c
88369index 4b85e6f..22f9ac9 100644
88370--- a/net/ipv4/tcp_timer.c
88371+++ b/net/ipv4/tcp_timer.c
88372@@ -22,6 +22,10 @@
88373 #include <linux/gfp.h>
88374 #include <net/tcp.h>
88375
88376+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88377+extern int grsec_lastack_retries;
88378+#endif
88379+
88380 int sysctl_tcp_syn_retries __read_mostly = TCP_SYN_RETRIES;
88381 int sysctl_tcp_synack_retries __read_mostly = TCP_SYNACK_RETRIES;
88382 int sysctl_tcp_keepalive_time __read_mostly = TCP_KEEPALIVE_TIME;
88383@@ -185,6 +189,13 @@ static int tcp_write_timeout(struct sock *sk)
88384 }
88385 }
88386
88387+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88388+ if ((sk->sk_state == TCP_LAST_ACK) &&
88389+ (grsec_lastack_retries > 0) &&
88390+ (grsec_lastack_retries < retry_until))
88391+ retry_until = grsec_lastack_retries;
88392+#endif
88393+
88394 if (retransmits_timed_out(sk, retry_until,
88395 syn_set ? 0 : icsk->icsk_user_timeout, syn_set)) {
88396 /* Has it gone just too far? */
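
The added block bounds how long a socket may sit in LAST_ACK retransmitting when the peer has gone silent, which a blackholing peer, as configured above, will: if grsec_lastack_retries is set and smaller than the computed limit, it wins. As a pure function:

    static int effective_retries(int retry_until, int lastack_retries,
                                 int in_last_ack)
    {
        if (in_last_ack && lastack_retries > 0 && lastack_retries < retry_until)
            return lastack_retries;
        return retry_until;
    }

    int main(void)
    {
        return effective_retries(15, 4, 1) == 4 ? 0 : 1;
    }
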
88397diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
88398index 0bf5d399..5a2dd92 100644
88399--- a/net/ipv4/udp.c
88400+++ b/net/ipv4/udp.c
88401@@ -87,6 +87,7 @@
88402 #include <linux/types.h>
88403 #include <linux/fcntl.h>
88404 #include <linux/module.h>
88405+#include <linux/security.h>
88406 #include <linux/socket.h>
88407 #include <linux/sockios.h>
88408 #include <linux/igmp.h>
88409@@ -111,6 +112,10 @@
88410 #include <trace/events/skb.h>
88411 #include "udp_impl.h"
88412
88413+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88414+extern int grsec_enable_blackhole;
88415+#endif
88416+
88417 struct udp_table udp_table __read_mostly;
88418 EXPORT_SYMBOL(udp_table);
88419
88420@@ -594,6 +599,9 @@ found:
88421 return s;
88422 }
88423
88424+extern int gr_search_udp_recvmsg(struct sock *sk, const struct sk_buff *skb);
88425+extern int gr_search_udp_sendmsg(struct sock *sk, struct sockaddr_in *addr);
88426+
88427 /*
88428 * This routine is called by the ICMP module when it gets some
88429 * sort of error condition. If err < 0 then the socket should
88430@@ -799,7 +807,7 @@ send:
88431 /*
88432 * Push out all pending data as one UDP datagram. Socket is locked.
88433 */
88434-static int udp_push_pending_frames(struct sock *sk)
88435+int udp_push_pending_frames(struct sock *sk)
88436 {
88437 struct udp_sock *up = udp_sk(sk);
88438 struct inet_sock *inet = inet_sk(sk);
88439@@ -818,6 +826,7 @@ out:
88440 up->pending = 0;
88441 return err;
88442 }
88443+EXPORT_SYMBOL(udp_push_pending_frames);
88444
88445 int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
88446 size_t len)
88447@@ -889,9 +898,18 @@ int udp_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *msg,
88448 dport = usin->sin_port;
88449 if (dport == 0)
88450 return -EINVAL;
88451+
88452+ err = gr_search_udp_sendmsg(sk, usin);
88453+ if (err)
88454+ return err;
88455 } else {
88456 if (sk->sk_state != TCP_ESTABLISHED)
88457 return -EDESTADDRREQ;
88458+
88459+ err = gr_search_udp_sendmsg(sk, NULL);
88460+ if (err)
88461+ return err;
88462+
88463 daddr = inet->inet_daddr;
88464 dport = inet->inet_dport;
88465 /* Open fast path for connected socket.
88466@@ -1135,7 +1153,7 @@ static unsigned int first_packet_length(struct sock *sk)
88467 IS_UDPLITE(sk));
88468 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
88469 IS_UDPLITE(sk));
88470- atomic_inc(&sk->sk_drops);
88471+ atomic_inc_unchecked(&sk->sk_drops);
88472 __skb_unlink(skb, rcvq);
88473 __skb_queue_tail(&list_kill, skb);
88474 }
88475@@ -1221,6 +1239,10 @@ try_again:
88476 if (!skb)
88477 goto out;
88478
88479+ err = gr_search_udp_recvmsg(sk, skb);
88480+ if (err)
88481+ goto out_free;
88482+
88483 ulen = skb->len - sizeof(struct udphdr);
88484 copied = len;
88485 if (copied > ulen)
88486@@ -1254,7 +1276,7 @@ try_again:
88487 if (unlikely(err)) {
88488 trace_kfree_skb(skb, udp_recvmsg);
88489 if (!peeked) {
88490- atomic_inc(&sk->sk_drops);
88491+ atomic_inc_unchecked(&sk->sk_drops);
88492 UDP_INC_STATS_USER(sock_net(sk),
88493 UDP_MIB_INERRORS, is_udplite);
88494 }
88495@@ -1541,7 +1563,7 @@ csum_error:
88496 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
88497 drop:
88498 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
88499- atomic_inc(&sk->sk_drops);
88500+ atomic_inc_unchecked(&sk->sk_drops);
88501 kfree_skb(skb);
88502 return -1;
88503 }
88504@@ -1560,7 +1582,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
88505 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
88506
88507 if (!skb1) {
88508- atomic_inc(&sk->sk_drops);
88509+ atomic_inc_unchecked(&sk->sk_drops);
88510 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
88511 IS_UDPLITE(sk));
88512 UDP_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
88513@@ -1729,6 +1751,9 @@ int __udp4_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
88514 goto csum_error;
88515
88516 UDP_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
88517+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
88518+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
88519+#endif
88520 icmp_send(skb, ICMP_DEST_UNREACH, ICMP_PORT_UNREACH, 0);
88521
88522 /*
88523@@ -2159,7 +2184,7 @@ static void udp4_format_sock(struct sock *sp, struct seq_file *f,
88524 from_kuid_munged(seq_user_ns(f), sock_i_uid(sp)),
88525 0, sock_i_ino(sp),
88526 atomic_read(&sp->sk_refcnt), sp,
88527- atomic_read(&sp->sk_drops), len);
88528+ atomic_read_unchecked(&sp->sk_drops), len);
88529 }
88530
88531 int udp4_seq_show(struct seq_file *seq, void *v)
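
The UDP changes wire grsecurity's RBAC socket policy into both directions: gr_search_udp_sendmsg runs before any route lookup, for both the explicit-address and connected cases, and gr_search_udp_recvmsg can reject a datagram right after dequeue; the usual blackhole and sk_drops conversions apply on top. udp_push_pending_frames additionally loses its static and gains an EXPORT_SYMBOL so code outside udp.c can flush a corked socket. The hook contract, modeled (the gr_* bodies live elsewhere in the patch; this stub only shows the calling convention):

    /* 0 allows; a negative errno refuses before any work is done. */
    static int gr_search_udp_sendmsg(const void *sk, const void *addr)
    {
        (void)sk; (void)addr;
        return 0;
    }

    static int udp_sendmsg_model(const void *sk, const void *usin)
    {
        int err = gr_search_udp_sendmsg(sk, usin);
        if (err)
            return err;        /* policy refusal, nothing touched yet */
        /* ... route lookup, ip_append_data, push ... */
        return 0;
    }

    int main(void)
    {
        return udp_sendmsg_model((void *)0, (void *)0);
    }
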
88532diff --git a/net/ipv4/xfrm4_policy.c b/net/ipv4/xfrm4_policy.c
88533index 9a459be..086b866 100644
88534--- a/net/ipv4/xfrm4_policy.c
88535+++ b/net/ipv4/xfrm4_policy.c
88536@@ -264,19 +264,18 @@ static struct ctl_table xfrm4_policy_table[] = {
88537
88538 static int __net_init xfrm4_net_init(struct net *net)
88539 {
88540- struct ctl_table *table;
88541+ ctl_table_no_const *table = NULL;
88542 struct ctl_table_header *hdr;
88543
88544- table = xfrm4_policy_table;
88545 if (!net_eq(net, &init_net)) {
88546- table = kmemdup(table, sizeof(xfrm4_policy_table), GFP_KERNEL);
88547+ table = kmemdup(xfrm4_policy_table, sizeof(xfrm4_policy_table), GFP_KERNEL);
88548 if (!table)
88549 goto err_alloc;
88550
88551 table[0].data = &net->xfrm.xfrm4_dst_ops.gc_thresh;
88552- }
88553-
88554- hdr = register_net_sysctl(net, "net/ipv4", table);
88555+ hdr = register_net_sysctl(net, "net/ipv4", table);
88556+ } else
88557+ hdr = register_net_sysctl(net, "net/ipv4", xfrm4_policy_table);
88558 if (!hdr)
88559 goto err_reg;
88560
88561@@ -284,8 +283,7 @@ static int __net_init xfrm4_net_init(struct net *net)
88562 return 0;
88563
88564 err_reg:
88565- if (!net_eq(net, &init_net))
88566- kfree(table);
88567+ kfree(table);
88568 err_alloc:
88569 return -ENOMEM;
88570 }
88571diff --git a/net/ipv6/addrconf.c b/net/ipv6/addrconf.c
88572index 4ab4c38..1533b2d 100644
88573--- a/net/ipv6/addrconf.c
88574+++ b/net/ipv6/addrconf.c
88575@@ -621,7 +621,7 @@ static int inet6_netconf_dump_devconf(struct sk_buff *skb,
88576 idx = 0;
88577 head = &net->dev_index_head[h];
88578 rcu_read_lock();
88579- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^
88580+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^
88581 net->dev_base_seq;
88582 hlist_for_each_entry_rcu(dev, head, index_hlist) {
88583 if (idx < s_idx)
88584@@ -2372,7 +2372,7 @@ int addrconf_set_dstaddr(struct net *net, void __user *arg)
88585 p.iph.ihl = 5;
88586 p.iph.protocol = IPPROTO_IPV6;
88587 p.iph.ttl = 64;
88588- ifr.ifr_ifru.ifru_data = (__force void __user *)&p;
88589+ ifr.ifr_ifru.ifru_data = (void __force_user *)&p;
88590
88591 if (ops->ndo_do_ioctl) {
88592 mm_segment_t oldfs = get_fs();
88593@@ -3994,7 +3994,7 @@ static int inet6_dump_addr(struct sk_buff *skb, struct netlink_callback *cb,
88594 s_ip_idx = ip_idx = cb->args[2];
88595
88596 rcu_read_lock();
88597- cb->seq = atomic_read(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
88598+ cb->seq = atomic_read_unchecked(&net->ipv6.dev_addr_genid) ^ net->dev_base_seq;
88599 for (h = s_h; h < NETDEV_HASHENTRIES; h++, s_idx = 0) {
88600 idx = 0;
88601 head = &net->dev_index_head[h];
88602@@ -4579,7 +4579,7 @@ static void __ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
88603 dst_free(&ifp->rt->dst);
88604 break;
88605 }
88606- atomic_inc(&net->ipv6.dev_addr_genid);
88607+ atomic_inc_unchecked(&net->ipv6.dev_addr_genid);
88608 }
88609
88610 static void ipv6_ifa_notify(int event, struct inet6_ifaddr *ifp)
88611@@ -4599,7 +4599,7 @@ int addrconf_sysctl_forward(ctl_table *ctl, int write,
88612 int *valp = ctl->data;
88613 int val = *valp;
88614 loff_t pos = *ppos;
88615- ctl_table lctl;
88616+ ctl_table_no_const lctl;
88617 int ret;
88618
88619 /*
88620@@ -4681,7 +4681,7 @@ int addrconf_sysctl_disable(ctl_table *ctl, int write,
88621 int *valp = ctl->data;
88622 int val = *valp;
88623 loff_t pos = *ppos;
88624- ctl_table lctl;
88625+ ctl_table_no_const lctl;
88626 int ret;
88627
88628 /*
88629diff --git a/net/ipv6/icmp.c b/net/ipv6/icmp.c
88630index b4ff0a4..db9b764 100644
88631--- a/net/ipv6/icmp.c
88632+++ b/net/ipv6/icmp.c
88633@@ -980,7 +980,7 @@ ctl_table ipv6_icmp_table_template[] = {
88634
88635 struct ctl_table * __net_init ipv6_icmp_sysctl_init(struct net *net)
88636 {
88637- struct ctl_table *table;
88638+ ctl_table_no_const *table;
88639
88640 table = kmemdup(ipv6_icmp_table_template,
88641 sizeof(ipv6_icmp_table_template),
88642diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
88643index 192dd1a..5fc9c7a 100644
88644--- a/net/ipv6/ip6_fib.c
88645+++ b/net/ipv6/ip6_fib.c
88646@@ -632,6 +632,12 @@ insert_above:
88647 return ln;
88648 }
88649
88650+static inline bool rt6_qualify_for_ecmp(struct rt6_info *rt)
88651+{
88652+ return (rt->rt6i_flags & (RTF_GATEWAY|RTF_ADDRCONF|RTF_DYNAMIC)) ==
88653+ RTF_GATEWAY;
88654+}
88655+
88656 /*
88657 * Insert routing information in a node.
88658 */
88659@@ -646,6 +652,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
88660 int add = (!info->nlh ||
88661 (info->nlh->nlmsg_flags & NLM_F_CREATE));
88662 int found = 0;
88663+ bool rt_can_ecmp = rt6_qualify_for_ecmp(rt);
88664
88665 ins = &fn->leaf;
88666
88667@@ -691,9 +698,8 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
88668 * To avoid long list, we only had siblings if the
88669 * route have a gateway.
88670 */
88671- if (rt->rt6i_flags & RTF_GATEWAY &&
88672- !(rt->rt6i_flags & RTF_EXPIRES) &&
88673- !(iter->rt6i_flags & RTF_EXPIRES))
88674+ if (rt_can_ecmp &&
88675+ rt6_qualify_for_ecmp(iter))
88676 rt->rt6i_nsiblings++;
88677 }
88678
88679@@ -715,7 +721,8 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
88680 /* Find the first route that have the same metric */
88681 sibling = fn->leaf;
88682 while (sibling) {
88683- if (sibling->rt6i_metric == rt->rt6i_metric) {
88684+ if (sibling->rt6i_metric == rt->rt6i_metric &&
88685+ rt6_qualify_for_ecmp(sibling)) {
88686 list_add_tail(&rt->rt6i_siblings,
88687 &sibling->rt6i_siblings);
88688 break;
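
Unlike most of this patch, the ip6_fib.c hunk is a functional routing fix rather than a hardening: only routes whose flags are exactly RTF_GATEWAY among {GATEWAY, ADDRCONF, DYNAMIC} now count as equal-cost siblings, so RA-learned or redirect-installed routes no longer pair up with static ones (this mirrors an upstream fix of the same shape). The predicate, with the flag values from the uapi headers:

    #include <assert.h>

    #define RTF_GATEWAY  0x0002
    #define RTF_DYNAMIC  0x0010
    #define RTF_ADDRCONF 0x040000

    static int qualify_for_ecmp(unsigned int flags)
    {
        return (flags & (RTF_GATEWAY | RTF_ADDRCONF | RTF_DYNAMIC))
               == RTF_GATEWAY;
    }

    int main(void)
    {
        assert( qualify_for_ecmp(RTF_GATEWAY));                 /* static    */
        assert(!qualify_for_ecmp(RTF_GATEWAY | RTF_DYNAMIC));   /* redirect  */
        assert(!qualify_for_ecmp(RTF_GATEWAY | RTF_ADDRCONF));  /* from RA   */
        return 0;
    }
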
88689diff --git a/net/ipv6/ip6_gre.c b/net/ipv6/ip6_gre.c
88690index ecd6073..58162ae 100644
88691--- a/net/ipv6/ip6_gre.c
88692+++ b/net/ipv6/ip6_gre.c
88693@@ -74,7 +74,7 @@ struct ip6gre_net {
88694 struct net_device *fb_tunnel_dev;
88695 };
88696
88697-static struct rtnl_link_ops ip6gre_link_ops __read_mostly;
88698+static struct rtnl_link_ops ip6gre_link_ops;
88699 static int ip6gre_tunnel_init(struct net_device *dev);
88700 static void ip6gre_tunnel_setup(struct net_device *dev);
88701 static void ip6gre_tunnel_link(struct ip6gre_net *ign, struct ip6_tnl *t);
88702@@ -1283,7 +1283,7 @@ static void ip6gre_fb_tunnel_init(struct net_device *dev)
88703 }
88704
88705
88706-static struct inet6_protocol ip6gre_protocol __read_mostly = {
88707+static struct inet6_protocol ip6gre_protocol = {
88708 .handler = ip6gre_rcv,
88709 .err_handler = ip6gre_err,
88710 .flags = INET6_PROTO_NOPOLICY|INET6_PROTO_FINAL,
88711@@ -1617,7 +1617,7 @@ static const struct nla_policy ip6gre_policy[IFLA_GRE_MAX + 1] = {
88712 [IFLA_GRE_FLAGS] = { .type = NLA_U32 },
88713 };
88714
88715-static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
88716+static struct rtnl_link_ops ip6gre_link_ops = {
88717 .kind = "ip6gre",
88718 .maxtype = IFLA_GRE_MAX,
88719 .policy = ip6gre_policy,
88720@@ -1630,7 +1630,7 @@ static struct rtnl_link_ops ip6gre_link_ops __read_mostly = {
88721 .fill_info = ip6gre_fill_info,
88722 };
88723
88724-static struct rtnl_link_ops ip6gre_tap_ops __read_mostly = {
88725+static struct rtnl_link_ops ip6gre_tap_ops = {
88726 .kind = "ip6gretap",
88727 .maxtype = IFLA_GRE_MAX,
88728 .policy = ip6gre_policy,
88729diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
88730index d5d20cd..6e3ddf8 100644
88731--- a/net/ipv6/ip6_output.c
88732+++ b/net/ipv6/ip6_output.c
88733@@ -1098,11 +1098,12 @@ static inline struct ipv6_rt_hdr *ip6_rthdr_dup(struct ipv6_rt_hdr *src,
88734 return src ? kmemdup(src, (src->hdrlen + 1) * 8, gfp) : NULL;
88735 }
88736
88737-static void ip6_append_data_mtu(int *mtu,
88738+static void ip6_append_data_mtu(unsigned int *mtu,
88739 int *maxfraglen,
88740 unsigned int fragheaderlen,
88741 struct sk_buff *skb,
88742- struct rt6_info *rt)
88743+ struct rt6_info *rt,
88744+ bool pmtuprobe)
88745 {
88746 if (!(rt->dst.flags & DST_XFRM_TUNNEL)) {
88747 if (skb == NULL) {
88748@@ -1114,7 +1115,9 @@ static void ip6_append_data_mtu(int *mtu,
88749 * this fragment is not first, the headers
88750 * space is regarded as data space.
88751 */
88752- *mtu = dst_mtu(rt->dst.path);
88753+ *mtu = min(*mtu, pmtuprobe ?
88754+ rt->dst.dev->mtu :
88755+ dst_mtu(rt->dst.path));
88756 }
88757 *maxfraglen = ((*mtu - fragheaderlen) & ~7)
88758 + fragheaderlen - sizeof(struct frag_hdr);
88759@@ -1131,11 +1134,10 @@ int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
88760 struct ipv6_pinfo *np = inet6_sk(sk);
88761 struct inet_cork *cork;
88762 struct sk_buff *skb, *skb_prev = NULL;
88763- unsigned int maxfraglen, fragheaderlen;
88764+ unsigned int maxfraglen, fragheaderlen, mtu;
88765 int exthdrlen;
88766 int dst_exthdrlen;
88767 int hh_len;
88768- int mtu;
88769 int copy;
88770 int err;
88771 int offset = 0;
88772@@ -1292,7 +1294,9 @@ alloc_new_skb:
88773 /* update mtu and maxfraglen if necessary */
88774 if (skb == NULL || skb_prev == NULL)
88775 ip6_append_data_mtu(&mtu, &maxfraglen,
88776- fragheaderlen, skb, rt);
88777+ fragheaderlen, skb, rt,
88778+ np->pmtudisc ==
88779+ IPV6_PMTUDISC_PROBE);
88780
88781 skb_prev = skb;
88782
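The ip6_output.c hunk likewise appears to be a functional backport rather than a hardening: when ip6_append_data recomputes the MTU mid-stream, the result is now clamped to what the cork already uses, and a socket probing PMTU (IPV6_PMTUDISC_PROBE) keeps the device MTU instead of falling back to the cached path MTU; mtu itself becomes unsigned to match. The selection logic on its own:

    /* Candidate MTU never grows past the one already in use for this
     * datagram, and a probing socket ignores the cached path MTU. */
    static unsigned int recompute_mtu(unsigned int cur_mtu,
                                      unsigned int dev_mtu,
                                      unsigned int path_mtu,
                                      int pmtu_probe)
    {
        unsigned int cand = pmtu_probe ? dev_mtu : path_mtu;
        return cand < cur_mtu ? cand : cur_mtu;   /* the min() in the hunk */
    }

    int main(void)
    {
        return recompute_mtu(1500, 9000, 1280, 0) == 1280 ? 0 : 1;
    }
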
88783diff --git a/net/ipv6/ip6_tunnel.c b/net/ipv6/ip6_tunnel.c
88784index 1e55866..b398dab 100644
88785--- a/net/ipv6/ip6_tunnel.c
88786+++ b/net/ipv6/ip6_tunnel.c
88787@@ -88,7 +88,7 @@ static u32 HASH(const struct in6_addr *addr1, const struct in6_addr *addr2)
88788
88789 static int ip6_tnl_dev_init(struct net_device *dev);
88790 static void ip6_tnl_dev_setup(struct net_device *dev);
88791-static struct rtnl_link_ops ip6_link_ops __read_mostly;
88792+static struct rtnl_link_ops ip6_link_ops;
88793
88794 static int ip6_tnl_net_id __read_mostly;
88795 struct ip6_tnl_net {
88796@@ -1672,7 +1672,7 @@ static const struct nla_policy ip6_tnl_policy[IFLA_IPTUN_MAX + 1] = {
88797 [IFLA_IPTUN_PROTO] = { .type = NLA_U8 },
88798 };
88799
88800-static struct rtnl_link_ops ip6_link_ops __read_mostly = {
88801+static struct rtnl_link_ops ip6_link_ops = {
88802 .kind = "ip6tnl",
88803 .maxtype = IFLA_IPTUN_MAX,
88804 .policy = ip6_tnl_policy,
88805diff --git a/net/ipv6/ipv6_sockglue.c b/net/ipv6/ipv6_sockglue.c
88806index d1e2e8e..51c19ae 100644
88807--- a/net/ipv6/ipv6_sockglue.c
88808+++ b/net/ipv6/ipv6_sockglue.c
88809@@ -991,7 +991,7 @@ static int do_ipv6_getsockopt(struct sock *sk, int level, int optname,
88810 if (sk->sk_type != SOCK_STREAM)
88811 return -ENOPROTOOPT;
88812
88813- msg.msg_control = optval;
88814+ msg.msg_control = (void __force_kernel *)optval;
88815 msg.msg_controllen = len;
88816 msg.msg_flags = flags;
88817
88818diff --git a/net/ipv6/netfilter/ip6_tables.c b/net/ipv6/netfilter/ip6_tables.c
88819index 44400c2..8e11f52 100644
88820--- a/net/ipv6/netfilter/ip6_tables.c
88821+++ b/net/ipv6/netfilter/ip6_tables.c
88822@@ -1078,14 +1078,14 @@ static int compat_table_info(const struct xt_table_info *info,
88823 #endif
88824
88825 static int get_info(struct net *net, void __user *user,
88826- const int *len, int compat)
88827+ int len, int compat)
88828 {
88829 char name[XT_TABLE_MAXNAMELEN];
88830 struct xt_table *t;
88831 int ret;
88832
88833- if (*len != sizeof(struct ip6t_getinfo)) {
88834- duprintf("length %u != %zu\n", *len,
88835+ if (len != sizeof(struct ip6t_getinfo)) {
88836+ duprintf("length %u != %zu\n", len,
88837 sizeof(struct ip6t_getinfo));
88838 return -EINVAL;
88839 }
88840@@ -1122,7 +1122,7 @@ static int get_info(struct net *net, void __user *user,
88841 info.size = private->size;
88842 strcpy(info.name, name);
88843
88844- if (copy_to_user(user, &info, *len) != 0)
88845+ if (copy_to_user(user, &info, len) != 0)
88846 ret = -EFAULT;
88847 else
88848 ret = 0;
88849@@ -1976,7 +1976,7 @@ compat_do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
88850
88851 switch (cmd) {
88852 case IP6T_SO_GET_INFO:
88853- ret = get_info(sock_net(sk), user, len, 1);
88854+ ret = get_info(sock_net(sk), user, *len, 1);
88855 break;
88856 case IP6T_SO_GET_ENTRIES:
88857 ret = compat_get_entries(sock_net(sk), user, len);
88858@@ -2023,7 +2023,7 @@ do_ip6t_get_ctl(struct sock *sk, int cmd, void __user *user, int *len)
88859
88860 switch (cmd) {
88861 case IP6T_SO_GET_INFO:
88862- ret = get_info(sock_net(sk), user, len, 0);
88863+ ret = get_info(sock_net(sk), user, *len, 0);
88864 break;
88865
88866 case IP6T_SO_GET_ENTRIES:
88867diff --git a/net/ipv6/netfilter/nf_conntrack_reasm.c b/net/ipv6/netfilter/nf_conntrack_reasm.c
88868index dffdc1a..ccc6678 100644
88869--- a/net/ipv6/netfilter/nf_conntrack_reasm.c
88870+++ b/net/ipv6/netfilter/nf_conntrack_reasm.c
88871@@ -90,12 +90,11 @@ static struct ctl_table nf_ct_frag6_sysctl_table[] = {
88872
88873 static int nf_ct_frag6_sysctl_register(struct net *net)
88874 {
88875- struct ctl_table *table;
88876+ ctl_table_no_const *table = NULL;
88877 struct ctl_table_header *hdr;
88878
88879- table = nf_ct_frag6_sysctl_table;
88880 if (!net_eq(net, &init_net)) {
88881- table = kmemdup(table, sizeof(nf_ct_frag6_sysctl_table),
88882+ table = kmemdup(nf_ct_frag6_sysctl_table, sizeof(nf_ct_frag6_sysctl_table),
88883 GFP_KERNEL);
88884 if (table == NULL)
88885 goto err_alloc;
88886@@ -103,9 +102,9 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
88887 table[0].data = &net->nf_frag.frags.timeout;
88888 table[1].data = &net->nf_frag.frags.low_thresh;
88889 table[2].data = &net->nf_frag.frags.high_thresh;
88890- }
88891-
88892- hdr = register_net_sysctl(net, "net/netfilter", table);
88893+ hdr = register_net_sysctl(net, "net/netfilter", table);
88894+ } else
88895+ hdr = register_net_sysctl(net, "net/netfilter", nf_ct_frag6_sysctl_table);
88896 if (hdr == NULL)
88897 goto err_reg;
88898
88899@@ -113,8 +112,7 @@ static int nf_ct_frag6_sysctl_register(struct net *net)
88900 return 0;
88901
88902 err_reg:
88903- if (!net_eq(net, &init_net))
88904- kfree(table);
88905+ kfree(table);
88906 err_alloc:
88907 return -ENOMEM;
88908 }
88909diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
88910index eedff8c..6e13a47 100644
88911--- a/net/ipv6/raw.c
88912+++ b/net/ipv6/raw.c
88913@@ -378,7 +378,7 @@ static inline int rawv6_rcv_skb(struct sock *sk, struct sk_buff *skb)
88914 {
88915 if ((raw6_sk(sk)->checksum || rcu_access_pointer(sk->sk_filter)) &&
88916 skb_checksum_complete(skb)) {
88917- atomic_inc(&sk->sk_drops);
88918+ atomic_inc_unchecked(&sk->sk_drops);
88919 kfree_skb(skb);
88920 return NET_RX_DROP;
88921 }
88922@@ -406,7 +406,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
88923 struct raw6_sock *rp = raw6_sk(sk);
88924
88925 if (!xfrm6_policy_check(sk, XFRM_POLICY_IN, skb)) {
88926- atomic_inc(&sk->sk_drops);
88927+ atomic_inc_unchecked(&sk->sk_drops);
88928 kfree_skb(skb);
88929 return NET_RX_DROP;
88930 }
88931@@ -430,7 +430,7 @@ int rawv6_rcv(struct sock *sk, struct sk_buff *skb)
88932
88933 if (inet->hdrincl) {
88934 if (skb_checksum_complete(skb)) {
88935- atomic_inc(&sk->sk_drops);
88936+ atomic_inc_unchecked(&sk->sk_drops);
88937 kfree_skb(skb);
88938 return NET_RX_DROP;
88939 }
88940@@ -602,7 +602,7 @@ out:
88941 return err;
88942 }
88943
88944-static int rawv6_send_hdrinc(struct sock *sk, void *from, int length,
88945+static int rawv6_send_hdrinc(struct sock *sk, void *from, unsigned int length,
88946 struct flowi6 *fl6, struct dst_entry **dstp,
88947 unsigned int flags)
88948 {
88949@@ -914,12 +914,15 @@ do_confirm:
88950 static int rawv6_seticmpfilter(struct sock *sk, int level, int optname,
88951 char __user *optval, int optlen)
88952 {
88953+ struct icmp6_filter filter;
88954+
88955 switch (optname) {
88956 case ICMPV6_FILTER:
88957 if (optlen > sizeof(struct icmp6_filter))
88958 optlen = sizeof(struct icmp6_filter);
88959- if (copy_from_user(&raw6_sk(sk)->filter, optval, optlen))
88960+ if (copy_from_user(&filter, optval, optlen))
88961 return -EFAULT;
88962+ raw6_sk(sk)->filter = filter;
88963 return 0;
88964 default:
88965 return -ENOPROTOOPT;
88966@@ -932,6 +935,7 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
88967 char __user *optval, int __user *optlen)
88968 {
88969 int len;
88970+ struct icmp6_filter filter;
88971
88972 switch (optname) {
88973 case ICMPV6_FILTER:
88974@@ -943,7 +947,8 @@ static int rawv6_geticmpfilter(struct sock *sk, int level, int optname,
88975 len = sizeof(struct icmp6_filter);
88976 if (put_user(len, optlen))
88977 return -EFAULT;
88978- if (copy_to_user(optval, &raw6_sk(sk)->filter, len))
88979+ filter = raw6_sk(sk)->filter;
88980+ if (len > sizeof filter || copy_to_user(optval, &filter, len))
88981 return -EFAULT;
88982 return 0;
88983 default:
88984@@ -1251,7 +1256,7 @@ static void raw6_sock_seq_show(struct seq_file *seq, struct sock *sp, int i)
88985 from_kuid_munged(seq_user_ns(seq), sock_i_uid(sp)),
88986 0,
88987 sock_i_ino(sp),
88988- atomic_read(&sp->sk_refcnt), sp, atomic_read(&sp->sk_drops));
88989+ atomic_read(&sp->sk_refcnt), sp, atomic_read_unchecked(&sp->sk_drops));
88990 }
88991
88992 static int raw6_seq_show(struct seq_file *seq, void *v)
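
The icmp6_filter hunks above stop copying user data directly into a field of struct raw6_sock and instead bounce it through a fixed-size stack struct, assigning the whole struct afterwards; the get side also gains a len > sizeof filter guard before copy_to_user(). This gives the usercopy instrumentation an on-stack destination with statically known bounds. A minimal userspace model of the set path, with memcpy() standing in for copy_from_user(); note the sketch pre-loads the current filter so a short copy leaves the tail unchanged, matching the pre-patch behavior, whereas the patch itself copies into an uninitialized stack struct:

#include <string.h>

struct icmp6_filter { unsigned int data[8]; };
struct raw6_sock { struct icmp6_filter filter; };

/* Stand-in for copy_from_user(); 0 means success. */
static int copy_from_user_model(void *dst, const void *src, size_t n)
{
	memcpy(dst, src, n);
	return 0;
}

static int set_icmp_filter(struct raw6_sock *rp, const void *optval,
			   size_t optlen)
{
	struct icmp6_filter filter = rp->filter;	/* bounce buffer */

	if (optlen > sizeof(struct icmp6_filter))
		optlen = sizeof(struct icmp6_filter);
	if (copy_from_user_model(&filter, optval, optlen))
		return -1;		/* -EFAULT in the kernel */
	rp->filter = filter;	/* whole-struct assignment, bounds known */
	return 0;
}

int main(void)
{
	struct raw6_sock sock = { 0 };
	struct icmp6_filter f = { { 1, 2, 3, 4, 5, 6, 7, 8 } };

	return set_icmp_filter(&sock, &f, sizeof(f));
}
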
88993diff --git a/net/ipv6/reassembly.c b/net/ipv6/reassembly.c
88994index 790d9f4..68ae078 100644
88995--- a/net/ipv6/reassembly.c
88996+++ b/net/ipv6/reassembly.c
88997@@ -621,12 +621,11 @@ static struct ctl_table ip6_frags_ctl_table[] = {
88998
88999 static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
89000 {
89001- struct ctl_table *table;
89002+ ctl_table_no_const *table = NULL;
89003 struct ctl_table_header *hdr;
89004
89005- table = ip6_frags_ns_ctl_table;
89006 if (!net_eq(net, &init_net)) {
89007- table = kmemdup(table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
89008+ table = kmemdup(ip6_frags_ns_ctl_table, sizeof(ip6_frags_ns_ctl_table), GFP_KERNEL);
89009 if (table == NULL)
89010 goto err_alloc;
89011
89012@@ -637,9 +636,10 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
89013 /* Don't export sysctls to unprivileged users */
89014 if (net->user_ns != &init_user_ns)
89015 table[0].procname = NULL;
89016- }
89017+ hdr = register_net_sysctl(net, "net/ipv6", table);
89018+ } else
89019+ hdr = register_net_sysctl(net, "net/ipv6", ip6_frags_ns_ctl_table);
89020
89021- hdr = register_net_sysctl(net, "net/ipv6", table);
89022 if (hdr == NULL)
89023 goto err_reg;
89024
89025@@ -647,8 +647,7 @@ static int __net_init ip6_frags_ns_sysctl_register(struct net *net)
89026 return 0;
89027
89028 err_reg:
89029- if (!net_eq(net, &init_net))
89030- kfree(table);
89031+ kfree(table);
89032 err_alloc:
89033 return -ENOMEM;
89034 }
89035diff --git a/net/ipv6/route.c b/net/ipv6/route.c
89036index ad0aa6b..beaef03 100644
89037--- a/net/ipv6/route.c
89038+++ b/net/ipv6/route.c
89039@@ -2881,7 +2881,7 @@ ctl_table ipv6_route_table_template[] = {
89040
89041 struct ctl_table * __net_init ipv6_route_sysctl_init(struct net *net)
89042 {
89043- struct ctl_table *table;
89044+ ctl_table_no_const *table;
89045
89046 table = kmemdup(ipv6_route_table_template,
89047 sizeof(ipv6_route_table_template),
89048diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
89049index 3353634..3d5084a 100644
89050--- a/net/ipv6/sit.c
89051+++ b/net/ipv6/sit.c
89052@@ -74,7 +74,7 @@ static void ipip6_tunnel_setup(struct net_device *dev);
89053 static void ipip6_dev_free(struct net_device *dev);
89054 static bool check_6rd(struct ip_tunnel *tunnel, const struct in6_addr *v6dst,
89055 __be32 *v4dst);
89056-static struct rtnl_link_ops sit_link_ops __read_mostly;
89057+static struct rtnl_link_ops sit_link_ops;
89058
89059 static int sit_net_id __read_mostly;
89060 struct sit_net {
89061@@ -1453,7 +1453,7 @@ static const struct nla_policy ipip6_policy[IFLA_IPTUN_MAX + 1] = {
89062 #endif
89063 };
89064
89065-static struct rtnl_link_ops sit_link_ops __read_mostly = {
89066+static struct rtnl_link_ops sit_link_ops = {
89067 .kind = "sit",
89068 .maxtype = IFLA_IPTUN_MAX,
89069 .policy = ipip6_policy,
89070diff --git a/net/ipv6/sysctl_net_ipv6.c b/net/ipv6/sysctl_net_ipv6.c
89071index e85c48b..b8268d3 100644
89072--- a/net/ipv6/sysctl_net_ipv6.c
89073+++ b/net/ipv6/sysctl_net_ipv6.c
89074@@ -40,7 +40,7 @@ static ctl_table ipv6_rotable[] = {
89075
89076 static int __net_init ipv6_sysctl_net_init(struct net *net)
89077 {
89078- struct ctl_table *ipv6_table;
89079+ ctl_table_no_const *ipv6_table;
89080 struct ctl_table *ipv6_route_table;
89081 struct ctl_table *ipv6_icmp_table;
89082 int err;
89083diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
89084index 0a17ed9..2526cc3 100644
89085--- a/net/ipv6/tcp_ipv6.c
89086+++ b/net/ipv6/tcp_ipv6.c
89087@@ -103,6 +103,10 @@ static void inet6_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb)
89088 inet6_sk(sk)->rx_dst_cookie = rt->rt6i_node->fn_sernum;
89089 }
89090
89091+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
89092+extern int grsec_enable_blackhole;
89093+#endif
89094+
89095 static void tcp_v6_hash(struct sock *sk)
89096 {
89097 if (sk->sk_state != TCP_CLOSE) {
89098@@ -1398,6 +1402,9 @@ static int tcp_v6_do_rcv(struct sock *sk, struct sk_buff *skb)
89099 return 0;
89100
89101 reset:
89102+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
89103+ if (!grsec_enable_blackhole)
89104+#endif
89105 tcp_v6_send_reset(sk, skb);
89106 discard:
89107 if (opt_skb)
89108@@ -1480,12 +1487,20 @@ static int tcp_v6_rcv(struct sk_buff *skb)
89109 TCP_SKB_CB(skb)->sacked = 0;
89110
89111 sk = __inet6_lookup_skb(&tcp_hashinfo, skb, th->source, th->dest);
89112- if (!sk)
89113+ if (!sk) {
89114+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
89115+ ret = 1;
89116+#endif
89117 goto no_tcp_socket;
89118+ }
89119
89120 process:
89121- if (sk->sk_state == TCP_TIME_WAIT)
89122+ if (sk->sk_state == TCP_TIME_WAIT) {
89123+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
89124+ ret = 2;
89125+#endif
89126 goto do_time_wait;
89127+ }
89128
89129 if (hdr->hop_limit < inet6_sk(sk)->min_hopcount) {
89130 NET_INC_STATS_BH(net, LINUX_MIB_TCPMINTTLDROP);
89131@@ -1536,6 +1551,10 @@ csum_error:
89132 bad_packet:
89133 TCP_INC_STATS_BH(net, TCP_MIB_INERRS);
89134 } else {
89135+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
89136+ if (!grsec_enable_blackhole || (ret == 1 &&
89137+ (skb->dev->flags & IFF_LOOPBACK)))
89138+#endif
89139 tcp_v6_send_reset(NULL, skb);
89140 }
89141
89142diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
89143index 42923b1..d09c290 100644
89144--- a/net/ipv6/udp.c
89145+++ b/net/ipv6/udp.c
89146@@ -52,6 +52,10 @@
89147 #include <trace/events/skb.h>
89148 #include "udp_impl.h"
89149
89150+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
89151+extern int grsec_enable_blackhole;
89152+#endif
89153+
89154 int ipv6_rcv_saddr_equal(const struct sock *sk, const struct sock *sk2)
89155 {
89156 const struct in6_addr *sk_rcv_saddr6 = &inet6_sk(sk)->rcv_saddr;
89157@@ -419,7 +423,7 @@ try_again:
89158 if (unlikely(err)) {
89159 trace_kfree_skb(skb, udpv6_recvmsg);
89160 if (!peeked) {
89161- atomic_inc(&sk->sk_drops);
89162+ atomic_inc_unchecked(&sk->sk_drops);
89163 if (is_udp4)
89164 UDP_INC_STATS_USER(sock_net(sk),
89165 UDP_MIB_INERRORS,
89166@@ -665,7 +669,7 @@ csum_error:
89167 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_CSUMERRORS, is_udplite);
89168 drop:
89169 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS, is_udplite);
89170- atomic_inc(&sk->sk_drops);
89171+ atomic_inc_unchecked(&sk->sk_drops);
89172 kfree_skb(skb);
89173 return -1;
89174 }
89175@@ -723,7 +727,7 @@ static void flush_stack(struct sock **stack, unsigned int count,
89176 if (likely(skb1 == NULL))
89177 skb1 = (i == final) ? skb : skb_clone(skb, GFP_ATOMIC);
89178 if (!skb1) {
89179- atomic_inc(&sk->sk_drops);
89180+ atomic_inc_unchecked(&sk->sk_drops);
89181 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_RCVBUFERRORS,
89182 IS_UDPLITE(sk));
89183 UDP6_INC_STATS_BH(sock_net(sk), UDP_MIB_INERRORS,
89184@@ -860,6 +864,9 @@ int __udp6_lib_rcv(struct sk_buff *skb, struct udp_table *udptable,
89185 goto csum_error;
89186
89187 UDP6_INC_STATS_BH(net, UDP_MIB_NOPORTS, proto == IPPROTO_UDPLITE);
89188+#ifdef CONFIG_GRKERNSEC_BLACKHOLE
89189+ if (!grsec_enable_blackhole || (skb->dev->flags & IFF_LOOPBACK))
89190+#endif
89191 icmpv6_send(skb, ICMPV6_DEST_UNREACH, ICMPV6_PORT_UNREACH, 0);
89192
89193 kfree_skb(skb);
89194@@ -955,11 +962,16 @@ static int udp_v6_push_pending_frames(struct sock *sk)
89195 struct udphdr *uh;
89196 struct udp_sock *up = udp_sk(sk);
89197 struct inet_sock *inet = inet_sk(sk);
89198- struct flowi6 *fl6 = &inet->cork.fl.u.ip6;
89199+ struct flowi6 *fl6;
89200 int err = 0;
89201 int is_udplite = IS_UDPLITE(sk);
89202 __wsum csum = 0;
89203
89204+ if (up->pending == AF_INET)
89205+ return udp_push_pending_frames(sk);
89206+
89207+ fl6 = &inet->cork.fl.u.ip6;
89208+
89209 /* Grab the skbuff where UDP header space exists. */
89210 if ((skb = skb_peek(&sk->sk_write_queue)) == NULL)
89211 goto out;
89212@@ -1387,7 +1399,7 @@ static void udp6_sock_seq_show(struct seq_file *seq, struct sock *sp, int bucket
89213 0,
89214 sock_i_ino(sp),
89215 atomic_read(&sp->sk_refcnt), sp,
89216- atomic_read(&sp->sk_drops));
89217+ atomic_read_unchecked(&sp->sk_drops));
89218 }
89219
89220 int udp6_seq_show(struct seq_file *seq, void *v)
89221diff --git a/net/ipv6/xfrm6_policy.c b/net/ipv6/xfrm6_policy.c
89222index 23ed03d..465a71d 100644
89223--- a/net/ipv6/xfrm6_policy.c
89224+++ b/net/ipv6/xfrm6_policy.c
89225@@ -324,19 +324,19 @@ static struct ctl_table xfrm6_policy_table[] = {
89226
89227 static int __net_init xfrm6_net_init(struct net *net)
89228 {
89229- struct ctl_table *table;
89230+ ctl_table_no_const *table = NULL;
89231 struct ctl_table_header *hdr;
89232
89233- table = xfrm6_policy_table;
89234 if (!net_eq(net, &init_net)) {
89235- table = kmemdup(table, sizeof(xfrm6_policy_table), GFP_KERNEL);
89236+ table = kmemdup(xfrm6_policy_table, sizeof(xfrm6_policy_table), GFP_KERNEL);
89237 if (!table)
89238 goto err_alloc;
89239
89240 table[0].data = &net->xfrm.xfrm6_dst_ops.gc_thresh;
89241- }
89242+ hdr = register_net_sysctl(net, "net/ipv6", table);
89243+ } else
89244+ hdr = register_net_sysctl(net, "net/ipv6", xfrm6_policy_table);
89245
89246- hdr = register_net_sysctl(net, "net/ipv6", table);
89247 if (!hdr)
89248 goto err_reg;
89249
89250@@ -344,8 +344,7 @@ static int __net_init xfrm6_net_init(struct net *net)
89251 return 0;
89252
89253 err_reg:
89254- if (!net_eq(net, &init_net))
89255- kfree(table);
89256+ kfree(table);
89257 err_alloc:
89258 return -ENOMEM;
89259 }
89260diff --git a/net/irda/ircomm/ircomm_tty.c b/net/irda/ircomm/ircomm_tty.c
89261index 41ac7938..75e3bb1 100644
89262--- a/net/irda/ircomm/ircomm_tty.c
89263+++ b/net/irda/ircomm/ircomm_tty.c
89264@@ -319,11 +319,11 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
89265 add_wait_queue(&port->open_wait, &wait);
89266
89267 IRDA_DEBUG(2, "%s(%d):block_til_ready before block on %s open_count=%d\n",
89268- __FILE__, __LINE__, tty->driver->name, port->count);
89269+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
89270
89271 spin_lock_irqsave(&port->lock, flags);
89272 if (!tty_hung_up_p(filp))
89273- port->count--;
89274+ atomic_dec(&port->count);
89275 port->blocked_open++;
89276 spin_unlock_irqrestore(&port->lock, flags);
89277
89278@@ -358,7 +358,7 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
89279 }
89280
89281 IRDA_DEBUG(1, "%s(%d):block_til_ready blocking on %s open_count=%d\n",
89282- __FILE__, __LINE__, tty->driver->name, port->count);
89283+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
89284
89285 schedule();
89286 }
89287@@ -368,12 +368,12 @@ static int ircomm_tty_block_til_ready(struct ircomm_tty_cb *self,
89288
89289 spin_lock_irqsave(&port->lock, flags);
89290 if (!tty_hung_up_p(filp))
89291- port->count++;
89292+ atomic_inc(&port->count);
89293 port->blocked_open--;
89294 spin_unlock_irqrestore(&port->lock, flags);
89295
89296 IRDA_DEBUG(1, "%s(%d):block_til_ready after blocking on %s open_count=%d\n",
89297- __FILE__, __LINE__, tty->driver->name, port->count);
89298+ __FILE__, __LINE__, tty->driver->name, atomic_read(&port->count));
89299
89300 if (!retval)
89301 port->flags |= ASYNC_NORMAL_ACTIVE;
89302@@ -447,12 +447,12 @@ static int ircomm_tty_open(struct tty_struct *tty, struct file *filp)
89303
89304 /* ++ is not atomic, so this should be protected - Jean II */
89305 spin_lock_irqsave(&self->port.lock, flags);
89306- self->port.count++;
89307+ atomic_inc(&self->port.count);
89308 spin_unlock_irqrestore(&self->port.lock, flags);
89309 tty_port_tty_set(&self->port, tty);
89310
89311 IRDA_DEBUG(1, "%s(), %s%d, count = %d\n", __func__ , tty->driver->name,
89312- self->line, self->port.count);
89313+ self->line, atomic_read(&self->port.count));
89314
89315 /* Not really used by us, but lets do it anyway */
89316 self->port.low_latency = (self->port.flags & ASYNC_LOW_LATENCY) ? 1 : 0;
89317@@ -989,7 +989,7 @@ static void ircomm_tty_hangup(struct tty_struct *tty)
89318 tty_kref_put(port->tty);
89319 }
89320 port->tty = NULL;
89321- port->count = 0;
89322+ atomic_set(&port->count, 0);
89323 spin_unlock_irqrestore(&port->lock, flags);
89324
89325 wake_up_interruptible(&port->open_wait);
89326@@ -1346,7 +1346,7 @@ static void ircomm_tty_line_info(struct ircomm_tty_cb *self, struct seq_file *m)
89327 seq_putc(m, '\n');
89328
89329 seq_printf(m, "Role: %s\n", self->client ? "client" : "server");
89330- seq_printf(m, "Open count: %d\n", self->port.count);
89331+ seq_printf(m, "Open count: %d\n", atomic_read(&self->port.count));
89332 seq_printf(m, "Max data size: %d\n", self->max_data_size);
89333 seq_printf(m, "Max header size: %d\n", self->max_header_size);
89334
89335diff --git a/net/iucv/af_iucv.c b/net/iucv/af_iucv.c
89336index ae69165..c8b82d8 100644
89337--- a/net/iucv/af_iucv.c
89338+++ b/net/iucv/af_iucv.c
89339@@ -773,10 +773,10 @@ static int iucv_sock_autobind(struct sock *sk)
89340
89341 write_lock_bh(&iucv_sk_list.lock);
89342
89343- sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name));
89344+ sprintf(name, "%08x", atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
89345 while (__iucv_get_sock_by_name(name)) {
89346 sprintf(name, "%08x",
89347- atomic_inc_return(&iucv_sk_list.autobind_name));
89348+ atomic_inc_return_unchecked(&iucv_sk_list.autobind_name));
89349 }
89350
89351 write_unlock_bh(&iucv_sk_list.lock);
89352diff --git a/net/iucv/iucv.c b/net/iucv/iucv.c
89353index 4fe76ff..426a904 100644
89354--- a/net/iucv/iucv.c
89355+++ b/net/iucv/iucv.c
89356@@ -690,7 +690,7 @@ static int __cpuinit iucv_cpu_notify(struct notifier_block *self,
89357 return NOTIFY_OK;
89358 }
89359
89360-static struct notifier_block __refdata iucv_cpu_notifier = {
89361+static struct notifier_block iucv_cpu_notifier = {
89362 .notifier_call = iucv_cpu_notify,
89363 };
89364
89365diff --git a/net/key/af_key.c b/net/key/af_key.c
89366index 9da8620..97070ad 100644
89367--- a/net/key/af_key.c
89368+++ b/net/key/af_key.c
89369@@ -3047,10 +3047,10 @@ static int pfkey_send_policy_notify(struct xfrm_policy *xp, int dir, const struc
89370 static u32 get_acqseq(void)
89371 {
89372 u32 res;
89373- static atomic_t acqseq;
89374+ static atomic_unchecked_t acqseq;
89375
89376 do {
89377- res = atomic_inc_return(&acqseq);
89378+ res = atomic_inc_return_unchecked(&acqseq);
89379 } while (!res);
89380 return res;
89381 }
89382diff --git a/net/l2tp/l2tp_ppp.c b/net/l2tp/l2tp_ppp.c
89383index 8dec687..5ebee2d 100644
89384--- a/net/l2tp/l2tp_ppp.c
89385+++ b/net/l2tp/l2tp_ppp.c
89386@@ -1793,7 +1793,8 @@ static const struct proto_ops pppol2tp_ops = {
89387
89388 static const struct pppox_proto pppol2tp_proto = {
89389 .create = pppol2tp_create,
89390- .ioctl = pppol2tp_ioctl
89391+ .ioctl = pppol2tp_ioctl,
89392+ .owner = THIS_MODULE,
89393 };
89394
89395 #ifdef CONFIG_L2TP_V3
89396diff --git a/net/mac80211/cfg.c b/net/mac80211/cfg.c
89397index 4fdb306e..920086a 100644
89398--- a/net/mac80211/cfg.c
89399+++ b/net/mac80211/cfg.c
89400@@ -804,7 +804,7 @@ static int ieee80211_set_monitor_channel(struct wiphy *wiphy,
89401 ret = ieee80211_vif_use_channel(sdata, chandef,
89402 IEEE80211_CHANCTX_EXCLUSIVE);
89403 }
89404- } else if (local->open_count == local->monitors) {
89405+ } else if (local_read(&local->open_count) == local->monitors) {
89406 local->_oper_chandef = *chandef;
89407 ieee80211_hw_config(local, 0);
89408 }
89409@@ -2920,7 +2920,7 @@ static void ieee80211_mgmt_frame_register(struct wiphy *wiphy,
89410 else
89411 local->probe_req_reg--;
89412
89413- if (!local->open_count)
89414+ if (!local_read(&local->open_count))
89415 break;
89416
89417 ieee80211_queue_work(&local->hw, &local->reconfig_filter);
89418@@ -3383,8 +3383,8 @@ static int ieee80211_cfg_get_channel(struct wiphy *wiphy,
89419 if (chanctx_conf) {
89420 *chandef = chanctx_conf->def;
89421 ret = 0;
89422- } else if (local->open_count > 0 &&
89423- local->open_count == local->monitors &&
89424+ } else if (local_read(&local->open_count) > 0 &&
89425+ local_read(&local->open_count) == local->monitors &&
89426 sdata->vif.type == NL80211_IFTYPE_MONITOR) {
89427 if (local->use_chanctx)
89428 *chandef = local->monitor_chandef;
89429diff --git a/net/mac80211/ieee80211_i.h b/net/mac80211/ieee80211_i.h
89430index 9ca8e32..48e4a9b 100644
89431--- a/net/mac80211/ieee80211_i.h
89432+++ b/net/mac80211/ieee80211_i.h
89433@@ -28,6 +28,7 @@
89434 #include <net/ieee80211_radiotap.h>
89435 #include <net/cfg80211.h>
89436 #include <net/mac80211.h>
89437+#include <asm/local.h>
89438 #include "key.h"
89439 #include "sta_info.h"
89440 #include "debug.h"
89441@@ -891,7 +892,7 @@ struct ieee80211_local {
89442 /* also used to protect ampdu_ac_queue and amdpu_ac_stop_refcnt */
89443 spinlock_t queue_stop_reason_lock;
89444
89445- int open_count;
89446+ local_t open_count;
89447 int monitors, cooked_mntrs;
89448 /* number of interfaces with corresponding FIF_ flags */
89449 int fif_fcsfail, fif_plcpfail, fif_control, fif_other_bss, fif_pspoll,
89450diff --git a/net/mac80211/iface.c b/net/mac80211/iface.c
89451index 514e90f..56f22bf 100644
89452--- a/net/mac80211/iface.c
89453+++ b/net/mac80211/iface.c
89454@@ -502,7 +502,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
89455 break;
89456 }
89457
89458- if (local->open_count == 0) {
89459+ if (local_read(&local->open_count) == 0) {
89460 res = drv_start(local);
89461 if (res)
89462 goto err_del_bss;
89463@@ -545,7 +545,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
89464 break;
89465 }
89466
89467- if (local->monitors == 0 && local->open_count == 0) {
89468+ if (local->monitors == 0 && local_read(&local->open_count) == 0) {
89469 res = ieee80211_add_virtual_monitor(local);
89470 if (res)
89471 goto err_stop;
89472@@ -653,7 +653,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
89473 atomic_inc(&local->iff_promiscs);
89474
89475 if (coming_up)
89476- local->open_count++;
89477+ local_inc(&local->open_count);
89478
89479 if (hw_reconf_flags)
89480 ieee80211_hw_config(local, hw_reconf_flags);
89481@@ -691,7 +691,7 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
89482 err_del_interface:
89483 drv_remove_interface(local, sdata);
89484 err_stop:
89485- if (!local->open_count)
89486+ if (!local_read(&local->open_count))
89487 drv_stop(local);
89488 err_del_bss:
89489 sdata->bss = NULL;
89490@@ -828,7 +828,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
89491 }
89492
89493 if (going_down)
89494- local->open_count--;
89495+ local_dec(&local->open_count);
89496
89497 switch (sdata->vif.type) {
89498 case NL80211_IFTYPE_AP_VLAN:
89499@@ -895,7 +895,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
89500 }
89501 spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags);
89502
89503- if (local->open_count == 0)
89504+ if (local_read(&local->open_count) == 0)
89505 ieee80211_clear_tx_pending(local);
89506
89507 /*
89508@@ -931,7 +931,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
89509
89510 ieee80211_recalc_ps(local, -1);
89511
89512- if (local->open_count == 0) {
89513+ if (local_read(&local->open_count) == 0) {
89514 ieee80211_stop_device(local);
89515
89516 /* no reconfiguring after stop! */
89517@@ -942,7 +942,7 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
89518 ieee80211_configure_filter(local);
89519 ieee80211_hw_config(local, hw_reconf_flags);
89520
89521- if (local->monitors == local->open_count)
89522+ if (local->monitors == local_read(&local->open_count))
89523 ieee80211_add_virtual_monitor(local);
89524 }
89525
89526diff --git a/net/mac80211/main.c b/net/mac80211/main.c
89527index 8a7bfc4..4407cd0 100644
89528--- a/net/mac80211/main.c
89529+++ b/net/mac80211/main.c
89530@@ -181,7 +181,7 @@ int ieee80211_hw_config(struct ieee80211_local *local, u32 changed)
89531 changed &= ~(IEEE80211_CONF_CHANGE_CHANNEL |
89532 IEEE80211_CONF_CHANGE_POWER);
89533
89534- if (changed && local->open_count) {
89535+ if (changed && local_read(&local->open_count)) {
89536 ret = drv_config(local, changed);
89537 /*
89538 * Goal:
89539diff --git a/net/mac80211/pm.c b/net/mac80211/pm.c
89540index 7fc5d0d..07ea536 100644
89541--- a/net/mac80211/pm.c
89542+++ b/net/mac80211/pm.c
89543@@ -12,7 +12,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
89544 struct ieee80211_sub_if_data *sdata;
89545 struct sta_info *sta;
89546
89547- if (!local->open_count)
89548+ if (!local_read(&local->open_count))
89549 goto suspend;
89550
89551 ieee80211_scan_cancel(local);
89552@@ -59,7 +59,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
89553 cancel_work_sync(&local->dynamic_ps_enable_work);
89554 del_timer_sync(&local->dynamic_ps_timer);
89555
89556- local->wowlan = wowlan && local->open_count;
89557+ local->wowlan = wowlan && local_read(&local->open_count);
89558 if (local->wowlan) {
89559 int err = drv_suspend(local, wowlan);
89560 if (err < 0) {
89561@@ -113,7 +113,7 @@ int __ieee80211_suspend(struct ieee80211_hw *hw, struct cfg80211_wowlan *wowlan)
89562 WARN_ON(!list_empty(&local->chanctx_list));
89563
89564 /* stop hardware - this must stop RX */
89565- if (local->open_count)
89566+ if (local_read(&local->open_count))
89567 ieee80211_stop_device(local);
89568
89569 suspend:
89570diff --git a/net/mac80211/rate.c b/net/mac80211/rate.c
89571index a02bef3..f2f38dd 100644
89572--- a/net/mac80211/rate.c
89573+++ b/net/mac80211/rate.c
89574@@ -712,7 +712,7 @@ int ieee80211_init_rate_ctrl_alg(struct ieee80211_local *local,
89575
89576 ASSERT_RTNL();
89577
89578- if (local->open_count)
89579+ if (local_read(&local->open_count))
89580 return -EBUSY;
89581
89582 if (local->hw.flags & IEEE80211_HW_HAS_RATE_CONTROL) {
89583diff --git a/net/mac80211/rc80211_pid_debugfs.c b/net/mac80211/rc80211_pid_debugfs.c
89584index c97a065..ff61928 100644
89585--- a/net/mac80211/rc80211_pid_debugfs.c
89586+++ b/net/mac80211/rc80211_pid_debugfs.c
89587@@ -193,7 +193,7 @@ static ssize_t rate_control_pid_events_read(struct file *file, char __user *buf,
89588
89589 spin_unlock_irqrestore(&events->lock, status);
89590
89591- if (copy_to_user(buf, pb, p))
89592+ if (p > sizeof(pb) || copy_to_user(buf, pb, p))
89593 return -EFAULT;
89594
89595 return p;
89596diff --git a/net/mac80211/util.c b/net/mac80211/util.c
89597index 72e6292..e6319eb 100644
89598--- a/net/mac80211/util.c
89599+++ b/net/mac80211/util.c
89600@@ -1472,7 +1472,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
89601 }
89602 #endif
89603 /* everything else happens only if HW was up & running */
89604- if (!local->open_count)
89605+ if (!local_read(&local->open_count))
89606 goto wake_up;
89607
89608 /*
89609@@ -1696,7 +1696,7 @@ int ieee80211_reconfig(struct ieee80211_local *local)
89610 local->in_reconfig = false;
89611 barrier();
89612
89613- if (local->monitors == local->open_count && local->monitors > 0)
89614+ if (local->monitors == local_read(&local->open_count) && local->monitors > 0)
89615 ieee80211_add_virtual_monitor(local);
89616
89617 /*
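
The mac80211 hunks above convert open_count from a plain int to a local_t, route every access through local_read()/local_inc()/local_dec(), and add the matching #include <asm/local.h>. A userspace model of those accessors, approximated here with GCC/Clang __atomic builtins (an assumption of this sketch; the real <asm/local.h> versions are architecture-specific):

#include <stdio.h>

typedef struct { long v; } local_t;

static long local_read(const local_t *l)
{
	return __atomic_load_n(&l->v, __ATOMIC_RELAXED);
}

static void local_inc(local_t *l)
{
	__atomic_fetch_add(&l->v, 1, __ATOMIC_RELAXED);
}

static void local_dec(local_t *l)
{
	__atomic_fetch_sub(&l->v, 1, __ATOMIC_RELAXED);
}

static local_t open_count;

int main(void)
{
	local_inc(&open_count);		/* ieee80211_do_open() */
	local_dec(&open_count);		/* ieee80211_do_stop() */
	if (local_read(&open_count) == 0)
		puts("ieee80211_stop_device()");
	return 0;
}
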
89618diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
89619index 56d22ca..87c778f 100644
89620--- a/net/netfilter/Kconfig
89621+++ b/net/netfilter/Kconfig
89622@@ -958,6 +958,16 @@ config NETFILTER_XT_MATCH_ESP
89623
89624 To compile it as a module, choose M here. If unsure, say N.
89625
89626+config NETFILTER_XT_MATCH_GRADM
89627+ tristate '"gradm" match support'
89628+ depends on NETFILTER_XTABLES && NETFILTER_ADVANCED
89629+ depends on GRKERNSEC && !GRKERNSEC_NO_RBAC
89630+ ---help---
88631+ The gradm match allows matching on whether the grsecurity RBAC
88632+ system is enabled. It is useful when iptables rules are applied
88633+ early on bootup to prevent connections to the machine (except
88634+ from a trusted host) while the RBAC system is disabled.
89635+
89636 config NETFILTER_XT_MATCH_HASHLIMIT
89637 tristate '"hashlimit" match support'
89638 depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
89639diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
89640index a1abf87..dbcb7ee 100644
89641--- a/net/netfilter/Makefile
89642+++ b/net/netfilter/Makefile
89643@@ -112,6 +112,7 @@ obj-$(CONFIG_NETFILTER_XT_MATCH_DEVGROUP) += xt_devgroup.o
89644 obj-$(CONFIG_NETFILTER_XT_MATCH_DSCP) += xt_dscp.o
89645 obj-$(CONFIG_NETFILTER_XT_MATCH_ECN) += xt_ecn.o
89646 obj-$(CONFIG_NETFILTER_XT_MATCH_ESP) += xt_esp.o
89647+obj-$(CONFIG_NETFILTER_XT_MATCH_GRADM) += xt_gradm.o
89648 obj-$(CONFIG_NETFILTER_XT_MATCH_HASHLIMIT) += xt_hashlimit.o
89649 obj-$(CONFIG_NETFILTER_XT_MATCH_HELPER) += xt_helper.o
89650 obj-$(CONFIG_NETFILTER_XT_MATCH_HL) += xt_hl.o
89651diff --git a/net/netfilter/ipset/ip_set_core.c b/net/netfilter/ipset/ip_set_core.c
89652index f771390..145b765 100644
89653--- a/net/netfilter/ipset/ip_set_core.c
89654+++ b/net/netfilter/ipset/ip_set_core.c
89655@@ -1820,7 +1820,7 @@ done:
89656 return ret;
89657 }
89658
89659-static struct nf_sockopt_ops so_set __read_mostly = {
89660+static struct nf_sockopt_ops so_set = {
89661 .pf = PF_INET,
89662 .get_optmin = SO_IP_SET,
89663 .get_optmax = SO_IP_SET + 1,
89664diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c
89665index a083bda..da661c3 100644
89666--- a/net/netfilter/ipvs/ip_vs_conn.c
89667+++ b/net/netfilter/ipvs/ip_vs_conn.c
89668@@ -556,7 +556,7 @@ ip_vs_bind_dest(struct ip_vs_conn *cp, struct ip_vs_dest *dest)
89669 /* Increase the refcnt counter of the dest */
89670 ip_vs_dest_hold(dest);
89671
89672- conn_flags = atomic_read(&dest->conn_flags);
89673+ conn_flags = atomic_read_unchecked(&dest->conn_flags);
89674 if (cp->protocol != IPPROTO_UDP)
89675 conn_flags &= ~IP_VS_CONN_F_ONE_PACKET;
89676 flags = cp->flags;
89677@@ -900,7 +900,7 @@ ip_vs_conn_new(const struct ip_vs_conn_param *p,
89678
89679 cp->control = NULL;
89680 atomic_set(&cp->n_control, 0);
89681- atomic_set(&cp->in_pkts, 0);
89682+ atomic_set_unchecked(&cp->in_pkts, 0);
89683
89684 cp->packet_xmit = NULL;
89685 cp->app = NULL;
89686@@ -1190,7 +1190,7 @@ static inline int todrop_entry(struct ip_vs_conn *cp)
89687
89688 /* Don't drop the entry if its number of incoming packets is not
89689 located in [0, 8] */
89690- i = atomic_read(&cp->in_pkts);
89691+ i = atomic_read_unchecked(&cp->in_pkts);
89692 if (i > 8 || i < 0) return 0;
89693
89694 if (!todrop_rate[i]) return 0;
89695diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c
89696index 23b8eb5..48a8959 100644
89697--- a/net/netfilter/ipvs/ip_vs_core.c
89698+++ b/net/netfilter/ipvs/ip_vs_core.c
89699@@ -559,7 +559,7 @@ int ip_vs_leave(struct ip_vs_service *svc, struct sk_buff *skb,
89700 ret = cp->packet_xmit(skb, cp, pd->pp, iph);
89701 /* do not touch skb anymore */
89702
89703- atomic_inc(&cp->in_pkts);
89704+ atomic_inc_unchecked(&cp->in_pkts);
89705 ip_vs_conn_put(cp);
89706 return ret;
89707 }
89708@@ -1711,7 +1711,7 @@ ip_vs_in(unsigned int hooknum, struct sk_buff *skb, int af)
89709 if (cp->flags & IP_VS_CONN_F_ONE_PACKET)
89710 pkts = sysctl_sync_threshold(ipvs);
89711 else
89712- pkts = atomic_add_return(1, &cp->in_pkts);
89713+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
89714
89715 if (ipvs->sync_state & IP_VS_STATE_MASTER)
89716 ip_vs_sync_conn(net, cp, pkts);
89717diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c
89718index 9e6c2a0..28552e2 100644
89719--- a/net/netfilter/ipvs/ip_vs_ctl.c
89720+++ b/net/netfilter/ipvs/ip_vs_ctl.c
89721@@ -789,7 +789,7 @@ __ip_vs_update_dest(struct ip_vs_service *svc, struct ip_vs_dest *dest,
89722 */
89723 ip_vs_rs_hash(ipvs, dest);
89724 }
89725- atomic_set(&dest->conn_flags, conn_flags);
89726+ atomic_set_unchecked(&dest->conn_flags, conn_flags);
89727
89728 /* bind the service */
89729 if (!dest->svc) {
89730@@ -1657,7 +1657,7 @@ proc_do_sync_ports(ctl_table *table, int write,
89731 * align with netns init in ip_vs_control_net_init()
89732 */
89733
89734-static struct ctl_table vs_vars[] = {
89735+static ctl_table_no_const vs_vars[] __read_only = {
89736 {
89737 .procname = "amemthresh",
89738 .maxlen = sizeof(int),
89739@@ -2060,7 +2060,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
89740 " %-7s %-6d %-10d %-10d\n",
89741 &dest->addr.in6,
89742 ntohs(dest->port),
89743- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
89744+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
89745 atomic_read(&dest->weight),
89746 atomic_read(&dest->activeconns),
89747 atomic_read(&dest->inactconns));
89748@@ -2071,7 +2071,7 @@ static int ip_vs_info_seq_show(struct seq_file *seq, void *v)
89749 "%-7s %-6d %-10d %-10d\n",
89750 ntohl(dest->addr.ip),
89751 ntohs(dest->port),
89752- ip_vs_fwd_name(atomic_read(&dest->conn_flags)),
89753+ ip_vs_fwd_name(atomic_read_unchecked(&dest->conn_flags)),
89754 atomic_read(&dest->weight),
89755 atomic_read(&dest->activeconns),
89756 atomic_read(&dest->inactconns));
89757@@ -2549,7 +2549,7 @@ __ip_vs_get_dest_entries(struct net *net, const struct ip_vs_get_dests *get,
89758
89759 entry.addr = dest->addr.ip;
89760 entry.port = dest->port;
89761- entry.conn_flags = atomic_read(&dest->conn_flags);
89762+ entry.conn_flags = atomic_read_unchecked(&dest->conn_flags);
89763 entry.weight = atomic_read(&dest->weight);
89764 entry.u_threshold = dest->u_threshold;
89765 entry.l_threshold = dest->l_threshold;
89766@@ -3092,7 +3092,7 @@ static int ip_vs_genl_fill_dest(struct sk_buff *skb, struct ip_vs_dest *dest)
89767 if (nla_put(skb, IPVS_DEST_ATTR_ADDR, sizeof(dest->addr), &dest->addr) ||
89768 nla_put_be16(skb, IPVS_DEST_ATTR_PORT, dest->port) ||
89769 nla_put_u32(skb, IPVS_DEST_ATTR_FWD_METHOD,
89770- (atomic_read(&dest->conn_flags) &
89771+ (atomic_read_unchecked(&dest->conn_flags) &
89772 IP_VS_CONN_F_FWD_MASK)) ||
89773 nla_put_u32(skb, IPVS_DEST_ATTR_WEIGHT,
89774 atomic_read(&dest->weight)) ||
89775@@ -3682,7 +3682,7 @@ static int __net_init ip_vs_control_net_init_sysctl(struct net *net)
89776 {
89777 int idx;
89778 struct netns_ipvs *ipvs = net_ipvs(net);
89779- struct ctl_table *tbl;
89780+ ctl_table_no_const *tbl;
89781
89782 atomic_set(&ipvs->dropentry, 0);
89783 spin_lock_init(&ipvs->dropentry_lock);
89784diff --git a/net/netfilter/ipvs/ip_vs_lblc.c b/net/netfilter/ipvs/ip_vs_lblc.c
89785index 5ea26bd..c9bc65f 100644
89786--- a/net/netfilter/ipvs/ip_vs_lblc.c
89787+++ b/net/netfilter/ipvs/ip_vs_lblc.c
89788@@ -118,7 +118,7 @@ struct ip_vs_lblc_table {
89789 * IPVS LBLC sysctl table
89790 */
89791 #ifdef CONFIG_SYSCTL
89792-static ctl_table vs_vars_table[] = {
89793+static ctl_table_no_const vs_vars_table[] __read_only = {
89794 {
89795 .procname = "lblc_expiration",
89796 .data = NULL,
89797diff --git a/net/netfilter/ipvs/ip_vs_lblcr.c b/net/netfilter/ipvs/ip_vs_lblcr.c
89798index 50123c2..067c773 100644
89799--- a/net/netfilter/ipvs/ip_vs_lblcr.c
89800+++ b/net/netfilter/ipvs/ip_vs_lblcr.c
89801@@ -299,7 +299,7 @@ struct ip_vs_lblcr_table {
89802 * IPVS LBLCR sysctl table
89803 */
89804
89805-static ctl_table vs_vars_table[] = {
89806+static ctl_table_no_const vs_vars_table[] __read_only = {
89807 {
89808 .procname = "lblcr_expiration",
89809 .data = NULL,
89810diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c
89811index f6046d9..4f10cfd 100644
89812--- a/net/netfilter/ipvs/ip_vs_sync.c
89813+++ b/net/netfilter/ipvs/ip_vs_sync.c
89814@@ -596,7 +596,7 @@ static void ip_vs_sync_conn_v0(struct net *net, struct ip_vs_conn *cp,
89815 cp = cp->control;
89816 if (cp) {
89817 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
89818- pkts = atomic_add_return(1, &cp->in_pkts);
89819+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
89820 else
89821 pkts = sysctl_sync_threshold(ipvs);
89822 ip_vs_sync_conn(net, cp->control, pkts);
89823@@ -758,7 +758,7 @@ control:
89824 if (!cp)
89825 return;
89826 if (cp->flags & IP_VS_CONN_F_TEMPLATE)
89827- pkts = atomic_add_return(1, &cp->in_pkts);
89828+ pkts = atomic_add_return_unchecked(1, &cp->in_pkts);
89829 else
89830 pkts = sysctl_sync_threshold(ipvs);
89831 goto sloop;
89832@@ -882,7 +882,7 @@ static void ip_vs_proc_conn(struct net *net, struct ip_vs_conn_param *param,
89833
89834 if (opt)
89835 memcpy(&cp->in_seq, opt, sizeof(*opt));
89836- atomic_set(&cp->in_pkts, sysctl_sync_threshold(ipvs));
89837+ atomic_set_unchecked(&cp->in_pkts, sysctl_sync_threshold(ipvs));
89838 cp->state = state;
89839 cp->old_state = cp->state;
89840 /*
89841diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c
89842index b75ff64..0c51bbe 100644
89843--- a/net/netfilter/ipvs/ip_vs_xmit.c
89844+++ b/net/netfilter/ipvs/ip_vs_xmit.c
89845@@ -1102,7 +1102,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp,
89846 else
89847 rc = NF_ACCEPT;
89848 /* do not touch skb anymore */
89849- atomic_inc(&cp->in_pkts);
89850+ atomic_inc_unchecked(&cp->in_pkts);
89851 goto out;
89852 }
89853
89854@@ -1194,7 +1194,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp,
89855 else
89856 rc = NF_ACCEPT;
89857 /* do not touch skb anymore */
89858- atomic_inc(&cp->in_pkts);
89859+ atomic_inc_unchecked(&cp->in_pkts);
89860 goto out;
89861 }
89862
89863diff --git a/net/netfilter/nf_conntrack_acct.c b/net/netfilter/nf_conntrack_acct.c
89864index 2d3030a..7ba1c0a 100644
89865--- a/net/netfilter/nf_conntrack_acct.c
89866+++ b/net/netfilter/nf_conntrack_acct.c
89867@@ -60,7 +60,7 @@ static struct nf_ct_ext_type acct_extend __read_mostly = {
89868 #ifdef CONFIG_SYSCTL
89869 static int nf_conntrack_acct_init_sysctl(struct net *net)
89870 {
89871- struct ctl_table *table;
89872+ ctl_table_no_const *table;
89873
89874 table = kmemdup(acct_sysctl_table, sizeof(acct_sysctl_table),
89875 GFP_KERNEL);
89876diff --git a/net/netfilter/nf_conntrack_core.c b/net/netfilter/nf_conntrack_core.c
89877index 0283bae..5febcb0 100644
89878--- a/net/netfilter/nf_conntrack_core.c
89879+++ b/net/netfilter/nf_conntrack_core.c
89880@@ -1614,6 +1614,10 @@ void nf_conntrack_init_end(void)
89881 #define DYING_NULLS_VAL ((1<<30)+1)
89882 #define TEMPLATE_NULLS_VAL ((1<<30)+2)
89883
89884+#ifdef CONFIG_GRKERNSEC_HIDESYM
89885+static atomic_unchecked_t conntrack_cache_id = ATOMIC_INIT(0);
89886+#endif
89887+
89888 int nf_conntrack_init_net(struct net *net)
89889 {
89890 int ret;
89891@@ -1628,7 +1632,11 @@ int nf_conntrack_init_net(struct net *net)
89892 goto err_stat;
89893 }
89894
89895+#ifdef CONFIG_GRKERNSEC_HIDESYM
89896+ net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%08lx", atomic_inc_return_unchecked(&conntrack_cache_id));
89897+#else
89898 net->ct.slabname = kasprintf(GFP_KERNEL, "nf_conntrack_%p", net);
89899+#endif
89900 if (!net->ct.slabname) {
89901 ret = -ENOMEM;
89902 goto err_slabname;
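
Under GRKERNSEC_HIDESYM, the hunk above derives the per-namespace conntrack slab-cache name from a monotonically increasing counter instead of the net pointer, since the %p-based name would expose a kernel address through /proc/slabinfo. A userspace model of the naming switch (HIDESYM here is just a local macro for the sketch):

#include <stdio.h>

#define HIDESYM 1

static unsigned long cache_id;	/* atomic_inc_return_unchecked() in the patch */

static void name_cache(char *buf, size_t n, const void *net)
{
#if HIDESYM
	(void)net;
	snprintf(buf, n, "nf_conntrack_%08lx", ++cache_id);
#else
	snprintf(buf, n, "nf_conntrack_%p", net);	/* leaks an address */
#endif
}

int main(void)
{
	char name[64];
	int obj;

	name_cache(name, sizeof(name), &obj);
	puts(name);
	return 0;
}
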
89903diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
89904index 1df1761..ce8b88a 100644
89905--- a/net/netfilter/nf_conntrack_ecache.c
89906+++ b/net/netfilter/nf_conntrack_ecache.c
89907@@ -188,7 +188,7 @@ static struct nf_ct_ext_type event_extend __read_mostly = {
89908 #ifdef CONFIG_SYSCTL
89909 static int nf_conntrack_event_init_sysctl(struct net *net)
89910 {
89911- struct ctl_table *table;
89912+ ctl_table_no_const *table;
89913
89914 table = kmemdup(event_sysctl_table, sizeof(event_sysctl_table),
89915 GFP_KERNEL);
89916diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
89917index 974a2a4..52cc6ff 100644
89918--- a/net/netfilter/nf_conntrack_helper.c
89919+++ b/net/netfilter/nf_conntrack_helper.c
89920@@ -57,7 +57,7 @@ static struct ctl_table helper_sysctl_table[] = {
89921
89922 static int nf_conntrack_helper_init_sysctl(struct net *net)
89923 {
89924- struct ctl_table *table;
89925+ ctl_table_no_const *table;
89926
89927 table = kmemdup(helper_sysctl_table, sizeof(helper_sysctl_table),
89928 GFP_KERNEL);
89929diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c
89930index 0ab9636..cea3c6a 100644
89931--- a/net/netfilter/nf_conntrack_proto.c
89932+++ b/net/netfilter/nf_conntrack_proto.c
89933@@ -52,7 +52,7 @@ nf_ct_register_sysctl(struct net *net,
89934
89935 static void
89936 nf_ct_unregister_sysctl(struct ctl_table_header **header,
89937- struct ctl_table **table,
89938+ ctl_table_no_const **table,
89939 unsigned int users)
89940 {
89941 if (users > 0)
89942diff --git a/net/netfilter/nf_conntrack_proto_dccp.c b/net/netfilter/nf_conntrack_proto_dccp.c
89943index a99b6c3..3841268 100644
89944--- a/net/netfilter/nf_conntrack_proto_dccp.c
89945+++ b/net/netfilter/nf_conntrack_proto_dccp.c
89946@@ -457,7 +457,7 @@ static bool dccp_new(struct nf_conn *ct, const struct sk_buff *skb,
89947 out_invalid:
89948 if (LOG_INVALID(net, IPPROTO_DCCP))
89949 nf_log_packet(net, nf_ct_l3num(ct), 0, skb, NULL, NULL,
89950- NULL, msg);
89951+ NULL, "%s", msg);
89952 return false;
89953 }
89954
89955@@ -614,7 +614,7 @@ static int dccp_error(struct net *net, struct nf_conn *tmpl,
89956
89957 out_invalid:
89958 if (LOG_INVALID(net, IPPROTO_DCCP))
89959- nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, msg);
89960+ nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL, "%s", msg);
89961 return -NF_ACCEPT;
89962 }
89963
89964diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
89965index bd700b4..4a3dc61 100644
89966--- a/net/netfilter/nf_conntrack_standalone.c
89967+++ b/net/netfilter/nf_conntrack_standalone.c
89968@@ -471,7 +471,7 @@ static ctl_table nf_ct_netfilter_table[] = {
89969
89970 static int nf_conntrack_standalone_init_sysctl(struct net *net)
89971 {
89972- struct ctl_table *table;
89973+ ctl_table_no_const *table;
89974
89975 table = kmemdup(nf_ct_sysctl_table, sizeof(nf_ct_sysctl_table),
89976 GFP_KERNEL);
89977diff --git a/net/netfilter/nf_conntrack_timestamp.c b/net/netfilter/nf_conntrack_timestamp.c
89978index 902fb0a..87f7fdb 100644
89979--- a/net/netfilter/nf_conntrack_timestamp.c
89980+++ b/net/netfilter/nf_conntrack_timestamp.c
89981@@ -42,7 +42,7 @@ static struct nf_ct_ext_type tstamp_extend __read_mostly = {
89982 #ifdef CONFIG_SYSCTL
89983 static int nf_conntrack_tstamp_init_sysctl(struct net *net)
89984 {
89985- struct ctl_table *table;
89986+ ctl_table_no_const *table;
89987
89988 table = kmemdup(tstamp_sysctl_table, sizeof(tstamp_sysctl_table),
89989 GFP_KERNEL);
89990diff --git a/net/netfilter/nf_log.c b/net/netfilter/nf_log.c
89991index 3b18dd1..f79e0ca 100644
89992--- a/net/netfilter/nf_log.c
89993+++ b/net/netfilter/nf_log.c
89994@@ -243,7 +243,7 @@ static const struct file_operations nflog_file_ops = {
89995
89996 #ifdef CONFIG_SYSCTL
89997 static char nf_log_sysctl_fnames[NFPROTO_NUMPROTO-NFPROTO_UNSPEC][3];
89998-static struct ctl_table nf_log_sysctl_table[NFPROTO_NUMPROTO+1];
89999+static ctl_table_no_const nf_log_sysctl_table[NFPROTO_NUMPROTO+1] __read_only;
90000
90001 static int nf_log_proc_dostring(ctl_table *table, int write,
90002 void __user *buffer, size_t *lenp, loff_t *ppos)
90003@@ -274,14 +274,16 @@ static int nf_log_proc_dostring(ctl_table *table, int write,
90004 rcu_assign_pointer(net->nf.nf_loggers[tindex], logger);
90005 mutex_unlock(&nf_log_mutex);
90006 } else {
90007+ ctl_table_no_const nf_log_table = *table;
90008+
90009 mutex_lock(&nf_log_mutex);
90010 logger = rcu_dereference_protected(net->nf.nf_loggers[tindex],
90011 lockdep_is_held(&nf_log_mutex));
90012 if (!logger)
90013- table->data = "NONE";
90014+ nf_log_table.data = "NONE";
90015 else
90016- table->data = logger->name;
90017- r = proc_dostring(table, write, buffer, lenp, ppos);
90018+ nf_log_table.data = logger->name;
90019+ r = proc_dostring(&nf_log_table, write, buffer, lenp, ppos);
90020 mutex_unlock(&nf_log_mutex);
90021 }
90022
90023diff --git a/net/netfilter/nf_sockopt.c b/net/netfilter/nf_sockopt.c
90024index f042ae5..30ea486 100644
90025--- a/net/netfilter/nf_sockopt.c
90026+++ b/net/netfilter/nf_sockopt.c
90027@@ -45,7 +45,7 @@ int nf_register_sockopt(struct nf_sockopt_ops *reg)
90028 }
90029 }
90030
90031- list_add(&reg->list, &nf_sockopts);
90032+ pax_list_add((struct list_head *)&reg->list, &nf_sockopts);
90033 out:
90034 mutex_unlock(&nf_sockopt_mutex);
90035 return ret;
90036@@ -55,7 +55,7 @@ EXPORT_SYMBOL(nf_register_sockopt);
90037 void nf_unregister_sockopt(struct nf_sockopt_ops *reg)
90038 {
90039 mutex_lock(&nf_sockopt_mutex);
90040- list_del(&reg->list);
90041+ pax_list_del((struct list_head *)&reg->list);
90042 mutex_unlock(&nf_sockopt_mutex);
90043 }
90044 EXPORT_SYMBOL(nf_unregister_sockopt);
90045diff --git a/net/netfilter/nfnetlink_log.c b/net/netfilter/nfnetlink_log.c
90046index 962e979..d4ae2e9 100644
90047--- a/net/netfilter/nfnetlink_log.c
90048+++ b/net/netfilter/nfnetlink_log.c
90049@@ -82,7 +82,7 @@ static int nfnl_log_net_id __read_mostly;
90050 struct nfnl_log_net {
90051 spinlock_t instances_lock;
90052 struct hlist_head instance_table[INSTANCE_BUCKETS];
90053- atomic_t global_seq;
90054+ atomic_unchecked_t global_seq;
90055 };
90056
90057 static struct nfnl_log_net *nfnl_log_pernet(struct net *net)
90058@@ -559,7 +559,7 @@ __build_packet_message(struct nfnl_log_net *log,
90059 /* global sequence number */
90060 if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
90061 nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
90062- htonl(atomic_inc_return(&log->global_seq))))
90063+ htonl(atomic_inc_return_unchecked(&log->global_seq))))
90064 goto nla_put_failure;
90065
90066 if (data_len) {
90067diff --git a/net/netfilter/xt_gradm.c b/net/netfilter/xt_gradm.c
90068new file mode 100644
90069index 0000000..c566332
90070--- /dev/null
90071+++ b/net/netfilter/xt_gradm.c
90072@@ -0,0 +1,51 @@
90073+/*
90074+ * gradm match for netfilter
90075